Dataset columns: idx (int64, 0 to 63k); question (string, lengths 53 to 5.28k); target (string, lengths 5 to 805).
2,600
def count_lines_in_file ( self , fname = '' ) : i = 0 if fname == '' : fname = self . fullname try : with codecs . open ( fname , "r" , encoding = 'utf8' , errors = 'ignore' ) as f : for i , _ in enumerate ( f ) : pass return i + 1 except Exception as ex : print ( 'cant count lines in file in "' , fname , '":' , str ( ex ) ) return 0
counts the number of lines in a file
2,601
def count_lines_of_code ( self , fname = '' ) : if fname == '' : fname = self . fullname loc = 0 try : with open ( fname ) as f : for l in f : if l . strip ( ) != '' : loc += 1 return loc except Exception as ex : print ( 'cant count lines of code in "' , fname , '":' , str ( ex ) ) return 0
counts non-blank lines
2,602
def get_file_sample ( self , numLines = 10 ) : res = '' try : with open ( self . fullname , 'r' ) as f : for line_num , line in enumerate ( f ) : res += str ( line_num ) . zfill ( 5 ) + ' ' + line if line_num >= numLines - 1 : break return res except Exception as ex : print ( 'cant get_file_sample in "' , self . fullname , '":' , str ( ex ) ) return res
retrieve a sample of the file
2,603
def append_text ( self , txt ) : with open ( self . fullname , "a" ) as myfile : myfile . write ( txt )
appends the given text to the file
2,604
def load_file_to_string ( self ) : try : with open ( self . fullname , 'r' ) as f : txt = f . read ( ) return txt except IOError : return ''
load a file to a string
2,605
def load_file_to_list ( self ) : lst = [ ] try : with open ( self . fullname , 'r' ) as f : for line in f : lst . append ( line ) return lst except IOError : return lst
load a file to a list
2,606
def get_program_list ( ) : colList = [ 'FileName' , 'FileSize' , 'Functions' , 'Imports' ] txt = '<TABLE width=90% border=0>' txt += format_file_table_header ( colList ) fl = web . GetFileList ( aikif_folder , [ '*.py' ] , 'N' ) for f in fl : if '__init__.py' in f : txt += '<TR><TD colspan=4><HR><H3>' + get_subfolder ( f ) + '</h3></td></tr>\n' else : txt += format_file_to_html_row ( f , colList ) txt += '</TABLE>\n\n' return txt
get an HTML-formatted view of all Python programs in all subfolders of AIKIF, including imports and lists of functions and classes
2,607
def get_subfolder ( txt ) : root_folder = os . sep + 'aikif' + os . sep ndx = txt . find ( root_folder , 1 ) return txt [ ndx : ] . replace ( '__init__.py' , '' )
extracts a displayable subfolder name from a full filename
2,608
def get_functions ( fname ) : txt = '' with open ( fname , 'r' ) as f : for line in f : if line . strip ( ) [ 0 : 4 ] == 'def ' : txt += '<PRE>' + strip_text_after_string ( strip_text_after_string ( line , '#' ) [ 4 : ] , ':' ) + '</PRE>\n' if line [ 0 : 5 ] == 'class' : txt += '<PRE>' + strip_text_after_string ( strip_text_after_string ( line , '#' ) , ':' ) + '</PRE>\n' return txt + '<BR>'
get a list of functions from a Python program
2,609
def strip_text_after_string ( txt , junk ) : if junk in txt : return txt [ : txt . find ( junk ) ] else : return txt
used to strip trailing comments from the end of function definitions
2,610
def get_imports ( fname ) : txt = '' with open ( fname , 'r' ) as f : for line in f : if line [ 0 : 6 ] == 'import' : txt += '<PRE>' + strip_text_after_string ( line [ 7 : ] , ' as ' ) + '</PRE>\n' return txt + '<BR>'
get a list of imports from a Python program
2,611
def main ( arg1 = 55 , arg2 = 'test' , arg3 = None ) : print ( 'Starting dummy AI algorithm with :' , arg1 , arg2 , arg3 ) if arg3 is None : arg3 = [ 5 , 6 , 7 , 5 , 4 , ] result = arg1 + arg3 [ 0 ] * 7566.545 print ( 'Done - returning ' , result ) return result
This is a sample program to show how a learning agent can be logged using AIKIF. The idea is that this main function is your algorithm, which will run until it finds a successful result. The result is returned and the time taken is logged. There can optionally be additional functions to call to allow for easy logging access
2,612
def get ( self , key ) : res = self . connection . get ( key ) print ( res ) return res
get the value of a key from redis
2,613
def creator ( _ , config ) : packer_script = render ( config . script , model = config . model , env = config . env , variables = config . variables , item = config . item ) filename = "packer.dry.run.see.comment" if not config . dry_run : filename = write_temporary_file ( packer_script , 'packer-' , '.json' ) packer_script = '' template_file = os . path . join ( os . path . dirname ( __file__ ) , 'templates/packer-image.sh.j2' ) with open ( template_file ) as handle : template = handle . read ( ) config . script = render ( template , debug = config . debug , packer_content = packer_script , packer_filename = filename ) return Packer ( config )
Creator function for creating an instance of a Packer image script .
2,614
def process_jpeg_bytes ( bytes_in , quality = DEFAULT_JPEG_QUALITY ) : bytes_out_p = ffi . new ( "char**" ) bytes_out_p_gc = ffi . gc ( bytes_out_p , lib . guetzli_free_bytes ) length = lib . guetzli_process_jpeg_bytes ( bytes_in , len ( bytes_in ) , bytes_out_p_gc , quality ) if length == 0 : raise ValueError ( "Invalid JPEG: Guetzli was not able to decode the image" ) bytes_out = ffi . cast ( "char*" , bytes_out_p_gc [ 0 ] ) return ffi . unpack ( bytes_out , length )
Generates an optimized JPEG from JPEG-encoded bytes.
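A minimal usage sketch for process_jpeg_bytes as defined above; the file names are placeholders and the module is assumed to expose the function directly.

# Hypothetical usage; 'input.jpg' and 'optimized.jpg' are placeholder paths.
with open('input.jpg', 'rb') as f:
    original = f.read()
optimized = process_jpeg_bytes(original, quality=90)  # raises ValueError if the input cannot be decoded
with open('optimized.jpg', 'wb') as f:
    f.write(optimized)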
2,615
def process_rgb_bytes ( bytes_in , width , height , quality = DEFAULT_JPEG_QUALITY ) : if len ( bytes_in ) != width * height * 3 : raise ValueError ( "bytes_in length is not coherent with given width and height" ) bytes_out_p = ffi . new ( "char**" ) bytes_out_p_gc = ffi . gc ( bytes_out_p , lib . guetzli_free_bytes ) length = lib . guetzli_process_rgb_bytes ( bytes_in , width , height , bytes_out_p_gc , quality ) bytes_out = ffi . cast ( "char*" , bytes_out_p_gc [ 0 ] ) return ffi . unpack ( bytes_out , length )
Generates an optimized JPEG from RGB bytes .
2,616
def singleton ( the_class ) : class_instances = { } def get_instance ( * args , ** kwargs ) : key = ( the_class , args , str ( kwargs ) ) if key not in class_instances : class_instances [ key ] = the_class ( * args , ** kwargs ) return class_instances [ key ] return get_instance
Decorator for a class to make a singleton out of it .
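A short usage sketch for the singleton decorator above; the Database class and its argument are made-up examples.

# Hypothetical class decorated with the singleton decorator defined above.
@singleton
class Database:
    def __init__(self, dsn):
        self.dsn = dsn

a = Database('sqlite://demo')
b = Database('sqlite://demo')
assert a is b          # same constructor arguments -> same cached instance
c = Database('sqlite://other')
assert c is not a      # different arguments -> a separate instance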
2,617
def build_board_2048 ( ) : grd = Grid ( 4 , 4 , [ 2 , 4 ] ) grd . new_tile ( ) grd . new_tile ( ) print ( grd ) return grd
builds a 2048 starting board
Printing Grid
0 0 0 2
0 0 4 0
0 0 0 0
0 0 0 0
2,618
def build_board_checkers ( ) : grd = Grid ( 8 , 8 , [ "B" , "W" ] ) for c in range ( 4 ) : grd . set_tile ( 0 , ( c * 2 ) - 1 , "B" ) grd . set_tile ( 1 , ( c * 2 ) - 0 , "B" ) grd . set_tile ( 6 , ( c * 2 ) + 1 , "W" ) grd . set_tile ( 7 , ( c * 2 ) - 0 , "W" ) print ( grd ) return grd
builds a checkers starting board
Printing Grid
0 B 0 B 0 B 0 B
B 0 B 0 B 0 B 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0
0 W 0 W 0 W 0 W
W 0 W 0 W 0 W 0
2,619
def TEST ( ) : grd = Grid ( 4 , 4 , [ 2 , 4 ] ) grd . new_tile ( ) grd . new_tile ( ) print ( grd ) print ( "There are " , grd . count_blank_positions ( ) , " blanks in grid 1\n" ) grd2 = Grid ( 5 , 5 , [ 'A' , 'B' ] ) grd2 . new_tile ( 26 ) print ( grd2 ) build_board_checkers ( ) print ( "There are " , grd2 . count_blank_positions ( ) , " blanks in grid 2" )
tests for this module
2,620
def url ( self , name ) : key = blobstore . create_gs_key ( '/gs' + name ) return images . get_serving_url ( key )
Ask the blobstore API for a URL to directly serve the file
2,621
def process ( self , stage ) : self . logger . info ( "Processing pipeline stage '%s'" , self . title ) output = [ ] for entry in stage : key = list ( entry . keys ( ) ) [ 0 ] if key == "env" : self . pipeline . data . env_list [ 1 ] . update ( entry [ key ] ) self . logger . debug ( "Updating environment at level 1 with %s" , self . pipeline . data . env_list [ 1 ] ) continue tasks = Tasks ( self . pipeline , re . match ( r"tasks\(parallel\)" , key ) is not None ) result = tasks . process ( entry [ key ] ) for line in result [ 'output' ] : output . append ( line ) if not result [ 'success' ] : self . event . failed ( ) return { 'success' : False , 'output' : output } self . event . succeeded ( ) return { 'success' : True , 'output' : output }
Processing one stage .
2,622
def trading_fees ( self ) -> TradingFees : return self . _fetch ( 'trading fees' , self . market . code ) ( self . _trading_fees ) ( )
Fetch trading fees .
2,623
def fetch_ticker ( self ) -> Ticker : return self . _fetch ( 'ticker' , self . market . code ) ( self . _ticker ) ( )
Fetch the market ticker .
2,624
def fetch_order_book ( self ) -> OrderBook : return self . _fetch ( 'order book' , self . market . code ) ( self . _order_book ) ( )
Fetch the order book .
2,625
def fetch_trades_since ( self , since : int ) -> List [ Trade ] : return self . _fetch_since ( 'trades' , self . market . code ) ( self . _trades_since ) ( since )
Fetch trades since given timestamp .
2,626
def fetch_deposits ( self , limit : int ) -> List [ Deposit ] : return self . _transactions ( self . _deposits , 'deposits' , limit )
Fetch latest deposits; a limit must be provided.
2,627
def fetch_deposits_since ( self , since : int ) -> List [ Deposit ] : return self . _transactions_since ( self . _deposits_since , 'deposits' , since )
Fetch all deposits since the given timestamp .
2,628
def fetch_withdrawals ( self , limit : int ) -> List [ Withdrawal ] : return self . _transactions ( self . _withdrawals , 'withdrawals' , limit )
Fetch latest withdrawals; a limit must be provided.
2,629
def fetch_withdrawals_since ( self , since : int ) -> List [ Withdrawal ] : return self . _transactions_since ( self . _withdrawals_since , 'withdrawals' , since )
Fetch all withdrawals since the given timestamp .
2,630
def request_withdrawal ( self , amount : Number , address : str , subtract_fee : bool = False , ** params ) -> Withdrawal : self . log . debug ( f'Requesting {self.currency} withdrawal from {self.name} to {address}' ) amount = self . _parse_money ( amount ) if self . dry_run : withdrawal = Withdrawal . create_default ( TxType . WITHDRAWAL , self . currency , amount , address ) self . log . warning ( f'DRY RUN: Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal try : withdrawal = self . _withdraw ( amount , address , subtract_fee , ** params ) except Exception as e : msg = f'Failed requesting withdrawal on {self.name}!: amount={amount}, address={address}' raise self . exception ( InvalidWithdrawal , msg , e ) from e self . log . info ( f'Withdrawal requested on {self.name}: {withdrawal}' ) return withdrawal
Request a withdrawal .
2,631
def fetch_order ( self , order_id : str ) -> Order : return self . _fetch ( f'order id={order_id}' , exc = OrderNotFound ) ( self . _order ) ( order_id )
Fetch an order by ID .
2,632
def fetch_open_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _open_orders , limit )
Fetch latest open orders; a limit must be provided.
2,633
def fetch_closed_orders ( self , limit : int ) -> List [ Order ] : return self . _fetch_orders_limit ( self . _closed_orders , limit )
Fetch latest closed orders; a limit must be provided.
2,634
def fetch_closed_orders_since ( self , since : int ) -> List [ Order ] : return self . _fetch_orders_since ( self . _closed_orders_since , since )
Fetch closed orders since the given timestamp .
2,635
def cancel_order ( self , order_id : str ) -> str : self . log . debug ( f'Canceling order id={order_id} on {self.name}' ) if self . dry_run : self . log . warning ( f'DRY RUN: Order cancelled on {self.name}: id={order_id}' ) return order_id try : self . _cancel_order ( order_id ) except Exception as e : raise self . exception ( OrderNotFound , f'Failed to cancel order: id={order_id}' , e ) from e self . log . info ( f'Order cancelled on {self.name}: id={order_id}' ) return order_id
Cancel an order by ID .
2,636
def cancel_orders ( self , order_ids : List [ str ] ) -> List [ str ] : orders_to_cancel = order_ids self . log . debug ( f'Canceling orders on {self.name}: ids={orders_to_cancel}' ) cancelled_orders = [ ] if self . dry_run : self . log . warning ( f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}' ) return orders_to_cancel try : if self . has_batch_cancel : self . _cancel_orders ( orders_to_cancel ) cancelled_orders . append ( orders_to_cancel ) orders_to_cancel . clear ( ) else : for i , order_id in enumerate ( orders_to_cancel ) : self . _cancel_order ( order_id ) cancelled_orders . append ( order_id ) orders_to_cancel . pop ( i ) except Exception as e : msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}' raise self . exception ( OrderNotFound , msg , e ) from e self . log . info ( f'Orders cancelled on {self.name}: ids={cancelled_orders}' ) return cancelled_orders
Cancel multiple orders by a list of IDs .
2,637
def cancel_all_orders ( self ) -> List [ str ] : order_ids = [ o . id for o in self . fetch_all_open_orders ( ) ] return self . cancel_orders ( order_ids )
Cancel all open orders .
2,638
def min_order_amount ( self ) -> Money : return self . _fetch ( 'minimum order amount' , self . market . code ) ( self . _min_order_amount ) ( )
Minimum amount to place an order .
2,639
def place_market_order ( self , side : Side , amount : Number ) -> Order : return self . place_order ( side , OrderType . MARKET , amount )
Place a market order .
2,640
def main ( ) : parser = argparse . ArgumentParser ( description = main . __doc__ , add_help = True ) parser . add_argument ( '-M' , '--master_key' , dest = 'master_key' , help = 'Path to the master key ' + 'used for the encryption. Data is transferred without encryption if this' + 'is not provided.' , type = str , required = False , default = None ) parser . add_argument ( '-B' , '--bucket' , dest = 'bucket' , help = 'S3 bucket.' , type = str , required = True ) parser . add_argument ( '-R' , '--remote_dir' , dest = 'remote_dir' , help = 'Pseudo directory within ' + 'the bucket to store the file(s). NOTE: Folder structure below ' + 'REMOTE_DIR will be retained.' , type = str , required = False , default = '' ) parser . add_argument ( 'data' , help = 'File(s) or folder(s) to transfer to S3.' , type = str , nargs = '+' ) params = parser . parse_args ( ) if params . master_key and not os . path . exists ( params . master_key ) : raise InputParameterError ( 'The master key was not found at ' + params . master_key ) if not os . path . exists ( os . path . expanduser ( '~/.boto' ) ) : raise RuntimeError ( '~/.boto not found' ) if params . remote_dir . startswith ( '/' ) : raise InputParameterError ( 'The remote dir cannot start with a \'/\'' ) for datum in params . data : datum = os . path . abspath ( datum ) if not os . path . exists ( datum ) : print ( 'ERROR: %s could not be found.' % datum , file = sys . stderr ) continue write_to_s3 ( datum , params . master_key , params . bucket , params . remote_dir ) return None
This is the main function for the script. The script will accept a file or a directory and then encrypt it with a provided key before pushing it to a specified S3 bucket.
2,641
def _get_bucket_endpoint ( self ) : conn = S3Connection ( ) bucket = conn . lookup ( self . bucket_name ) if not bucket : raise InputParameterError ( 'The provided bucket %s doesn\'t exist' % self . bucket_name ) endpoint = str ( bucket . get_location ( ) ) return endpoint
Queries S3 to identify the region hosting the provided bucket .
2,642
def align_rna ( job , fastqs , univ_options , star_options ) : star = job . wrapJobFn ( run_star , fastqs , univ_options , star_options , cores = star_options [ 'n' ] , memory = PromisedRequirement ( lambda x : int ( 1.85 * x . size ) , star_options [ 'index' ] ) , disk = PromisedRequirement ( star_disk , fastqs , star_options [ 'index' ] ) ) s_and_i = job . wrapJobFn ( sort_and_index_star , star . rv ( ) , univ_options , star_options ) . encapsulate ( ) job . addChild ( star ) star . addChild ( s_and_i ) return s_and_i . rv ( )
A wrapper for the entire rna alignment subgraph .
2,643
def run_star ( job , fastqs , univ_options , star_options ) : assert star_options [ 'type' ] in ( 'star' , 'starlong' ) work_dir = os . getcwd ( ) input_files = { 'rna_cutadapt_1.fastq' : fastqs [ 0 ] , 'rna_cutadapt_2.fastq' : fastqs [ 1 ] , 'star_index.tar.gz' : star_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) gz = '.gz' if is_gzipfile ( input_files [ 'rna_cutadapt_1.fastq' ] ) else '' if gz : for read_file in 'rna_cutadapt_1.fastq' , 'rna_cutadapt_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz input_files [ 'star_index' ] = untargz ( input_files [ 'star_index.tar.gz' ] , work_dir ) star_fusion_idx = os . path . join ( input_files [ 'star_index' ] , 'ref_genome.fa.star.idx' ) if os . path . exists ( star_fusion_idx ) : input_files [ 'star_index' ] = star_fusion_idx input_files = { key : docker_path ( path , work_dir = work_dir ) for key , path in input_files . items ( ) } parameters = [ '--runThreadN' , str ( star_options [ 'n' ] ) , '--genomeDir' , input_files [ 'star_index' ] , '--twopassMode' , 'Basic' , '--outReadsUnmapped' , 'None' , '--chimSegmentMin' , '12' , '--chimJunctionOverhangMin' , '12' , '--alignSJDBoverhangMin' , '10' , '--alignMatesGapMax' , '200000' , '--alignIntronMax' , '200000' , '--chimSegmentReadGapMax' , 'parameter' , '3' , '--alignSJstitchMismatchNmax' , '5' , '-1' , '5' , '5' , '--outFileNamePrefix' , 'rna' , '--readFilesIn' , input_files [ 'rna_cutadapt_1.fastq' + gz ] , input_files [ 'rna_cutadapt_2.fastq' + gz ] , '--outSAMattributes' , 'NH' , 'HI' , 'AS' , 'NM' , 'MD' , '--outSAMtype' , 'BAM' , 'Unsorted' , '--quantMode' , 'TranscriptomeSAM' ] if gz : parameters . extend ( [ '--readFilesCommand' , 'zcat' ] ) if star_options [ 'type' ] == 'star' : docker_call ( tool = 'star' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = star_options [ 'version' ] ) else : docker_call ( tool = 'starlong' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = star_options [ 'version' ] ) output_files = defaultdict ( ) for output_file in [ 'rnaAligned.toTranscriptome.out.bam' , 'rnaAligned.out.bam' , 'rnaChimeric.out.junction' ] : output_files [ output_file ] = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , output_file ] ) ) export_results ( job , output_files [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rna_transcriptome.bam' , univ_options , subfolder = 'alignments' ) export_results ( job , output_files [ 'rnaChimeric.out.junction' ] , 'rna_chimeric.junction' , univ_options , subfolder = 'mutations/fusions' ) job . fileStore . logToMaster ( 'Ran STAR on %s successfully' % univ_options [ 'patient' ] ) return output_files
Align a pair of fastqs with STAR .
2,644
def sort_and_index_star ( job , star_bams , univ_options , star_options ) : star_options [ 'samtools' ] [ 'n' ] = star_options [ 'n' ] sort = job . wrapJobFn ( sort_bamfile , star_bams [ 'rnaAligned.out.bam' ] , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , disk = PromisedRequirement ( sort_disk , star_bams [ 'rnaAligned.out.bam' ] ) ) index = job . wrapJobFn ( index_bamfile , sort . rv ( ) , 'rna' , univ_options , samtools_options = star_options [ 'samtools' ] , sample_info = 'genome_sorted' , disk = PromisedRequirement ( index_disk , sort . rv ( ) ) ) job . addChild ( sort ) sort . addChild ( index ) return { 'rna_genome' : index . rv ( ) , 'rna_transcriptome.bam' : star_bams [ 'rnaAligned.toTranscriptome.out.bam' ] , 'rnaChimeric.out.junction' : star_bams [ 'rnaChimeric.out.junction' ] }
A wrapper for sorting and indexing the genome-aligned STAR bam generated by run_star. It is required since run_star returns a dict of two bams
2,645
def reset ( self ) : self . expr = [ ] self . matcher = None self . last_matcher = None self . description = None
Resets the state of the expression
2,646
def clone ( self ) : from copy import copy clone = copy ( self ) clone . expr = copy ( self . expr ) clone . factory = False return clone
Clone this expression
2,647
def resolve ( self , value = None ) : if self . matcher : self . _init_matcher ( ) matcher = self . evaluate ( ) try : value = self . _transform ( value ) self . _assertion ( matcher , value ) except AssertionError as ex : raise ex finally : if self . deferred : self . reset ( )
Resolve the current expression against the supplied value
2,648
def _assertion ( self , matcher , value ) : if isinstance ( value , Expectation ) : assertion = value . _assertion . __get__ ( self , Expectation ) assertion ( matcher , value . value ) else : hc . assert_that ( value , matcher )
Perform the actual assertion for the given matcher and value . Override this method to apply a special configuration when performing the assertion . If the assertion fails it should raise an AssertionError .
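For reference, a minimal PyHamcrest sketch of the assert_that(value, matcher) form that _assertion delegates to (hc above is assumed to be the hamcrest module); it stands alone and does not use the Expectation class.

from hamcrest import assert_that, equal_to, has_length

assert_that(2 + 2, equal_to(4))     # passes silently
assert_that('abc', has_length(3))   # passes silently
# assert_that(2 + 2, equal_to(5))   # would raise AssertionError with a mismatch description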
2,649
def _transform ( self , value ) : if self . transform : try : value = self . transform ( value ) except : import sys exc_type , exc_obj , exc_tb = sys . exc_info ( ) raise AssertionError ( 'Error applying transformation <{0}>: {2}: {3}' . format ( self . transform . __name__ , value , exc_type . __name__ , exc_obj ) ) return value
Applies any defined transformation to the given value
2,650
def evaluate ( self ) : ops = [ ] rpn = [ ] for token in self . expr : if isinstance ( token , int ) : while len ( ops ) and token <= ops [ - 1 ] : rpn . append ( ops . pop ( ) ) ops . append ( token ) else : rpn . append ( token ) while len ( ops ) : rpn . append ( ops . pop ( ) ) stack = [ ] for token in rpn : if isinstance ( token , int ) : if token == OPERATOR . NOT : stack [ - 1 ] = IsNot ( stack [ - 1 ] ) continue if len ( stack ) < 2 : raise RuntimeError ( 'Unable to build a valid expression. Not enough operands available.' ) if token == OPERATOR . OR : matcher = hc . any_of ( * stack [ - 2 : ] ) else : matcher = hc . all_of ( * stack [ - 2 : ] ) stack [ - 2 : ] = [ matcher ] else : stack . append ( token ) if len ( stack ) != 1 : raise RuntimeError ( 'Unable to build a valid expression. The RPN stack should have just one item.' ) matcher = stack . pop ( ) if self . description : matcher = hc . described_as ( self . description , matcher ) return matcher
Converts the current expression into a single matcher, applying coordination operators to operands according to their binding rules
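A standalone sketch of the same idea under simplified assumptions: an infix boolean expression is converted to reverse Polish notation by operator precedence and then evaluated with a stack. It is illustrative only and does not use the matcher classes above.

# Simplified shunting-yard conversion plus RPN evaluation for boolean operands.
PRECEDENCE = {'not': 3, 'and': 2, 'or': 1}

def to_rpn(tokens):
    ops, rpn = [], []
    for tok in tokens:
        if tok in PRECEDENCE:
            # pop operators of equal or higher precedence first
            while ops and PRECEDENCE[tok] <= PRECEDENCE[ops[-1]]:
                rpn.append(ops.pop())
            ops.append(tok)
        else:
            rpn.append(tok)
    while ops:
        rpn.append(ops.pop())
    return rpn

def eval_rpn(rpn):
    stack = []
    for tok in rpn:
        if tok == 'not':
            stack[-1] = not stack[-1]
        elif tok in ('and', 'or'):
            b, a = stack.pop(), stack.pop()
            stack.append((a and b) if tok == 'and' else (a or b))
        else:
            stack.append(tok)
    return stack.pop()

# 'and' binds tighter than 'or': True or (False and False) -> True
print(eval_rpn(to_rpn([True, 'or', False, 'and', False])))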
2,651
def _find_matcher ( self , alias ) : matcher = lookup ( alias ) if not matcher : msg = 'Matcher "%s" not found' % alias similar = suggest ( alias , max = 3 , cutoff = 0.5 ) if len ( similar ) > 1 : last = similar . pop ( ) msg += '. Perhaps you meant to use %s or %s?' % ( ', ' . join ( similar ) , last ) elif len ( similar ) > 0 : msg += '. Perhaps you meant to use %s?' % similar . pop ( ) raise KeyError ( msg ) return matcher
Finds a matcher based on the given alias or raises an error if no matcher could be found .
2,652
def _init_matcher ( self , * args , ** kwargs ) : fn = lambda x : x . evaluate ( ) if isinstance ( x , Expectation ) else x args = [ fn ( x ) for x in args ] kwargs = dict ( ( k , fn ( v ) ) for k , v in kwargs . items ( ) ) matcher = self . matcher ( * args , ** kwargs ) self . expr . append ( matcher ) self . matcher = None return matcher
Executes the current matcher appending it to the expression
2,653
def described_as ( self , description , * args ) : if len ( args ) : description = description . format ( * args ) self . description = description return self
Specify a custom message for the matcher
2,654
def make_dbsource ( ** kwargs ) : if 'spatialite' in connection . settings_dict . get ( 'ENGINE' ) : kwargs . setdefault ( 'file' , connection . settings_dict [ 'NAME' ] ) return mapnik . SQLite ( wkb_format = 'spatialite' , ** kwargs ) names = ( ( 'dbname' , 'NAME' ) , ( 'user' , 'USER' ) , ( 'password' , 'PASSWORD' ) , ( 'host' , 'HOST' ) , ( 'port' , 'PORT' ) ) for mopt , dopt in names : val = connection . settings_dict . get ( dopt ) if val : kwargs . setdefault ( mopt , val ) return mapnik . PostGIS ( ** kwargs )
Returns a mapnik PostGIS or SQLite Datasource .
2,655
def layer ( self , queryset , stylename = None ) : cls = RasterLayer if hasattr ( queryset , 'image' ) else VectorLayer layer = cls ( queryset , style = stylename ) try : style = self . map . find_style ( layer . stylename ) except KeyError : self . map . append_style ( layer . stylename , layer . style ( ) ) layer . styles . append ( layer . stylename ) self . map . layers . append ( layer . _layer ) return layer
Returns a map Layer .
2,656
def zoom_bbox ( self , bbox ) : try : bbox . transform ( self . map . srs ) except gdal . GDALException : pass else : self . map . zoom_to_box ( mapnik . Box2d ( * bbox . extent ) )
Zoom map to geometry extent .
2,657
def style ( self ) : style = mapnik . Style ( ) rule = mapnik . Rule ( ) self . _symbolizer = self . symbolizer ( ) rule . symbols . append ( self . _symbolizer ) style . rules . append ( rule ) return style
Returns a default Style .
2,658
def wrap_fusion ( job , fastqs , star_output , univ_options , star_fusion_options , fusion_inspector_options ) : if not star_fusion_options [ 'run' ] : job . fileStore . logToMaster ( 'Skipping STAR-Fusion on %s' % univ_options [ 'patient' ] ) return fusion = job . wrapJobFn ( run_fusion , fastqs , star_output [ 'rnaChimeric.out.junction' ] , univ_options , star_fusion_options , fusion_inspector_options , cores = star_fusion_options [ 'n' ] , memory = PromisedRequirement ( lambda x : int ( 1.85 * x . size ) , star_fusion_options [ 'index' ] ) , disk = PromisedRequirement ( fusion_disk , fastqs , star_fusion_options [ 'index' ] ) ) . encapsulate ( ) job . addChild ( fusion ) return fusion . rv ( )
A wrapper for run_fusion using the results from cutadapt and star as input .
2,659
def parse_star_fusion ( infile ) : reader = csv . reader ( infile , delimiter = '\t' ) header = reader . next ( ) header = { key : index for index , key in enumerate ( header ) } features = [ 'LeftGene' , 'LeftLocalBreakpoint' , 'LeftBreakpoint' , 'RightGene' , 'RightLocalBreakpoint' , 'RightBreakpoint' , 'LargeAnchorSupport' , 'JunctionReadCount' , 'SpanningFragCount' ] for line in reader : yield Expando ( dict ( ( feature , line [ header [ feature ] ] ) for feature in features ) )
Parses STAR-Fusion format and returns an Expando object with basic features
2,660
def get_transcripts ( transcript_file ) : with open ( transcript_file , 'r' ) as fa : transcripts = { } regex_s = r"(?P<ID>TRINITY.*)\s(?P<fusion>.*--.*):(?P<left_start>\d+)-(?P<right_start>\d+)" regex = re . compile ( regex_s ) while True : try : info = fa . next ( ) seq = fa . next ( ) assert info . startswith ( '>' ) m = regex . search ( info ) if m : transcripts [ m . group ( 'ID' ) ] = seq . strip ( ) except StopIteration : break except AssertionError : print ( "WARNING: Malformed fusion transcript file" ) return transcripts
Parses FusionInspector transcript file and returns dictionary of sequences
2,661
def split_fusion_transcript ( annotation_path , transcripts ) : annotation = collections . defaultdict ( dict ) forward = 'ACGTN' reverse = 'TGCAN' trans = string . maketrans ( forward , reverse ) five_pr_splits = collections . defaultdict ( dict ) three_pr_splits = collections . defaultdict ( dict ) regex = re . compile ( r'ID=(?P<ID>.*);Name=(?P<Name>.*);Target=(?P<Target>.*)\s(?P<start>\d+)\s(?P<stop>\d+)' ) with open ( annotation_path , 'r' ) as gff : for line in gff : print ( line ) if line . startswith ( '#' ) : _ , eyd , fusion = line . strip ( ) . split ( ) fusion , start_stop = fusion . split ( ':' ) left_break , right_break = start_stop . split ( '-' ) annotation [ fusion ] [ eyd ] = { } annotation [ fusion ] [ eyd ] [ 'left_break' ] = left_break annotation [ fusion ] [ eyd ] [ 'right_break' ] = right_break else : line = line . strip ( ) . split ( '\t' ) fusion = line [ 0 ] strand = line [ 6 ] block_start = line [ 3 ] block_stop = line [ 4 ] attr = line [ 8 ] m = regex . search ( attr ) if m : transcript_id = m . group ( 'Name' ) rb = any ( [ block_start == annotation [ fusion ] [ transcript_id ] [ 'right_break' ] , block_stop == annotation [ fusion ] [ transcript_id ] [ 'right_break' ] ] ) lb = any ( [ block_start == annotation [ fusion ] [ transcript_id ] [ 'left_break' ] , block_stop == annotation [ fusion ] [ transcript_id ] [ 'left_break' ] ] ) if strand == '-' and rb : transcript_split = int ( m . group ( 'stop' ) ) + 1 five_seq = transcripts [ transcript_id ] [ transcript_split : ] five_pr_splits [ fusion ] [ transcript_id ] = five_seq . translate ( trans ) [ : : - 1 ] three_seq = transcripts [ transcript_id ] [ : transcript_split ] three_pr_splits [ fusion ] [ transcript_id ] = three_seq . translate ( trans ) [ : : - 1 ] elif strand == '+' and lb : transcript_split = int ( m . group ( 'stop' ) ) s1 = transcripts [ transcript_id ] [ : transcript_split ] five_pr_splits [ fusion ] [ transcript_id ] = s1 s2 = transcripts [ transcript_id ] [ transcript_split : ] three_pr_splits [ fusion ] [ transcript_id ] = s2 return five_pr_splits , three_pr_splits
Finds the breakpoint in the fusion transcript and splits the 5' donor from the 3' acceptor
2,662
def get_gene_ids ( fusion_bed ) : with open ( fusion_bed , 'r' ) as f : gene_to_id = { } regex = re . compile ( r'(?P<gene>ENSG\d*)' ) for line in f : line = line . split ( '\t' ) transcript , gene_bit , name = line [ 3 ] . split ( ';' ) m = regex . search ( gene_bit ) if m : gene_to_id [ name ] = m . group ( 'gene' ) return gene_to_id
Parses FusionInspector bed file to ascertain the ENSEMBL gene ids
2,663
def reformat_star_fusion_output ( job , fusion_annot , fusion_file , transcript_file , transcript_gff_file , univ_options ) : input_files = { 'results.tsv' : fusion_file , 'fusion.bed' : fusion_annot } if transcript_file and transcript_gff_file : input_files [ 'transcripts.fa' ] = transcript_file input_files [ 'transcripts.gff' ] = transcript_gff_file work_dir = job . fileStore . getLocalTempDir ( ) input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) hugo_to_gene_ids = get_gene_ids ( input_files [ 'fusion.bed' ] ) if transcript_file and transcript_gff_file : transcripts = get_transcripts ( input_files [ 'transcripts.fa' ] ) five_pr_splits , three_pr_splits = split_fusion_transcript ( input_files [ 'transcripts.gff' ] , transcripts ) else : five_pr_splits = collections . defaultdict ( dict ) three_pr_splits = collections . defaultdict ( dict ) header = [ '# chr1' , 'start1' , 'end1' , 'chr2' , 'start2' , 'end2' , 'name' , 'score' , 'strand1' , 'strand2' , 'junctionSeq1' , 'junctionSeq2' , 'hugo1' , 'hugo2' ] output_path = os . path . join ( work_dir , 'fusion_results.bedpe' ) with open ( input_files [ 'results.tsv' ] , 'r' ) as in_f , open ( output_path , 'w' ) as out_f : writer = csv . writer ( out_f , delimiter = '\t' ) writer . writerow ( header ) for record in parse_star_fusion ( in_f ) : left_chr , left_break , left_strand = record . LeftBreakpoint . split ( ':' ) right_chr , right_break , right_strand = record . RightBreakpoint . split ( ':' ) fusion = '' . join ( [ record . LeftGene , '--' , record . RightGene ] ) name = '-' . join ( [ hugo_to_gene_ids [ record . LeftGene ] , hugo_to_gene_ids [ record . RightGene ] ] ) score = 'Junction:%s-Spanning:%s' % ( record . JunctionReadCount , record . SpanningFragCount ) if len ( five_pr_splits [ fusion ] . keys ( ) ) == 0 : five_pr_splits [ fusion ] [ 'N/A' ] = '.' if len ( three_pr_splits [ fusion ] . keys ( ) ) == 0 : three_pr_splits [ fusion ] [ 'N/A' ] = '.' for transcript_id in five_pr_splits [ fusion ] . keys ( ) : five_prime_seq = five_pr_splits [ fusion ] [ transcript_id ] three_prime_seq = three_pr_splits [ fusion ] [ transcript_id ] writer . writerow ( [ left_chr , '.' , left_break , right_chr , right_break , '.' , name , score , left_strand , right_strand , five_prime_seq , three_prime_seq , record . LeftGene , record . RightGene ] ) bedpe_id = job . fileStore . writeGlobalFile ( output_path ) export_results ( job , bedpe_id , 'fusion.bedpe' , univ_options , subfolder = 'mutations/fusions' ) job . fileStore . logToMaster ( 'Reformatted STAR-Fusion output for %s successfully' % univ_options [ 'patient' ] ) return bedpe_id
Writes STAR-Fusion results in Transgene BEDPE format
2,664
def _ensure_patient_group_is_ok ( patient_object , patient_name = None ) : from protect . addons . common import TCGAToGTEx assert isinstance ( patient_object , ( set , dict ) ) , '%s,%s' % ( patient_object , patient_name ) test_set = set ( patient_object ) if 'tumor_type' not in patient_object : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a Tumor type.' ) elif patient_object [ 'tumor_type' ] not in TCGAToGTEx : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does contains an invalid Tumor type. Please use one of the ' 'valid TCGA tumor types.' ) if { 'tumor_dna_fastq_1' , 'normal_dna_fastq_1' , 'tumor_rna_fastq_1' } . issubset ( test_set ) : pass else : if 'hla_haplotype_files' not in test_set : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a hla_haplotype_files entry.\nCannot haplotype ' 'patient if all the input sequence files are not fastqs.' ) if ( ( { re . search ( 'tumor_dna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } or { re . search ( 'normal_dna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } ) and ( 'mutation_vcf' not in test_set and 'fusion_bedpe' not in test_set ) ) : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a mutation_vcf or fusion_bedpe entry. If both ' 'tumor and normal DNA sequences (fastqs or bam) are not provided, ' 'a pre-computed vcf and/or bedpe must be provided.' ) if { re . search ( 'tumor_rna_((bam)|(fastq_1)).*' , x ) for x in test_set } == { None } : if 'mutation_vcf' not in test_set and 'fusion_bedpe' in test_set : pass else : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name ) + 'does not contain a tumor rna sequence data entry. We require ' 'either tumor_rna_fastq_1 or tumor_rna_bam.' ) if 'tumor_rna_bam' in test_set and 'tumor_rna_transcriptome_bam' not in test_set : if 'expression_files' not in test_set : raise ParameterError ( ( 'The patient entry for sample %s ' % patient_name + 'was provided a tumor rna bam with sequences mapped to the ' 'genome but was not provided a matching rna bam for the ' 'transcriptome or a tar containing expression values. ' 'We require either a matching transcriptome bam to estimate' 'expression, or the precomputed expression values.' ) )
Ensure that the provided entries for the patient group are formatted properly.
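A hypothetical patient entry that would satisfy the checks above: it names a tumor type and provides fastq_1 entries for tumor DNA, normal DNA and tumor RNA. The paths are placeholders and 'BRCA' is assumed to be among the accepted TCGA tumor types.

patient_entry = {
    'tumor_type': 'BRCA',                                      # assumed valid TCGA code
    'tumor_dna_fastq_1': '/data/patient1/tumor_dna_1.fq.gz',   # placeholder path
    'normal_dna_fastq_1': '/data/patient1/normal_dna_1.fq.gz',
    'tumor_rna_fastq_1': '/data/patient1/tumor_rna_1.fq.gz',
}
_ensure_patient_group_is_ok(patient_entry, patient_name='patient1')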
2,665
def _add_default_entries ( input_dict , defaults_dict ) : for key , value in defaults_dict . iteritems ( ) : if key == 'patients' : print ( 'Cannot default `patients`.' ) continue if isinstance ( value , dict ) : if key not in input_dict or input_dict [ key ] is None : input_dict [ key ] = value else : r = _add_default_entries ( input_dict . get ( key , { } ) , value ) input_dict [ key ] = r else : if key not in input_dict or input_dict [ key ] is None : input_dict [ key ] = value return input_dict
Add the entries in defaults_dict into input_dict if they don't exist in input_dict
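An illustrative call to _add_default_entries with made-up dicts: values already present in the input are kept, missing ones are filled in from the defaults, recursively.

defaults = {'star': {'n': 4, 'version': '2.5.2b'}, 'dockerhub': 'aarjunrao'}  # made-up defaults
user_input = {'star': {'n': 8}}
merged = _add_default_entries(user_input, defaults)
# merged == {'star': {'n': 8, 'version': '2.5.2b'}, 'dockerhub': 'aarjunrao'}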
2,666
def _process_group ( input_group , required_group , groupname , append_subgroups = None ) : if append_subgroups is None : append_subgroups = [ ] tool_options = { } for key in input_group : _ensure_set_contains ( input_group [ key ] , required_group . get ( key , { } ) , groupname + '::' + key ) if key in append_subgroups : continue else : tool_options [ key ] = input_group [ key ] for key in input_group : if key in append_subgroups : continue else : for yek in append_subgroups : tool_options [ key ] . update ( input_group [ yek ] ) return tool_options
Process one group from the input yaml and ensure it has the required entries. If there is a subgroup that should be processed and then appended to the rest of the subgroups in that group, handle it accordingly.
2,667
def get_fastq_2 ( job , patient_id , sample_type , fastq_1 ) : prefix , extn = fastq_1 , 'temp' final_extn = '' while extn : prefix , extn = os . path . splitext ( prefix ) final_extn = extn + final_extn if prefix . endswith ( '1' ) : prefix = prefix [ : - 1 ] job . fileStore . logToMaster ( '"%s" prefix for "%s" determined to be %s' % ( sample_type , patient_id , prefix ) ) break else : raise ParameterError ( 'Could not determine prefix from provided fastq (%s). Is it ' 'of the form <fastq_prefix>1.[fq/fastq][.gz]?' % fastq_1 ) if final_extn not in [ '.fastq' , '.fastq.gz' , '.fq' , '.fq.gz' ] : raise ParameterError ( 'If and _2 fastq path is not specified, only .fastq, .fq or ' 'their gzippped extensions are accepted. Could not process ' '%s:%s.' % ( patient_id , sample_type + '_fastq_1' ) ) return '' . join ( [ prefix , '2' , final_extn ] )
For a path to a fastq_1 file, return the path to the corresponding fastq_2 file with the same prefix and naming scheme.
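A hypothetical call illustrating the naming scheme get_fastq_2 expects; job, the patient id and the path are placeholders.

fastq_2 = get_fastq_2(job, 'patient1', 'tumor_dna',
                      '/data/patient1/tumor_dna_1.fastq.gz')
# fastq_2 == '/data/patient1/tumor_dna_2.fastq.gz'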
2,668
def parse_config_file ( job , config_file , max_cores = None ) : sample_set , univ_options , processed_tool_inputs = _parse_config_file ( job , config_file , max_cores ) for patient_id in sample_set . keys ( ) : job . addFollowOnJobFn ( launch_protect , sample_set [ patient_id ] , univ_options , processed_tool_inputs ) return None
Parse the config file and spawn a ProTECT job for every input sample .
2,669
def get_all_tool_inputs ( job , tools , outer_key = '' , mutation_caller_list = None ) : for tool in tools : for option in tools [ tool ] : if isinstance ( tools [ tool ] [ option ] , dict ) : tools [ tool ] [ option ] = get_all_tool_inputs ( job , { option : tools [ tool ] [ option ] } , outer_key = ':' . join ( [ outer_key , tool ] ) . lstrip ( ':' ) ) [ option ] else : if option . split ( '_' ) [ - 1 ] in [ 'file' , 'vcf' , 'index' , 'fasta' , 'fai' , 'idx' , 'dict' , 'tbi' , 'beds' , 'gtf' , 'config' ] : tools [ tool ] [ option ] = job . addChildJobFn ( get_pipeline_inputs , ':' . join ( [ outer_key , tool , option ] ) . lstrip ( ':' ) , tools [ tool ] [ option ] ) . rv ( ) elif option == 'version' : tools [ tool ] [ option ] = str ( tools [ tool ] [ option ] ) if mutation_caller_list is not None : indexes = tools . pop ( 'indexes' ) indexes [ 'chromosomes' ] = parse_chromosome_string ( job , indexes [ 'chromosomes' ] ) for mutation_caller in mutation_caller_list : if mutation_caller == 'indexes' : continue tools [ mutation_caller ] . update ( indexes ) return tools
Iterate through all the tool options and download required files from their remote locations .
2,670
def get_pipeline_inputs ( job , input_flag , input_file , encryption_key = None , per_file_encryption = False , gdc_download_token = None ) : work_dir = os . getcwd ( ) job . fileStore . logToMaster ( 'Obtaining file (%s) to the file job store' % input_flag ) if input_file . startswith ( ( 'http' , 'https' , 'ftp' ) ) : input_file = get_file_from_url ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'S3' , 's3' ) ) : input_file = get_file_from_s3 ( job , input_file , encryption_key = encryption_key , per_file_encryption = per_file_encryption , write_to_jobstore = True ) elif input_file . startswith ( ( 'GDC' , 'gdc' ) ) : input_file = get_file_from_gdc ( job , input_file , gdc_download_token = gdc_download_token , write_to_jobstore = True ) else : assert os . path . exists ( input_file ) , 'Bogus Input : ' + input_file input_file = job . fileStore . writeGlobalFile ( input_file ) return input_file
Get the input file from a URL, S3, the GDC, or local disk and write it to the file store.
2,671
def prepare_samples ( job , patient_dict , univ_options ) : job . fileStore . logToMaster ( 'Downloading Inputs for %s' % univ_options [ 'patient' ] ) output_dict = { } for input_file in patient_dict : if not input_file . endswith ( ( 'bam' , 'bai' , '_1' , '_2' , 'files' , 'vcf' , 'bedpe' ) ) : output_dict [ input_file ] = patient_dict [ input_file ] continue output_dict [ input_file ] = get_pipeline_inputs ( job , ':' . join ( [ univ_options [ 'patient' ] , input_file ] ) , patient_dict [ input_file ] , encryption_key = ( univ_options [ 'sse_key' ] if patient_dict [ 'ssec_encrypted' ] else None ) , per_file_encryption = univ_options [ 'sse_key_is_master' ] , gdc_download_token = univ_options [ 'gdc_download_token' ] ) return output_dict
Obtain the input files for the patient and write them to the file store .
2,672
def get_patient_bams ( job , patient_dict , sample_type , univ_options , bwa_options , mutect_options ) : output_dict = { } if 'dna' in sample_type : sample_info = 'fix_pg_sorted' prefix = sample_type + '_' + sample_info else : sample_info = 'genome_sorted' prefix = 'rna_' + sample_info if sample_type + '_bam' in patient_dict [ 'gdc_inputs' ] : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] [ 0 ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bam' ] [ 1 ] elif sample_type + '_bai' in patient_dict : output_dict [ prefix + '.bam' ] = patient_dict [ sample_type + '_bam' ] output_dict [ prefix + '.bam.bai' ] = patient_dict [ sample_type + '_bai' ] else : from protect . alignment . dna import index_bamfile , index_disk output_job = job . wrapJobFn ( index_bamfile , patient_dict [ sample_type + '_bam' ] , 'rna' if sample_type == 'tumor_rna' else sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = sample_info , export = False , disk = PromisedRequirement ( index_disk , patient_dict [ sample_type + '_bam' ] ) ) job . addChild ( output_job ) output_dict = output_job . rv ( ) if sample_type == 'tumor_rna' : if 'tumor_rna_transcriptome_bam' not in patient_dict : patient_dict [ 'tumor_rna_transcriptome_bam' ] = None return { 'rna_genome' : output_dict , 'rna_transcriptome.bam' : patient_dict [ 'tumor_rna_transcriptome_bam' ] } else : return output_dict
Convenience function to return the bam and its index in the correct format for a sample type .
2,673
def get_patient_vcf ( job , patient_dict ) : temp = job . fileStore . readGlobalFile ( patient_dict [ 'mutation_vcf' ] , os . path . join ( os . getcwd ( ) , 'temp.gz' ) ) if is_gzipfile ( temp ) : outfile = job . fileStore . writeGlobalFile ( gunzip ( temp ) ) job . fileStore . deleteGlobalFile ( patient_dict [ 'mutation_vcf' ] ) else : outfile = patient_dict [ 'mutation_vcf' ] return outfile
Convenience function to get the vcf from the patient dict
2,674
def get_patient_mhc_haplotype ( job , patient_dict ) : haplotype_archive = job . fileStore . readGlobalFile ( patient_dict [ 'hla_haplotype_files' ] ) haplotype_archive = untargz ( haplotype_archive , os . getcwd ( ) ) output_dict = { } for filename in 'mhci_alleles.list' , 'mhcii_alleles.list' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( haplotype_archive , filename ) ) return output_dict
Convenience function to get the mhc haplotype from the patient dict
2,675
def get_patient_expression ( job , patient_dict ) : expression_archive = job . fileStore . readGlobalFile ( patient_dict [ 'expression_files' ] ) expression_archive = untargz ( expression_archive , os . getcwd ( ) ) output_dict = { } for filename in 'rsem.genes.results' , 'rsem.isoforms.results' : output_dict [ filename ] = job . fileStore . writeGlobalFile ( os . path . join ( expression_archive , filename ) ) return output_dict
Convenience function to get the expression from the patient dict
2,676
def generate_config_file ( ) : shutil . copy ( os . path . join ( os . path . dirname ( __file__ ) , 'input_parameters.yaml' ) , os . path . join ( os . getcwd ( ) , 'ProTECT_config.yaml' ) )
Generate a config file for a ProTECT run on hg19 .
2,677
def main ( ) : parser = argparse . ArgumentParser ( prog = 'ProTECT' , description = 'Prediction of T-Cell Epitopes for Cancer Therapy' , epilog = 'Contact Arjun Rao (aarao@ucsc.edu) if you encounter ' 'any problems while running ProTECT' ) inputs = parser . add_mutually_exclusive_group ( required = True ) inputs . add_argument ( '--config_file' , dest = 'config_file' , help = 'Config file to be used in the ' 'run.' , type = str , default = None ) inputs . add_argument ( '--generate_config' , dest = 'generate_config' , help = 'Generate a config file ' 'in the current directory that is pre-filled with references and flags for ' 'an hg19 run.' , action = 'store_true' , default = False ) parser . add_argument ( '--max-cores-per-job' , dest = 'max_cores' , help = 'Maximum cores to use per ' 'job. Aligners and Haplotypers ask for cores dependent on the machine that ' 'the launchpad gets assigned to -- In a heterogeneous cluster, this can ' 'lead to problems. This value should be set to the number of cpus on the ' 'smallest node in a cluster.' , type = int , required = False , default = None ) params , others = parser . parse_known_args ( ) if params . generate_config : generate_config_file ( ) else : Job . Runner . addToilOptions ( parser ) params = parser . parse_args ( ) params . config_file = os . path . abspath ( params . config_file ) if params . maxCores : if not params . max_cores : params . max_cores = int ( params . maxCores ) else : if params . max_cores > int ( params . maxCores ) : print ( "The value provided to max-cores-per-job (%s) was greater than that " "provided to maxCores (%s). Setting max-cores-per-job = maxCores." % ( params . max_cores , params . maxCores ) , file = sys . stderr ) params . max_cores = int ( params . maxCores ) start = Job . wrapJobFn ( parse_config_file , params . config_file , params . max_cores ) Job . Runner . startToil ( start , params ) return None
This is the main function for ProTECT .
2,678
def poll ( self ) : if select . select ( [ self . tn ] , [ ] , [ ] , 0 ) == ( [ self . tn ] , [ ] , [ ] ) : response = urllib . unquote ( self . tn . read_until ( b"\n" ) . decode ( ) ) if self . debug : print "Telnet Poll: %s" % ( response [ : - 1 ] ) return response else : return None
Poll: check for a non-response string generated by LCDd and return any string read. LCDd generates strings for key presses, menu events & screen visibility changes.
2,679
def module_to_dict ( module , omittable = lambda k : k . startswith ( '_' ) ) : return dict ( [ ( k , repr ( v ) ) for k , v in module . __dict__ . items ( ) if not omittable ( k ) ] )
Converts a module namespace to a Python dictionary . Used by get_settings_diff .
2,680
def run_snpeff ( job , merged_mutation_file , univ_options , snpeff_options ) : work_dir = os . getcwd ( ) input_files = { 'merged_mutations.vcf' : merged_mutation_file , 'snpeff_index.tar.gz' : snpeff_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'snpeff_index' ] = untargz ( input_files [ 'snpeff_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'eff' , '-dataDir' , input_files [ 'snpeff_index' ] , '-c' , '/' . join ( [ input_files [ 'snpeff_index' ] , 'snpEff_' + univ_options [ 'ref' ] + '_gencode.config' ] ) , '-no-intergenic' , '-no-downstream' , '-no-upstream' , '-noStats' , univ_options [ 'ref' ] + '_gencode' , input_files [ 'merged_mutations.vcf' ] ] xmx = snpeff_options [ 'java_Xmx' ] if snpeff_options [ 'java_Xmx' ] else univ_options [ 'java_Xmx' ] with open ( '/' . join ( [ work_dir , 'mutations.vcf' ] ) , 'w' ) as snpeff_file : docker_call ( tool = 'snpeff' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = xmx , outfile = snpeff_file , tool_version = snpeff_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( snpeff_file . name ) export_results ( job , output_file , snpeff_file . name , univ_options , subfolder = 'mutations/snpeffed' ) job . fileStore . logToMaster ( 'Ran snpeff on %s successfully' % univ_options [ 'patient' ] ) return output_file
Run snpeff on an input vcf .
2,681
def paths_in_directory ( input_directory ) : paths = [ ] for base_path , directories , filenames in os . walk ( input_directory ) : relative_path = os . path . relpath ( base_path , input_directory ) path_components = relative_path . split ( os . sep ) if path_components [ 0 ] == "." : path_components = path_components [ 1 : ] if path_components and path_components [ 0 ] . startswith ( "." ) : continue path_components = filter ( bool , path_components ) for filename in filenames : if filename . startswith ( "." ) : continue paths . append ( path_components + [ filename ] ) return paths
Generate a list of all files in input_directory, each as a list containing path components.
2,682
def run_car_t_validity_assessment ( job , rsem_files , univ_options , reports_options ) : return job . addChildJobFn ( assess_car_t_validity , rsem_files [ 'rsem.genes.results' ] , univ_options , reports_options ) . rv ( )
A wrapper for assess_car_t_validity .
2,683
def align_dna ( job , fastqs , sample_type , univ_options , bwa_options ) : bwa = job . wrapJobFn ( run_bwa , fastqs , sample_type , univ_options , bwa_options , disk = PromisedRequirement ( bwa_disk , fastqs , bwa_options [ 'index' ] ) , memory = univ_options [ 'java_Xmx' ] , cores = bwa_options [ 'n' ] ) sam2bam = job . wrapJobFn ( bam_conversion , bwa . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) reheader = job . wrapJobFn ( fix_bam_header , sam2bam . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , disk = PromisedRequirement ( sam2bam_disk , bwa . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) regroup = job . wrapJobFn ( add_readgroups , reheader . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( regroup_disk , reheader . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) mkdup = job . wrapJobFn ( mark_duplicates , regroup . rv ( ) , sample_type , univ_options , bwa_options [ 'picard' ] , disk = PromisedRequirement ( mkdup_disk , regroup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) index = job . wrapJobFn ( index_bamfile , mkdup . rv ( ) , sample_type , univ_options , bwa_options [ 'samtools' ] , sample_info = 'fix_pg_sorted' , disk = PromisedRequirement ( index_disk , mkdup . rv ( ) ) , memory = univ_options [ 'java_Xmx' ] ) job . addChild ( bwa ) bwa . addChild ( sam2bam ) sam2bam . addChild ( reheader ) reheader . addChild ( regroup ) regroup . addChild ( mkdup ) mkdup . addChild ( index ) return index . rv ( )
A wrapper for the entire dna alignment subgraph .
2,684
def run_bwa ( job , fastqs , sample_type , univ_options , bwa_options ) : work_dir = os . getcwd ( ) input_files = { 'dna_1.fastq' : fastqs [ 0 ] , 'dna_2.fastq' : fastqs [ 1 ] , 'bwa_index.tar.gz' : bwa_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) gz = '.gz' if is_gzipfile ( input_files [ 'dna_1.fastq' ] ) else '' if gz : for read_file in 'dna_1.fastq' , 'dna_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz input_files [ 'bwa_index' ] = untargz ( input_files [ 'bwa_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'mem' , '-t' , str ( bwa_options [ 'n' ] ) , '-v' , '1' , '/' . join ( [ input_files [ 'bwa_index' ] , univ_options [ 'ref' ] ] ) , input_files [ 'dna_1.fastq' + gz ] , input_files [ 'dna_2.fastq' + gz ] ] with open ( '' . join ( [ work_dir , '/' , sample_type , '.sam' ] ) , 'w' ) as samfile : docker_call ( tool = 'bwa' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = samfile , tool_version = bwa_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( samfile . name ) job . fileStore . logToMaster ( 'Ran bwa on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
Align a pair of fastqs with bwa .
2,685
def bam_conversion ( job , samfile , sample_type , univ_options , samtools_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.sam' : samfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) bamfile = '/' . join ( [ work_dir , sample_type + '.bam' ] ) parameters = [ 'view' , '-bS' , '-o' , docker_path ( bamfile ) , input_files [ sample_type + '.sam' ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( bamfile ) job . fileStore . deleteGlobalFile ( samfile ) job . fileStore . logToMaster ( 'Ran sam2bam on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
Convert a sam to a bam .
2,686
def fix_bam_header ( job , bamfile , sample_type , univ_options , samtools_options , retained_chroms = None ) : if retained_chroms is None : retained_chroms = [ ] work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'view' , '-H' , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_input_bam.header' ] ) , 'w' ) as headerfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = headerfile , tool_version = samtools_options [ 'version' ] ) with open ( headerfile . name , 'r' ) as headerfile , open ( '/' . join ( [ work_dir , sample_type + '_output_bam.header' ] ) , 'w' ) as outheaderfile : for line in headerfile : if line . startswith ( '@PG' ) : line = '\t' . join ( [ x for x in line . strip ( ) . split ( '\t' ) if not x . startswith ( 'CL' ) ] ) if retained_chroms and line . startswith ( '@SQ' ) : if line . strip ( ) . split ( ) [ 1 ] . lstrip ( 'SN:' ) not in retained_chroms : continue print ( line . strip ( ) , file = outheaderfile ) parameters = [ 'reheader' , docker_path ( outheaderfile . name ) , input_files [ sample_type + '.bam' ] ] with open ( '/' . join ( [ work_dir , sample_type + '_fixPG.bam' ] ) , 'w' ) as fixpg_bamfile : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = fixpg_bamfile , tool_version = samtools_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( fixpg_bamfile . name ) job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran reheader on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
Fix the bam header to remove the command line call . Failing to do this causes Picard to reject the bam .
2,687
def add_readgroups ( job , bamfile , sample_type , univ_options , picard_options ) : work_dir = os . getcwd ( ) input_files = { sample_type + '.bam' : bamfile } get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'AddOrReplaceReadGroups' , 'CREATE_INDEX=false' , 'I=/data/' + sample_type + '.bam' , 'O=/data/' + sample_type + '_reheader.bam' , 'SO=coordinate' , 'ID=1' , '' . join ( [ 'LB=' , univ_options [ 'patient' ] ] ) , 'PL=ILLUMINA' , 'PU=12345' , '' . join ( [ 'SM=' , sample_type . rstrip ( '_dna' ) ] ) ] docker_call ( tool = 'picard' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , java_xmx = univ_options [ 'java_Xmx' ] , tool_version = picard_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , sample_type + '_reheader.bam' ] ) ) job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran add_read_groups on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_file
Add read groups to the bam .
2,688
def weekday ( cls , year , month , day ) : return NepDate . from_bs_date ( year , month , day ) . weekday ( )
Returns the weekday of the date. 0 = aaitabar (Sunday)
2,689
def monthrange ( cls , year , month ) : functions . check_valid_bs_range ( NepDate ( year , month , 1 ) ) return values . NEPALI_MONTH_DAY_DATA [ year ] [ month - 1 ]
Returns the number of days in a month
2,690
def itermonthdays ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield day . day else : yield 0
Similar to itermonthdates but yields the day number instead of a NepDate object
2,691
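Sketch, same assumptions; days that pad out the leading or trailing week come back as 0:

print(list(NepCal.itermonthdays(2073, 1)))   # day numbers, with 0 for padding days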
def itermonthdays2 ( cls , year , month ) : for day in NepCal . itermonthdates ( year , month ) : if day . month == month : yield ( day . day , day . weekday ( ) ) else : yield ( 0 , day . weekday ( ) )
Similar to itermonthdays but returns tuples of ( day , weekday ) .
2,692
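Sketch, same assumptions; each item pairs the day number (0 for padding) with its weekday index:

for day, weekday in NepCal.itermonthdays2(2073, 1):
    if day:   # skip the padding entries
        print(day, weekday)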
def monthdatescalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdates ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
Returns a list of the weeks in a month . A week is a list of NepDate objects
2,693
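Sketch, same assumptions; each week is a list of NepDate objects:

weeks = NepCal.monthdatescalendar(2073, 1)
print(len(weeks), 'weeks; first week:', weeks[0])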
def monthdayscalendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
Return a list of the weeks in the given month of the given year as full weeks . Weeks are lists of seven day numbers .
2,694
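Sketch, same assumptions; because the weeks are plain day numbers, a simple text month grid can be printed directly:

for week in NepCal.monthdayscalendar(2073, 1):
    print(' '.join('{:2d}'.format(d) if d else ' .' for d in week))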
def monthdays2calendar ( cls , year , month ) : weeks = [ ] week = [ ] for day in NepCal . itermonthdays2 ( year , month ) : week . append ( day ) if len ( week ) == 7 : weeks . append ( week ) week = [ ] if len ( week ) > 0 : weeks . append ( week ) return weeks
Return a list of the weeks in the given month of the given year as full weeks . Weeks are lists of seven tuples of day numbers and weekday numbers .
2,695
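Sketch, same assumptions; here the (day, weekday) tuples are used to find which days of the month fall on weekday 0:

for week in NepCal.monthdays2calendar(2073, 1):
    hits = [day for day, wd in week if wd == 0 and day]
    if hits:
        print('weekday 0 falls on day', hits[0])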
def run_somaticsniper_with_merge ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : spawn = job . wrapJobFn ( run_somaticsniper , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = False ) . encapsulate ( ) job . addChild ( spawn ) return spawn . rv ( )
A wrapper for the entire SomaticSniper sub - graph .
2,696
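A hedged sketch of spawning this wrapper from a parent Toil job; parent_job, the bam dicts and the option dicts are placeholders whose keys mirror the ones used in this section.

merged_vcf_promise = parent_job.addChildJobFn(
    run_somaticsniper_with_merge, tumor_bam, normal_bam,
    univ_options, somaticsniper_options).rv()   # promise of the merged vcf file id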
def run_somaticsniper ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options , split = True ) : if somaticsniper_options [ 'chromosomes' ] : chromosomes = somaticsniper_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , somaticsniper_options [ 'genome_fai' ] ) perchrom_somaticsniper = defaultdict ( ) snipe = job . wrapJobFn ( run_somaticsniper_full , tumor_bam , normal_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) pileup = job . wrapJobFn ( run_pileup , tumor_bam , univ_options , somaticsniper_options , disk = PromisedRequirement ( pileup_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) filtersnipes = job . wrapJobFn ( filter_somaticsniper , tumor_bam , snipe . rv ( ) , pileup . rv ( ) , univ_options , somaticsniper_options , disk = PromisedRequirement ( sniper_filter_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , somaticsniper_options [ 'genome_fasta' ] ) , memory = '6G' ) job . addChild ( snipe ) job . addChild ( pileup ) snipe . addChild ( filtersnipes ) pileup . addChild ( filtersnipes ) if split : unmerge_snipes = job . wrapJobFn ( unmerge , filtersnipes . rv ( ) , 'somaticsniper' , chromosomes , somaticsniper_options , univ_options ) filtersnipes . addChild ( unmerge_snipes ) return unmerge_snipes . rv ( ) else : return filtersnipes . rv ( )
Run the SomaticSniper subgraph on the DNA bams . Optionally split the results into per - chromosome vcfs .
2,697
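Sketch of wiring the split variant, again with placeholder inputs; encapsulate() is used so downstream jobs only see the finished sub-graph, as in the merge wrapper above.

sniper = parent_job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam,
                              univ_options, somaticsniper_options,
                              split=True).encapsulate()
parent_job.addChild(sniper)
per_chrom_vcfs = sniper.rv()   # promise of per-chromosome results when split=True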
def run_somaticsniper_full ( job , tumor_bam , normal_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal.bam' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal.bam.bai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } output_file = os . path . join ( work_dir , 'somatic-sniper_full.vcf' ) parameters = [ '-f' , input_files [ 'genome.fa' ] , '-F' , 'vcf' , '-G' , '-L' , '-q' , '1' , '-Q' , '15' , input_files [ 'tumor.bam' ] , input_files [ 'normal.bam' ] , docker_path ( output_file ) ] docker_call ( tool = 'somaticsniper' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( output_file ) job . fileStore . logToMaster ( 'Ran SomaticSniper on %s successfully' % univ_options [ 'patient' ] ) return outfile
Run SomaticSniper on the DNA bams .
2,698
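For reference, a hedged sketch of the equivalent direct call outside Docker; the bam-somaticsniper binary name and all file names are assumptions, and the flags simply mirror the parameters listed above.

import subprocess

subprocess.check_call(['bam-somaticsniper', '-f', 'genome.fa', '-F', 'vcf',
                       '-G', '-L', '-q', '1', '-Q', '15',
                       'tumor.bam', 'normal.bam', 'somatic-sniper_full.vcf'])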
def filter_somaticsniper ( job , tumor_bam , somaticsniper_output , tumor_pileup , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'input.vcf' : somaticsniper_output , 'pileup.txt' : tumor_pileup , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'snpfilter.pl' , '--snp-file' , input_files [ 'input.vcf' ] , '--indel-file' , input_files [ 'pileup.txt' ] ] docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) parameters = [ 'prepare_for_readcount.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter' ] docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) parameters = [ '-b' , '15' , '-f' , input_files [ 'genome.fa' ] , '-l' , input_files [ 'input.vcf' ] + '.SNPfilter.pos' , '-w' , '1' , input_files [ 'tumor.bam' ] ] with open ( os . path . join ( work_dir , 'readcounts.txt' ) , 'w' ) as readcounts_file : docker_call ( tool = 'bam-readcount' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = readcounts_file , tool_version = somaticsniper_options [ 'bam_readcount' ] [ 'version' ] ) parameters = [ 'fpfilter.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter' , '--readcount-file' , docker_path ( readcounts_file . name ) ] docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) parameters = [ 'highconfidence.pl' , '--snp-file' , input_files [ 'input.vcf' ] + '.SNPfilter.fp_pass' ] docker_call ( tool = 'somaticsniper-addons' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = somaticsniper_options [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( os . path . join ( os . getcwd ( ) , 'input.vcf.SNPfilter.fp_pass.hc' ) ) job . fileStore . logToMaster ( 'Filtered SomaticSniper for %s successfully' % univ_options [ 'patient' ] ) return outfile
Filter SomaticSniper calls .
2,699
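Of the filtering stages chained above, the bam-readcount step is the one non-Perl call; a hedged local sketch with placeholder file names, assuming bam-readcount is on the PATH.

import subprocess

with open('readcounts.txt', 'w') as out:
    subprocess.check_call(['bam-readcount', '-b', '15', '-f', 'genome.fa',
                           '-l', 'somatic.SNPfilter.pos', '-w', '1',
                           'tumor.bam'], stdout=out)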
def run_pileup ( job , tumor_bam , univ_options , somaticsniper_options ) : work_dir = os . getcwd ( ) input_files = { 'tumor.bam' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor.bam.bai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'genome.fa.tar.gz' : somaticsniper_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : somaticsniper_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ 'pileup' , '-cvi' , '-f' , docker_path ( input_files [ 'genome.fa' ] ) , docker_path ( input_files [ 'tumor.bam' ] ) ] with open ( os . path . join ( work_dir , 'pileup.txt' ) , 'w' ) as pileup_file : docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , outfile = pileup_file , tool_version = somaticsniper_options [ 'samtools' ] [ 'version' ] ) outfile = job . fileStore . writeGlobalFile ( pileup_file . name ) job . fileStore . logToMaster ( 'Ran samtools pileup on %s successfully' % univ_options [ 'patient' ] ) return outfile
Runs a samtools pileup on the tumor bam .
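A hedged local sketch of the same call with placeholder file names; note the 'pileup' subcommand only exists in legacy (0.1.x) samtools releases, which is why the pipeline pins a specific samtools version.

import subprocess

with open('pileup.txt', 'w') as out:
    subprocess.check_call(['samtools', 'pileup', '-cvi', '-f', 'genome.fa',
                           'tumor.bam'], stdout=out)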