idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
12,100 | def join ( self , a , * args ) : mapping = self . _mapping set_a = mapping . setdefault ( a , [ a ] ) for arg in args : set_b = mapping . get ( arg ) if set_b is None : set_a . append ( arg ) mapping [ arg ] = set_a elif set_b is not set_a : if len ( set_b ) > len ( set_a ) : set_a , set_b = set_b , set_a set_a . extend ( set_b ) for elem in set_b : mapping [ elem ] = set_a | Join given arguments into the same set . Accepts one or more arguments . |
12,101 | def joined ( self , a , b ) : mapping = self . _mapping try : return mapping [ a ] is mapping [ b ] except KeyError : return False | Returns True if a and b are members of the same set . |
12,102 | def fromcsv ( args ) : from csv import reader from xlwt import Workbook , easyxf from jcvi . formats . base import flexible_cast p = OptionParser ( fromcsv . __doc__ ) p . add_option ( "--noheader" , default = False , action = "store_true" , help = "Do not treat the first row as header" ) p . add_option ( "--rgb" , default = - 1 , type = "int" , help = "Show RGB color box" ) p . set_sep ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) csvfile , = args header = not opts . noheader rgb = opts . rgb excelfile = csvfile . rsplit ( "." , 1 ) [ 0 ] + ".xls" data = [ ] for row in reader ( open ( csvfile ) , delimiter = opts . sep ) : data . append ( row ) w = Workbook ( ) s = w . add_sheet ( op . basename ( csvfile ) ) header_style = easyxf ( 'font: bold on' ) if header : s . panes_frozen = True s . horz_split_pos = 1 cm = ColorMatcher ( ) for i , row in enumerate ( data ) : for j , cell in enumerate ( row ) : cell = flexible_cast ( cell ) if header and i == 0 : s . write ( i , j , cell , header_style ) else : if j == rgb : cix = cm . match_color_index ( cell ) color_style = easyxf ( 'font: color_index {0}' . format ( cix ) ) s . write ( i , j , cell , color_style ) else : s . write ( i , j , cell ) w . save ( excelfile ) logging . debug ( "File written to `{0}`." . format ( excelfile ) ) return excelfile | %prog fromcsv csvfile |
12,103 | def csv ( args ) : from xlrd import open_workbook p = OptionParser ( csv . __doc__ ) p . set_sep ( sep = ',' ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) excelfile , = args sep = opts . sep csvfile = excelfile . rsplit ( "." , 1 ) [ 0 ] + ".csv" wb = open_workbook ( excelfile ) fw = open ( csvfile , "w" ) for s in wb . sheets ( ) : print ( 'Sheet:' , s . name , file = sys . stderr ) for row in range ( s . nrows ) : values = [ ] for col in range ( s . ncols ) : values . append ( s . cell ( row , col ) . value ) print ( sep . join ( str ( x ) for x in values ) , file = fw ) | %prog csv excelfile |
12,104 | def match_color_index ( self , color ) : from jcvi . utils . webcolors import color_diff if isinstance ( color , int ) : return color if color : if isinstance ( color , six . string_types ) : rgb = map ( int , color . split ( ',' ) ) else : rgb = color . Get ( ) logging . disable ( logging . DEBUG ) distances = [ color_diff ( rgb , x ) for x in self . xlwt_colors ] logging . disable ( logging . NOTSET ) result = distances . index ( min ( distances ) ) self . unused_colors . discard ( self . xlwt_colors [ result ] ) return result | Takes an R G B string or wx . Color and returns a matching xlwt color . |
12,105 | def get_unused_color ( self ) : if not self . unused_colors : self . reset ( ) used_colors = [ c for c in self . xlwt_colors if c not in self . unused_colors ] result_color = max ( self . unused_colors , key = lambda c : min ( self . color_distance ( c , c2 ) for c2 in used_colors ) ) result_index = self . xlwt_colors . index ( result_color ) self . unused_colors . discard ( result_color ) return result_index | Returns an xlwt color index that has not been previously returned by this instance . Attempts to maximize the distance between the color and all previously used colors . |
12,106 | def validate ( args ) : import pyfasta p = OptionParser ( validate . __doc__ ) p . add_option ( "--prefix" , help = "Add prefix to seqid" ) opts , args = p . parse_args ( args ) vcffile , fastafile = args pf = opts . prefix genome = pyfasta . Fasta ( fastafile , record_class = pyfasta . MemoryRecord ) fp = must_open ( vcffile ) match_ref = match_alt = total = 0 for row in fp : if row [ 0 ] == '#' : continue seqid , pos , id , ref , alt = row . split ( ) [ : 5 ] total += 1 if pf : seqid = pf + seqid pos = int ( pos ) if seqid not in genome : continue true_ref = genome [ seqid ] [ pos - 1 ] if total % 100000 == 0 : print ( total , "sites parsed" , file = sys . stderr ) if ref == true_ref : match_ref += 1 elif alt == true_ref : match_alt += 1 logging . debug ( "Match REF: {}" . format ( percentage ( match_ref , total ) ) ) logging . debug ( "Match ALT: {}" . format ( percentage ( match_alt , total ) ) ) | %prog validate input . vcf genome . fasta |
12,107 | def uniq ( args ) : from six . moves . urllib . parse import parse_qs p = OptionParser ( uniq . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) vcffile , = args fp = must_open ( vcffile ) data = [ ] for row in fp : if row [ 0 ] == '#' : print ( row . strip ( ) ) continue v = VcfLine ( row ) data . append ( v ) for pos , vv in groupby ( data , lambda x : x . pos ) : vv = list ( vv ) if len ( vv ) == 1 : print ( vv [ 0 ] ) continue bestv = max ( vv , key = lambda x : float ( parse_qs ( x . info ) [ "R2" ] [ 0 ] ) ) print ( bestv ) | %prog uniq vcffile |
12,108 | def sample ( args ) : from random import random p = OptionParser ( sample . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) vcffile , ratio = args ratio = float ( ratio ) fp = open ( vcffile ) pf = vcffile . rsplit ( "." , 1 ) [ 0 ] kept = pf + ".kept.vcf" withheld = pf + ".withheld.vcf" fwk = open ( kept , "w" ) fww = open ( withheld , "w" ) nkept = nwithheld = 0 for row in fp : if row [ 0 ] == '#' : print ( row . strip ( ) , file = fwk ) continue if random ( ) < ratio : nkept += 1 print ( row . strip ( ) , file = fwk ) else : nwithheld += 1 print ( row . strip ( ) , file = fww ) logging . debug ( "{0} records kept to `{1}`" . format ( nkept , kept ) ) logging . debug ( "{0} records withheld to `{1}`" . format ( nwithheld , withheld ) ) | %prog sample vcffile 0 . 9 |
12,109 | def fromimpute2 ( args ) : p = OptionParser ( fromimpute2 . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) impute2file , fastafile , chr = args fasta = Fasta ( fastafile ) print ( get_vcfstanza ( fastafile , fasta ) ) fp = open ( impute2file ) seen = set ( ) for row in fp : snp_id , rsid , pos , ref , alt , aa , ab , bb = row . split ( ) pos = int ( pos ) if pos in seen : continue seen . add ( pos ) code = max ( ( float ( aa ) , "0/0" ) , ( float ( ab ) , "0/1" ) , ( float ( bb ) , "1/1" ) ) [ - 1 ] tag = "PR" if snp_id == chr else "IM" print ( "\t" . join ( str ( x ) for x in ( chr , pos , rsid , ref , alt , "." , "." , tag , "GT:GP" , code + ":" + "," . join ( ( aa , ab , bb ) ) ) ) ) | %prog fromimpute2 impute2file fastafile 1 |
12,110 | def refallele ( args ) : p = OptionParser ( refallele . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) vcffile , = args fp = open ( vcffile ) for row in fp : if row [ 0 ] == '#' : continue atoms = row . split ( ) marker = "{0}:{1}" . format ( * atoms [ : 2 ] ) ref = atoms [ 3 ] print ( "\t" . join ( ( marker , ref ) ) ) | %prog refallele vcffile > out . refAllele |
12,111 | def location ( args ) : from jcvi . formats . bed import BedLine from jcvi . graphics . histogram import stem_leaf_plot p = OptionParser ( location . __doc__ ) p . add_option ( "--dist" , default = 100 , type = "int" , help = "Distance cutoff to call 5` and 3` [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , fastafile = args dist = opts . dist sizes = Sizes ( fastafile ) . mapping fp = open ( bedfile ) fiveprime = threeprime = total = 0 percentages = [ ] for row in fp : b = BedLine ( row ) pos = b . start size = sizes [ b . seqid ] if pos < dist : fiveprime += 1 if size - pos < dist : threeprime += 1 total += 1 percentages . append ( 100 * pos / size ) m = "Five prime (within {0}bp of start codon): {1}\n" . format ( dist , fiveprime ) m += "Three prime (within {0}bp of stop codon): {1}\n" . format ( dist , threeprime ) m += "Total: {0}" . format ( total ) print ( m , file = sys . stderr ) bins = 10 title = "Locations within the gene [0=Five-prime, 100=Three-prime]" stem_leaf_plot ( percentages , 0 , 100 , bins , title = title ) | %prog location bedfile fastafile |
12,112 | def liftover ( args ) : p = OptionParser ( liftover . __doc__ ) p . add_option ( "--newid" , default = False , action = "store_true" , help = "Make new identifiers" ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) oldvcf , chainfile , newvcf = args ul = UniqueLiftover ( chainfile ) num_excluded = 0 fp = open ( oldvcf ) fw = open ( newvcf , "w" ) for row in fp : row = row . strip ( ) if row [ 0 ] == '#' : if row . startswith ( "##source=" ) : row = "##source={0}" . format ( __file__ ) elif row . startswith ( "##reference=" ) : row = "##reference=hg38" elif row . startswith ( "##contig=" ) : continue print ( row . strip ( ) , file = fw ) continue v = VcfLine ( row ) if v . seqid == "MT" : v . seqid = "chrM" print ( v , file = fw ) continue try : new_chrom , new_pos = ul . liftover_cpra ( CM [ v . seqid ] , v . pos ) except : num_excluded += 1 continue if new_chrom != None and new_pos != None : v . seqid , v . pos = new_chrom , new_pos if opts . newid : v . rsid = "{0}:{1}" . format ( new_chrom . replace ( "chr" , "" ) , new_pos ) print ( v , file = fw ) else : num_excluded += 1 logging . debug ( "Excluded {0}" . format ( num_excluded ) ) | %prog liftover old . vcf hg19ToHg38 . over . chain . gz new . vcf |
12,113 | def multilineplot ( args ) : p = OptionParser ( multilineplot . __doc__ ) p . add_option ( "--lines" , help = "Features to plot in lineplot [default: %default]" ) p . add_option ( "--colors" , help = "List of colors matching number of input bed files" ) p . add_option ( "--mode" , default = "span" , choices = ( "span" , "count" , "score" ) , help = "Accumulate feature based on [default: %default]" ) p . add_option ( "--binned" , default = False , action = "store_true" , help = "Specify whether the input is already binned; " + "if True, input files are considered to be binfiles" ) p . add_option ( "--ymax" , type = "int" , help = "Set Y-axis max" ) add_window_options ( p ) opts , args , iopts = p . set_image_options ( args , figsize = "8x5" ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) fastafile , chr = args window , shift , subtract , merge = check_window_options ( opts ) linebeds = [ ] colors = opts . colors if opts . lines : lines = opts . lines . split ( "," ) assert len ( colors ) == len ( lines ) , "Number of chosen colors must match" + " number of input bed files" linebeds = get_beds ( lines , binned = opts . binned ) linebins = get_binfiles ( linebeds , fastafile , shift , mode = opts . mode , binned = opts . binned , merge = merge ) clen = Sizes ( fastafile ) . mapping [ chr ] nbins = get_nbins ( clen , shift ) plt . rcParams [ "xtick.major.size" ] = 0 plt . rcParams [ "ytick.major.size" ] = 0 plt . rcParams [ "figure.figsize" ] = iopts . w , iopts . h fig , axarr = plt . subplots ( nrows = len ( lines ) ) if len ( linebeds ) == 1 : axarr = ( axarr , ) fig . suptitle ( latex ( chr ) , color = "darkslategray" ) for i , ax in enumerate ( axarr ) : lineplot ( ax , [ linebins [ i ] ] , nbins , chr , window , shift , color = "{0}{1}" . format ( colors [ i ] , 'r' ) ) if opts . ymax : ax . set_ylim ( 0 , opts . ymax ) plt . subplots_adjust ( hspace = 0.5 ) image_name = chr + "." + iopts . format savefig ( image_name , dpi = iopts . 
dpi , iopts = iopts ) | %prog multilineplot fastafile chr1 |
12,114 | def _needle ( fa , fb , needlefile , a , b , results ) : from Bio . Emboss . Applications import NeedleCommandline needle_cline = NeedleCommandline ( asequence = fa , bsequence = fb , gapopen = 10 , gapextend = 0.5 , outfile = needlefile ) stdout , stderr = needle_cline ( ) nh = NeedleHeader ( needlefile ) FileShredder ( [ fa , fb , needlefile ] , verbose = False ) r = [ "\t" . join ( ( a , b , nh . identity , nh . score ) ) ] results . extend ( r ) | Run single needle job |
12,115 | def needle ( args ) : from jcvi . formats . fasta import Fasta , SeqIO p = OptionParser ( needle . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) manager = mp . Manager ( ) results = manager . list ( ) needle_pool = mp . Pool ( processes = mp . cpu_count ( ) ) pairsfile , apep , bpep = args afasta , bfasta = Fasta ( apep ) , Fasta ( bpep ) fp = must_open ( pairsfile ) for i , row in enumerate ( fp ) : a , b = row . split ( ) a , b = afasta [ a ] , bfasta [ b ] fa , fb = must_open ( "{0}_{1}_a.fasta" . format ( pairsfile , i ) , "w" ) , must_open ( "{0}_{1}_b.fasta" . format ( pairsfile , i ) , "w" ) SeqIO . write ( [ a ] , fa , "fasta" ) SeqIO . write ( [ b ] , fb , "fasta" ) fa . close ( ) fb . close ( ) needlefile = "{0}_{1}_ab.needle" . format ( pairsfile , i ) needle_pool . apply_async ( _needle , ( fa . name , fb . name , needlefile , a . id , b . id , results ) ) needle_pool . close ( ) needle_pool . join ( ) fp . close ( ) scoresfile = "{0}.scores" . format ( pairsfile . rsplit ( "." ) [ 0 ] ) fw = must_open ( scoresfile , "w" ) for result in results : print ( result , file = fw ) fw . close ( ) | %prog needle nw . pairs a . pep . fasta b . pep . fasta |
12,116 | def maker ( args ) : from jcvi . formats . base import SetFile , FileShredder A , T , P = "ABINITIO_PREDICTION" , "TRANSCRIPT" , "PROTEIN" Registry = { "maker" : ( A , 5 ) , "augustus_masked" : ( A , 1 ) , "snap_masked" : ( A , 1 ) , "genemark" : ( A , 1 ) , "est2genome" : ( T , 5 ) , "est_gff" : ( T , 5 ) , "protein2genome" : ( P , 5 ) , "blastx" : ( P , 1 ) } p = OptionParser ( maker . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) gffile , fastafile = args types = "type.ids" if need_update ( gffile , types ) : cmd = "cut -f2 -s {0} | sort -u" . format ( gffile ) sh ( cmd , outfile = types ) types = SetFile ( types ) reg = defaultdict ( list ) weightsfile = "weights.txt" contents = [ ] for s in types : rs = s . split ( ":" ) [ 0 ] if rs not in Registry : continue type , weight = Registry [ rs ] reg [ type ] . append ( s ) contents . append ( "\t" . join ( str ( x ) for x in ( type , s , weight ) ) ) contents = "\n" . join ( sorted ( contents ) ) write_file ( weightsfile , contents ) evs = [ x + ".gff" for x in ( A , T , P ) ] FileShredder ( evs ) for type , tracks in reg . items ( ) : for t in tracks : cmd = "grep '\t{0}' {1} | grep -v '_match\t' >> {2}.gff" . format ( t , gffile , type ) sh ( cmd ) partition ( evs ) runfile = "run.sh" contents = EVMRUN . format ( * evs ) write_file ( runfile , contents ) | %prog maker maker . gff3 genome . fasta |
12,117 | def tigrload ( args ) : p = OptionParser ( tigrload . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) db , ev_type = args runfile = "load.sh" contents = EVMLOAD . format ( db , ev_type ) write_file ( runfile , contents ) | %prog tigrload db ev_type |
12,118 | def pasa ( args ) : p = OptionParser ( pasa . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) pasa_db , fastafile = args termexons = "pasa.terminal_exons.gff3" if need_update ( fastafile , termexons ) : cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi" cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"' . format ( pasa_db ) cmd += ' -g {0}' . format ( fastafile ) sh ( cmd ) cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl" cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff" sh ( cmd , outfile = termexons ) return termexons | %prog pasa pasa_db fastafile |
12,119 | def tigrprepare ( args ) : p = OptionParser ( tigrprepare . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 4 : sys . exit ( not p . print_help ( ) ) fastafile , asmbl_id , db , pasa_db = args if asmbl_id == 'all' : idsfile = fastafile + ".ids" if need_update ( fastafile , idsfile ) : ids ( [ fastafile , "-o" , idsfile ] ) else : idsfile = asmbl_id oneid = open ( idsfile ) . next ( ) . strip ( ) weightsfile = "weights.txt" if need_update ( idsfile , weightsfile ) : cmd = "$EVM/TIGR-only/create_sample_weights_file.dbi" cmd += " {0} {1} | tee weights.txt" . format ( db , oneid ) sh ( cmd ) evs = [ "gene_predictions.gff3" , "transcript_alignments.gff3" , "protein_alignments.gff3" ] if need_update ( weightsfile , evs ) : cmd = "$EVM/TIGR-only/write_GFF3_files.dbi" cmd += " --db {0} --asmbl_id {1} --weights {2}" . format ( db , idsfile , weightsfile ) sh ( cmd ) evs [ 1 ] = fix_transcript ( ) partition ( evs ) runfile = "run.sh" contents = EVMRUN . format ( * evs ) write_file ( runfile , contents ) | %prog tigrprepare asmbl . fasta asmbl . ids db pasa . terminal_exons . gff3 |
12,120 | def uniq ( args ) : p = OptionParser ( uniq . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) gffile , cdsfasta = args gff = Gff ( gffile ) sizes = Sizes ( cdsfasta ) . mapping gene_register = { } for g in gff : if g . type != "mRNA" : continue aed = float ( g . attributes [ "_AED" ] [ 0 ] ) gene_register [ g . parent ] = ( 1 - aed ) * sizes [ g . accn ] allgenes = import_feats ( gffile ) g = get_piles ( allgenes ) bestids = set ( ) for group in g : ranges = [ to_range ( x , score = gene_register [ x . accn ] , id = x . accn ) for x in group ] selected_chain , score = range_chain ( ranges ) bestids |= set ( x . id for x in selected_chain ) removed = set ( x . accn for x in allgenes ) - bestids fw = open ( "removed.ids" , "w" ) print ( "\n" . join ( sorted ( removed ) ) , file = fw ) fw . close ( ) populate_children ( opts . outfile , bestids , gffile , "gene" ) | %prog uniq gffile cdsfasta |
12,121 | def nmd ( args ) : import __builtin__ from jcvi . utils . cbook import enumerate_reversed p = OptionParser ( nmd . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) gffile , = args gff = make_index ( gffile ) fw = must_open ( opts . outfile , "w" ) for gene in gff . features_of_type ( 'gene' , order_by = ( 'seqid' , 'start' ) ) : _enumerate = __builtin__ . enumerate if gene . strand == "-" else enumerate_reversed for mrna in gff . children ( gene , featuretype = 'mRNA' , order_by = ( 'start' ) ) : tracker = dict ( ) tracker [ 'exon' ] = list ( gff . children ( mrna , featuretype = 'exon' , order_by = ( 'start' ) ) ) tracker [ 'cds' ] = [ None ] * len ( tracker [ 'exon' ] ) tcds_pos = None for i , exon in _enumerate ( tracker [ 'exon' ] ) : for cds in gff . region ( region = exon , featuretype = 'CDS' , completely_within = True ) : if mrna . id in cds [ 'Parent' ] : tracker [ 'cds' ] [ i ] = cds tcds_pos = i break if tcds_pos : break NMD , distance = False , 0 if ( mrna . strand == "+" and tcds_pos + 1 < len ( tracker [ 'exon' ] ) ) or ( mrna . strand == "-" and tcds_pos - 1 >= 0 ) : tcds = tracker [ 'cds' ] [ tcds_pos ] texon = tracker [ 'exon' ] [ tcds_pos ] PTC = tcds . end if mrna . strand == '+' else tcds . start TDSS = texon . end if mrna . strand == '+' else texon . start distance = abs ( TDSS - PTC ) NMD = True if distance > 50 else False print ( "\t" . join ( str ( x ) for x in ( gene . id , mrna . id , gff . children_bp ( mrna , child_featuretype = 'CDS' ) , distance , NMD ) ) , file = fw ) fw . close ( ) | %prog nmd gffile |
12,122 | def print_edges ( G , bed , families ) : symbols = { '+' : '>' , '-' : '<' } for seqid , bs in bed . sub_beds ( ) : prev_node , prev_strand = None , '+' for b in bs : accn = b . accn strand = b . strand node = "=" . join ( families [ accn ] ) if prev_node : print ( "{}{}--{}{}" . format ( prev_node , symbols [ prev_strand ] , symbols [ strand ] , node ) ) prev_node , prev_strand = node , strand | Instead of going through the graph construction just print the edges . |
12,123 | def adjgraph ( args ) : import pygraphviz as pgv from jcvi . utils . iter import pairwise from jcvi . formats . base import SetFile p = OptionParser ( adjgraph . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) infile , subgraph = args subgraph = SetFile ( subgraph ) subgraph = set ( x . strip ( "-" ) for x in subgraph ) G = pgv . AGraph ( strict = False ) SG = pgv . AGraph ( strict = False ) palette = ( "green" , "magenta" , "tomato" , "peachpuff" ) fp = open ( infile ) genome_id = - 1 key = 0 for row in fp : if row . strip ( ) == "" : continue atoms = row . split ( ) tag = atoms [ 0 ] if tag in ( "ChrNumber" , "chr" ) : continue if tag == "genome" : genome_id += 1 gcolor = palette [ genome_id ] continue nodeseq = [ ] for p in atoms : np = p . strip ( "-" ) nodeL , nodeR = np + "L" , np + "R" if p [ 0 ] == "-" : nodeseq += [ nodeR , nodeL ] else : nodeseq += [ nodeL , nodeR ] for a , b in pairwise ( nodeseq ) : G . add_edge ( a , b , key , color = gcolor ) key += 1 na , nb = a [ : - 1 ] , b [ : - 1 ] if na not in subgraph and nb not in subgraph : continue SG . add_edge ( a , b , key , color = gcolor ) G . graph_attr . update ( dpi = "300" ) fw = open ( "graph.dot" , "w" ) G . write ( fw ) fw . close ( ) fw = open ( "subgraph.dot" , "w" ) SG . write ( fw ) fw . close ( ) | %prog adjgraph adjacency . txt subgraph . txt |
12,124 | def pairs ( args ) : p = OptionParser ( pairs . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) anchorfile , prefix = args outfile = prefix + ".pairs" fw = open ( outfile , "w" ) af = AnchorFile ( anchorfile ) blocks = af . blocks pad = len ( str ( len ( blocks ) ) ) npairs = 0 for i , block in enumerate ( blocks ) : block_id = "{0}{1:0{2}d}" . format ( prefix , i + 1 , pad ) lines = [ ] for q , s , score in block : npairs += 1 score = score . replace ( 'L' , '' ) lines . append ( "\t" . join ( ( q , s , score , block_id ) ) ) print ( "\n" . join ( sorted ( lines ) ) , file = fw ) fw . close ( ) logging . debug ( "A total of {0} pairs written to `{1}`." . format ( npairs , outfile ) ) | %prog pairs anchorsfile prefix |
12,125 | def zipbed ( args ) : p = OptionParser ( zipbed . __doc__ ) p . add_option ( "--prefix" , default = "b" , help = "Prefix for the new seqid [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , anchorfile = args prefix = opts . prefix bed = Bed ( bedfile ) order = bed . order newbedfile = prefix + ".bed" fw = open ( newbedfile , "w" ) af = AnchorFile ( anchorfile ) blocks = af . blocks pad = len ( str ( len ( blocks ) ) ) for i , block in enumerate ( blocks ) : block_id = "{0}{1:0{2}d}" . format ( prefix , i + 1 , pad ) pairs = [ ] for q , s , score in block : qi , q = order [ q ] si , s = order [ s ] pairs . append ( ( qi , si ) ) newbed = list ( interleave_pairs ( pairs ) ) for i , b in enumerate ( newbed ) : accn = bed [ b ] . accn print ( "\t" . join ( str ( x ) for x in ( block_id , i , i + 1 , accn ) ) , file = fw ) logging . debug ( "Reconstructed bedfile written to `{0}`." . format ( newbedfile ) ) | %prog zipbed species . bed collinear . anchors |
12,126 | def collinear ( args ) : p = OptionParser ( collinear . __doc__ ) p . set_beds ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) anchorfile , = args qbed , sbed , qorder , sorder , is_self = check_beds ( anchorfile , p , opts ) af = AnchorFile ( anchorfile ) newanchorfile = anchorfile . rsplit ( "." , 1 ) [ 0 ] + ".collinear.anchors" fw = open ( newanchorfile , "w" ) blocks = af . blocks for block in blocks : print ( "#" * 3 , file = fw ) iblock = [ ] for q , s , score in block : qi , q = qorder [ q ] si , s = sorder [ s ] score = int ( long ( score ) ) iblock . append ( [ qi , si , score ] ) block = get_collinear ( iblock ) for q , s , score in block : q = qbed [ q ] . accn s = sbed [ s ] . accn print ( "\t" . join ( ( q , s , str ( score ) ) ) , file = fw ) fw . close ( ) | %prog collinear a . b . anchors |
12,127 | def counts ( args ) : p = OptionParser ( counts . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) vcffile , = args vcf_reader = vcf . Reader ( open ( vcffile ) ) for r in vcf_reader : v = CPRA ( r ) if not v . is_valid : continue for sample in r . samples : ro = sample [ "RO" ] ao = sample [ "AO" ] print ( "\t" . join ( str ( x ) for x in ( v , ro , ao ) ) ) | %prog counts vcffile |
12,128 | def prepare ( args ) : p = OptionParser ( prepare . __doc__ ) p . add_option ( "--accuracy" , default = .85 , help = "Sequencing per-base accuracy" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) vcffile , bamfile = args right = "{:.2f}" . format ( opts . accuracy ) wrong = "{:.2f}" . format ( 1 - opts . accuracy ) vcf_reader = vcf . Reader ( open ( vcffile ) ) variants = [ ] for r in vcf_reader : v = CPRA ( r ) if not v . is_valid : continue variants . append ( v ) logging . debug ( "A total of {} bi-allelic SNVs imported from `{}`" . format ( len ( variants ) , vcffile ) ) bamfile = pysam . AlignmentFile ( bamfile , "rb" ) for v in variants : pos = v . pos - 1 for column in bamfile . pileup ( v . chr , pos , pos + 1 , truncate = True ) : for read in column . pileups : query_position = read . query_position if query_position is None : continue read_name = read . alignment . query_name query_base = read . alignment . query_sequence [ query_position ] a , b = v . alleles if query_base == a : other_base = b elif query_base == b : other_base = a else : continue print ( " " . join ( str ( x ) for x in ( v , read_name , query_base , right , other_base , wrong ) ) ) | %prog prepare vcffile bamfile |
12,129 | def is_valid ( self ) : return len ( self . ref ) == 1 and len ( self . alt ) == 1 and len ( self . alt [ 0 ] ) == 1 | Only retain SNPs or single indels and are bi - allelic |
12,130 | def _number_finder ( s , regex , numconv ) : s = regex . split ( s ) if len ( s ) == 1 : return tuple ( s ) s = remove_empty ( s ) for i in range ( len ( s ) ) : try : s [ i ] = numconv ( s [ i ] ) except ValueError : pass if not isinstance ( s [ 0 ] , six . string_types ) : return [ '' ] + s else : return s | Helper to split numbers |
12,131 | def index_natsorted ( seq , key = lambda x : x , number_type = float , signed = True , exp = True ) : from operator import itemgetter item1 = itemgetter ( 1 ) index_seq_pair = [ [ x , key ( y ) ] for x , y in zip ( range ( len ( seq ) ) , seq ) ] index_seq_pair . sort ( key = lambda x : natsort_key ( item1 ( x ) , number_type = number_type , signed = signed , exp = exp ) ) return [ x [ 0 ] for x in index_seq_pair ] | \ Sorts a sequence naturally but returns a list of sorted the indeces and not the sorted list . |
12,132 | def batchseeds ( args ) : from jcvi . formats . pdf import cat xargs = args [ 1 : ] p = OptionParser ( batchseeds . __doc__ ) opts , args , iopts = add_seeds_options ( p , args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) folder , = args folder = folder . rstrip ( '/' ) outdir = folder + "-debug" outfile = folder + "-output.tsv" assert op . isdir ( folder ) images = [ ] jsonfile = opts . calibrate or op . join ( folder , "calibrate.json" ) if not op . exists ( jsonfile ) : jsonfile = None for im in iglob ( folder , "*.jpg,*.JPG,*.png" ) : if im . endswith ( ( ".resize.jpg" , ".main.jpg" , ".label.jpg" ) ) : continue if op . basename ( im ) . startswith ( "calibrate" ) : continue images . append ( im ) fw = must_open ( outfile , 'w' ) print ( Seed . header ( calibrate = jsonfile ) , file = fw ) nseeds = 0 for im in images : imargs = [ im , "--noheader" , "--outdir={0}" . format ( outdir ) ] + xargs if jsonfile : imargs += [ "--calibrate={0}" . format ( jsonfile ) ] objects = seeds ( imargs ) for o in objects : print ( o , file = fw ) nseeds += len ( objects ) fw . close ( ) logging . debug ( "Processed {0} images." . format ( len ( images ) ) ) logging . debug ( "A total of {0} objects written to `{1}`." . format ( nseeds , outfile ) ) pdfs = iglob ( outdir , "*.pdf" ) outpdf = folder + "-output.pdf" cat ( pdfs + [ "--outfile={0}" . format ( outpdf ) ] ) logging . debug ( "Debugging information written to `{0}`." . format ( outpdf ) ) return outfile | %prog batchseeds folder |
12,133 | def filterbedgraph ( args ) : p = OptionParser ( filterbedgraph . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedgraphfile , cutoff = args c = float ( cutoff ) fp = open ( bedgraphfile ) pf = bedgraphfile . rsplit ( "." , 1 ) [ 0 ] filteredbed = pf + ".filtered-{}.bed" . format ( cutoff ) fw = open ( filteredbed , "w" ) nfiltered = ntotal = 0 for row in fp : b = BedLine ( row ) ntotal += 1 if float ( b . accn ) >= c : print ( b , file = fw ) nfiltered += 1 fw . close ( ) logging . debug ( "A total of {} intervals (score >= {}) written to `{}`" . format ( percentage ( nfiltered , ntotal ) , cutoff , filteredbed ) ) mergeBed ( filteredbed , sorted = True , delim = None ) | %prog filterbedgraph a . bedgraph 1 |
12,134 | def tiling ( args ) : p = OptionParser ( tiling . __doc__ ) p . add_option ( "--overlap" , default = 3000 , type = "int" , help = "Minimum amount of overlaps required" ) p . set_verbose ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args ov = opts . overlap bed = Bed ( bedfile ) inf = len ( bed ) selected = Bed ( ) for seqid , sbed in bed . sub_beds ( ) : g = Grouper ( ) current = sbed [ 0 ] for a in sbed : g . join ( a ) if a . start < current . end - ov : g . join ( a , current ) if a . end > current . end : current = a for gbed in g : end = max ( x . end for x in gbed ) gbed . sort ( key = lambda x : ( x . start , - x . end ) ) entries = len ( gbed ) counts = [ inf ] * entries counts [ 0 ] = 1 traceback = [ - 1 ] * entries for i , a in enumerate ( gbed ) : for j in xrange ( i + 1 , entries ) : b = gbed [ j ] if b . start >= a . end - ov : break if counts [ i ] + 1 < counts [ j ] : counts [ j ] = counts [ i ] + 1 traceback [ j ] = i endi = [ i for i , a in enumerate ( gbed ) if a . end == end ] last = min ( ( traceback [ i ] , i ) for i in endi ) [ 1 ] chain = [ ] while last != - 1 : chain . append ( last ) last = traceback [ last ] chain = chain [ : : - 1 ] selected . extend ( [ gbed [ x ] for x in chain ] ) if opts . verbose : print ( counts ) print ( traceback ) print ( chain ) print ( "\n" . join ( str ( x ) for x in gbed ) ) print ( "*" * 30 ) print ( "\n" . join ( str ( gbed [ x ] ) for x in chain ) ) print ( ) tilingbedfile = bedfile . rsplit ( "." , 1 ) [ 0 ] + ".tiling.bed" selected . print_to_file ( filename = tilingbedfile , sorted = True ) logging . debug ( "A total of {} tiling features written to `{}`" . format ( len ( selected ) , tilingbedfile ) ) | %prog tiling bedfile |
12,135 | def chain ( args ) : p = OptionParser ( chain . __doc__ ) p . add_option ( "--dist" , default = 100000 , help = "Chaining distance" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args cmd = "sort -k4,4 -k1,1 -k2,2n -k3,3n {0} -o {0}" . format ( bedfile ) sh ( cmd ) bed = Bed ( bedfile , sorted = False ) newbed = Bed ( ) for accn , bb in groupby ( bed , key = lambda x : x . accn ) : bb = list ( bb ) g = Grouper ( ) for a in bb : g . join ( a ) for a , b in pairwise ( bb ) : if a . seqid == b . seqid and b . start - a . end < opts . dist : g . join ( a , b ) data = [ ] for p in g : seqid = p [ 0 ] . seqid start = min ( x . start for x in p ) end = max ( x . end for x in p ) score = sum ( x . span for x in p ) data . append ( ( seqid , start - 1 , end , accn , score ) ) d = max ( data , key = lambda x : x [ - 1 ] ) newbed . append ( BedLine ( "\t" . join ( str ( x ) for x in d ) ) ) newbed . print_to_file ( opts . outfile , sorted = True ) | %prog chain bedfile |
12,136 | def density ( args ) : p = OptionParser ( density . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , fastafile = args bed = Bed ( bedfile ) sizes = Sizes ( fastafile ) . mapping header = "seqid features size density_per_Mb" . split ( ) print ( "\t" . join ( header ) ) for seqid , bb in bed . sub_beds ( ) : nfeats = len ( bb ) size = sizes [ seqid ] ds = nfeats * 1e6 / size print ( "\t" . join ( str ( x ) for x in ( seqid , nfeats , size , "{0:.1f}" . format ( ds ) ) ) ) | %prog density bedfile ref . fasta |
12,137 | def alignextend ( args ) : p = OptionParser ( alignextend . __doc__ ) p . add_option ( "--len" , default = 100 , type = "int" , help = "Extend to this length" ) p . add_option ( "--qv" , default = 31 , type = "int" , help = "Dummy qv score for extended bases" ) p . add_option ( "--bedonly" , default = False , action = "store_true" , help = "Only generate bed files, no FASTA" ) p . set_bedpe ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedpe , ref = args qvchar = chr ( opts . qv + 33 ) pf = bedpe . split ( "." ) [ 0 ] filtered = bedpe + ".filtered" if need_update ( bedpe , filtered ) : filter_bedpe ( bedpe , filtered , ref , rc = opts . rc , minlen = opts . minlen , maxlen = opts . maxlen , rlen = opts . rlen ) rmdup = filtered + ".filtered.sorted.rmdup" if need_update ( filtered , rmdup ) : rmdup_bedpe ( filtered , rmdup , dupwiggle = opts . dup ) if opts . bedonly : return bed1 , bed2 = pf + ".1e.bed" , pf + ".2e.bed" if need_update ( rmdup , ( bed1 , bed2 ) ) : sh ( "cut -f1-3,7-9 {0}" . format ( rmdup ) , outfile = bed1 ) sh ( "cut -f4-6,7-8,10 {0}" . format ( rmdup ) , outfile = bed2 ) sfa1 , sfa2 = pf + ".1e.sfa" , pf + ".2e.sfa" if need_update ( ( bed1 , bed2 , ref ) , ( sfa1 , sfa2 ) ) : for bed in ( bed1 , bed2 ) : fastaFromBed ( bed , ref , name = True , tab = True , stranded = True ) fq1 , fq2 = pf + ".1e.fq" , pf + ".2e.fq" if need_update ( ( sfa1 , sfa2 ) , ( fq1 , fq2 ) ) : for sfa in ( sfa1 , sfa2 ) : sfa_to_fq ( sfa , qvchar ) | %prog alignextend bedpefile ref . fasta |
12,138 | def seqids ( args ) : p = OptionParser ( seqids . __doc__ ) p . add_option ( "--maxn" , default = 100 , type = "int" , help = "Maximum number of seqids" ) p . add_option ( "--prefix" , help = "Seqids must start with" ) p . add_option ( "--exclude" , default = "random" , help = "Seqids should not contain" ) opts , args = p . parse_args ( args ) if len ( args ) < 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args pf = opts . prefix exclude = opts . exclude bed = Bed ( bedfile ) s = bed . seqids if pf : s = [ x for x in s if x . startswith ( pf ) ] if exclude : s = [ x for x in s if not exclude in x ] s = s [ : opts . maxn ] print ( "," . join ( s ) ) | %prog seqids bedfile |
12,139 | def random ( args ) : from random import sample from jcvi . formats . base import flexible_cast p = OptionParser ( random . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , N = args assert is_number ( N ) b = Bed ( bedfile ) NN = flexible_cast ( N ) if NN < 1 : NN = int ( round ( NN * len ( b ) ) ) beds = sample ( b , NN ) new_bed = Bed ( ) new_bed . extend ( beds ) outfile = bedfile . rsplit ( "." , 1 ) [ 0 ] + ".{0}.bed" . format ( N ) new_bed . print_to_file ( outfile ) logging . debug ( "Write {0} features to `{1}`" . format ( NN , outfile ) ) | %prog random bedfile number_of_features |
12,140 | def filter ( args ) : p = OptionParser ( filter . __doc__ ) p . add_option ( "--minsize" , default = 0 , type = "int" , help = "Minimum feature length" ) p . add_option ( "--maxsize" , default = 1000000000 , type = "int" , help = "Maximum feature length" ) p . add_option ( "--minaccn" , type = "int" , help = "Minimum value of accn, useful to filter based on coverage" ) p . add_option ( "--minscore" , type = "int" , help = "Minimum score" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args fp = must_open ( bedfile ) fw = must_open ( opts . outfile , "w" ) minsize , maxsize = opts . minsize , opts . maxsize minaccn = opts . minaccn minscore = opts . minscore total = [ ] keep = [ ] for row in fp : try : b = BedLine ( row ) except IndexError : print ( row . strip ( ) , file = fw ) continue span = b . span total . append ( span ) if not minsize <= span <= maxsize : continue if minaccn and int ( b . accn ) < minaccn : continue if minscore and int ( b . score ) < minscore : continue print ( b , file = fw ) keep . append ( span ) logging . debug ( "Stats: {0} features kept." . format ( percentage ( len ( keep ) , len ( total ) ) ) ) logging . debug ( "Stats: {0} bases kept." . format ( percentage ( sum ( keep ) , sum ( total ) ) ) ) | %prog filter bedfile
12,141 | def mergebydepth ( args ) : p = OptionParser ( mergebydepth . __doc__ ) p . add_option ( "--mindepth" , default = 3 , type = "int" , help = "Minimum depth required" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , fastafile = args mindepth = opts . mindepth bedgraph = make_bedgraph ( bedfile ) bedgraphfiltered = bedgraph + ".d{0}" . format ( mindepth ) if need_update ( bedgraph , bedgraphfiltered ) : filter ( [ bedgraph , "--minaccn={0}" . format ( mindepth ) , "--outfile={0}" . format ( bedgraphfiltered ) ] ) merged = bedgraphfiltered + ".merge.fasta" if need_update ( bedgraphfiltered , merged ) : mergeBed ( bedgraphfiltered , sorted = True ) | %prog mergebydepth reads . bed genome . fasta |
12,142 | def depth ( args ) : p = OptionParser ( depth . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) readsbed , featsbed = args fp = open ( featsbed ) nargs = len ( fp . readline ( ) . split ( "\t" ) ) keepcols = "," . join ( str ( x ) for x in range ( 1 , nargs + 1 ) ) cmd = "coverageBed -a {0} -b {1} -d" . format ( readsbed , featsbed ) cmd += " | groupBy -g {0} -c {1} -o mean" . format ( keepcols , nargs + 2 ) sh ( cmd , outfile = opts . outfile ) | %prog depth reads . bed features . bed |
12,143 | def remove_isoforms ( ids ) : key = lambda x : x . rsplit ( "." , 1 ) [ 0 ] iso_number = lambda x : get_number ( x . split ( "." ) [ - 1 ] ) ids = sorted ( ids , key = key ) newids = [ ] for k , ii in groupby ( ids , key = key ) : min_i = min ( list ( ii ) , key = iso_number ) newids . append ( min_i ) return newids | This is more or less a hack to remove the GMAP multiple mappings . Multiple GMAP mappings can be seen given the names . mrna1 . mrna2 etc . |
12,144 | def longest ( args ) : from jcvi . formats . sizes import Sizes p = OptionParser ( longest . __doc__ ) p . add_option ( "--maxsize" , default = 20000 , type = "int" , help = "Limit max size" ) p . add_option ( "--minsize" , default = 60 , type = "int" , help = "Limit min size" ) p . add_option ( "--precedence" , default = "Medtr" , help = "Accessions with prefix take precedence" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , fastafile = args maxsize = opts . maxsize minsize = opts . minsize prec = opts . precedence mergedbed = mergeBed ( bedfile , nms = True ) sizes = Sizes ( fastafile ) . mapping bed = Bed ( mergedbed ) pf = bedfile . rsplit ( "." , 1 ) [ 0 ] ids = set ( ) for b in bed : accns = b . accn . split ( ";" ) prec_accns = [ x for x in accns if x . startswith ( prec ) ] if prec_accns : accns = prec_accns accn_sizes = [ ( sizes . get ( x , 0 ) , x ) for x in accns ] accn_sizes = [ ( size , x ) for size , x in accn_sizes if size < maxsize ] if not accn_sizes : continue max_size , max_accn = max ( accn_sizes ) if max_size < minsize : continue ids . add ( max_accn ) newids = remove_isoforms ( ids ) logging . debug ( "Remove isoforms: before={0} after={1}" . format ( len ( ids ) , len ( newids ) ) ) longestidsfile = pf + ".longest.ids" fw = open ( longestidsfile , "w" ) print ( "\n" . join ( newids ) , file = fw ) fw . close ( ) logging . debug ( "A total of {0} records written to `{1}`." . format ( len ( newids ) , longestidsfile ) ) longestbedfile = pf + ".longest.bed" some ( [ bedfile , longestidsfile , "--outfile={0}" . format ( longestbedfile ) , "--no_strip_names" ] ) | %prog longest bedfile fastafile |
12,145 | def merge ( args ) : p = OptionParser ( merge . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) < 1 : sys . exit ( not p . print_help ( ) ) bedfiles = args fw = must_open ( opts . outfile , "w" ) for bedfile in bedfiles : bed = Bed ( bedfile ) pf = op . basename ( bedfile ) . split ( "." ) [ 0 ] for b in bed : b . seqid = "_" . join ( ( pf , b . seqid ) ) print ( b , file = fw ) | %prog merge bedfiles > newbedfile |
12,146 | def fix ( args ) : p = OptionParser ( fix . __doc__ ) p . add_option ( "--minspan" , default = 0 , type = "int" , help = "Enforce minimum span [default: %default]" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args minspan = opts . minspan fp = open ( bedfile ) fw = must_open ( opts . outfile , "w" ) nfixed = nfiltered = ntotal = 0 for row in fp : atoms = row . strip ( ) . split ( "\t" ) assert len ( atoms ) >= 3 , "Must be at least 3 columns" seqid , start , end = atoms [ : 3 ] start , end = int ( start ) , int ( end ) orientation = '+' if start > end : start , end = end , start orientation = '-' nfixed += 1 atoms [ 1 : 3 ] = [ str ( start ) , str ( end ) ] if len ( atoms ) > 6 : atoms [ 6 ] = orientation line = "\t" . join ( atoms ) b = BedLine ( line ) if b . span >= minspan : print ( b , file = fw ) nfiltered += 1 ntotal += 1 if nfixed : logging . debug ( "Total fixed: {0}" . format ( percentage ( nfixed , ntotal ) ) ) if nfiltered : logging . debug ( "Total filtered: {0}" . format ( percentage ( nfiltered , ntotal ) ) ) | %prog fix bedfile > newbedfile |
12,147 | def some ( args ) : from jcvi . formats . base import SetFile from jcvi . utils . cbook import gene_name p = OptionParser ( some . __doc__ ) p . add_option ( "-v" , dest = "inverse" , default = False , action = "store_true" , help = "Get the inverse, like grep -v [default: %default]" ) p . set_outfile ( ) p . set_stripnames ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) bedfile , idsfile = args inverse = opts . inverse ostrip = opts . strip_names fw = must_open ( opts . outfile , "w" ) ids = SetFile ( idsfile ) if ostrip : ids = set ( gene_name ( x ) for x in ids ) bed = Bed ( bedfile ) ntotal = nkeep = 0 for b in bed : ntotal += 1 keep = b . accn in ids if inverse : keep = not keep if keep : nkeep += 1 print ( b , file = fw ) fw . close ( ) logging . debug ( "Stats: {0} features kept." . format ( percentage ( nkeep , ntotal ) ) ) | %prog some bedfile idsfile > newbedfile |
12,148 | def uniq ( args ) : from jcvi . formats . sizes import Sizes p = OptionParser ( uniq . __doc__ ) p . add_option ( "--sizes" , help = "Use sequence length as score" ) p . add_option ( "--mode" , default = "span" , choices = ( "span" , "score" ) , help = "Pile mode" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args uniqbedfile = bedfile . split ( "." ) [ 0 ] + ".uniq.bed" bed = Bed ( bedfile ) if opts . sizes : sizes = Sizes ( opts . sizes ) . mapping ranges = [ Range ( x . seqid , x . start , x . end , sizes [ x . accn ] , i ) for i , x in enumerate ( bed ) ] else : if opts . mode == "span" : ranges = [ Range ( x . seqid , x . start , x . end , x . end - x . start + 1 , i ) for i , x in enumerate ( bed ) ] else : ranges = [ Range ( x . seqid , x . start , x . end , float ( x . score ) , i ) for i , x in enumerate ( bed ) ] selected , score = range_chain ( ranges ) selected = [ x . id for x in selected ] selected_ids = set ( selected ) selected = [ bed [ x ] for x in selected ] notselected = [ x for i , x in enumerate ( bed ) if i not in selected_ids ] newbed = Bed ( ) newbed . extend ( selected ) newbed . print_to_file ( uniqbedfile , sorted = True ) if notselected : leftoverfile = bedfile . split ( "." ) [ 0 ] + ".leftover.bed" leftoverbed = Bed ( ) leftoverbed . extend ( notselected ) leftoverbed . print_to_file ( leftoverfile , sorted = True ) logging . debug ( "Imported: {0}, Exported: {1}" . format ( len ( bed ) , len ( newbed ) ) ) return uniqbedfile | %prog uniq bedfile |
12,149 | def pile ( args ) : from jcvi . utils . grouper import Grouper p = OptionParser ( pile . __doc__ ) p . add_option ( "--minOverlap" , default = 0 , type = "int" , help = "Minimum overlap required [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) abedfile , bbedfile = args iw = intersectBed_wao ( abedfile , bbedfile , minOverlap = opts . minOverlap ) groups = Grouper ( ) for a , b in iw : groups . join ( a . accn , b . accn ) ngroups = 0 for group in groups : if len ( group ) > 1 : ngroups += 1 print ( "|" . join ( group ) ) logging . debug ( "A total of {0} piles (>= 2 members)" . format ( ngroups ) ) | %prog pile abedfile bbedfile > piles |
12,150 | def index ( args ) : p = OptionParser ( index . __doc__ ) p . add_option ( "--fasta" , help = "Generate bedgraph and index" ) p . add_option ( "--query" , help = "Chromosome location" ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args fastafile = opts . fasta if fastafile : bedfile = make_bedgraph ( bedfile , fastafile ) bedfile = sort ( [ bedfile ] ) gzfile = bedfile + ".gz" if need_update ( bedfile , gzfile ) : cmd = "bgzip {0}" . format ( bedfile ) sh ( cmd ) tbifile = gzfile + ".tbi" if need_update ( gzfile , tbifile ) : cmd = "tabix -p bed {0}" . format ( gzfile ) sh ( cmd ) query = opts . query if not query : return cmd = "tabix {0} {1}" . format ( gzfile , query ) sh ( cmd , outfile = opts . outfile ) | %prog index bedfile |
12,151 | def evaluate ( args ) : from jcvi . formats . sizes import Sizes p = OptionParser ( evaluate . __doc__ ) p . add_option ( "--query" , help = "Chromosome location [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) prediction , reality , fastafile = args query = opts . query prediction = mergeBed ( prediction ) reality = mergeBed ( reality ) sizes = Sizes ( fastafile ) sizesfile = sizes . filename prediction_complement = complementBed ( prediction , sizesfile ) reality_complement = complementBed ( reality , sizesfile ) TPbed = intersectBed ( prediction , reality ) FPbed = intersectBed ( prediction , reality_complement ) FNbed = intersectBed ( prediction_complement , reality ) TNbed = intersectBed ( prediction_complement , reality_complement ) beds = ( TPbed , FPbed , FNbed , TNbed ) if query : subbeds = [ ] rr = query_to_range ( query , sizes ) ce = 'echo "{0}"' . format ( "\t" . join ( str ( x ) for x in rr ) ) for b in beds : subbed = "." . join ( ( b , query ) ) cmd = ce + " | intersectBed -a stdin -b {0}" . format ( b ) sh ( cmd , outfile = subbed ) subbeds . append ( subbed ) beds = subbeds be = BedEvaluate ( * beds ) print ( be , file = sys . stderr ) if query : for b in subbeds : os . remove ( b ) return be | %prog evaluate prediction . bed reality . bed fastafile |
12,152 | def refine ( args ) : p = OptionParser ( refine . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) abedfile , bbedfile , refinedbed = args fw = open ( refinedbed , "w" ) intersected = refined = 0 for a , b in intersectBed_wao ( abedfile , bbedfile ) : if b is None : print ( a , file = fw ) continue intersected += 1 aspan_before = a . span arange = ( a . start , a . end ) brange = ( b . start , b . end ) irange = range_intersect ( arange , brange ) a . start , a . end = irange aspan_after = a . span if aspan_before > aspan_after : refined += 1 print ( a , file = fw ) fw . close ( ) print ( "Total intersected: {0}" . format ( intersected ) , file = sys . stderr ) print ( "Total refined: {0}" . format ( refined ) , file = sys . stderr ) summary ( [ abedfile ] ) summary ( [ refinedbed ] ) | %prog refine bedfile1 bedfile2 refinedbed |
12,153 | def distance ( args ) : from jcvi . utils . iter import pairwise p = OptionParser ( distance . __doc__ ) p . add_option ( "--distmode" , default = "ss" , choices = ( "ss" , "ee" ) , help = "Distance mode between paired reads. ss is outer distance, " "ee is inner distance [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args sortedbedfile = sort ( [ bedfile ] ) valid = total = 0 fp = open ( sortedbedfile ) for a , b in pairwise ( fp ) : a = BedLine ( a ) b = BedLine ( b ) ar = ( a . seqid , a . start , a . end , "+" ) br = ( b . seqid , b . start , b . end , "+" ) dist , oo = range_distance ( ar , br , distmode = opts . distmode ) total += 1 if dist > 0 : print ( dist ) valid += 1 logging . debug ( "Total valid (> 0) distances: {0}." . format ( percentage ( valid , total ) ) ) | %prog distance bedfile |
12,154 | def bedpe ( args ) : from jcvi . assembly . coverage import bed_to_bedpe p = OptionParser ( bedpe . __doc__ ) p . add_option ( "--span" , default = False , action = "store_true" , help = "Write span bed file [default: %default]" ) p . add_option ( "--strand" , default = False , action = "store_true" , help = "Write the strand columns [default: %default]" ) p . add_option ( "--mates" , help = "Check the library stats from .mates file" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args pf = bedfile . rsplit ( "." , 1 ) [ 0 ] bedpefile = pf + ".bedpe" bedspanfile = pf + ".spans.bed" if opts . span else None bed_to_bedpe ( bedfile , bedpefile , pairsbedfile = bedspanfile , matesfile = opts . mates , strand = opts . strand ) return bedpefile , bedspanfile | %prog bedpe bedfile |
12,155 | def sizes ( args ) : p = OptionParser ( sizes . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args assert op . exists ( bedfile ) sizesfile = bedfile . rsplit ( "." , 1 ) [ 0 ] + ".sizes" fw = must_open ( sizesfile , "w" , checkexists = True , skipcheck = True ) if fw : b = Bed ( bedfile ) for s , sbeds in b . sub_beds ( ) : print ( "{0}\t{1}" . format ( s , max ( x . end for x in sbeds ) ) , file = fw ) logging . debug ( "Sizes file written to `{0}`." . format ( sizesfile ) ) return sizesfile | %prog sizes bedfile |
12,156 | def analyze_dists ( dists , cutoff = 1000 , alpha = .1 ) : peak0 = [ d for d in dists if d < cutoff ] peak1 = [ d for d in dists if d >= cutoff ] c0 , c1 = len ( peak0 ) , len ( peak1 ) logging . debug ( "Component counts: {0} {1}" . format ( c0 , c1 ) ) if c0 == 0 or c1 == 0 or float ( c1 ) / len ( dists ) < alpha : logging . debug ( "Single peak identified ({0} / {1} < {2})" . format ( c1 , len ( dists ) , alpha ) ) return np . median ( dists ) peak0_median = np . median ( peak0 ) peak1_median = np . median ( peak1 ) logging . debug ( "Dual peaks identified: {0}bp ({1}), {2}bp ({3}) (selected)" . format ( int ( peak0_median ) , c0 , int ( peak1_median ) , c1 ) ) return peak1_median | The dists can show bimodal distribution if they come from a mate - pair library . Assume bimodal distribution and then separate the two peaks . Based on the percentage in each peak we can decide if it is indeed one peak or two peaks and report the median respectively . |
12,157 | def summary ( args ) : p = OptionParser ( summary . __doc__ ) p . add_option ( "--sizes" , default = False , action = "store_true" , help = "Write .sizes file" ) p . add_option ( "--all" , default = False , action = "store_true" , help = "Write summary stats per seqid" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args bed = Bed ( bedfile ) bs = BedSummary ( bed ) if opts . sizes : sizesfile = bedfile + ".sizes" fw = open ( sizesfile , "w" ) for span , accn in bs . mspans : print ( span , file = fw ) fw . close ( ) logging . debug ( "Spans written to `{0}`." . format ( sizesfile ) ) return bs if not opts . all : bs . report ( ) return bs for seqid , subbeds in bed . sub_beds ( ) : bs = BedSummary ( subbeds ) print ( "\t" . join ( ( seqid , str ( bs ) ) ) ) | %prog summary bedfile |
12,158 | def sort ( args ) : p = OptionParser ( sort . __doc__ ) p . add_option ( "-i" , "--inplace" , dest = "inplace" , default = False , action = "store_true" , help = "Sort bed file in place [default: %default]" ) p . add_option ( "-u" , dest = "unique" , default = False , action = "store_true" , help = "Uniqify the bed file" ) p . add_option ( "--accn" , default = False , action = "store_true" , help = "Sort based on the accessions [default: %default]" ) p . set_outfile ( outfile = None ) p . set_tmpdir ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args inplace = opts . inplace if not inplace and ".sorted." in bedfile : return bedfile sortedbed = opts . outfile if inplace : sortedbed = bedfile elif opts . outfile is None : pf , sf = op . basename ( bedfile ) . rsplit ( "." , 1 ) sortedbed = pf + ".sorted." + sf sortopt = "-k1,1 -k2,2n -k3,3n -k4,4" if not opts . accn else "-k4,4 -k1,1 -k2,2n -k3,3n" cmd = "sort" if opts . tmpdir : cmd += " -T {0}" . format ( opts . tmpdir ) if opts . unique : cmd += " -u" cmd += " {0} {1} -o {2}" . format ( sortopt , bedfile , sortedbed ) if inplace or need_update ( bedfile , sortedbed ) : sh ( cmd ) return sortedbed | %prog sort bedfile |
12,159 | def mates ( args ) : p = OptionParser ( mates . __doc__ ) p . add_option ( "--lib" , default = False , action = "store_true" , help = "Output library information along with pairs [default: %default]" ) p . add_option ( "--nointra" , default = False , action = "store_true" , help = "Remove mates that are intra-scaffold [default: %default]" ) p . add_option ( "--prefix" , default = False , action = "store_true" , help = "Only keep links between IDs with same prefix [default: %default]" ) p . set_mates ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) bedfile , = args rclip = opts . rclip key = ( lambda x : x . accn [ : - rclip ] ) if rclip else ( lambda x : x . accn ) bed = Bed ( bedfile , key = key ) pf = bedfile . rsplit ( "." , 1 ) [ 0 ] matesfile = pf + ".mates" lib = pf if opts . lib else None fw = open ( matesfile , "w" ) if lib : bedfile , stats = pairs ( [ bedfile , "--rclip={0}" . format ( rclip ) , "--cutoff={0}" . format ( opts . cutoff ) ] ) sv = int ( 2 * stats . sd ) mindist = max ( stats . mean - sv , 1 ) maxdist = stats . mean + sv print ( "\t" . join ( str ( x ) for x in ( "library" , pf , mindist , maxdist ) ) , file = fw ) num_fragments = num_pairs = 0 matesbedfile = matesfile + ".bed" fwm = open ( matesbedfile , "w" ) for pe , lines in groupby ( bed , key = key ) : lines = list ( lines ) if len ( lines ) != 2 : num_fragments += len ( lines ) continue a , b = lines if opts . nointra and a . seqid == b . seqid : continue if opts . prefix : aprefix = a . seqid . split ( "_" ) [ 0 ] bprefix = b . seqid . split ( "_" ) [ 0 ] if aprefix != bprefix : continue num_pairs += 1 pair = [ a . accn , b . accn ] if lib : pair . append ( lib ) print ( "\t" . join ( pair ) , file = fw ) print ( a , file = fwm ) print ( b , file = fwm ) logging . debug ( "Discard {0} frags and write {1} pairs to `{2}` and `{3}`." . format ( num_fragments , num_pairs , matesfile , matesbedfile ) ) fw . close ( ) fwm . close ( ) return matesfile , matesbedfile | %prog mates bedfile
12,160 | def swapped ( self ) : args = [ getattr ( self , attr ) for attr in BlastLine . __slots__ [ : 12 ] ] args [ 0 : 2 ] = [ self . subject , self . query ] args [ 6 : 10 ] = [ self . sstart , self . sstop , self . qstart , self . qstop ] if self . orientation == '-' : args [ 8 ] , args [ 9 ] = args [ 9 ] , args [ 8 ] b = "\t" . join ( str ( x ) for x in args ) return BlastLine ( b ) | Swap query and subject . |
12,161 | def gff ( args ) : p = OptionParser ( gff . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) gbkfile , = args MultiGenBank ( gbkfile ) | %prog gff seq . gbk |
12,162 | def tee_lookahead ( t , i ) : for value in islice ( t . __copy__ ( ) , i , None ) : return value raise IndexError ( i ) | Inspect the i - th upcoming value from a tee object while leaving the tee object at its current position .
12,163 | def uniq ( args ) : p = OptionParser ( uniq . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) fastqfile , = args fw = must_open ( opts . outfile , "w" ) nduplicates = nreads = 0 seen = set ( ) for rec in iter_fastq ( fastqfile ) : nreads += 1 if rec is None : break name = rec . name if name in seen : nduplicates += 1 continue seen . add ( name ) print ( rec , file = fw ) logging . debug ( "Removed duplicate reads: {}" . format ( percentage ( nduplicates , nreads ) ) ) | %prog uniq fastqfile |
12,164 | def suffix ( args ) : p = OptionParser ( suffix . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) fastqfile , sf = args fw = must_open ( opts . outfile , "w" ) nreads = nselected = 0 for rec in iter_fastq ( fastqfile ) : nreads += 1 if rec is None : break if rec . seq . endswith ( sf ) : print ( rec , file = fw ) nselected += 1 logging . debug ( "Selected reads with suffix {0}: {1}" . format ( sf , percentage ( nselected , nreads ) ) ) | %prog suffix fastqfile CAG |
12,165 | def readlen ( args ) : p = OptionParser ( readlen . __doc__ ) p . set_firstN ( ) p . add_option ( "--silent" , default = False , action = "store_true" , help = "Do not print read length stats" ) p . add_option ( "--nocheck" , default = False , action = "store_true" , help = "Do not check file type suffix" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) f , = args if ( not opts . nocheck ) and ( not is_fastq ( f ) ) : logging . debug ( "File `{}` does not endswith .fastq or .fq" . format ( f ) ) return 0 s = calc_readlen ( f , opts . firstN ) if not opts . silent : print ( "\t" . join ( str ( x ) for x in ( f , s . min , s . max , s . mean , s . median ) ) ) return int ( s . max ) | %prog readlen fastqfile |
12,166 | def fasta ( args ) : p = OptionParser ( fasta . __doc__ ) p . add_option ( "--seqtk" , default = False , action = "store_true" , help = "Use seqtk to convert" ) p . set_outdir ( ) p . set_outfile ( outfile = None ) opts , args = p . parse_args ( args ) if len ( args ) < 1 : sys . exit ( not p . print_help ( ) ) fastqfiles = args outdir = opts . outdir if outdir and outdir != "." : mkdir ( outdir ) fastqfile = fastqfiles [ 0 ] pf = op . basename ( fastqfile ) gzinput = pf . endswith ( ".gz" ) if gzinput : pf = pf . rsplit ( "." , 1 ) [ 0 ] pf , sf = pf . rsplit ( "." , 1 ) if sf not in ( "fq" , "fastq" ) : logging . debug ( "Assumed FASTA: suffix not `fq` or `fastq`" ) return fastqfile , None fastafile , qualfile = pf + ".fasta" , pf + ".qual" outfile = opts . outfile or fastafile outfile = op . join ( outdir , outfile ) if opts . seqtk : if need_update ( fastqfiles , outfile ) : for i , fastqfile in enumerate ( fastqfiles ) : cmd = "seqtk seq -A {0} -L 30 -l 70" . format ( fastqfile ) sh ( cmd , outfile = outfile , append = i ) else : logging . debug ( "Outfile `{0}` already exists." . format ( outfile ) ) return outfile , None for fastqfile in fastqfiles : SeqIO . convert ( fastqfile , "fastq" , fastafile , "fasta" ) SeqIO . convert ( fastqfile , "fastq" , qualfile , "qual" ) return fastafile , qualfile | %prog fasta fastqfiles |
12,167 | def filter ( args ) : p = OptionParser ( filter . __doc__ ) p . add_option ( "-q" , dest = "qv" , default = 20 , type = "int" , help = "Minimum quality score to keep [default: %default]" ) p . add_option ( "-p" , dest = "pct" , default = 95 , type = "int" , help = "Minimum percent of bases that have [-q] quality " "[default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) not in ( 1 , 2 ) : sys . exit ( not p . print_help ( ) ) if len ( args ) == 1 : r1 = r2 = args [ 0 ] else : r1 , r2 = args qv = opts . qv pct = opts . pct offset = guessoffset ( [ r1 ] ) qvchar = chr ( offset + qv ) logging . debug ( "Call base qv >= {0} as good." . format ( qvchar ) ) outfile = r1 . rsplit ( "." , 1 ) [ 0 ] + ".q{0}.paired.fastq" . format ( qv ) fw = open ( outfile , "w" ) p1fp , p2fp = FastqPairedIterator ( r1 , r2 ) while True : a = list ( islice ( p1fp , 4 ) ) if not a : break b = list ( islice ( p2fp , 4 ) ) q1 = a [ - 1 ] . rstrip ( ) q2 = b [ - 1 ] . rstrip ( ) if isHighQv ( q1 , qvchar , pct = pct ) and isHighQv ( q2 , qvchar , pct = pct ) : fw . writelines ( a ) fw . writelines ( b ) | %prog filter paired . fastq |
12,168 | def shuffle ( args ) : p = OptionParser ( shuffle . __doc__ ) p . set_tag ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) p1 , p2 = args pairsfastq = pairspf ( ( p1 , p2 ) ) + ".fastq" tag = opts . tag p1fp = must_open ( p1 ) p2fp = must_open ( p2 ) pairsfw = must_open ( pairsfastq , "w" ) nreads = 0 while True : a = list ( islice ( p1fp , 4 ) ) if not a : break b = list ( islice ( p2fp , 4 ) ) if tag : name = a [ 0 ] . rstrip ( ) a [ 0 ] = name + "/1\n" b [ 0 ] = name + "/2\n" pairsfw . writelines ( a ) pairsfw . writelines ( b ) nreads += 2 pairsfw . close ( ) extra = nreads * 2 if tag else 0 checkShuffleSizes ( p1 , p2 , pairsfastq , extra = extra ) logging . debug ( "File `{0}` verified after writing {1} reads." . format ( pairsfastq , nreads ) ) return pairsfastq | %prog shuffle p1 . fastq p2 . fastq |
12,169 | def split ( args ) : from jcvi . apps . grid import Jobs p = OptionParser ( split . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) pairsfastq , = args gz = pairsfastq . endswith ( ".gz" ) pf = pairsfastq . replace ( ".gz" , "" ) . rsplit ( "." , 1 ) [ 0 ] p1 = pf + ".1.fastq" p2 = pf + ".2.fastq" cmd = "zcat" if gz else "cat" p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'" . format ( pairsfastq ) p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'" . format ( pairsfastq ) if gz : p1cmd += " | gzip" p2cmd += " | gzip" p1 += ".gz" p2 += ".gz" p1cmd += " > " + p1 p2cmd += " > " + p2 args = [ ( p1cmd , ) , ( p2cmd , ) ] m = Jobs ( target = sh , args = args ) m . run ( ) checkShuffleSizes ( p1 , p2 , pairsfastq ) | %prog split pairs . fastq |
12,170 | def guessoffset ( args ) : p = OptionParser ( guessoffset . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) fastqfile , = args ai = iter_fastq ( fastqfile ) rec = next ( ai ) offset = 64 while rec : quality = rec . quality lowcounts = len ( [ x for x in quality if x < 59 ] ) highcounts = len ( [ x for x in quality if x > 74 ] ) diff = highcounts - lowcounts if diff > 10 : break elif diff < - 10 : offset = 33 break rec = next ( ai ) if offset == 33 : print ( "Sanger encoding (offset=33)" , file = sys . stderr ) elif offset == 64 : print ( "Illumina encoding (offset=64)" , file = sys . stderr ) return offset | %prog guessoffset fastqfile |
12,171 | def format ( args ) : p = OptionParser ( format . __doc__ ) p . add_option ( "--convert" , default = None , choices = [ ">=1.8" , "<1.8" , "sra" ] , help = "Convert fastq header to a different format" + " [default: %default]" ) p . set_tag ( specify_tag = True ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) fastqfile , = args ai = iter_fastq ( fastqfile ) rec = next ( ai ) dialect = None while rec : h = FastqHeader ( rec . header ) if not dialect : dialect = h . dialect logging . debug ( "Input fastq dialect: `{0}`" . format ( dialect ) ) if opts . convert : logging . debug ( "Output fastq dialect: `{0}`" . format ( opts . convert ) ) rec . name = h . format_header ( dialect = opts . convert , tag = opts . tag ) print ( rec ) rec = next ( ai ) | %prog format fastqfile |
12,172 | def trim ( args ) : p = OptionParser ( trim . __doc__ ) p . add_option ( "-f" , dest = "first" , default = 0 , type = "int" , help = "First base to keep. Default is 1." ) p . add_option ( "-l" , dest = "last" , default = 0 , type = "int" , help = "Last base to keep. Default is entire read." ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) fastqfile , = args obfastqfile = op . basename ( fastqfile ) fq = obfastqfile . rsplit ( "." , 1 ) [ 0 ] + ".ntrimmed.fastq" if fastqfile . endswith ( ".gz" ) : fq = obfastqfile . rsplit ( "." , 2 ) [ 0 ] + ".ntrimmed.fastq.gz" cmd = "fastx_trimmer -Q33 " if opts . first : cmd += "-f {0.first} " . format ( opts ) if opts . last : cmd += "-l {0.last} " . format ( opts ) sh ( cmd , infile = fastqfile , outfile = fq ) | %prog trim fastqfile |
12,173 | def catread ( args ) : p = OptionParser ( catread . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) r1 , r2 = args p1fp , p2fp = FastqPairedIterator ( r1 , r2 ) outfile = pairspf ( ( r1 , r2 ) ) + ".cat.fastq" fw = must_open ( outfile , "w" ) while True : a = list ( islice ( p1fp , 4 ) ) if not a : break atitle , aseq , _ , aqual = a btitle , bseq , _ , bqual = list ( islice ( p2fp , 4 ) ) print ( "\n" . join ( ( atitle . strip ( ) , aseq . strip ( ) + bseq . strip ( ) , "+" , aqual . strip ( ) + bqual . strip ( ) ) ) , file = fw ) | %prog catread fastqfile1 fastqfile2 |
12,174 | def splitread ( args ) : p = OptionParser ( splitread . __doc__ ) p . add_option ( "-n" , dest = "n" , default = 76 , type = "int" , help = "Split at N-th base position [default: %default]" ) p . add_option ( "--rc" , default = False , action = "store_true" , help = "Reverse complement second read [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) pairsfastq , = args base = op . basename ( pairsfastq ) . split ( "." ) [ 0 ] fq1 = base + ".1.fastq" fq2 = base + ".2.fastq" fw1 = must_open ( fq1 , "w" ) fw2 = must_open ( fq2 , "w" ) fp = must_open ( pairsfastq ) n = opts . n minsize = n * 8 / 5 for name , seq , qual in FastqGeneralIterator ( fp ) : if len ( seq ) < minsize : logging . error ( "Skipping read {0}, length={1}" . format ( name , len ( seq ) ) ) continue name = "@" + name rec1 = FastqLite ( name , seq [ : n ] , qual [ : n ] ) rec2 = FastqLite ( name , seq [ n : ] , qual [ n : ] ) if opts . rc : rec2 . rc ( ) print ( rec1 , file = fw1 ) print ( rec2 , file = fw2 ) logging . debug ( "Reads split into `{0},{1}`" . format ( fq1 , fq2 ) ) fw1 . close ( ) fw2 . close ( ) | %prog splitread fastqfile |
12,175 | def size ( args ) : p = OptionParser ( size . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) < 1 : sys . exit ( not p . print_help ( ) ) total_size = total_numrecords = 0 for f in args : cur_size = cur_numrecords = 0 for rec in iter_fastq ( f ) : if not rec : break cur_numrecords += 1 cur_size += len ( rec ) print ( " " . join ( str ( x ) for x in ( op . basename ( f ) , cur_numrecords , cur_size ) ) ) total_numrecords += cur_numrecords total_size += cur_size if len ( args ) > 1 : print ( " " . join ( str ( x ) for x in ( "Total" , total_numrecords , total_size ) ) ) | %prog size fastqfile |
12,176 | def convert ( args ) : p = OptionParser ( convert . __doc__ ) p . set_phred ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) infastq , = args phred = opts . phred or str ( guessoffset ( [ infastq ] ) ) ophred = { "64" : "33" , "33" : "64" } [ phred ] gz = infastq . endswith ( ".gz" ) outfastq = infastq . rsplit ( "." , 1 ) [ 0 ] if gz else infastq pf , sf = outfastq . rsplit ( "." , 1 ) outfastq = "{0}.q{1}.{2}" . format ( pf , ophred , sf ) if gz : outfastq += ".gz" fin = "illumina" if phred == "64" else "sanger" fout = "sanger" if phred == "64" else "illumina" seqret = "seqret" if infastq . endswith ( ".gz" ) : cmd = "zcat {0} | " . format ( infastq ) cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout" . format ( fin , fout ) else : cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout" . format ( fin , infastq , fout ) sh ( cmd , outfile = outfastq ) return outfastq | %prog convert in . fastq |
12,177 | def pairinplace ( args ) : from jcvi . utils . iter import pairwise p = OptionParser ( pairinplace . __doc__ ) p . set_rclip ( ) p . set_tag ( ) p . add_option ( "--base" , help = "Base name for the output files [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) fastqfile , = args base = opts . base or op . basename ( fastqfile ) . split ( "." ) [ 0 ] frags = base + ".frags.fastq" pairs = base + ".pairs.fastq" if fastqfile . endswith ( ".gz" ) : frags += ".gz" pairs += ".gz" fragsfw = must_open ( frags , "w" ) pairsfw = must_open ( pairs , "w" ) N = opts . rclip tag = opts . tag strip_name = ( lambda x : x [ : - N ] ) if N else None fh_iter = iter_fastq ( fastqfile , key = strip_name ) skipflag = False for a , b in pairwise ( fh_iter ) : if b is None : break if skipflag : skipflag = False continue if a . name == b . name : if tag : a . name += "/1" b . name += "/2" print ( a , file = pairsfw ) print ( b , file = pairsfw ) skipflag = True else : print ( a , file = fragsfw ) if not skipflag : print ( a , file = fragsfw ) logging . debug ( "Reads paired into `%s` and `%s`" % ( pairs , frags ) ) return pairs | %prog pairinplace bulk . fastq |
12,178 | def fromsra ( args ) : p = OptionParser ( fromsra . __doc__ ) p . add_option ( "--paired" , default = False , action = "store_true" , help = "Specify if library layout is paired-end " + "[default: %default]" ) p . add_option ( "--compress" , default = None , choices = [ "gzip" , "bzip2" ] , help = "Compress output fastq files [default: %default]" ) p . set_outdir ( ) p . set_grid ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) srafile , = args paired = opts . paired compress = opts . compress outdir = opts . outdir script_path = which ( "fastq-dump" ) if not script_path : logging . error ( "Cannot find `fastq-dump` in the PATH" ) sys . exit ( ) cmd = [ script_path ] if compress : cmd . append ( "--{0}" . format ( compress ) ) if paired : cmd . append ( "--split-files" ) if outdir : cmd . append ( "--outdir {0}" . format ( outdir ) ) cmd . append ( srafile ) outcmd = " " . join ( cmd ) sh ( outcmd , grid = opts . grid ) | %prog fromsra srafile |
12,179 | def blast ( args ) : p = OptionParser ( blast . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) btabfile , = args btab = Btab ( btabfile ) for b in btab : print ( b . blastline ) | %prog blast btabfile |
12,180 | def bed ( args ) : from jcvi . formats . blast import BlastLine p = OptionParser ( bed . __doc__ ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) btabfile , = args btab = Btab ( btabfile ) for b in btab : Bline = BlastLine ( b . blastline ) print ( Bline . bedline ) | %prog bed btabfile |
12,181 | def gff ( args ) : from jcvi . utils . range import range_minmax from jcvi . formats . gff import valid_gff_parent_child , valid_gff_type p = OptionParser ( gff . __doc__ ) p . add_option ( "--source" , default = None , help = "Specify GFF source." + " By default, it picks algorithm used to generate btab file." + " [default: %default]" ) p . add_option ( "--type" , default = "protein_match" , choices = valid_gff_type , help = "GFF feature type [default: %default]" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) btabfile , = args btabdict = { } btab = Btab ( btabfile , aat_dialect = True ) osource = opts . source or "aat" otype = opts . type octype = valid_gff_parent_child [ otype ] for b in btab : nargs = b . nargs id = b . query + "-" + otype + "{0:05d}" . format ( b . chainNum ) key = b . key if key not in btabdict : btabdict [ key ] = { 'id' : id , 'method' : b . method , 'query' : b . query , 'subject' : b . subject , 'strand' : b . qStrand , 'sDesc' : b . sDesc , 'coords' : [ ] , 'children' : [ ] } btabdict [ key ] [ 'coords' ] . append ( ( b . qStart , b . qStop ) ) btabdict [ key ] [ 'children' ] . append ( b . gffline ( source = osource , type = octype , id = id ) ) for v in btabdict . itervalues ( ) : b = BtabLine ( "\t" . join ( str ( x ) for x in [ 0 ] * nargs ) , aat_dialect = True ) id = v [ 'id' ] b . query = v [ 'query' ] b . method = v [ 'method' ] b . subject = v [ 'subject' ] b . qStrand = v [ 'strand' ] b . sDesc = v [ 'sDesc' ] b . qStart , b . qStop = range_minmax ( v [ 'coords' ] ) print ( b . gffline ( source = osource , type = otype , primary_tag = "ID" , id = id ) ) print ( "\n" . join ( v [ 'children' ] ) ) | %prog gff btabfile |
12,182 | def batch_taxonomy ( list_of_taxids ) : for taxid in list_of_taxids : handle = Entrez . efetch ( db = 'Taxonomy' , id = taxid , retmode = "xml" ) records = Entrez . read ( handle ) yield records [ 0 ] [ "ScientificName" ] | Convert list of taxids to Latin names |
12,183 | def batch_entrez ( list_of_terms , db = "nuccore" , retmax = 1 , rettype = "fasta" , batchsize = 1 , email = myEmail ) : for term in list_of_terms : logging . debug ( "Search term %s" % term ) success = False ids = None if not term : continue while not success : try : search_handle = Entrez . esearch ( db = db , retmax = retmax , term = term ) rec = Entrez . read ( search_handle ) success = True ids = rec [ "IdList" ] except ( HTTPError , URLError , RuntimeError , KeyError ) as e : logging . error ( e ) logging . debug ( "wait 5 seconds to reconnect..." ) time . sleep ( 5 ) if not ids : logging . error ( "term {0} not found" . format ( term ) ) continue assert ids nids = len ( ids ) if nids > 1 : logging . debug ( "A total of {0} results found." . format ( nids ) ) if batchsize != 1 : logging . debug ( "Use a batch size of {0}." . format ( batchsize ) ) ids = list ( grouper ( ids , batchsize ) ) for id in ids : id = [ x for x in id if x ] size = len ( id ) id = "," . join ( id ) success = False while not success : try : fetch_handle = Entrez . efetch ( db = db , id = id , rettype = rettype , email = email ) success = True except ( HTTPError , URLError , RuntimeError ) as e : logging . error ( e ) logging . debug ( "wait 5 seconds to reconnect..." ) time . sleep ( 5 ) yield id , size , term , fetch_handle | Retrieve multiple rather than a single record |
12,184 | def ensembl ( args ) : p = OptionParser ( ensembl . __doc__ ) p . add_option ( "--version" , default = "75" , help = "Ensembl version [default: %default]" ) opts , args = p . parse_args ( args ) version = opts . version url = "ftp://ftp.ensembl.org/pub/release-{0}/" . format ( version ) fasta_url = url + "fasta/" valid_species = [ x for x in ls_ftp ( fasta_url ) if "." not in x ] doc = "\n" . join ( ( ensembl . __doc__ , tile ( valid_species ) ) ) p . set_usage ( doc ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) species , = args species = species . split ( "," ) for s in species : download_species_ensembl ( s , valid_species , url ) | %prog ensembl species |
12,185 | def get_first_rec ( fastafile ) : f = list ( SeqIO . parse ( fastafile , "fasta" ) ) if len ( f ) > 1 : logging . debug ( "{0} records found in {1}, using the first one" . format ( len ( f ) , fastafile ) ) return f [ 0 ] | Returns the first record in the fastafile |
12,186 | def bisect ( args ) : p = OptionParser ( bisect . __doc__ ) p . set_email ( ) opts , args = p . parse_args ( args ) if len ( args ) != 2 : sys . exit ( not p . print_help ( ) ) acc , fastafile = args arec = get_first_rec ( fastafile ) valid = None for i in range ( 1 , 100 ) : term = "%s.%d" % ( acc , i ) try : query = list ( batch_entrez ( [ term ] , email = opts . email ) ) except AssertionError as e : logging . debug ( "no records found for %s. terminating." % term ) return id , term , handle = query [ 0 ] brec = next ( SeqIO . parse ( handle , "fasta" ) ) match = print_first_difference ( arec , brec , ignore_case = True , ignore_N = True , rc = True ) if match : valid = term break if valid : print ( ) print ( green ( "%s matches the sequence in `%s`" % ( valid , fastafile ) ) ) | %prog bisect acc accession . fasta |
12,187 | def inspect ( item , maxchar = 80 ) : for i in dir ( item ) : try : member = str ( getattr ( item , i ) ) if maxchar and len ( member ) > maxchar : member = member [ : maxchar ] + "..." except : member = "[ERROR]" print ( "{}: {}" . format ( i , member ) , file = sys . stderr ) | Inspect the attributes of an item . |
12,188 | def depends ( func ) : from jcvi . apps . base import need_update , listify infile = "infile" outfile = "outfile" def wrapper ( * args , ** kwargs ) : assert outfile in kwargs , "You need to specify `outfile=` on function call" if infile in kwargs : infilename = listify ( kwargs [ infile ] ) for x in infilename : assert op . exists ( x ) , "The specified infile `{0}` does not exist" . format ( x ) outfilename = kwargs [ outfile ] if need_update ( infilename , outfilename ) : return func ( * args , ** kwargs ) else : msg = "File `{0}` exists. Computation skipped." . format ( outfilename ) logging . debug ( msg ) outfilename = listify ( outfilename ) for x in outfilename : assert op . exists ( x ) , "Something went wrong, `{0}` not found" . format ( x ) return outfilename return wrapper | Decorator to perform check on infile and outfile . When infile is not present issue warning and when outfile is present skip function calls . |
12,189 | def human_size ( size , a_kilobyte_is_1024_bytes = False , precision = 1 , target = None ) : if size < 0 : raise ValueError ( 'number must be non-negative' ) multiple = 1024 if a_kilobyte_is_1024_bytes else 1000 for suffix in SUFFIXES [ multiple ] : if target : if suffix == target : break size /= float ( multiple ) else : if size >= multiple : size /= float ( multiple ) else : break return '{0:.{1}f}{2}' . format ( size , precision , suffix ) | Convert a file size to human - readable form . |
12,190 | def gene_name ( st , exclude = ( "ev" , ) , sep = "." ) : if any ( st . startswith ( x ) for x in exclude ) : sep = None st = st . split ( '|' ) [ 0 ] if sep and sep in st : name , suffix = st . rsplit ( sep , 1 ) else : name , suffix = st , "" if len ( suffix ) != 1 : name = st return name | Helper functions in the BLAST filtering to get rid alternative splicings . This is ugly but different annotation groups are inconsistent with respect to how the alternative splicings are named . Mostly it can be done by removing the suffix except for ones in the exclude list . |
12,191 | def fixChromName ( name , orgn = "medicago" ) : import re mtr_pat1 = re . compile ( r"Mt[0-9]+\.[0-9]+[\.[0-9]+]{0,}_([a-z]+[0-9]+)" ) mtr_pat2 = re . compile ( r"([A-z0-9]+)_[A-z]+_[A-z]+" ) zmays_pat = re . compile ( r"[a-z]+:[A-z0-9]+:([A-z0-9]+):[0-9]+:[0-9]+:[0-9]+" ) zmays_sub = { 'mitochondrion' : 'Mt' , 'chloroplast' : 'Pt' } if orgn == "medicago" : for mtr_pat in ( mtr_pat1 , mtr_pat2 ) : match = re . search ( mtr_pat , name ) if match : n = match . group ( 1 ) n = n . replace ( "0" , "" ) name = re . sub ( mtr_pat , n , name ) elif orgn == "maize" : match = re . search ( zmays_pat , name ) if match : n = match . group ( 1 ) name = re . sub ( zmays_pat , n , name ) if name in zmays_sub : name = zmays_sub [ name ] return name | Convert quirky chromosome names encountered in different release files which are very project specific into a more general format . |
12,192 | def fill ( text , delimiter = "" , width = 70 ) : texts = [ ] for i in xrange ( 0 , len ( text ) , width ) : t = delimiter . join ( text [ i : i + width ] ) texts . append ( t ) return "\n" . join ( texts ) | Wrap text with width per line |
12,193 | def tile ( lt , width = 70 , gap = 1 ) : from jcvi . utils . iter import grouper max_len = max ( len ( x ) for x in lt ) + gap items_per_line = max ( width // max_len , 1 ) lt = [ x . rjust ( max_len ) for x in lt ] g = list ( grouper ( lt , items_per_line , fillvalue = "" ) ) return "\n" . join ( "" . join ( x ) for x in g ) | Pretty print list of items . |
12,194 | def normalize_lms_axis ( ax , xlim = None , ylim = None , xfactor = 1e-6 , yfactor = 1 , xlabel = None , ylabel = "Map (cM)" ) : if xlim : ax . set_xlim ( 0 , xlim ) if ylim : ax . set_ylim ( 0 , ylim ) if xlabel : xticklabels = [ int ( round ( x * xfactor ) ) for x in ax . get_xticks ( ) ] ax . set_xticklabels ( xticklabels , family = 'Helvetica' ) ax . set_xlabel ( xlabel ) else : ax . set_xticks ( [ ] ) if ylabel : yticklabels = [ int ( round ( x * yfactor ) ) for x in ax . get_yticks ( ) ] ax . set_yticklabels ( yticklabels , family = 'Helvetica' ) ax . set_ylabel ( ylabel ) else : ax . set_yticks ( [ ] ) | Normalize the axis limits and labels to beautify axis . |
12,195 | def fake ( args ) : from math import ceil from random import choice from Bio import SeqIO from Bio . Seq import Seq from Bio . SeqRecord import SeqRecord p = OptionParser ( fake . __doc__ ) p . set_outfile ( ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) inputbed , = args bed = Bed ( inputbed ) recs = [ ] for seqid , sb in bed . sub_beds ( ) : maxend = max ( x . end for x in sb ) size = int ( ceil ( maxend / 1000. ) * 1000 ) seq = "" . join ( [ choice ( "ACGT" ) for x in xrange ( size ) ] ) rec = SeqRecord ( Seq ( seq ) , id = seqid , description = "" ) recs . append ( rec ) fw = must_open ( opts . outfile , "w" ) SeqIO . write ( recs , fw , "fasta" ) | %prog fake input . bed |
12,196 | def compute_score ( markers , bonus , penalty ) : nmarkers = len ( markers ) s = [ bonus ] * nmarkers f = [ - 1 ] * nmarkers for i in xrange ( 1 , nmarkers ) : for j in xrange ( i ) : mi , mj = markers [ i ] , markers [ j ] t = bonus if mi . mlg == mj . mlg else penalty + bonus if s [ i ] < s [ j ] + t : s [ i ] = s [ j ] + t f [ i ] = j highest_score = max ( s ) si = s . index ( highest_score ) onchain = set ( ) while True : if si < 0 : break si = f [ si ] onchain . add ( si ) return [ x for i , x in enumerate ( markers ) if i in onchain ] | Compute chain score using dynamic programming . If a marker is the same linkage group as a previous one we add bonus ; otherwise we penalize the chain switching . |
12,197 | def split ( args ) : p = OptionParser ( split . __doc__ ) p . add_option ( "--chunk" , default = 4 , type = "int" , help = "Split chunks of at least N markers" ) p . add_option ( "--splitsingle" , default = False , action = "store_true" , help = "Split breakpoint range right in the middle" ) opts , args = p . parse_args ( args ) if len ( args ) != 1 : sys . exit ( not p . print_help ( ) ) inputbed , = args bonus = 2 nchunk = opts . chunk nbreaks = 0 penalty = - ( nchunk * bonus - 1 ) bed = Bed ( inputbed ) for seqid , bb in bed . sub_beds ( ) : markers = [ Marker ( x ) for x in bb ] markers = compute_score ( markers , bonus , penalty ) for mi , mj in pairwise ( markers ) : if mi . mlg == mj . mlg : continue assert mi . seqid == mj . seqid start , end = mi . pos , mj . pos if start > end : start , end = end , start if opts . splitsingle : start = end = ( start + end ) / 2 print ( "\t" . join ( str ( x ) for x in ( mi . seqid , start - 1 , end ) ) ) nbreaks += 1 logging . debug ( "A total of {} breakpoints inferred (--chunk={})" . format ( nbreaks , nchunk ) ) | %prog split input . bed |
12,198 | def movie ( args ) : p = OptionParser ( movie . __doc__ ) p . add_option ( "--gapsize" , default = 100 , type = "int" , help = "Insert gaps of size between scaffolds" ) add_allmaps_plot_options ( p ) opts , args = p . parse_args ( args ) if len ( args ) != 3 : sys . exit ( not p . print_help ( ) ) inputbed , scaffoldsfasta , seqid = args gapsize = opts . gapsize pf = inputbed . rsplit ( "." , 1 ) [ 0 ] agpfile = pf + ".chr.agp" tourfile = pf + ".tour" fp = open ( tourfile ) sizes = Sizes ( scaffoldsfasta ) . mapping ffmpeg = "ffmpeg" mkdir ( ffmpeg ) score = cur_score = None i = 1 for header , block in read_block ( fp , ">" ) : s , tag , label = header [ 1 : ] . split ( ) if s != seqid : continue tour = block [ 0 ] . split ( ) tour = [ ( x [ : - 1 ] , x [ - 1 ] ) for x in tour ] if label . startswith ( "GA" ) : cur_score = label . split ( "-" ) [ - 1 ] if cur_score == score : i += 1 continue score = cur_score image_name = "." . join ( ( seqid , "{0:04d}" . format ( i ) , label , "pdf" ) ) if need_update ( tourfile , image_name ) : fwagp = must_open ( agpfile , "w" ) order_to_agp ( seqid , tour , sizes , fwagp , gapsize = gapsize , gaptype = "map" ) fwagp . close ( ) logging . debug ( "{0} written to `{1}`" . format ( header , agpfile ) ) build ( [ inputbed , scaffoldsfasta , "--cleanup" ] ) pdf_name = plot ( [ inputbed , seqid , "--title={0}" . format ( label ) ] ) sh ( "mv {0} {1}" . format ( pdf_name , image_name ) ) if label in ( "INIT" , "FLIP" , "TSP" , "FINAL" ) : for j in xrange ( 5 ) : image_delay = image_name . rsplit ( "." , 1 ) [ 0 ] + ".d{0}.pdf" . format ( j ) sh ( "cp {0} {1}/{2}" . format ( image_name , ffmpeg , image_delay ) ) else : sh ( "cp {0} {1}/" . format ( image_name , ffmpeg ) ) i += 1 make_movie ( ffmpeg , pf ) | %prog movie input . bed scaffolds . fasta chr1 |
12,199 | def make_movie ( workdir , pf , dpi = 120 , fps = 1 , format = "pdf" , engine = "ffmpeg" ) : os . chdir ( workdir ) if format != "png" : cmd = "parallel convert -density {}" . format ( dpi ) cmd += " {} {.}.png ::: " + "*.{}" . format ( format ) sh ( cmd ) assert engine in ( "ffmpeg" , "gifsicle" ) , "Only ffmpeg or gifsicle is currently supported" if engine == "ffmpeg" : cmd = "ffmpeg -framerate {} -pattern_type glob -i '*.png' {}.mp4" . format ( fps , pf ) elif engine == "gifsicle" : cmd = "convert *.png gif:- |" cmd += " gifsicle --delay {} --loop --optimize=3" . format ( 100 / fps ) cmd += " --colors=256 --multifile - > {}.gif" . format ( pf ) sh ( cmd ) | Make the movie using either ffmpeg or gifsicle . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.