Columns: idx (int64, 0–63k), question (string, 53–5.28k chars), target (string, 5–805 chars)
2,900
def undecorate(func):
    orig_call_wrapper = lambda x: x
    for call_wrapper, unwrap in SUPPORTED_DECORATOR.items():
        if isinstance(func, call_wrapper):
            func = unwrap(func)
            orig_call_wrapper = call_wrapper
            break
    return orig_call_wrapper, func
Returns the decorator and the undecorated function of the given object.
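A minimal usage sketch, assuming the function above sits in the same script as a hypothetical SUPPORTED_DECORATOR registry (the real registry is not shown in this entry); it maps a wrapper type to a callable that recovers the wrapped function:

import functools

# Assumption: a placeholder registry; the real SUPPORTED_DECORATOR lives elsewhere.
SUPPORTED_DECORATOR = {functools.partial: lambda wrapped: wrapped.func}

wrapper, original = undecorate(functools.partial(sorted, reverse=True))
# wrapper is functools.partial, original is the plain sorted builtin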
2,901
def item ( ctx , appid , title ) : ctx . obj [ 'appid' ] = appid ctx . obj [ 'title' ] = title
Market-related commands.
2,902
def get_price ( ctx , currency ) : appid = ctx . obj [ 'appid' ] title = ctx . obj [ 'title' ] item_ = Item ( appid , title ) item_ . get_price_data ( currency ) click . secho ( 'Lowest price: %s %s' % ( item_ . price_lowest , item_ . price_currency ) , fg = 'green' )
Prints out market item price .
2,903
def get_cards ( ctx ) : appid = ctx . obj [ 'appid' ] app = Application ( appid ) click . secho ( 'Cards for `%s` [appid: %s]' % ( app . title , appid ) , fg = 'green' ) if not app . has_cards : click . secho ( 'This app has no cards.' , fg = 'red' , err = True ) return cards , booster = app . get_cards ( ) def get_line ( card ) : return '%s [market hash: `%s`]' % ( card . title , card . market_hash ) for card in cards . values ( ) : click . echo ( get_line ( card ) ) if booster : click . secho ( '* Booster pack: `%s`' % get_line ( booster ) , fg = 'yellow' ) click . secho ( '* Total cards: %d' % len ( cards ) , fg = 'green' )
Prints out cards available for an application.
2,904
def get_card_prices ( ctx , currency ) : appid = ctx . obj [ 'appid' ] detailed = True appids = [ appid ] if ',' in appid : appids = [ appid . strip ( ) for appid in appid . split ( ',' ) ] detailed = False for appid in appids : print_card_prices ( appid , currency , detailed = detailed ) click . echo ( '' )
Prints out the lowest card prices for an application. A comma-separated list of application IDs is supported.
2,905
def get_gems ( ctx ) : username = ctx . obj [ 'username' ] click . secho ( 'Total gems owned by `%s`: %d' % ( username , User ( username ) . gems_total ) , fg = 'green' )
Prints out total gems count for a Steam user .
2,906
def get_games ( ctx ) : username = ctx . obj [ 'username' ] games = User ( username ) . get_games_owned ( ) for game in sorted ( games . values ( ) , key = itemgetter ( 'title' ) ) : click . echo ( '%s [appid: %s]' % ( game [ 'title' ] , game [ 'appid' ] ) ) click . secho ( 'Total gems owned by `%s`: %d' % ( username , len ( games ) ) , fg = 'green' )
Prints out games owned by a Steam user .
2,907
def get_booster_stats ( ctx , currency ) : username = ctx . obj [ 'username' ] inventory = User ( username ) . _get_inventory_raw ( ) boosters = { } for item in inventory [ 'rgDescriptions' ] . values ( ) : is_booster = False tags = item [ 'tags' ] for tag in tags : if tag [ 'internal_name' ] == TAG_ITEM_CLASS_BOOSTER : is_booster = True break if not is_booster : continue appid = item [ 'market_fee_app' ] title = item [ 'name' ] boosters [ appid ] = title if not boosters : click . secho ( 'User `%s` has no booster packs' % username , fg = 'red' , err = True ) return for appid , title in boosters . items ( ) : click . secho ( 'Found booster: `%s`' % title , fg = 'blue' ) print_card_prices ( appid , currency )
Prints out price stats for booster packs available in a Steam user's inventory.
2,908
def get_cards_stats ( ctx , currency , skip_owned , appid , foil ) : username = ctx . obj [ 'username' ] cards_by_app = defaultdict ( list ) inventory = User ( username ) . traverse_inventory ( item_filter = TAG_ITEM_CLASS_CARD ) for item in inventory : appid_ = item . app . appid if not appid or appid_ in appid : cards_by_app [ appid_ ] . append ( item ) if not cards_by_app : click . secho ( 'User `%s` has no cards' % username , fg = 'red' , err = True ) return for appid_ , cards in cards_by_app . items ( ) : app = cards [ 0 ] . app print_card_prices ( app . appid , currency , owned_cards = [ card . title for card in cards ] , skip_owned = skip_owned , foil = foil , )
Prints out price stats for cards available in a Steam user's inventory.
2,909
def run_radia_with_merge ( job , rna_bam , tumor_bam , normal_bam , univ_options , radia_options ) : spawn = job . wrapJobFn ( run_radia , rna_bam [ 'rna_genome' ] , tumor_bam , normal_bam , univ_options , radia_options , disk = '100M' , memory = '100M' ) . encapsulate ( ) merge = job . wrapJobFn ( merge_perchrom_vcfs , spawn . rv ( ) , univ_options , disk = '100M' , memory = '100M' ) job . addChild ( spawn ) spawn . addChild ( merge ) return merge . rv ( )
A wrapper for the entire RADIA sub-graph.
2,910
def run_radia ( job , rna_bam , tumor_bam , normal_bam , univ_options , radia_options ) : if 'rna_genome' in rna_bam . keys ( ) : rna_bam = rna_bam [ 'rna_genome' ] elif set ( rna_bam . keys ( ) ) == { 'rna_genome_sorted.bam' , 'rna_genome_sorted.bam.bai' } : pass else : raise RuntimeError ( 'An improperly formatted dict was passed to rna_bam.' ) bams = { 'tumor_rna' : rna_bam [ 'rna_genome_sorted.bam' ] , 'tumor_rnai' : rna_bam [ 'rna_genome_sorted.bam.bai' ] , 'tumor_dna' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , 'tumor_dnai' : tumor_bam [ 'tumor_dna_fix_pg_sorted.bam.bai' ] , 'normal_dna' : normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , 'normal_dnai' : normal_bam [ 'normal_dna_fix_pg_sorted.bam.bai' ] } if radia_options [ 'chromosomes' ] : chromosomes = radia_options [ 'chromosomes' ] else : chromosomes = sample_chromosomes ( job , radia_options [ 'genome_fai' ] ) perchrom_radia = defaultdict ( ) for chrom in chromosomes : radia = job . addChildJobFn ( run_radia_perchrom , bams , univ_options , radia_options , chrom , memory = '6G' , disk = PromisedRequirement ( radia_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , rna_bam [ 'rna_genome_sorted.bam' ] , radia_options [ 'genome_fasta' ] ) ) filter_radia = radia . addChildJobFn ( run_filter_radia , bams , radia . rv ( ) , univ_options , radia_options , chrom , memory = '6G' , disk = PromisedRequirement ( radia_disk , tumor_bam [ 'tumor_dna_fix_pg_sorted.bam' ] , normal_bam [ 'normal_dna_fix_pg_sorted.bam' ] , rna_bam [ 'rna_genome_sorted.bam' ] , radia_options [ 'genome_fasta' ] ) ) perchrom_radia [ chrom ] = filter_radia . rv ( ) job . fileStore . logToMaster ( 'Ran spawn_radia on %s successfully' % univ_options [ 'patient' ] ) return perchrom_radia
Spawn a RADIA job for each chromosome on the input bam trios .
2,911
def run_radia_perchrom ( job , bams , univ_options , radia_options , chrom ) : work_dir = os . getcwd ( ) input_files = { 'rna.bam' : bams [ 'tumor_rna' ] , 'rna.bam.bai' : bams [ 'tumor_rnai' ] , 'tumor.bam' : bams [ 'tumor_dna' ] , 'tumor.bam.bai' : bams [ 'tumor_dnai' ] , 'normal.bam' : bams [ 'normal_dna' ] , 'normal.bam.bai' : bams [ 'normal_dnai' ] , 'genome.fa.tar.gz' : radia_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : radia_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } radia_output = '' . join ( [ work_dir , '/radia_' , chrom , '.vcf' ] ) radia_log = '' . join ( [ work_dir , '/radia_' , chrom , '_radia.log' ] ) parameters = [ univ_options [ 'patient' ] , chrom , '-n' , input_files [ 'normal.bam' ] , '-t' , input_files [ 'tumor.bam' ] , '-r' , input_files [ 'rna.bam' ] , '' . join ( [ '--rnaTumorFasta=' , input_files [ 'genome.fa' ] ] ) , '-f' , input_files [ 'genome.fa' ] , '-o' , docker_path ( radia_output ) , '-i' , univ_options [ 'ref' ] , '-m' , input_files [ 'genome.fa' ] , '-d' , 'aarjunrao@soe.ucsc.edu' , '-q' , 'Illumina' , '--disease' , 'CANCER' , '-l' , 'INFO' , '-g' , docker_path ( radia_log ) ] docker_call ( tool = 'radia' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = radia_options [ 'version' ] ) output_file = job . fileStore . writeGlobalFile ( radia_output ) job . fileStore . logToMaster ( 'Ran radia on %s:%s successfully' % ( univ_options [ 'patient' ] , chrom ) ) return output_file
Run RADIA call on a single chromosome in the input bams .
2,912
def run_filter_radia ( job , bams , radia_file , univ_options , radia_options , chrom ) : work_dir = os . getcwd ( ) input_files = { 'rna.bam' : bams [ 'tumor_rna' ] , 'rna.bam.bai' : bams [ 'tumor_rnai' ] , 'tumor.bam' : bams [ 'tumor_dna' ] , 'tumor.bam.bai' : bams [ 'tumor_dnai' ] , 'normal.bam' : bams [ 'normal_dna' ] , 'normal.bam.bai' : bams [ 'normal_dnai' ] , 'radia.vcf' : radia_file , 'genome.fa.tar.gz' : radia_options [ 'genome_fasta' ] , 'genome.fa.fai.tar.gz' : radia_options [ 'genome_fai' ] , 'cosmic_beds' : radia_options [ 'cosmic_beds' ] , 'dbsnp_beds' : radia_options [ 'dbsnp_beds' ] , 'retrogene_beds' : radia_options [ 'retrogene_beds' ] , 'pseudogene_beds' : radia_options [ 'pseudogene_beds' ] , 'gencode_beds' : radia_options [ 'gencode_beds' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) for key in ( 'genome.fa' , 'genome.fa.fai' ) : input_files [ key ] = untargz ( input_files [ key + '.tar.gz' ] , work_dir ) for key in ( 'cosmic_beds' , 'dbsnp_beds' , 'retrogene_beds' , 'pseudogene_beds' , 'gencode_beds' ) : input_files [ key ] = untargz ( input_files [ key ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } filterradia_log = '' . join ( [ work_dir , '/radia_filtered_' , chrom , '_radia.log' ] ) parameters = [ univ_options [ 'patient' ] , chrom . lstrip ( 'chr' ) , input_files [ 'radia.vcf' ] , '/data' , '/home/radia/scripts' , '-d' , input_files [ 'dbsnp_beds' ] , '-r' , input_files [ 'retrogene_beds' ] , '-p' , input_files [ 'pseudogene_beds' ] , '-c' , input_files [ 'cosmic_beds' ] , '-t' , input_files [ 'gencode_beds' ] , '--noSnpEff' , '--noBlacklist' , '--noTargets' , '--noRnaBlacklist' , '-f' , input_files [ 'genome.fa' ] , '--log=INFO' , '-g' , docker_path ( filterradia_log ) ] docker_call ( tool = 'filterradia' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = radia_options [ 'version' ] ) output_file = '' . join ( [ work_dir , '/' , chrom , '.vcf' ] ) os . rename ( '' . join ( [ work_dir , '/' , univ_options [ 'patient' ] , '_' , chrom , '.vcf' ] ) , output_file ) output_fsid = job . fileStore . writeGlobalFile ( output_file ) export_results ( job , output_fsid , output_file , univ_options , subfolder = 'mutations/radia' ) job . fileStore . logToMaster ( 'Ran filter-radia on %s:%s successfully' % ( univ_options [ 'patient' ] , chrom ) ) return output_fsid
Run filterradia on the RADIA output .
2,913
def index_bamfile ( job , bamfile , sample_type , univ_options , samtools_options , sample_info = None , export = True ) : work_dir = os . getcwd ( ) in_bamfile = sample_type if sample_info is not None : assert isinstance ( sample_info , str ) in_bamfile = '_' . join ( [ in_bamfile , sample_info ] ) in_bamfile += '.bam' input_files = { in_bamfile : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'index' , input_files [ in_bamfile ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = samtools_options [ 'version' ] ) out_bai = '/' . join ( [ work_dir , in_bamfile + '.bai' ] ) output_files = { in_bamfile : bamfile , in_bamfile + '.bai' : job . fileStore . writeGlobalFile ( out_bai ) } if export : export_results ( job , bamfile , os . path . splitext ( out_bai ) [ 0 ] , univ_options , subfolder = 'alignments' ) export_results ( job , output_files [ in_bamfile + '.bai' ] , out_bai , univ_options , subfolder = 'alignments' ) job . fileStore . logToMaster ( 'Ran samtools-index on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return output_files
Index bamfile using samtools
2,914
def sort_bamfile ( job , bamfile , sample_type , univ_options , samtools_options ) : work_dir = os . getcwd ( ) in_bamfile = '' . join ( [ sample_type , '.bam' ] ) out_bamfile = '_' . join ( [ sample_type , 'sorted.bam' ] ) input_files = { in_bamfile : bamfile } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = True ) parameters = [ 'sort' , '-o' , docker_path ( out_bamfile ) , '-O' , 'bam' , '-T' , 'temp_sorted' , '-@' , str ( samtools_options [ 'n' ] ) , input_files [ in_bamfile ] ] docker_call ( tool = 'samtools' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = samtools_options [ 'version' ] ) job . fileStore . deleteGlobalFile ( bamfile ) job . fileStore . logToMaster ( 'Ran samtools-sort on %s:%s successfully' % ( univ_options [ 'patient' ] , sample_type ) ) return job . fileStore . writeGlobalFile ( out_bamfile )
Sort bamfile using samtools
2,915
def get_identity ( user ) : identity = Identity ( user . id ) if hasattr ( user , 'id' ) : identity . provides . add ( UserNeed ( user . id ) ) for role in getattr ( user , 'roles' , [ ] ) : identity . provides . add ( RoleNeed ( role . name ) ) identity . user = user return identity
Create an identity for a given user instance .
2,916
def object_to_items ( data_structure ) : items = [ ] try : items = list ( data_structure . __dict__ . items ( ) ) except : pass hierarchy = [ data_structure ] try : hierarchy += inspect . getmro ( data_structure ) except : pass slots = [ ] try : for b in hierarchy : try : slots += b . __slots__ except : pass except : pass for x in slots : items . append ( ( x , getattr ( data_structure , x ) ) ) return items
Converts an object to an items list, also respecting slots.
2,917
def recursive_sort ( data_structure ) : if not isinstance ( data_structure , _primitive_types ) : is_meta = isinstance ( data_structure , Meta ) was_dict = isinstance ( data_structure , WasDict ) if not ( is_meta or was_dict ) : was_dict = isinstance ( data_structure , dict ) if not was_dict : try : data_structure = data_structure . __dict__ was_dict = True except : pass try : data_structure = data_structure . items ( ) except : pass tlen = - 1 try : tlen = len ( data_structure ) except : pass if tlen != - 1 : try : if was_dict : return tuple ( sorted ( [ ( recursive_sort ( x [ 0 ] ) , recursive_sort ( x [ 1 ] ) , ) for x in data_structure ] , key = TraversalBasedReprCompare ) ) elif is_meta : return data_structure [ 0 : - 1 ] + [ recursive_sort ( data_structure [ - 1 ] ) ] else : return tuple ( sorted ( [ recursive_sort ( x , ) for x in data_structure ] , key = TraversalBasedReprCompare , ) ) except : pass return data_structure
Sort a recursive data_structure .
2,918
def traverse_frozen_data(data_structure):
    parent_stack = [data_structure]
    while parent_stack:
        node = parent_stack.pop(0)
        tlen = -1
        if not isinstance(node, _string_types):
            try:
                tlen = len(node)
            except:
                pass
        if tlen == -1:
            yield node
        else:
            parent_stack = list(node) + parent_stack
Yields the leaves of the frozen data-structure pre-order.
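A small traversal sketch, assuming _string_types is simply (str,) (an assumption; the module's real alias is not shown in this entry) and the generator above is defined in the same script:

_string_types = (str,)  # assumption: placeholder for the module's string-type alias

nested = (1, (2, 'ab'), [3, {4}])
print(list(traverse_frozen_data(nested)))  # [1, 2, 'ab', 3, 4]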
2,919
def tree_diff ( a , b , n = 5 , sort = False ) : a = dump ( a ) b = dump ( b ) if not sort : a = vformat ( a ) . split ( "\n" ) b = vformat ( b ) . split ( "\n" ) else : a = vformat ( recursive_sort ( a ) ) . split ( "\n" ) b = vformat ( recursive_sort ( b ) ) . split ( "\n" ) return "\n" . join ( difflib . unified_diff ( a , b , n = n , lineterm = "" ) )
Dump any data-structure or object, traverse it depth-first in-order, and apply a unified diff.
2,920
def stats ( self ) : stats_online = CRef . cint ( ) stats_ingame = CRef . cint ( ) stats_chatting = CRef . cint ( ) self . _iface . get_clan_stats ( self . group_id , stats_online , stats_ingame , stats_chatting , ) return { 'online' : int ( stats_online ) , 'ingame' : int ( stats_ingame ) , 'chatting' : int ( stats_chatting ) , }
Basic group statistics .
2,921
def startproject ( name , directory , verbosity ) : handle_template ( 'project' , name , target = directory , verbosity = verbosity ) click . echo ( f"Success: '{name}' project was successfully created on '{directory}'" )
Creates a Trading-Bots project directory structure for the given project NAME in the current directory or optionally in the given DIRECTORY.
2,922
def createbot ( name , directory , verbosity ) : handle_template ( 'bot' , name , target = directory , verbosity = verbosity ) click . echo ( f"Success: '{name}' bot was successfully created on '{directory}'" )
Creates a Bot's directory structure for the given bot NAME in the current directory or optionally in the given DIRECTORY.
2,923
def get_state ( self , as_str = False ) : uid = self . user_id if self . _iface_user . get_id ( ) == uid : result = self . _iface . get_my_state ( ) else : result = self . _iface . get_state ( uid ) if as_str : return UserState . get_alias ( result ) return result
Returns user state . See UserState .
2,924
def load_permissions_on_identity_loaded ( sender , identity ) : identity . provides . add ( any_user ) if current_user . is_authenticated : identity . provides . add ( authenticated_user )
Add system roles Needs to users' identities.
2,925
def print_errors ( self , file_name ) : for error in self . get_messages ( file_name ) : print ( '\t' , error . __unicode__ ( ) )
Prints the errors observed for a file
2,926
def clean ( self ) : data = super ( RasterQueryForm , self ) . clean ( ) geom = data . pop ( 'upload' , None ) or data . pop ( 'bbox' , None ) if geom : data [ 'g' ] = geom return data
Return cleaned fields as a dict and determine which geom takes precedence.
2,927
def register ( matcher , * aliases ) : docstr = matcher . __doc__ if matcher . __doc__ is not None else '' helpmatchers [ matcher ] = docstr . strip ( ) for alias in aliases : matchers [ alias ] = matcher norm = normalize ( alias ) normalized [ norm ] = alias norm = norm . replace ( '_' , '' ) normalized [ norm ] = alias
Register a matcher associated with one or more aliases. Each alias given is also normalized.
2,928
def normalize(alias):
    alias = re.sub(r'([a-z])([A-Z])', r'\1_\2', alias)
    words = alias.lower().split('_')
    words = filter(lambda w: w not in IGNORED_WORDS, words)
    return '_'.join(words)
Normalizes an alias by removing adverbs defined in IGNORED_WORDS
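A short illustration, assuming a hypothetical IGNORED_WORDS set (the real word list is not shown in this entry) defined alongside the function above:

import re

IGNORED_WORDS = {'to', 'be', 'a', 'an', 'the'}  # assumption: placeholder word list

print(normalize('toBeEqualTo'))      # 'equal'  (camelCase split, filler words dropped)
print(normalize('is_greater_than'))  # 'is_greater_than'  (nothing filtered here)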
2,929
def lookup(alias):
    if alias in matchers:
        return matchers[alias]
    else:
        norm = normalize(alias)
        if norm in normalized:
            alias = normalized[norm]
            return matchers[alias]
        if -1 != alias.find('_'):
            norm = normalize(alias).replace('_', '')
            return lookup(norm)
        return None
Tries to find a matcher callable associated with the given alias. If an exact match does not exist, it will try normalizing it and even removing underscores to find one.
2,930
def suggest(alias, max=3, cutoff=0.5):
    aliases = matchers.keys()
    similar = get_close_matches(alias, aliases, n=max, cutoff=cutoff)
    return similar
Suggest a list of aliases which are similar enough
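suggest is a thin wrapper over difflib.get_close_matches; the underlying call can be tried on its own with hypothetical aliases:

from difflib import get_close_matches

aliases = ['equal', 'be_greater_than', 'be_lower_than', 'contain']
print(get_close_matches('equals', aliases, n=3, cutoff=0.5))  # ['equal']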
2,931
def sample_chromosomes ( job , genome_fai_file ) : work_dir = os . getcwd ( ) genome_fai = untargz ( job . fileStore . readGlobalFile ( genome_fai_file ) , work_dir ) return chromosomes_from_fai ( genome_fai )
Get a list of chromosomes in the input data .
2,932
def run_mutation_aggregator ( job , mutation_results , univ_options ) : out = { } for chrom in mutation_results [ 'mutect' ] . keys ( ) : out [ chrom ] = job . addChildJobFn ( merge_perchrom_mutations , chrom , mutation_results , univ_options ) . rv ( ) merged_snvs = job . addFollowOnJobFn ( merge_perchrom_vcfs , out , 'merged' , univ_options ) job . fileStore . logToMaster ( 'Aggregated mutations for %s successfully' % univ_options [ 'patient' ] ) return merged_snvs . rv ( )
Aggregate all the called mutations .
2,933
def merge_perchrom_mutations ( job , chrom , mutations , univ_options ) : work_dir = os . getcwd ( ) from protect . mutation_calling . muse import process_muse_vcf from protect . mutation_calling . mutect import process_mutect_vcf from protect . mutation_calling . radia import process_radia_vcf from protect . mutation_calling . somaticsniper import process_somaticsniper_vcf from protect . mutation_calling . strelka import process_strelka_vcf mutations . pop ( 'indels' ) mutations [ 'strelka_indels' ] = mutations [ 'strelka' ] [ 'indels' ] mutations [ 'strelka_snvs' ] = mutations [ 'strelka' ] [ 'snvs' ] vcf_processor = { 'snvs' : { 'mutect' : process_mutect_vcf , 'muse' : process_muse_vcf , 'radia' : process_radia_vcf , 'somaticsniper' : process_somaticsniper_vcf , 'strelka_snvs' : process_strelka_vcf } , 'indels' : { 'strelka_indels' : process_strelka_vcf } } majority = { 'snvs' : 2 , 'indels' : 1 } accepted_hits = defaultdict ( dict ) for mut_type in vcf_processor . keys ( ) : perchrom_mutations = { caller : vcf_processor [ mut_type ] [ caller ] ( job , mutations [ caller ] [ chrom ] , work_dir , univ_options ) for caller in vcf_processor [ mut_type ] } perchrom_mutations [ 'strelka' ] = perchrom_mutations [ 'strelka_' + mut_type ] perchrom_mutations . pop ( 'strelka_' + mut_type ) vcf_lists = { caller : read_vcf ( vcf_file ) for caller , vcf_file in perchrom_mutations . items ( ) } all_positions = list ( set ( itertools . chain ( * vcf_lists . values ( ) ) ) ) for position in sorted ( all_positions ) : hits = { caller : position in vcf_lists [ caller ] for caller in perchrom_mutations . keys ( ) } if sum ( hits . values ( ) ) >= majority [ mut_type ] : callers = ',' . join ( [ caller for caller , hit in hits . items ( ) if hit ] ) assert position [ 1 ] not in accepted_hits [ position [ 0 ] ] accepted_hits [ position [ 0 ] ] [ position [ 1 ] ] = ( position [ 2 ] , position [ 3 ] , callers ) with open ( '' . join ( [ work_dir , '/' , chrom , '.vcf' ] ) , 'w' ) as outfile : print ( '##fileformat=VCFv4.0' , file = outfile ) print ( '##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.' , file = outfile ) print ( '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO' , file = outfile ) for chrom in chrom_sorted ( accepted_hits . keys ( ) ) : for position in sorted ( accepted_hits [ chrom ] ) : print ( chrom , position , '.' , accepted_hits [ chrom ] [ position ] [ 0 ] , accepted_hits [ chrom ] [ position ] [ 1 ] , '.' , 'PASS' , 'callers=' + accepted_hits [ chrom ] [ position ] [ 2 ] , sep = '\t' , file = outfile ) fsid = job . fileStore . writeGlobalFile ( outfile . name ) export_results ( job , fsid , outfile . name , univ_options , subfolder = 'mutations/merged' ) return fsid
Merge the mutation calls for a single chromosome .
2,934
def read_vcf(vcf_file):
    vcf_dict = []
    with open(vcf_file, 'r') as invcf:
        for line in invcf:
            if line.startswith('#'):
                continue
            line = line.strip().split()
            vcf_dict.append((line[0], line[1], line[3], line[4]))
    return vcf_dict
Read a vcf file into a list of (CHROM, POS, REF, ALT) tuples.
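A quick round trip on a tiny hypothetical VCF body, with the function above in scope; header lines starting with '#' are skipped and one tuple is collected per record:

import tempfile

vcf_text = ('##fileformat=VCFv4.0\n'
            '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
            'chr1\t12345\t.\tA\tG\t.\tPASS\t.\n')
with tempfile.NamedTemporaryFile('w', suffix='.vcf', delete=False) as f:
    f.write(vcf_text)

print(read_vcf(f.name))  # [('chr1', '12345', 'A', 'G')]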
2,935
def merge_perchrom_vcfs ( job , perchrom_vcfs , tool_name , univ_options ) : work_dir = os . getcwd ( ) input_files = { '' . join ( [ chrom , '.vcf' ] ) : jsid for chrom , jsid in perchrom_vcfs . items ( ) } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) first = True with open ( '' . join ( [ work_dir , '/' , 'all_merged.vcf' ] ) , 'w' ) as outvcf : for chromvcfname in chrom_sorted ( [ x . rstrip ( '.vcf' ) for x in input_files . keys ( ) ] ) : with open ( input_files [ chromvcfname + '.vcf' ] , 'r' ) as infile : for line in infile : line = line . strip ( ) if line . startswith ( '#' ) : if first : print ( line , file = outvcf ) continue first = False print ( line , file = outvcf ) output_file = job . fileStore . writeGlobalFile ( outvcf . name ) export_results ( job , output_file , outvcf . name , univ_options , subfolder = 'mutations/' + tool_name ) job . fileStore . logToMaster ( 'Ran merge_perchrom_vcfs for %s successfully' % tool_name ) return output_file
Merge per - chromosome vcf files into a single genome level vcf .
2,936
def unmerge ( job , input_vcf , tool_name , chromosomes , tool_options , univ_options ) : work_dir = os . getcwd ( ) input_files = { 'input.vcf' : input_vcf , 'genome.fa.fai.tar.gz' : tool_options [ 'genome_fai' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'genome.fa.fai' ] = untargz ( input_files [ 'genome.fa.fai.tar.gz' ] , work_dir ) read_chromosomes = defaultdict ( ) with open ( input_files [ 'input.vcf' ] , 'r' ) as in_vcf : header = [ ] for line in in_vcf : if line . startswith ( '#' ) : header . append ( line ) continue line = line . strip ( ) chrom = line . split ( ) [ 0 ] if chrom in read_chromosomes : print ( line , file = read_chromosomes [ chrom ] ) else : read_chromosomes [ chrom ] = open ( os . path . join ( os . getcwd ( ) , chrom + '.vcf' ) , 'w' ) print ( '' . join ( header ) , file = read_chromosomes [ chrom ] , end = '' ) print ( line , file = read_chromosomes [ chrom ] ) for chrom in set ( chromosomes ) . difference ( set ( read_chromosomes . keys ( ) ) ) : read_chromosomes [ chrom ] = open ( os . path . join ( os . getcwd ( ) , chrom + '.vcf' ) , 'w' ) print ( '' . join ( header ) , file = read_chromosomes [ chrom ] , end = '' ) outdict = { } chroms = set ( chromosomes ) . intersection ( set ( read_chromosomes . keys ( ) ) ) for chrom , chromvcf in read_chromosomes . items ( ) : chromvcf . close ( ) if chrom not in chroms : continue outdict [ chrom ] = job . fileStore . writeGlobalFile ( chromvcf . name ) export_results ( job , outdict [ chrom ] , chromvcf . name , univ_options , subfolder = 'mutations/' + tool_name ) return outdict
Un - merge a vcf file into per - chromosome vcfs .
2,937
def as_feature ( data ) : if not isinstance ( data , ( Feature , FeatureCollection ) ) : if is_featurelike ( data ) : data = Feature ( ** data ) elif has_features ( data ) : data = FeatureCollection ( ** data ) elif isinstance ( data , collections . Sequence ) : data = FeatureCollection ( features = data ) elif has_layer ( data ) : data = LayerCollection ( data ) elif has_coordinates ( data ) : data = Feature ( geometry = data ) elif isinstance ( data , collections . Mapping ) and not data : data = Feature ( ) return data
Returns a Feature or FeatureCollection .
2,938
def has_layer ( fcollection ) : for val in six . viewvalues ( fcollection ) : if has_features ( val ) : return True return False
Returns true for a multi-layer dict of FeatureCollections.
2,939
def wrap_rsem ( job , star_bams , univ_options , rsem_options ) : rsem = job . addChildJobFn ( run_rsem , star_bams [ 'rna_transcriptome.bam' ] , univ_options , rsem_options , cores = rsem_options [ 'n' ] , disk = PromisedRequirement ( rsem_disk , star_bams , rsem_options [ 'index' ] ) ) return rsem . rv ( )
A wrapper for run_rsem using the results from run_star as input .
2,940
def run_rsem ( job , rna_bam , univ_options , rsem_options ) : work_dir = os . getcwd ( ) input_files = { 'star_transcriptome.bam' : rna_bam , 'rsem_index.tar.gz' : rsem_options [ 'index' ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) input_files [ 'rsem_index' ] = untargz ( input_files [ 'rsem_index.tar.gz' ] , work_dir ) input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ '--paired-end' , '-p' , str ( rsem_options [ 'n' ] ) , '--bam' , input_files [ 'star_transcriptome.bam' ] , '--no-bam-output' , '/' . join ( [ input_files [ 'rsem_index' ] , univ_options [ 'ref' ] ] ) , 'rsem' ] docker_call ( tool = 'rsem' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = rsem_options [ 'version' ] ) output_files = { } for filename in ( 'rsem.genes.results' , 'rsem.isoforms.results' ) : output_files [ filename ] = job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , filename ] ) ) export_results ( job , output_files [ filename ] , '/' . join ( [ work_dir , filename ] ) , univ_options , subfolder = 'expression' ) job . fileStore . logToMaster ( 'Ran rsem on %s successfully' % univ_options [ 'patient' ] ) return output_files
Run rsem on the input RNA bam .
2,941
def activate ( self , page = None ) : page = page or '' if '://' in page : self . _iface . activate_overlay_url ( page ) else : self . _iface . activate_overlay_game ( page )
Activates overlay with browser optionally opened at a given page .
2,942
def any_of ( value , * args ) : if len ( args ) : value = ( value , ) + args return ExpectationAny ( value )
At least one of the items in value should match
2,943
def all_of ( value , * args ) : if len ( args ) : value = ( value , ) + args return ExpectationAll ( value )
All the items in value should match
2,944
def none_of ( value , * args ) : if len ( args ) : value = ( value , ) + args return ExpectationNone ( value )
None of the items in value should match
2,945
def run_cutadapt ( job , fastqs , univ_options , cutadapt_options ) : work_dir = os . getcwd ( ) input_files = { 'rna_1.fastq' : fastqs [ 0 ] , 'rna_2.fastq' : fastqs [ 1 ] } input_files = get_files_from_filestore ( job , input_files , work_dir , docker = False ) gz = '.gz' if is_gzipfile ( input_files [ 'rna_1.fastq' ] ) else '' if gz : for read_file in 'rna_1.fastq' , 'rna_2.fastq' : os . symlink ( read_file , read_file + gz ) input_files [ read_file + gz ] = input_files [ read_file ] + gz input_files = { key : docker_path ( path ) for key , path in input_files . items ( ) } parameters = [ '-a' , cutadapt_options [ 'a' ] , '-A' , cutadapt_options [ 'A' ] , '-m' , '35' , '-o' , docker_path ( 'rna_cutadapt_1.fastq.gz' ) , '-p' , docker_path ( 'rna_cutadapt_2.fastq.gz' ) , input_files [ 'rna_1.fastq' + gz ] , input_files [ 'rna_2.fastq' + gz ] ] docker_call ( tool = 'cutadapt' , tool_parameters = parameters , work_dir = work_dir , dockerhub = univ_options [ 'dockerhub' ] , tool_version = cutadapt_options [ 'version' ] ) output_files = [ ] for fastq_file in [ 'rna_cutadapt_1.fastq.gz' , 'rna_cutadapt_2.fastq.gz' ] : output_files . append ( job . fileStore . writeGlobalFile ( '/' . join ( [ work_dir , fastq_file ] ) ) ) job . fileStore . logToMaster ( 'Ran cutadapt on %s successfully' % univ_options [ 'patient' ] ) return output_files
Runs cutadapt on the input RNA fastq files .
2,946
def index ( ) : identity = g . identity actions = { } for action in access . actions . values ( ) : actions [ action . value ] = DynamicPermission ( action ) . allows ( identity ) if current_user . is_anonymous : return render_template ( "invenio_access/open.html" , actions = actions , identity = identity ) else : return render_template ( "invenio_access/limited.html" , message = '' , actions = actions , identity = identity )
Basic test view .
2,947
def role_admin ( ) : identity = g . identity actions = { } for action in access . actions . values ( ) : actions [ action . value ] = DynamicPermission ( action ) . allows ( identity ) message = 'You are opening a page requiring the "admin-access" permission' return render_template ( "invenio_access/limited.html" , message = message , actions = actions , identity = identity )
View only allowed to admin role .
2,948
def read_fastas ( input_files ) : tumor_file = [ y for x , y in input_files . items ( ) if x . startswith ( 'T' ) ] [ 0 ] normal_file = [ y for x , y in input_files . items ( ) if x . startswith ( 'N' ) ] [ 0 ] output_files = defaultdict ( list ) output_files = _read_fasta ( tumor_file , output_files ) num_entries = len ( output_files ) output_files = _read_fasta ( normal_file , output_files ) assert len ( output_files ) == num_entries return output_files
Read the tumor and normal fastas into a joint dict .
2,949
def _read_fasta ( fasta_file , output_dict ) : read_name = None with open ( fasta_file , 'r' ) as f : for line in f : line = line . strip ( ) if not line : continue if line . startswith ( '>' ) : read_name = line . lstrip ( '>' ) else : assert read_name is not None , line output_dict [ read_name ] . append ( line . strip ( ) ) return output_dict
Read the peptide fasta into an existing dict .
2,950
def _process_consensus_mhcii ( mhc_file , normal = False ) : core_col = None results = pandas . DataFrame ( columns = [ 'allele' , 'pept' , 'tumor_pred' , 'core' ] ) with open ( mhc_file , 'r' ) as mf : peptides = set ( ) for line in mf : if not line . startswith ( 'HLA' ) : continue line = line . strip ( ) . split ( '\t' ) allele = line [ 0 ] pept = line [ 4 ] pred = line [ 6 ] if core_col : core = line [ core_col ] else : methods = line [ 5 ] . lstrip ( 'Consensus(' ) . rstrip ( ')' ) methods = methods . split ( ',' ) if 'NN' in methods : core_col = 13 elif 'netMHCIIpan' in methods : core_col = 17 elif 'Sturniolo' in methods : core_col = 19 elif 'SMM' in methods : core_col = 10 core = line [ core_col ] if core_col else 'NOCORE' if float ( pred ) > 5.00 and not normal : continue results . loc [ len ( results ) ] = [ allele , pept , pred , core ] results . drop_duplicates ( inplace = True ) return results
Process the results from running IEDB MHCII binding predictions using the consensus method into a pandas dataframe .
2,951
def _process_net_mhcii ( mhc_file , normal = False ) : results = pandas . DataFrame ( columns = [ 'allele' , 'pept' , 'tumor_pred' , 'core' , 'peptide_name' ] ) with open ( mhc_file , 'r' ) as mf : peptides = set ( ) allele = re . sub ( '-DQB' , '/DQB' , mf . readline ( ) . strip ( ) ) _ = mf . readline ( ) for line in mf : line = line . strip ( ) . split ( '\t' ) pept = line [ 1 ] pred = line [ 5 ] core = 'NOCORE' peptide_name = line [ 2 ] if float ( pred ) > 5.00 and not normal : continue results . loc [ len ( results ) ] = [ allele , pept , pred , core , peptide_name ] results . drop_duplicates ( inplace = True ) return results
Process the results from running NetMHCIIpan binding predictions into a pandas dataframe .
2,952
def _process_mhci ( mhc_file , normal = False ) : results = pandas . DataFrame ( columns = [ 'allele' , 'pept' , 'tumor_pred' , 'core' ] ) with open ( mhc_file , 'r' ) as mf : peptides = set ( ) for line in mf : if not line . startswith ( 'HLA' ) : continue line = line . strip ( ) . split ( '\t' ) allele = line [ 0 ] pept = line [ 5 ] pred = line [ 7 ] if float ( pred ) > 5.00 and not normal : continue results . loc [ len ( results ) ] = [ allele , pept , pred , pept ] results . drop_duplicates ( inplace = True ) return results
Process the results from running IEDB MHCI binding predictions into a pandas dataframe .
2,953
def pept_diff(p1, p2):
    if len(p1) != len(p2):
        return -1
    else:
        return sum([p1[i] != p2[i] for i in range(len(p1))])
Return the number of differences between 2 peptides.
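For example, with the function above in scope:

print(pept_diff('SIINFEKL', 'SIINFEKM'))  # 1: the peptides differ at the last residue
print(pept_diff('SIINFEKL', 'SIINFE'))    # -1: lengths differ, so no comparison is made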
2,954
def print_mhc_peptide ( neoepitope_info , peptides , pepmap , outfile , netmhc = False ) : if netmhc : peptide_names = [ neoepitope_info . peptide_name ] else : peptide_names = [ x for x , y in peptides . items ( ) if neoepitope_info . pept in y ] neoepitope_info = neoepitope_info . _asdict ( ) if neoepitope_info [ 'normal_pept' ] == 'N' * len ( neoepitope_info [ 'pept' ] ) : neoepitope_info [ 'normal_pept' ] = neoepitope_info [ 'normal_pred' ] = 'NA' for peptide_name in peptide_names : print ( '{ni[allele]}\t' '{ni[pept]}\t' '{ni[normal_pept]}\t' '{pname}\t' '{ni[core]}\t' '0\t' '{ni[tumor_pred]}\t' '{ni[normal_pred]}\t' '{pmap}' . format ( ni = neoepitope_info , pname = peptide_name , pmap = pepmap [ peptide_name ] ) , file = outfile ) return None
Accept data about one neoepitope from merge_mhc_peptide_calls and print it to outfile. This is a generic function to reduce code redundancy.
2,955
def check ( domain , prefix , code , strategies = '*' ) : if strategies == '*' or 'dns_txt' in strategies : if check_dns_txt ( domain , prefix , code ) : return True if strategies == '*' or 'dns_cname' in strategies : if check_dns_cname ( domain , prefix , code ) : return True if strategies == '*' or 'meta_tag' in strategies : if check_meta_tag ( domain , prefix , code ) : return True if strategies == '*' or 'html_file' in strategies : if check_html_file ( domain , prefix , code ) : return True return False
Check the ownership of a domain by going through a series of strategies. If at least one strategy succeeds, the domain is considered verified and this method returns true.
2,956
def register_cache_buster ( self , app , config = None ) : if not ( config is None or isinstance ( config , dict ) ) : raise ValueError ( "`config` must be an instance of dict or None" ) bust_map = { } unbust_map = { } app . logger . debug ( 'Starting computing hashes for static assets' ) for dirpath , dirnames , filenames in os . walk ( app . static_folder ) : for filename in filenames : rooted_filename = os . path . join ( dirpath , filename ) if not self . __is_file_to_be_busted ( rooted_filename ) : continue app . logger . debug ( f'Computing hashes for {rooted_filename}' ) with open ( rooted_filename , 'rb' ) as f : version = hashlib . md5 ( f . read ( ) ) . hexdigest ( ) [ : self . hash_size ] unbusted = os . path . relpath ( rooted_filename , app . static_folder ) busted = f"{unbusted}?q={version}" bust_map [ unbusted ] = busted unbust_map [ busted ] = unbusted app . logger . debug ( 'Finished Starting computing hashes for static assets' ) def bust_filename ( file ) : return bust_map . get ( file , file ) def unbust_filename ( file ) : return unbust_map . get ( file , file ) @ app . url_defaults def reverse_to_cache_busted_url ( endpoint , values ) : if endpoint == 'static' : values [ 'filename' ] = bust_filename ( values [ 'filename' ] ) def debusting_static_view ( * args , ** kwargs ) : kwargs [ 'filename' ] = unbust_filename ( kwargs . get ( 'filename' ) ) return original_static_view ( * args , ** kwargs ) original_static_view = app . view_functions [ 'static' ] app . view_functions [ 'static' ] = debusting_static_view
Register the app in the cache buster so that url_for adds a unique prefix to URLs generated for the static endpoint. Also make the app able to serve cache-busted static files.
2,957
def env_or_default(var, default=None):
    if var in os.environ:
        return os.environ[var]
    return default
Get environment variable or provide default .
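For example, with a hypothetical variable name:

import os

os.environ['MY_API_HOST'] = 'api.example.com'         # hypothetical variable
print(env_or_default('MY_API_HOST', 'localhost'))     # api.example.com
print(env_or_default('MY_MISSING_VAR', 'localhost'))  # localhost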
2,958
def kms_encrypt ( value , key , aws_config = None ) : aws_config = aws_config or { } aws = boto3 . session . Session ( ** aws_config ) client = aws . client ( 'kms' ) enc_res = client . encrypt ( KeyId = key , Plaintext = value ) return n ( b64encode ( enc_res [ 'CiphertextBlob' ] ) )
Encrypt a value with a KMS key.
2,959
def get_value ( * args , ** kwargs ) : global _config if _config is None : raise ValueError ( 'configuration not set; must run figgypy.set_config first' ) return _config . get_value ( * args , ** kwargs )
Get from the config object by exposing the Config.get_value method.
2,960
def set_value ( * args , ** kwargs ) : global _config if _config is None : raise ValueError ( 'configuration not set; must run figgypy.set_config first' ) return _config . set_value ( * args , ** kwargs )
Set value in the global Config object .
2,961
def decode_escapes(s):
    def decode_match(match):
        return codecs.decode(match.group(0), 'unicode-escape')
    return ESCAPE_SEQUENCE_RE.sub(decode_match, s)
Unescape libconfig string literals
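A minimal sketch, assuming an ESCAPE_SEQUENCE_RE similar to the usual libconfig escape pattern (an assumption; the real regex is not shown in this entry) defined alongside the function above:

import codecs
import re

# Assumption: simplified pattern covering \xNN escapes and the common short escapes.
ESCAPE_SEQUENCE_RE = re.compile(r'\\(?:x[0-9a-fA-F]{2}|[\\"fnrt])')

print(decode_escapes(r'line1\nline2\x21'))  # 'line1', a real newline, then 'line2!'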
2,962
def loads ( string , filename = None , includedir = '' ) : try : f = io . StringIO ( string ) except TypeError : raise TypeError ( "libconf.loads() input string must by unicode" ) return load ( f , filename = filename , includedir = includedir )
Load the contents of string to a Python object
2,963
def dump_string(s):
    s = (s.replace('\\', '\\\\')
          .replace('"', '\\"')
          .replace('\f', r'\f')
          .replace('\n', r'\n')
          .replace('\r', r'\r')
          .replace('\t', r'\t'))
    s = UNPRINTABLE_CHARACTER_RE.sub(
        lambda m: r'\x{:02x}'.format(ord(m.group(0))), s)
    return '"' + s + '"'
Stringize s adding double quotes and escaping as necessary
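A quick check, assuming UNPRINTABLE_CHARACTER_RE matches ASCII control characters (an assumption; the real pattern is not shown in this entry) and the function above is in scope:

import re

# Assumption: control characters below 0x20 plus DEL count as unprintable.
UNPRINTABLE_CHARACTER_RE = re.compile(r'[\x00-\x1f\x7f]')

print(dump_string('say "hi"\tnow'))  # "say \"hi\"\tnow"  (quoted, with escaped quotes and tab)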
2,964
def get_dump_type ( value ) : if isinstance ( value , dict ) : return 'd' if isinstance ( value , tuple ) : return 'l' if isinstance ( value , list ) : return 'a' if isinstance ( value , bool ) : return 'b' if isint ( value ) : if is_long_int ( value ) : return 'i64' else : return 'i' if isinstance ( value , float ) : return 'f' if isstr ( value ) : return 's' return None
Get the libconfig datatype of a value
2,965
def get_array_value_dtype ( lst ) : array_value_type = None for value in lst : dtype = get_dump_type ( value ) if dtype not in { 'b' , 'i' , 'i64' , 'f' , 's' } : raise ConfigSerializeError ( "Invalid datatype in array (may only contain scalars):" "%r of type %s" % ( value , type ( value ) ) ) if array_value_type is None : array_value_type = dtype continue if array_value_type == dtype : continue if array_value_type == 'i' and dtype == 'i64' : array_value_type = 'i64' continue if array_value_type == 'i64' and dtype == 'i' : continue raise ConfigSerializeError ( "Mixed types in array (all elements must have same type):" "%r of type %s" % ( value , type ( value ) ) ) return array_value_type
Return the array value type; raise ConfigSerializeError for invalid arrays.
2,966
def dump_value ( key , value , f , indent = 0 ) : spaces = ' ' * indent if key is None : key_prefix = '' key_prefix_nl = '' else : key_prefix = key + ' = ' key_prefix_nl = key + ' =\n' + spaces dtype = get_dump_type ( value ) if dtype == 'd' : f . write ( u'{}{}{{\n' . format ( spaces , key_prefix_nl ) ) dump_dict ( value , f , indent + 4 ) f . write ( u'{}}}' . format ( spaces ) ) elif dtype == 'l' : f . write ( u'{}{}(\n' . format ( spaces , key_prefix_nl ) ) dump_collection ( value , f , indent + 4 ) f . write ( u'\n{})' . format ( spaces ) ) elif dtype == 'a' : f . write ( u'{}{}[\n' . format ( spaces , key_prefix_nl ) ) value_dtype = get_array_value_dtype ( value ) if value_dtype == 'i64' : value = [ LibconfInt64 ( v ) for v in value ] dump_collection ( value , f , indent + 4 ) f . write ( u'\n{}]' . format ( spaces ) ) elif dtype == 's' : f . write ( u'{}{}{}' . format ( spaces , key_prefix , dump_string ( value ) ) ) elif dtype == 'i' or dtype == 'i64' : f . write ( u'{}{}{}' . format ( spaces , key_prefix , dump_int ( value ) ) ) elif dtype == 'f' or dtype == 'b' : f . write ( u'{}{}{}' . format ( spaces , key_prefix , value ) ) else : raise ConfigSerializeError ( "Can not serialize object %r of type %s" % ( value , type ( value ) ) )
Save a value of any libconfig type
2,967
def dump_collection ( cfg , f , indent = 0 ) : for i , value in enumerate ( cfg ) : dump_value ( None , value , f , indent ) if i < len ( cfg ) - 1 : f . write ( u',\n' )
Save a collection of attributes
2,968
def dump_dict ( cfg , f , indent = 0 ) : for key in cfg : if not isstr ( key ) : raise ConfigSerializeError ( "Dict keys must be strings: %r" % ( key , ) ) dump_value ( key , cfg [ key ] , f , indent ) f . write ( u';\n' )
Save a dictionary of attributes
2,969
def dumps(cfg):
    str_file = io.StringIO()
    dump(cfg, str_file)
    return str_file.getvalue()
Serialize cfg into a libconfig-formatted str.
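These helpers form the serialization half of the libconf package; if that package is installed, a round trip looks roughly like this (output layout shown in the comment is approximate):

import libconf  # pip install libconf

cfg = {'window': {'title': 'main', 'size': [640, 480]}}
text = libconf.dumps(cfg)
print(text)  # roughly: window = { title = "main"; size = [ 640, 480 ]; };
print(libconf.loads(text)['window']['title'])  # main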
2,970
def dump(cfg, f):
    if not isinstance(cfg, dict):
        raise ConfigSerializeError(
            'dump() requires a dict as input, not %r of type %r' % (cfg, type(cfg)))
    dump_dict(cfg, f, 0)
Serialize cfg as a libconfig-formatted stream into f.
2,971
def tokenize ( self , string ) : pos = 0 while pos < len ( string ) : m = SKIP_RE . match ( string , pos = pos ) if m : skip_lines = m . group ( 0 ) . split ( '\n' ) if len ( skip_lines ) > 1 : self . row += len ( skip_lines ) - 1 self . column = 1 + len ( skip_lines [ - 1 ] ) else : self . column += len ( skip_lines [ 0 ] ) pos = m . end ( ) continue for cls , type , regex in self . token_map : m = regex . match ( string , pos = pos ) if m : yield cls ( type , m . group ( 0 ) , self . filename , self . row , self . column ) self . column += len ( m . group ( 0 ) ) pos = m . end ( ) break else : raise ConfigParseError ( "Couldn't load config in %r row %d, column %d: %r" % ( self . filename , self . row , self . column , string [ pos : pos + 20 ] ) )
Yield tokens from the input string or throw ConfigParseError
2,972
def from_file ( cls , f , filename = None , includedir = '' , seenfiles = None ) : if filename is None : filename = getattr ( f , 'name' , '<unknown>' ) if seenfiles is None : seenfiles = set ( ) if filename in seenfiles : raise ConfigParseError ( "Circular include: %r" % ( filename , ) ) seenfiles = seenfiles | { filename } tokenizer = Tokenizer ( filename = filename ) lines = [ ] tokens = [ ] for line in f : m = re . match ( r'@include "(.*)"$' , line . strip ( ) ) if m : tokens . extend ( tokenizer . tokenize ( '' . join ( lines ) ) ) lines = [ re . sub ( r'\S' , ' ' , line ) ] includefilename = decode_escapes ( m . group ( 1 ) ) includefilename = os . path . join ( includedir , includefilename ) try : includefile = open ( includefilename , "r" ) except IOError : raise ConfigParseError ( "Could not open include file %r" % ( includefilename , ) ) with includefile : includestream = cls . from_file ( includefile , filename = includefilename , includedir = includedir , seenfiles = seenfiles ) tokens . extend ( includestream . tokens ) else : lines . append ( line ) tokens . extend ( tokenizer . tokenize ( '' . join ( lines ) ) ) return cls ( tokens )
Create a token stream by reading an input file
2,973
def error ( self , msg ) : if self . finished ( ) : raise ConfigParseError ( "Unexpected end of input; %s" % ( msg , ) ) else : t = self . peek ( ) raise ConfigParseError ( "Unexpected token %s; %s" % ( t , msg ) )
Raise a ConfigParseError at the current input position
2,974
def load_variables ( ) : if ( not os . environ . get ( "PYCONFLUENCE_TOKEN" ) or not os . environ . get ( "PYCONFLUENCE_USER" ) or not os . environ . get ( "PYCONFLUENCE_ORG" ) ) : print ( "One or more pyconfluence environment variables are not set. " "See README for directions on how to resolve this." ) sys . exit ( "Error" ) global token global user global base_url token = os . environ [ "PYCONFLUENCE_TOKEN" ] user = os . environ [ "PYCONFLUENCE_USER" ] base_url = ( "https://" + os . environ [ "PYCONFLUENCE_ORG" ] + ".atlassian" ".net/wiki/rest/api/content" )
Load variables from environment variables .
2,975
def rest ( url , req = "GET" , data = None ) : load_variables ( ) return _rest ( base_url + url , req , data )
Main function to be called from this module .
2,976
def _rest ( url , req , data = None ) : if url . upper ( ) . startswith ( "HTTPS" ) : print ( "Secure connection required: Please use HTTPS or https" ) return "" req = req . upper ( ) if req != "GET" and req != "PUT" and req != "POST" and req != "DELETE" : return "" status , body = _api_action ( url , req , data ) if ( int ( status ) >= 200 and int ( status ) <= 226 ) : return body else : return body
Send a REST request to the server.
2,977
def _api_action ( url , req , data = None ) : requisite_headers = { 'Accept' : 'application/json' , 'Content-Type' : 'application/json' } auth = ( user , token ) if req == "GET" : response = requests . get ( url , headers = requisite_headers , auth = auth ) elif req == "PUT" : response = requests . put ( url , headers = requisite_headers , auth = auth , data = data ) elif req == "POST" : response = requests . post ( url , headers = requisite_headers , auth = auth , data = data ) elif req == "DELETE" : response = requests . delete ( url , headers = requisite_headers , auth = auth ) return response . status_code , response . text
Take action based on what kind of request is needed .
2,978
def _platform_patterns ( self , platform = 'generic' , compiled = False ) : patterns = self . _dict_compiled . get ( platform , None ) if compiled else self . _dict_text . get ( platform , None ) if patterns is None : raise KeyError ( "Unknown platform: {}" . format ( platform ) ) return patterns
Return all the patterns for specific platform .
2,979
def pattern ( self , platform , key , compiled = True ) : patterns = self . _platform_patterns ( platform , compiled = compiled ) pattern = patterns . get ( key , self . _platform_patterns ( compiled = compiled ) . get ( key , None ) ) if pattern is None : raise KeyError ( "Patterns database corrupted. Platform: {}, Key: {}" . format ( platform , key ) ) return pattern
Return the pattern defined by the key string specific to the platform .
2,980
def description ( self , platform , key ) : patterns = self . _dict_dscr . get ( platform , None ) description = patterns . get ( key , None ) return description
Return the pattern description.
2,981
def platform ( self , with_prompt , platforms = None ) : if platforms is None : platforms = self . _dict [ 'generic' ] [ 'prompt_detection' ] for platform in platforms : pattern = self . pattern ( platform , 'prompt' ) result = re . search ( pattern , with_prompt ) if result : return platform return None
Return the platform name based on the prompt matching .
2,982
def after_connect ( self ) : show_users = self . device . send ( "show users" , timeout = 120 ) result = re . search ( pattern_manager . pattern ( self . platform , 'connected_locally' ) , show_users ) if result : self . log ( 'Locally connected to Calvados. Exiting.' ) self . device . send ( 'exit' ) return True return False
Execute after connect .
2,983
def get_hostname_text ( self ) : try : hostname_text = self . device . send ( 'hostname' , timeout = 10 ) if hostname_text : self . device . hostname = hostname_text . splitlines ( ) [ 0 ] return hostname_text except CommandError : self . log ( "Non Unix jumphost type detected" ) return None
Return hostname information from the Unix host .
2,984
def _find_file ( f ) : if os . path . isabs ( f ) : return f else : for d in Config . _dirs : _f = os . path . join ( d , f ) if os . path . isfile ( _f ) : return _f raise FiggypyError ( "could not find configuration file {} in dirs {}" . format ( f , Config . _dirs ) )
Find a config file if possible .
2,985
def _load_file ( self , f ) : try : with open ( f , 'r' ) as _fo : _seria_in = seria . load ( _fo ) _y = _seria_in . dump ( 'yaml' ) except IOError : raise FiggypyError ( "could not open configuration file" ) self . values . update ( yaml . load ( _y ) )
Get values from config file
2,986
def setup ( self , config_file = None , aws_config = None , gpg_config = None , decrypt_gpg = True , decrypt_kms = True ) : if aws_config is not None : self . aws_config = aws_config if gpg_config is not None : self . gpg_config = gpg_config if decrypt_kms is not None : self . decrypt_kms = decrypt_kms if decrypt_gpg is not None : self . decrypt_gpg = decrypt_gpg if config_file is not None : self . config_file = config_file return self
Make setup easier by providing a constructor method .
2,987
def authenticate ( self , driver ) : events = [ driver . username_re , driver . password_re , self . device . prompt_re , driver . rommon_re , driver . unable_to_connect_re , driver . authentication_error_re , pexpect . TIMEOUT , pexpect . EOF ] transitions = [ ( driver . username_re , [ 0 ] , 1 , partial ( a_send_username , self . username ) , 10 ) , ( driver . username_re , [ 1 ] , 1 , None , 10 ) , ( driver . password_re , [ 0 , 1 ] , 2 , partial ( a_send_password , self . _acquire_password ( ) ) , _C [ 'first_prompt_timeout' ] ) , ( driver . username_re , [ 2 ] , - 1 , a_authentication_error , 0 ) , ( driver . password_re , [ 2 ] , - 1 , a_authentication_error , 0 ) , ( driver . authentication_error_re , [ 1 , 2 ] , - 1 , a_authentication_error , 0 ) , ( self . device . prompt_re , [ 0 , 1 , 2 ] , - 1 , None , 0 ) , ( driver . rommon_re , [ 0 ] , - 1 , partial ( a_send , "\r\n" ) , 0 ) , ( pexpect . TIMEOUT , [ 0 ] , 1 , partial ( a_send , "\r\n" ) , 10 ) , ( pexpect . TIMEOUT , [ 2 ] , - 1 , None , 0 ) , ( pexpect . TIMEOUT , [ 3 , 7 ] , - 1 , ConnectionTimeoutError ( "Connection Timeout" , self . hostname ) , 0 ) , ( driver . unable_to_connect_re , [ 0 , 1 , 2 ] , - 1 , a_unable_to_connect , 0 ) , ] self . log ( "EXPECTED_PROMPT={}" . format ( pattern_to_str ( self . device . prompt_re ) ) ) fsm = FSM ( "CONSOLE-SERVER-AUTH" , self . device , events , transitions , timeout = _C [ 'connect_timeout' ] , init_pattern = self . last_pattern ) return fsm . run ( )
Authenticate using the Console Server protocol specific FSM .
2,988
def delegate(attribute_name, method_names):
    info = {'attribute': attribute_name, 'methods': method_names}

    def decorator(cls):
        attribute = info['attribute']
        if attribute.startswith("__"):
            attribute = "_" + cls.__name__ + attribute
        for name in info['methods']:
            setattr(cls, name, eval("lambda self, *a, **kw: "
                                    "self.{0}.{1}(*a, **kw)".format(attribute, name)))
        return cls
    return decorator
Pass the call to the attribute called attribute_name for every method listed in method_names .
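A small usage sketch of the decorator above, with a hypothetical class and attribute name:

@delegate('_items', ['append', 'pop'])
class Stack:
    def __init__(self):
        self._items = []

s = Stack()
s.append(1)
s.append(2)
print(s.pop())  # 2, forwarded to s._items.pop()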
2,989
def pattern_to_str(pattern):
    if isinstance(pattern, str):
        return repr(pattern)
    else:
        return repr(pattern.pattern) if pattern else None
Convert regex pattern to string .
2,990
def levenshtein_distance(str_a, str_b):
    len_a, len_b = len(str_a), len(str_b)
    if len_a > len_b:
        str_a, str_b = str_b, str_a
        len_a, len_b = len_b, len_a
    current = range(len_a + 1)
    for i in range(1, len_b + 1):
        previous, current = current, [i] + [0] * len_a
        for j in range(1, len_a + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1]
            if str_a[j - 1] != str_b[i - 1]:
                change += 1
            current[j] = min(add, delete, change)
    return current[len_a]
Calculate the Levenshtein distance between strings a and b.
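For example, with the function above in scope:

print(levenshtein_distance('kitten', 'sitting'))  # 3
print(levenshtein_distance('flaw', 'lawn'))       # 2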
2,991
def parse_inventory ( inventory_output = None ) : udi = { "name" : "" , "description" : "" , "pid" : "" , "vid" : "" , "sn" : "" } if inventory_output is None : return udi capture_next = False chassis_udi_text = None for line in inventory_output . split ( '\n' ) : lc_line = line . lower ( ) if ( 'chassis' in lc_line or 'switch system' in lc_line or 'rack' in lc_line ) and 'name' in lc_line and 'descr' : capture_next = True chassis_udi_text = line continue if capture_next : inventory_output = chassis_udi_text + "\n" + line break match = re . search ( r"(?i)NAME: (?P<name>.*?),? (?i)DESCR" , inventory_output , re . MULTILINE ) if match : udi [ 'name' ] = match . group ( 'name' ) . strip ( '" ,' ) match = re . search ( r"(?i)DESCR: (?P<description>.*)" , inventory_output , re . MULTILINE ) if match : udi [ 'description' ] = match . group ( 'description' ) . strip ( '" ' ) match = re . search ( r"(?i)PID: (?P<pid>.*?),? " , inventory_output , re . MULTILINE ) if match : udi [ 'pid' ] = match . group ( 'pid' ) match = re . search ( r"(?i)VID: (?P<vid>.*?),? " , inventory_output , re . MULTILINE ) if match : udi [ 'vid' ] = match . group ( 'vid' ) match = re . search ( r"(?i)SN: (?P<sn>.*)" , inventory_output , re . MULTILINE ) if match : udi [ 'sn' ] = match . group ( 'sn' ) . strip ( ) return udi
Parse the inventory text and return udi dict .
2,992
def normalize_urls ( urls ) : _urls = [ ] if isinstance ( urls , list ) : if urls : if isinstance ( urls [ 0 ] , list ) : _urls = urls elif isinstance ( urls [ 0 ] , str ) : _urls = [ urls ] else : raise RuntimeError ( "No target host url provided." ) elif isinstance ( urls , str ) : _urls = [ [ urls ] ] return _urls
Normalize urls into a list of lists of urls.
2,993
def yaml_file_to_dict ( script_name , path = None ) : def load_yaml ( file_path ) : with open ( file_path , 'r' ) as yamlfile : try : dictionary = yaml . load ( yamlfile ) except yaml . YAMLError : return { } return dictionary def merge ( user , default ) : if isinstance ( user , dict ) and isinstance ( default , dict ) : for k , v in default . iteritems ( ) : if k not in user : user [ k ] = v else : user [ k ] = merge ( user [ k ] , v ) return user if path is None : path = os . path . abspath ( '.' ) config_file_path = os . path . join ( path , script_name + '.yaml' ) if not os . path . exists ( config_file_path ) : raise RuntimeError ( 'Config file does not exist: {}' . format ( config_file_path ) ) default_dict = load_yaml ( config_file_path ) user_config_file_path = os . path . join ( os . path . expanduser ( '~' ) , '.condoor' , os . path . basename ( script_name ) + '.yaml' ) user_config_file_path = os . getenv ( 'CONDOOR_' + os . path . basename ( script_name ) . upper ( ) , user_config_file_path ) if os . path . exists ( user_config_file_path ) : user_dict = load_yaml ( user_config_file_path ) if user_dict : default_dict = merge ( user_dict , default_dict ) return default_dict
Read yaml file and return the dict .
2,994
def write ( self , text ) : index = text . find ( '\n' ) if index == - 1 : self . _buffer = self . _buffer + text else : self . _buffer = self . _buffer + text [ : index + 1 ] if self . _pattern : result = re . search ( self . _pattern , self . _buffer ) if result : for group in result . groups ( ) : if group : self . _buffer = self . _buffer . replace ( group , "***" ) self . _file . write ( self . _buffer ) self . _file . flush ( ) self . _buffer = text [ index + 1 : ]
Override the standard write method to filter the content .
2,995
def start ( builtins = False , profile_threads = True ) : if profile_threads : threading . setprofile ( _callback ) _yappi . start ( builtins , profile_threads )
Start profiler .
2,996
def set_clock_type(type):
    type = type.upper()
    if type not in CLOCK_TYPES:
        raise YappiError("Invalid clock type:%s" % (type))
    _yappi.set_clock_type(CLOCK_TYPES[type])
Sets the internal clock type for timing. The profiler must not have any previous stats; otherwise an exception is thrown.
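Typical use of these wrappers with the yappi profiler looks like this (a sketch assuming the standard yappi API):

import yappi

yappi.set_clock_type('cpu')          # or 'wall'
yappi.start()
sum(i * i for i in range(100000))    # code under measurement
yappi.stop()
yappi.get_func_stats().print_all()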
2,997
async def read_reply ( self ) : code = 500 messages = [ ] go_on = True while go_on : try : line = await self . readline ( ) except ValueError as e : code = 500 go_on = False else : try : code = int ( line [ : 3 ] ) except ValueError as e : raise ConnectionResetError ( "Connection lost." ) from e else : go_on = line [ 3 : 4 ] == b"-" message = line [ 4 : ] . strip ( b" \t\r\n" ) . decode ( "ascii" ) messages . append ( message ) full_message = "\n" . join ( messages ) return code , full_message
Reads a reply from the server .
2,998
def make_hop_info_from_url ( url , verify_reachability = None ) : parsed = urlparse ( url ) username = None if parsed . username is None else unquote ( parsed . username ) password = None if parsed . password is None else unquote ( parsed . password ) try : enable_password = parse_qs ( parsed . query ) [ "enable_password" ] [ 0 ] except KeyError : enable_password = None hop_info = HopInfo ( parsed . scheme , parsed . hostname , username , password , parsed . port , enable_password , verify_reachability = verify_reachability ) if hop_info . is_valid ( ) : return hop_info raise InvalidHopInfoError
Build HopInfo object from url .
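The URL shape it expects can be seen with a plain urlparse call (Python 3 spelling, hypothetical host and credentials):

from urllib.parse import parse_qs, unquote, urlparse

parsed = urlparse('telnet://admin:secret%21@host.example.com:2004?enable_password=enpass')
print(parsed.scheme, parsed.hostname, parsed.port)    # telnet host.example.com 2004
print(unquote(parsed.password))                       # secret!
print(parse_qs(parsed.query)['enable_password'][0])   # enpass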
2,999
def is_reachable(self):
    if self.verify_reachability and hasattr(self.verify_reachability, '__call__'):
        return self.verify_reachability(host=self.hostname, port=self.port)
    return True
Return whether the host is reachable.