idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
16,900 | def valid_uuid ( value ) : try : uuid . UUID ( value , version = 4 ) return True except ( TypeError , ValueError , AttributeError ) : return False | Check if value is a valid UUID . |
16,901 | def create_args_parser ( description ) : parser = argparse . ArgumentParser ( description = description ) def network_port ( string ) : value = int ( string ) if not 0 < value <= 65535 : raise argparse . ArgumentTypeError ( 'port must be in ]0,65535] interval' ) return value def cacert_file ( cacert ) : try : context = ssl . create_default_context ( cafile = cacert ) except AttributeError : return cacert except IOError : raise argparse . ArgumentTypeError ( 'CA Certificate not found' ) try : not_after = context . get_ca_certs ( ) [ 0 ] [ 'notAfter' ] not_after = ssl . cert_time_to_seconds ( not_after ) not_before = context . get_ca_certs ( ) [ 0 ] [ 'notBefore' ] not_before = ssl . cert_time_to_seconds ( not_before ) except ( KeyError , IndexError ) : raise argparse . ArgumentTypeError ( 'CA Certificate is erroneous' ) if not_after < int ( time . time ( ) ) : raise argparse . ArgumentTypeError ( 'CA Certificate expired' ) if not_before > int ( time . time ( ) ) : raise argparse . ArgumentTypeError ( 'CA Certificate not active yet' ) return cacert def log_level ( string ) : value = getattr ( logging , string . upper ( ) , None ) if not isinstance ( value , int ) : raise argparse . ArgumentTypeError ( 'log level must be one of {debug,info,warning,error,critical}' ) return value def filename ( string ) : if not os . path . isfile ( string ) : raise argparse . ArgumentTypeError ( '%s is not a valid file path' % string ) return string parser . add_argument ( '-p' , '--port' , default = PORT , type = network_port , help = 'TCP Port to listen on. Default: {0}' . format ( PORT ) ) parser . add_argument ( '-b' , '--bind-address' , default = ADDRESS , help = 'Address to listen on. Default: {0}' . format ( ADDRESS ) ) parser . add_argument ( '-u' , '--unix-socket' , help = 'Unix file socket to listen on.' ) parser . add_argument ( '-k' , '--key-file' , type = filename , help = 'Server key file. Default: {0}' . format ( KEY_FILE ) ) parser . add_argument ( '-c' , '--cert-file' , type = filename , help = 'Server cert file. Default: {0}' . format ( CERT_FILE ) ) parser . add_argument ( '--ca-file' , type = cacert_file , help = 'CA cert file. Default: {0}' . format ( CA_FILE ) ) parser . add_argument ( '-L' , '--log-level' , default = 'warning' , type = log_level , help = 'Wished level of logging. Default: WARNING' ) parser . add_argument ( '--foreground' , action = 'store_true' , help = 'Run in foreground and logs all messages to console.' ) parser . add_argument ( '-l' , '--log-file' , type = filename , help = 'Path to the logging file.' ) parser . add_argument ( '--version' , action = 'store_true' , help = 'Print version then exit.' ) return parser | Create a command - line arguments parser for OSPD . |
16,902 | def go_to_background ( ) : try : if os . fork ( ) : sys . exit ( ) except OSError as errmsg : LOGGER . error ( 'Fork failed: {0}' . format ( errmsg ) ) sys . exit ( 'Fork failed' ) | Daemonize the running process . |
16,903 | def get_common_args ( parser , args = None ) : options = parser . parse_args ( args ) port = options . port address = options . bind_address unix_socket = options . unix_socket log_level = options . log_level keyfile = options . key_file or KEY_FILE certfile = options . cert_file or CERT_FILE cafile = options . ca_file or CA_FILE common_args = dict ( ) common_args [ 'port' ] = port common_args [ 'address' ] = address common_args [ 'unix_socket' ] = unix_socket common_args [ 'keyfile' ] = keyfile common_args [ 'certfile' ] = certfile common_args [ 'cafile' ] = cafile common_args [ 'log_level' ] = log_level common_args [ 'foreground' ] = options . foreground common_args [ 'log_file' ] = options . log_file common_args [ 'version' ] = options . version return common_args | Return list of OSPD common command - line arguments from parser after validating provided values or setting default ones . |
16,904 | def print_version ( wrapper ) : scanner_name = wrapper . get_scanner_name ( ) server_version = wrapper . get_server_version ( ) print ( "OSP Server for {0} version {1}" . format ( scanner_name , server_version ) ) protocol_version = wrapper . get_protocol_version ( ) print ( "OSP Version: {0}" . format ( protocol_version ) ) daemon_name = wrapper . get_daemon_name ( ) daemon_version = wrapper . get_daemon_version ( ) print ( "Using: {0} {1}" . format ( daemon_name , daemon_version ) ) print ( "Copyright (C) 2014, 2015 Greenbone Networks GmbH\n" "License GPLv2+: GNU GPL version 2 or later\n" "This is free software: you are free to change" " and redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law." ) | Prints the server version and license information . |
16,905 | def main ( name , klass ) : parser = create_args_parser ( name ) cargs = get_common_args ( parser ) logging . getLogger ( ) . setLevel ( cargs [ 'log_level' ] ) wrapper = klass ( certfile = cargs [ 'certfile' ] , keyfile = cargs [ 'keyfile' ] , cafile = cargs [ 'cafile' ] ) if cargs [ 'version' ] : print_version ( wrapper ) sys . exit ( ) if cargs [ 'foreground' ] : console = logging . StreamHandler ( ) console . setFormatter ( logging . Formatter ( '%(asctime)s %(name)s: %(levelname)s: %(message)s' ) ) logging . getLogger ( ) . addHandler ( console ) elif cargs [ 'log_file' ] : logfile = logging . handlers . WatchedFileHandler ( cargs [ 'log_file' ] ) logfile . setFormatter ( logging . Formatter ( '%(asctime)s %(name)s: %(levelname)s: %(message)s' ) ) logging . getLogger ( ) . addHandler ( logfile ) go_to_background ( ) else : syslog = logging . handlers . SysLogHandler ( '/dev/log' ) syslog . setFormatter ( logging . Formatter ( '%(name)s: %(levelname)s: %(message)s' ) ) logging . getLogger ( ) . addHandler ( syslog ) syslog_fd = syslog . socket . fileno ( ) os . dup2 ( syslog_fd , 1 ) os . dup2 ( syslog_fd , 2 ) go_to_background ( ) if not wrapper . check ( ) : return 1 return wrapper . run ( cargs [ 'address' ] , cargs [ 'port' ] , cargs [ 'unix_socket' ] ) | OSPD Main function . |
16,906 | def add_result ( self , scan_id , result_type , host = '' , name = '' , value = '' , port = '' , test_id = '' , severity = '' , qod = '' ) : assert scan_id assert len ( name ) or len ( value ) result = dict ( ) result [ 'type' ] = result_type result [ 'name' ] = name result [ 'severity' ] = severity result [ 'test_id' ] = test_id result [ 'value' ] = value result [ 'host' ] = host result [ 'port' ] = port result [ 'qod' ] = qod results = self . scans_table [ scan_id ] [ 'results' ] results . append ( result ) self . scans_table [ scan_id ] [ 'results' ] = results | Add a result to a scan in the table . |
16,907 | def get_hosts_unfinished ( self , scan_id ) : unfinished_hosts = list ( ) for target in self . scans_table [ scan_id ] [ 'finished_hosts' ] : unfinished_hosts . extend ( target_str_to_list ( target ) ) for target in self . scans_table [ scan_id ] [ 'finished_hosts' ] : for host in self . scans_table [ scan_id ] [ 'finished_hosts' ] [ target ] : unfinished_hosts . remove ( host ) return unfinished_hosts | Get a list of finished hosts . |
16,908 | def results_iterator ( self , scan_id , pop_res ) : if pop_res : result_aux = self . scans_table [ scan_id ] [ 'results' ] self . scans_table [ scan_id ] [ 'results' ] = list ( ) return iter ( result_aux ) return iter ( self . scans_table [ scan_id ] [ 'results' ] ) | Returns an iterator over scan_id scan s results . If pop_res is True it removed the fetched results from the list . |
16,909 | def del_results_for_stopped_hosts ( self , scan_id ) : unfinished_hosts = self . get_hosts_unfinished ( scan_id ) for result in self . results_iterator ( scan_id , False ) : if result [ 'host' ] in unfinished_hosts : self . remove_single_result ( scan_id , result ) | Remove results from the result table for those host |
16,910 | def create_scan ( self , scan_id = '' , targets = '' , options = None , vts = '' ) : if self . data_manager is None : self . data_manager = multiprocessing . Manager ( ) if scan_id and self . id_exists ( scan_id ) and ( self . get_status ( scan_id ) == ScanStatus . STOPPED ) : return self . resume_scan ( scan_id , options ) if not options : options = dict ( ) scan_info = self . data_manager . dict ( ) scan_info [ 'results' ] = list ( ) scan_info [ 'finished_hosts' ] = dict ( [ [ target , [ ] ] for target , _ , _ in targets ] ) scan_info [ 'progress' ] = 0 scan_info [ 'target_progress' ] = dict ( [ [ target , { } ] for target , _ , _ in targets ] ) scan_info [ 'targets' ] = targets scan_info [ 'vts' ] = vts scan_info [ 'options' ] = options scan_info [ 'start_time' ] = int ( time . time ( ) ) scan_info [ 'end_time' ] = "0" scan_info [ 'status' ] = ScanStatus . INIT if scan_id is None or scan_id == '' : scan_id = str ( uuid . uuid4 ( ) ) scan_info [ 'scan_id' ] = scan_id self . scans_table [ scan_id ] = scan_info return scan_id | Creates a new scan with provided scan information . |
16,911 | def set_option ( self , scan_id , name , value ) : self . scans_table [ scan_id ] [ 'options' ] [ name ] = value | Set a scan_id scan s name option to value . |
16,912 | def get_target_progress ( self , scan_id , target ) : total_hosts = len ( target_str_to_list ( target ) ) host_progresses = self . scans_table [ scan_id ] [ 'target_progress' ] . get ( target ) try : t_prog = sum ( host_progresses . values ( ) ) / total_hosts except ZeroDivisionError : LOGGER . error ( "Zero division error in " , get_target_progress . __name__ ) raise return t_prog | Get a target s current progress value . The value is calculated with the progress of each single host in the target . |
16,913 | def get_target_list ( self , scan_id ) : target_list = [ ] for target , _ , _ in self . scans_table [ scan_id ] [ 'targets' ] : target_list . append ( target ) return target_list | Get a scan s target list . |
16,914 | def get_ports ( self , scan_id , target ) : if target : for item in self . scans_table [ scan_id ] [ 'targets' ] : if target == item [ 0 ] : return item [ 1 ] return self . scans_table [ scan_id ] [ 'targets' ] [ 0 ] [ 1 ] | Get a scan s ports list . If a target is specified it will return the corresponding port for it . If not it returns the port item of the first nested list in the target s list . |
16,915 | def get_credentials ( self , scan_id , target ) : if target : for item in self . scans_table [ scan_id ] [ 'targets' ] : if target == item [ 0 ] : return item [ 2 ] | Get a scan s credential list . It return dictionary with the corresponding credential for a given target . |
16,916 | def delete_scan ( self , scan_id ) : if self . get_status ( scan_id ) == ScanStatus . RUNNING : return False self . scans_table . pop ( scan_id ) if len ( self . scans_table ) == 0 : del self . data_manager self . data_manager = None return True | Delete a scan if fully finished . |
16,917 | def is_timestamp ( obj ) : return isinstance ( obj , datetime . datetime ) or is_string ( obj ) or is_int ( obj ) or is_float ( obj ) | Yaml either have automatically converted it to a datetime object or it is a string that will be validated later . |
16,918 | def init_logging ( log_level ) : log_level = log_level_to_string_map [ min ( log_level , 5 ) ] msg = "%(levelname)s - %(name)s:%(lineno)s - %(message)s" if log_level in os . environ else "%(levelname)s - %(message)s" logging_conf = { "version" : 1 , "root" : { "level" : log_level , "handlers" : [ "console" ] } , "handlers" : { "console" : { "class" : "logging.StreamHandler" , "level" : log_level , "formatter" : "simple" , "stream" : "ext://sys.stdout" } } , "formatters" : { "simple" : { "format" : " {0}" . format ( msg ) } } } logging . config . dictConfig ( logging_conf ) | Init logging settings with default set to INFO |
16,919 | def keywords ( self ) : defined_keywords = [ ( 'allowempty_map' , 'allowempty_map' ) , ( 'assertion' , 'assertion' ) , ( 'default' , 'default' ) , ( 'class' , 'class' ) , ( 'desc' , 'desc' ) , ( 'enum' , 'enum' ) , ( 'example' , 'example' ) , ( 'extensions' , 'extensions' ) , ( 'format' , 'format' ) , ( 'func' , 'func' ) , ( 'ident' , 'ident' ) , ( 'include_name' , 'include' ) , ( 'length' , 'length' ) , ( 'map_regex_rule' , 'map_regex_rule' ) , ( 'mapping' , 'mapping' ) , ( 'matching' , 'matching' ) , ( 'matching_rule' , 'matching_rule' ) , ( 'name' , 'name' ) , ( 'nullable' , 'nullable' ) ( 'parent' , 'parent' ) , ( 'pattern' , 'pattern' ) , ( 'pattern_regexp' , 'pattern_regexp' ) , ( 'range' , 'range' ) , ( 'regex_mappings' , 'regex_mappings' ) , ( 'required' , 'required' ) , ( 'schema' , 'schema' ) , ( 'schema_str' , 'schema_str' ) , ( 'sequence' , 'sequence' ) , ( 'type' , 'type' ) , ( 'type_class' , 'type_class' ) , ( 'unique' , 'unique' ) , ( 'version' , 'version' ) , ] found_keywords = [ ] for var_name , keyword_name in defined_keywords : if getattr ( self , var_name , None ) : found_keywords . append ( keyword_name ) return found_keywords | Returns a list of all keywords that this rule object has defined . A keyword is considered defined if the value it returns ! = None . |
16,920 | def _load_extensions ( self ) : log . debug ( u"loading all extensions : %s" , self . extensions ) self . loaded_extensions = [ ] for f in self . extensions : if not os . path . isabs ( f ) : f = os . path . abspath ( f ) if not os . path . exists ( f ) : raise CoreError ( u"Extension file: {0} not found on disk" . format ( f ) ) self . loaded_extensions . append ( imp . load_source ( "" , f ) ) log . debug ( self . loaded_extensions ) log . debug ( [ dir ( m ) for m in self . loaded_extensions ] ) | Load all extension files into the namespace pykwalify . ext |
16,921 | def _handle_func ( self , value , rule , path , done = None ) : func = rule . func if not func : return found_method = False for extension in self . loaded_extensions : method = getattr ( extension , func , None ) if method : found_method = True ret = method ( value , rule , path ) if ret is not True and ret is not None : msg = '%s. Path: {path}' % unicode ( ret ) self . errors . append ( SchemaError . SchemaErrorEntry ( msg = msg , path = path , value = None ) ) if not ret : raise CoreError ( u"Error when running extension function : {0}" . format ( func ) ) break if not found_method : raise CoreError ( u"Did not find method '{0}' in any loaded extension file" . format ( func ) ) | Helper function that should check if func is specified for this rule and then handle it for all cases in a generic way . |
16,922 | def _validate_range ( self , max_ , min_ , max_ex , min_ex , value , path , prefix ) : if not isinstance ( value , int ) and not isinstance ( value , float ) : raise CoreError ( "Value must be a integer type" ) log . debug ( u"Validate range : %s : %s : %s : %s : %s : %s" , max_ , min_ , max_ex , min_ex , value , path , ) if max_ is not None and max_ < value : self . errors . append ( SchemaError . SchemaErrorEntry ( msg = u"Type '{prefix}' has size of '{value}', greater than max limit '{max_}'. Path: '{path}'" , path = path , value = nativestr ( value ) if tt [ 'str' ] ( value ) else value , prefix = prefix , max_ = max_ ) ) if min_ is not None and min_ > value : self . errors . append ( SchemaError . SchemaErrorEntry ( msg = u"Type '{prefix}' has size of '{value}', less than min limit '{min_}'. Path: '{path}'" , path = path , value = nativestr ( value ) if tt [ 'str' ] ( value ) else value , prefix = prefix , min_ = min_ ) ) if max_ex is not None and max_ex <= value : self . errors . append ( SchemaError . SchemaErrorEntry ( msg = u"Type '{prefix}' has size of '{value}', greater than or equals to max limit(exclusive) '{max_ex}'. Path: '{path}'" , path = path , value = nativestr ( value ) if tt [ 'str' ] ( value ) else value , prefix = prefix , max_ex = max_ex ) ) if min_ex is not None and min_ex >= value : self . errors . append ( SchemaError . SchemaErrorEntry ( msg = u"Type '{prefix}' has size of '{value}', less than or equals to min limit(exclusive) '{min_ex}'. Path: '{path}'" , path = path , value = nativestr ( value ) if tt [ 'str' ] ( value ) else value , prefix = prefix , min_ex = min_ex ) ) | Validate that value is within range values . |
16,923 | def run ( cli_args ) : from . core import Core c = Core ( source_file = cli_args [ "--data-file" ] , schema_files = cli_args [ "--schema-file" ] , extensions = cli_args [ '--extension' ] , strict_rule_validation = cli_args [ '--strict-rule-validation' ] , fix_ruby_style_regex = cli_args [ '--fix-ruby-style-regex' ] , allow_assertions = cli_args [ '--allow-assertions' ] , file_encoding = cli_args [ '--encoding' ] , ) c . validate ( ) return c | Split the functionality into 2 methods . |
16,924 | def to_bedtool ( iterator ) : def gen ( ) : for i in iterator : yield helpers . asinterval ( i ) return pybedtools . BedTool ( gen ( ) ) | Convert any iterator into a pybedtools . BedTool object . |
16,925 | def tsses ( db , merge_overlapping = False , attrs = None , attrs_sep = ":" , merge_kwargs = None , as_bed6 = False , bedtools_227_or_later = True ) : _override = os . environ . get ( 'GFFUTILS_USES_BEDTOOLS_227_OR_LATER' , None ) if _override is not None : if _override == 'true' : bedtools_227_or_later = True elif _override == 'false' : bedtools_227_or_later = False else : raise ValueError ( "Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER " "environment variable: {0}" . format ( _override ) ) if bedtools_227_or_later : _merge_kwargs = dict ( o = 'distinct' , s = True , c = '4,5,6' ) else : _merge_kwargs = dict ( o = 'distinct' , s = True , c = '4' ) if merge_kwargs is not None : _merge_kwargs . update ( merge_kwargs ) def gen ( ) : for gene in db . features_of_type ( 'gene' ) : for transcript in db . children ( gene , level = 1 ) : if transcript . strand == '-' : transcript . start = transcript . stop else : transcript . stop = transcript . start transcript . featuretype = transcript . featuretype + '_TSS' yield helpers . asinterval ( transcript ) x = pybedtools . BedTool ( gen ( ) ) . sort ( ) if attrs is None : if db . dialect [ 'fmt' ] == 'gtf' : attrs = 'gene_id' else : attrs = 'ID' if merge_overlapping or as_bed6 : if isinstance ( attrs , six . string_types ) : attrs = [ attrs ] def to_bed ( f ) : name = attrs_sep . join ( [ f . attrs [ i ] for i in attrs ] ) return pybedtools . Interval ( f . chrom , f . start , f . stop , name , str ( f . score ) , f . strand ) x = x . each ( to_bed ) . saveas ( ) if merge_overlapping : if bedtools_227_or_later : x = x . merge ( ** _merge_kwargs ) else : def fix_merge ( f ) : f = featurefuncs . extend_fields ( f , 6 ) return pybedtools . Interval ( f . chrom , f . start , f . stop , f [ 4 ] , '.' , f [ 3 ] ) x = x . merge ( ** _merge_kwargs ) . saveas ( ) . each ( fix_merge ) . saveas ( ) return x | Create 1 - bp transcription start sites for all transcripts in the database and return as a sorted pybedtools . BedTool object pointing to a temporary file . |
16,926 | def close ( self ) : self . out_stream . close ( ) if self . in_place : shutil . move ( self . temp_file . name , self . out ) | Close the stream . Assumes stream has close method . |
16,927 | def to_seqfeature ( feature ) : if isinstance ( feature , six . string_types ) : feature = feature_from_line ( feature ) qualifiers = { 'source' : [ feature . source ] , 'score' : [ feature . score ] , 'seqid' : [ feature . seqid ] , 'frame' : [ feature . frame ] , } qualifiers . update ( feature . attributes ) return SeqFeature ( FeatureLocation ( feature . start - 1 , feature . stop ) , id = feature . id , type = feature . featuretype , strand = _biopython_strand [ feature . strand ] , qualifiers = qualifiers ) | Converts a gffutils . Feature object to a Bio . SeqFeature object . |
16,928 | def from_seqfeature ( s , ** kwargs ) : source = s . qualifiers . get ( 'source' , '.' ) [ 0 ] score = s . qualifiers . get ( 'score' , '.' ) [ 0 ] seqid = s . qualifiers . get ( 'seqid' , '.' ) [ 0 ] frame = s . qualifiers . get ( 'frame' , '.' ) [ 0 ] strand = _feature_strand [ s . strand ] start = s . location . start . position + 1 stop = s . location . end . position featuretype = s . type id = s . id attributes = dict ( s . qualifiers ) attributes . pop ( 'source' , '.' ) attributes . pop ( 'score' , '.' ) attributes . pop ( 'seqid' , '.' ) attributes . pop ( 'frame' , '.' ) return Feature ( seqid , source , featuretype , start , stop , score , strand , frame , attributes , id = id , ** kwargs ) | Converts a Bio . SeqFeature object to a gffutils . Feature object . |
16,929 | def set_pragmas ( self , pragmas ) : self . pragmas = pragmas c = self . conn . cursor ( ) c . executescript ( ';\n' . join ( [ 'PRAGMA %s=%s' % i for i in self . pragmas . items ( ) ] ) ) self . conn . commit ( ) | Set pragmas for the current database connection . |
16,930 | def _feature_returner ( self , ** kwargs ) : kwargs . setdefault ( 'dialect' , self . dialect ) kwargs . setdefault ( 'keep_order' , self . keep_order ) kwargs . setdefault ( 'sort_attribute_values' , self . sort_attribute_values ) return Feature ( ** kwargs ) | Returns a feature adding additional database - specific defaults |
16,931 | def schema ( self ) : c = self . conn . cursor ( ) c . execute ( ) results = [ ] for i , in c : if i is not None : results . append ( i ) return '\n' . join ( results ) | Returns the database schema as a string . |
16,932 | def featuretypes ( self ) : c = self . conn . cursor ( ) c . execute ( ) for i , in c : yield i | Iterate over feature types found in the database . |
16,933 | def execute ( self , query ) : c = self . conn . cursor ( ) return c . execute ( query ) | Execute arbitrary queries on the db . |
16,934 | def interfeatures ( self , features , new_featuretype = None , merge_attributes = True , dialect = None , attribute_func = None , update_attributes = None ) : for i , f in enumerate ( features ) : if i == 0 : interfeature_start = f . stop last_feature = f continue interfeature_stop = f . start if new_featuretype is None : new_featuretype = 'inter_%s_%s' % ( last_feature . featuretype , f . featuretype ) if last_feature . strand != f . strand : new_strand = '.' else : new_strand = f . strand if last_feature . chrom != f . chrom : last_feature = f continue strand = new_strand chrom = last_feature . chrom interfeature_start += 1 interfeature_stop -= 1 if merge_attributes : new_attributes = helpers . merge_attributes ( last_feature . attributes , f . attributes ) else : new_attributes = { } if update_attributes : new_attributes . update ( update_attributes ) new_bin = bins . bins ( interfeature_start , interfeature_stop , one = True ) _id = None fields = dict ( seqid = chrom , source = 'gffutils_derived' , featuretype = new_featuretype , start = interfeature_start , end = interfeature_stop , score = '.' , strand = strand , frame = '.' , attributes = new_attributes , bin = new_bin ) if dialect is None : try : dialect = self . dialect except AttributeError : dialect = None yield self . _feature_returner ( ** fields ) interfeature_start = f . stop | Construct new features representing the space between features . |
16,935 | def update ( self , data , make_backup = True , ** kwargs ) : from gffutils import create from gffutils import iterators if make_backup : if isinstance ( self . dbfn , six . string_types ) : shutil . copy2 ( self . dbfn , self . dbfn + '.bak' ) _iterator_kwargs = { } for k , v in kwargs . items ( ) : if k in constants . _iterator_kwargs : _iterator_kwargs [ k ] = v data = iterators . DataIterator ( data , ** _iterator_kwargs ) if self . dialect [ 'fmt' ] == 'gtf' : if 'id_spec' not in kwargs : kwargs [ 'id_spec' ] = { 'gene' : 'gene_id' , 'transcript' : 'transcript_id' } db = create . _GTFDBCreator ( data = data , dbfn = self . dbfn , dialect = self . dialect , ** kwargs ) elif self . dialect [ 'fmt' ] == 'gff3' : if 'id_spec' not in kwargs : kwargs [ 'id_spec' ] = 'ID' db = create . _GFFDBCreator ( data = data , dbfn = self . dbfn , dialect = self . dialect , ** kwargs ) else : raise ValueError db . _populate_from_lines ( data ) db . _update_relations ( ) db . _finalize ( ) return db | Update database with features in data . |
16,936 | def create_introns ( self , exon_featuretype = 'exon' , grandparent_featuretype = 'gene' , parent_featuretype = None , new_featuretype = 'intron' , merge_attributes = True ) : if ( grandparent_featuretype and parent_featuretype ) or ( grandparent_featuretype is None and parent_featuretype is None ) : raise ValueError ( "exactly one of `grandparent_featuretype` or " "`parent_featuretype` should be provided" ) if grandparent_featuretype : def child_gen ( ) : for gene in self . features_of_type ( grandparent_featuretype ) : for child in self . children ( gene , level = 1 ) : yield child elif parent_featuretype : def child_gen ( ) : for child in self . features_of_type ( parent_featuretype ) : yield child for child in child_gen ( ) : exons = self . children ( child , level = 1 , featuretype = exon_featuretype , order_by = 'start' ) for intron in self . interfeatures ( exons , new_featuretype = new_featuretype , merge_attributes = merge_attributes , dialect = self . dialect ) : yield intron | Create introns from existing annotations . |
16,937 | def merge ( self , features , ignore_strand = False ) : features = list ( features ) if len ( features ) == 0 : raise StopIteration if ignore_strand : strand = '.' else : strands = [ i . strand for i in features ] if len ( set ( strands ) ) > 1 : raise ValueError ( 'Specify ignore_strand=True to force merging ' 'of multiple strands' ) strand = strands [ 0 ] chroms = [ i . chrom for i in features ] if len ( set ( chroms ) ) > 1 : raise NotImplementedError ( 'Merging multiple chromosomes not ' 'implemented' ) chrom = chroms [ 0 ] current_merged_start = features [ 0 ] . start current_merged_stop = features [ 0 ] . stop for feature in features [ 1 : ] : if feature . start <= current_merged_stop + 1 : if feature . stop >= current_merged_stop : current_merged_stop = feature . stop else : continue else : merged_feature = dict ( seqid = feature . chrom , source = '.' , featuretype = feature . featuretype , start = current_merged_start , end = current_merged_stop , score = '.' , strand = strand , frame = '.' , attributes = '' ) yield self . _feature_returner ( ** merged_feature ) current_merged_start = feature . start current_merged_stop = feature . stop if len ( features ) == 1 : feature = features [ 0 ] merged_feature = dict ( seqid = feature . chrom , source = '.' , featuretype = feature . featuretype , start = current_merged_start , end = current_merged_stop , score = '.' , strand = strand , frame = '.' , attributes = '' ) yield self . _feature_returner ( ** merged_feature ) | Merge overlapping features together . |
16,938 | def children_bp ( self , feature , child_featuretype = 'exon' , merge = False , ignore_strand = False ) : children = self . children ( feature , featuretype = child_featuretype , order_by = 'start' ) if merge : children = self . merge ( children , ignore_strand = ignore_strand ) total = 0 for child in children : total += len ( child ) return total | Total bp of all children of a featuretype . |
16,939 | def bed12 ( self , feature , block_featuretype = [ 'exon' ] , thick_featuretype = [ 'CDS' ] , thin_featuretype = None , name_field = 'ID' , color = None ) : if thick_featuretype and thin_featuretype : raise ValueError ( "Can only specify one of `thick_featuertype` or " "`thin_featuretype`" ) exons = list ( self . children ( feature , featuretype = block_featuretype , order_by = 'start' ) ) if len ( exons ) == 0 : exons = [ feature ] feature = self [ feature ] first = exons [ 0 ] . start last = exons [ - 1 ] . stop if first != feature . start : raise ValueError ( "Start of first exon (%s) does not match start of feature (%s)" % ( first , feature . start ) ) if last != feature . stop : raise ValueError ( "End of last exon (%s) does not match end of feature (%s)" % ( last , feature . stop ) ) if color is None : color = '0,0,0' color = color . replace ( ' ' , '' ) . strip ( ) chrom = feature . chrom chromStart = feature . start - 1 chromEnd = feature . stop orig = constants . always_return_list constants . always_return_list = True try : name = feature [ name_field ] [ 0 ] except KeyError : name = "." constants . always_return_list = orig score = feature . score if score == '.' : score = '0' strand = feature . strand itemRgb = color blockCount = len ( exons ) blockSizes = [ len ( i ) for i in exons ] blockStarts = [ i . start - 1 - chromStart for i in exons ] if thick_featuretype : thick = list ( self . children ( feature , featuretype = thick_featuretype , order_by = 'start' ) ) if len ( thick ) == 0 : thickStart = feature . start thickEnd = feature . stop else : thickStart = thick [ 0 ] . start - 1 thickEnd = thick [ - 1 ] . stop if thin_featuretype : thin = list ( self . children ( feature , featuretype = thin_featuretype , order_by = 'start' ) ) if len ( thin ) == 0 : thickStart = feature . start thickEnd = feature . stop else : thickStart = thin [ 0 ] . stop thickEnd = thin [ - 1 ] . start - 1 tst = chromStart + blockStarts [ - 1 ] + blockSizes [ - 1 ] assert tst == chromEnd , "tst=%s; chromEnd=%s" % ( tst , chromEnd ) fields = [ chrom , chromStart , chromEnd , name , score , strand , thickStart , thickEnd , itemRgb , blockCount , ',' . join ( map ( str , blockSizes ) ) , ',' . join ( map ( str , blockStarts ) ) ] return '\t' . join ( map ( str , fields ) ) | Converts feature into a BED12 format . |
16,940 | def DataIterator ( data , checklines = 10 , transform = None , force_dialect_check = False , from_string = False , ** kwargs ) : _kwargs = dict ( data = data , checklines = checklines , transform = transform , force_dialect_check = force_dialect_check , ** kwargs ) if isinstance ( data , six . string_types ) : if from_string : return _StringIterator ( ** _kwargs ) else : if os . path . exists ( data ) : return _FileIterator ( ** _kwargs ) elif is_url ( data ) : return _UrlIterator ( ** _kwargs ) elif isinstance ( data , FeatureDB ) : _kwargs [ 'data' ] = data . all_features ( ) return _FeatureIterator ( ** _kwargs ) else : return _FeatureIterator ( ** _kwargs ) | Iterate over features no matter how they are provided . |
16,941 | def inspect ( data , look_for = [ 'featuretype' , 'chrom' , 'attribute_keys' , 'feature_count' ] , limit = None , verbose = True ) : results = { } obj_attrs = [ ] for i in look_for : if i not in [ 'attribute_keys' , 'feature_count' ] : obj_attrs . append ( i ) results [ i ] = Counter ( ) attr_keys = 'attribute_keys' in look_for d = iterators . DataIterator ( data ) feature_count = 0 for f in d : if verbose : sys . stderr . write ( '\r%s features inspected' % feature_count ) sys . stderr . flush ( ) for obj_attr in obj_attrs : results [ obj_attr ] . update ( [ getattr ( f , obj_attr ) ] ) if attr_keys : results [ 'attribute_keys' ] . update ( f . attributes . keys ( ) ) feature_count += 1 if limit and feature_count == limit : break new_results = { } for k , v in results . items ( ) : new_results [ k ] = dict ( v ) new_results [ 'feature_count' ] = feature_count return new_results | Inspect a GFF or GTF data source . |
16,942 | def clean_gff ( gff , cleaned , add_chr = False , chroms_to_ignore = None , featuretypes_to_ignore = None ) : logger . info ( "Cleaning GFF" ) chroms_to_ignore = chroms_to_ignore or [ ] featuretypes_to_ignore = featuretypes_to_ignore or [ ] with open ( cleaned , 'w' ) as fout : for i in gffutils . iterators . DataIterator ( gff ) : if add_chr : i . chrom = "chr" + i . chrom if i . chrom in chroms_to_ignore : continue if i . featuretype in featuretypes_to_ignore : continue fout . write ( str ( i ) + '\n' ) return cleaned | Cleans a GFF file by removing features on unwanted chromosomes and of unwanted featuretypes . Optionally adds chr to chrom names . |
16,943 | def feature_from_line ( line , dialect = None , strict = True , keep_order = False ) : if not strict : lines = line . splitlines ( False ) _lines = [ ] for i in lines : i = i . strip ( ) if len ( i ) > 0 : _lines . append ( i ) assert len ( _lines ) == 1 , _lines line = _lines [ 0 ] if '\t' in line : fields = line . rstrip ( '\n\r' ) . split ( '\t' ) else : fields = line . rstrip ( '\n\r' ) . split ( None , 8 ) else : fields = line . rstrip ( '\n\r' ) . split ( '\t' ) try : attr_string = fields [ 8 ] except IndexError : attr_string = "" attrs , _dialect = parser . _split_keyvals ( attr_string , dialect = dialect ) d = dict ( list ( zip ( constants . _gffkeys , fields ) ) ) d [ 'attributes' ] = attrs d [ 'extra' ] = fields [ 9 : ] d [ 'keep_order' ] = keep_order if dialect is None : dialect = _dialect return Feature ( dialect = dialect , ** d ) | Given a line from a GFF file return a Feature object |
16,944 | def calc_bin ( self , _bin = None ) : if _bin is None : try : _bin = bins . bins ( self . start , self . end , one = True ) except TypeError : _bin = None return _bin | Calculate the smallest UCSC genomic bin that will contain this feature . |
16,945 | def astuple ( self , encoding = None ) : if not encoding : return ( self . id , self . seqid , self . source , self . featuretype , self . start , self . end , self . score , self . strand , self . frame , helpers . _jsonify ( self . attributes ) , helpers . _jsonify ( self . extra ) , self . calc_bin ( ) ) return ( self . id . decode ( encoding ) , self . seqid . decode ( encoding ) , self . source . decode ( encoding ) , self . featuretype . decode ( encoding ) , self . start , self . end , self . score . decode ( encoding ) , self . strand . decode ( encoding ) , self . frame . decode ( encoding ) , helpers . _jsonify ( self . attributes ) . decode ( encoding ) , helpers . _jsonify ( self . extra ) . decode ( encoding ) , self . calc_bin ( ) ) | Return a tuple suitable for import into a database . |
16,946 | def sequence ( self , fasta , use_strand = True ) : if isinstance ( fasta , six . string_types ) : fasta = Fasta ( fasta , as_raw = False ) seq = fasta [ self . chrom ] [ self . start - 1 : self . stop ] if use_strand and self . strand == '-' : seq = seq . reverse . complement return seq . seq | Retrieves the sequence of this feature as a string . |
16,947 | def infer_dialect ( attributes ) : if isinstance ( attributes , six . string_types ) : attributes = [ attributes ] dialects = [ parser . _split_keyvals ( i ) [ 1 ] for i in attributes ] return _choose_dialect ( dialects ) | Infer the dialect based on the attributes . |
16,948 | def _choose_dialect ( dialects ) : if len ( dialects ) == 0 : return constants . dialect final_order = [ ] for dialect in dialects : for o in dialect [ 'order' ] : if o not in final_order : final_order . append ( o ) dialect = dialects [ 0 ] dialect [ 'order' ] = final_order return dialect | Given a list of dialects choose the one to use as the canonical version . |
16,949 | def _bin_from_dict ( d ) : try : start = int ( d [ 'start' ] ) end = int ( d [ 'end' ] ) return bins . bins ( start , end , one = True ) except ValueError : return None | Given a dictionary yielded by the parser return the genomic UCSC bin |
16,950 | def _jsonify ( x ) : if isinstance ( x , dict_class ) : return json . dumps ( x . _d , separators = ( ',' , ':' ) ) return json . dumps ( x , separators = ( ',' , ':' ) ) | Use most compact form of JSON |
16,951 | def _unjsonify ( x , isattributes = False ) : if isattributes : obj = json . loads ( x ) return dict_class ( obj ) return json . loads ( x ) | Convert JSON string to an ordered defaultdict . |
16,952 | def _feature_to_fields ( f , jsonify = True ) : x = [ ] for k in constants . _keys : v = getattr ( f , k ) if jsonify and ( k in ( 'attributes' , 'extra' ) ) : x . append ( _jsonify ( v ) ) else : x . append ( v ) return tuple ( x ) | Convert feature to tuple for faster sqlite3 import |
16,953 | def _dict_to_fields ( d , jsonify = True ) : x = [ ] for k in constants . _keys : v = d [ k ] if jsonify and ( k in ( 'attributes' , 'extra' ) ) : x . append ( _jsonify ( v ) ) else : x . append ( v ) return tuple ( x ) | Convert dict to tuple for faster sqlite3 import |
16,954 | def merge_attributes ( attr1 , attr2 ) : new_d = copy . deepcopy ( attr1 ) new_d . update ( attr2 ) for k , v in new_d . items ( ) : if not isinstance ( v , list ) : new_d [ k ] = [ v ] for k , v in six . iteritems ( attr1 ) : if k in attr2 : if not isinstance ( v , list ) : v = [ v ] new_d [ k ] . extend ( v ) return dict ( ( k , sorted ( set ( v ) ) ) for k , v in new_d . items ( ) ) | Merges two attribute dictionaries into a single dictionary . |
16,955 | def dialect_compare ( dialect1 , dialect2 ) : orig = set ( dialect1 . items ( ) ) new = set ( dialect2 . items ( ) ) return dict ( added = dict ( list ( new . difference ( orig ) ) ) , removed = dict ( list ( orig . difference ( new ) ) ) ) | Compares two dialects . |
16,956 | def sanitize_gff_db ( db , gid_field = "gid" ) : def sanitized_iterator ( ) : for gene_recs in db . iter_by_parent_childs ( ) : gene_id = gene_recs [ 0 ] . id for rec in gene_recs : if rec . start > rec . stop : rec . start , rec . stop = rec . stop , rec . start rec . attributes [ gid_field ] = [ gene_id ] yield rec sanitized_db = gffutils . create_db ( sanitized_iterator ( ) , ":memory:" , verbose = False ) return sanitized_db | Sanitize given GFF db . Returns a sanitized GFF db . |
16,957 | def sanitize_gff_file ( gff_fname , in_memory = True , in_place = False ) : db = None if is_gff_db ( gff_fname ) : db = gffutils . FeatureDB ( gff_fname ) else : if in_memory : db = gffutils . create_db ( gff_fname , ":memory:" , verbose = False ) else : db = get_gff_db ( gff_fname ) if in_place : gff_out = gffwriter . GFFWriter ( gff_fname , in_place = in_place ) else : gff_out = gffwriter . GFFWriter ( sys . stdout ) sanitized_db = sanitize_gff_db ( db ) for gene_rec in sanitized_db . all_features ( featuretype = "gene" ) : gff_out . write_gene_recs ( sanitized_db , gene_rec . id ) gff_out . close ( ) | Sanitize a GFF file . |
16,958 | def is_gff_db ( db_fname ) : if not os . path . isfile ( db_fname ) : return False if db_fname . endswith ( ".db" ) : return True return False | Return True if the given filename is a GFF database . |
16,959 | def get_gff_db ( gff_fname , ext = ".db" ) : if not os . path . isfile ( gff_fname ) : raise ValueError ( "GFF %s does not exist." % ( gff_fname ) ) candidate_db_fname = "%s.%s" % ( gff_fname , ext ) if os . path . isfile ( candidate_db_fname ) : return candidate_db_fname db_fname = tempfile . NamedTemporaryFile ( delete = False ) print ( "Creating db for %s" % ( gff_fname ) ) t1 = time . time ( ) db = gffutils . create_db ( gff_fname , db_fname . name , merge_strategy = "merge" , verbose = False ) t2 = time . time ( ) print ( " - Took %.2f seconds" % ( t2 - t1 ) ) return db | Get db for GFF file . If the database has a . db file load that . Otherwise create a named temporary file serialize the db to that and return the loaded database . |
16,960 | def _reconstruct ( keyvals , dialect , keep_order = False , sort_attribute_values = False ) : if not dialect : raise AttributeStringError ( ) if not keyvals : return "" parts = [ ] if constants . ignore_url_escape_characters or dialect [ 'fmt' ] != 'gff3' : attributes = keyvals else : attributes = { } for k , v in keyvals . items ( ) : attributes [ k ] = [ ] for i in v : attributes [ k ] . append ( '' . join ( [ quoter [ j ] for j in i ] ) ) if dialect [ 'repeated keys' ] : items = [ ] for key , val in attributes . items ( ) : if len ( val ) > 1 : for v in val : items . append ( ( key , [ v ] ) ) else : items . append ( ( key , val ) ) else : items = list ( attributes . items ( ) ) def sort_key ( x ) : try : return dialect [ 'order' ] . index ( x [ 0 ] ) except ValueError : return 1e6 if keep_order : items . sort ( key = sort_key ) for key , val in items : if val : if sort_attribute_values : val = sorted ( val ) val_str = dialect [ 'multival separator' ] . join ( val ) if val_str : if dialect [ 'quoted GFF2 values' ] : val_str = '"%s"' % val_str part = dialect [ 'keyval separator' ] . join ( [ key , val_str ] ) else : if dialect [ 'fmt' ] == 'gtf' : part = dialect [ 'keyval separator' ] . join ( [ key , '""' ] ) else : part = key parts . append ( part ) parts_str = dialect [ 'field separator' ] . join ( parts ) if dialect [ 'trailing semicolon' ] : parts_str += ';' return parts_str | Reconstructs the original attributes string according to the dialect . |
16,961 | def create_db ( data , dbfn , id_spec = None , force = False , verbose = False , checklines = 10 , merge_strategy = 'error' , transform = None , gtf_transcript_key = 'transcript_id' , gtf_gene_key = 'gene_id' , gtf_subfeature = 'exon' , force_gff = False , force_dialect_check = False , from_string = False , keep_order = False , text_factory = sqlite3 . OptimizedUnicode , force_merge_fields = None , pragmas = constants . default_pragmas , sort_attribute_values = False , dialect = None , _keep_tempfiles = False , infer_gene_extent = True , disable_infer_genes = False , disable_infer_transcripts = False , ** kwargs ) : _locals = locals ( ) deprecation_handler ( kwargs ) kwargs = dict ( ( i , _locals [ i ] ) for i in constants . _iterator_kwargs ) iterator = iterators . DataIterator ( ** kwargs ) kwargs . update ( ** _locals ) if dialect is None : dialect = iterator . dialect kwargs [ 'data' ] = iterator . _iter kwargs [ 'directives' ] = iterator . directives kwargs [ 'checklines' ] = 0 if force_gff or ( dialect [ 'fmt' ] == 'gff3' ) : cls = _GFFDBCreator id_spec = id_spec or 'ID' add_kwargs = dict ( id_spec = id_spec , ) elif dialect [ 'fmt' ] == 'gtf' : cls = _GTFDBCreator id_spec = id_spec or { 'gene' : 'gene_id' , 'transcript' : 'transcript_id' } add_kwargs = dict ( transcript_key = gtf_transcript_key , gene_key = gtf_gene_key , subfeature = gtf_subfeature , id_spec = id_spec , ) kwargs . update ( ** add_kwargs ) kwargs [ 'dialect' ] = dialect c = cls ( ** kwargs ) c . create ( ) if dbfn == ':memory:' : db = interface . FeatureDB ( c . conn , keep_order = keep_order , pragmas = pragmas , sort_attribute_values = sort_attribute_values , text_factory = text_factory ) else : db = interface . FeatureDB ( c , keep_order = keep_order , pragmas = pragmas , sort_attribute_values = sort_attribute_values , text_factory = text_factory ) return db | Create a database from a GFF or GTF file . |
16,962 | def _id_handler ( self , f ) : if isinstance ( self . id_spec , six . string_types ) : id_key = [ self . id_spec ] elif hasattr ( self . id_spec , '__call__' ) : id_key = [ self . id_spec ] elif isinstance ( self . id_spec , dict ) : try : id_key = self . id_spec [ f . featuretype ] if isinstance ( id_key , six . string_types ) : id_key = [ id_key ] except KeyError : return self . _increment_featuretype_autoid ( f . featuretype ) else : id_key = self . id_spec for k in id_key : if hasattr ( k , '__call__' ) : _id = k ( f ) if _id : if _id . startswith ( 'autoincrement:' ) : return self . _increment_featuretype_autoid ( _id [ 14 : ] ) return _id else : if ( len ( k ) > 3 ) and ( k [ 0 ] == ':' ) and ( k [ - 1 ] == ':' ) : return getattr ( f , k [ 1 : - 1 ] ) else : try : return f . attributes [ k ] [ 0 ] except ( KeyError , IndexError ) : pass return self . _increment_featuretype_autoid ( f . featuretype ) | Given a Feature from self . iterator figure out what the ID should be . |
16,963 | def create ( self ) : self . _init_tables ( ) self . _populate_from_lines ( self . iterator ) self . _update_relations ( ) self . _finalize ( ) | Calls various methods sequentially in order to fully build the database . |
16,964 | def execute ( self , query ) : c = self . conn . cursor ( ) result = c . execute ( query ) for i in result : yield i | Execute a query directly on the database . |
16,965 | def wait_for_js ( function ) : @ functools . wraps ( function ) def wrapper ( * args , ** kwargs ) : if len ( args ) < 1 : return function ( * args , ** kwargs ) else : self = args [ 0 ] if hasattr ( self , 'wait_for_js' ) : self . wait_for_js ( ) return function ( * args , ** kwargs ) return wrapper | Method decorator that waits for JavaScript dependencies before executing function . If the function is not a method the decorator has no effect . |
16,966 | def _wait_for_js ( self ) : if not hasattr ( self , 'browser' ) : return if hasattr ( self , '_js_vars' ) and self . _js_vars : EmptyPromise ( lambda : _are_js_vars_defined ( self . browser , self . _js_vars ) , u"JavaScript variables defined: {0}" . format ( ", " . join ( self . _js_vars ) ) ) . fulfill ( ) if hasattr ( self , '_requirejs_deps' ) and self . _requirejs_deps : EmptyPromise ( lambda : _are_requirejs_deps_loaded ( self . browser , self . _requirejs_deps ) , u"RequireJS dependencies loaded: {0}" . format ( ", " . join ( self . _requirejs_deps ) ) , try_limit = 5 ) . fulfill ( ) | Class method added by the decorators to allow decorated classes to manually re - check JavaScript dependencies . |
16,967 | def _are_js_vars_defined ( browser , js_vars ) : script = u" && " . join ( [ u"!(typeof {0} === 'undefined')" . format ( var ) for var in js_vars ] ) try : return browser . execute_script ( u"return {}" . format ( script ) ) except WebDriverException as exc : if "is not defined" in exc . msg or "is undefined" in exc . msg : return False else : raise | Return a boolean indicating whether all the JavaScript variables js_vars are defined on the current page . |
16,968 | def _are_requirejs_deps_loaded ( browser , deps ) : script = dedent ( u ) . format ( deps = json . dumps ( list ( deps ) ) ) browser . set_script_timeout ( 30 ) try : result = browser . execute_async_script ( script ) return result == 'Success' except TimeoutException : return False | Return a boolean indicating whether all the RequireJS dependencies deps have loaded on the current page . |
16,969 | def no_selenium_errors ( func ) : def _inner ( * args , ** kwargs ) : try : return_val = func ( * args , ** kwargs ) except WebDriverException : LOGGER . warning ( u'Exception ignored during retry loop:' , exc_info = True ) return False else : return return_val return _inner | Decorator to create an EmptyPromise check function that is satisfied only when func executes without a Selenium error . |
16,970 | def set_rules ( self , rules ) : self . rules_to_ignore = rules . get ( "ignore" , [ ] ) self . rules_to_run = rules . get ( "apply" , [ ] ) | Sets the rules to be run or ignored for the audit . |
16,971 | def set_scope ( self , include = None , exclude = None ) : if include : self . scope = u"document.querySelector(\"{}\")" . format ( u', ' . join ( include ) ) else : self . scope = "null" if exclude is not None : raise NotImplementedError ( "The argument `exclude` has not been implemented in " "AxsAuditConfig.set_scope method." ) | Sets scope the start point for the audit . |
16,972 | def _check_rules ( browser , rules_js , config ) : if config . rules_to_run is None : msg = 'No accessibility rules were specified to check.' log . warning ( msg ) return None rules = config . rules_to_run if rules : rules_config = u"auditConfig.auditRulesToRun = {rules};" . format ( rules = rules ) else : rules_config = "" ignored_rules = config . rules_to_ignore if ignored_rules : rules_config += ( u"\nauditConfig.auditRulesToIgnore = {rules};" . format ( rules = ignored_rules ) ) script = dedent ( u . format ( rules_js = rules_js , rules_config = rules_config , scope = config . scope ) ) result = browser . execute_script ( script ) audit_results = AuditResults ( errors = result . get ( 'errors_' ) , warnings = result . get ( 'warnings_' ) ) return audit_results | Check the page for violations of the configured rules . By default all rules in the ruleset will be checked . |
16,973 | def fulfill ( self ) : is_fulfilled , result = self . _check_fulfilled ( ) if is_fulfilled : return result else : raise BrokenPromise ( self ) | Evaluate the promise and return the result . |
16,974 | def search ( self ) : self . q ( css = 'button.btn' ) . click ( ) GitHubSearchResultsPage ( self . browser ) . wait_for_page ( ) | Click on the Search button and wait for the results page to be displayed |
16,975 | def set_rules ( self , rules ) : options = { } if rules : if rules . get ( "ignore" ) : options [ "rules" ] = { } for rule in rules . get ( "ignore" ) : options [ "rules" ] [ rule ] = { "enabled" : False } elif rules . get ( "apply" ) : options [ "runOnly" ] = { "type" : "rule" , "values" : rules . get ( "apply" ) , } elif rules . get ( "tags" ) : options [ "runOnly" ] = { "type" : "tag" , "values" : rules . get ( "tags" ) , } self . rules = json . dumps ( options ) | Set rules to ignore XOR limit to when checking for accessibility errors on the page . |
16,976 | def customize_ruleset ( self , custom_ruleset_file = None ) : custom_file = custom_ruleset_file or os . environ . get ( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file : return with open ( custom_file , "r" ) as additional_rules : custom_rules = additional_rules . read ( ) if "var customRules" not in custom_rules : raise A11yAuditConfigError ( "Custom rules file must include \"var customRules\"" ) self . custom_rules = custom_rules | Updates the ruleset to include a set of custom rules . These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID . |
16,977 | def _check_rules ( browser , rules_js , config ) : audit_run_script = dedent ( u ) . format ( rules_js = rules_js , custom_rules = config . custom_rules , context = config . context , options = config . rules ) audit_results_script = dedent ( u ) browser . execute_script ( audit_run_script ) def audit_results_check_func ( ) : unicode_results = browser . execute_script ( audit_results_script ) try : results = json . loads ( unicode_results ) except ( TypeError , ValueError ) : results = None if results : return True , results return False , None result = Promise ( audit_results_check_func , "Timed out waiting for a11y audit results." , timeout = 5 , ) . fulfill ( ) audit_results = result . get ( 'violations' ) return audit_results | Run an accessibility audit on the page using the axe - core ruleset . |
16,978 | def save_source ( driver , name ) : source = driver . page_source file_name = os . path . join ( os . environ . get ( 'SAVED_SOURCE_DIR' ) , '{name}.html' . format ( name = name ) ) try : with open ( file_name , 'wb' ) as output_file : output_file . write ( source . encode ( 'utf-8' ) ) except Exception : msg = u"Could not save the browser page source to {}." . format ( file_name ) LOGGER . warning ( msg ) | Save the rendered HTML of the browser . |
16,979 | def save_screenshot ( driver , name ) : if hasattr ( driver , 'save_screenshot' ) : screenshot_dir = os . environ . get ( 'SCREENSHOT_DIR' ) if not screenshot_dir : LOGGER . warning ( 'The SCREENSHOT_DIR environment variable was not set; not saving a screenshot' ) return elif not os . path . exists ( screenshot_dir ) : os . makedirs ( screenshot_dir ) image_name = os . path . join ( screenshot_dir , name + '.png' ) driver . save_screenshot ( image_name ) else : msg = ( u"Browser does not support screenshots. " u"Could not save screenshot '{name}'" ) . format ( name = name ) LOGGER . warning ( msg ) | Save a screenshot of the browser . |
16,980 | def save_driver_logs ( driver , prefix ) : browser_name = os . environ . get ( 'SELENIUM_BROWSER' , 'firefox' ) log_dir = os . environ . get ( 'SELENIUM_DRIVER_LOG_DIR' ) if not log_dir : LOGGER . warning ( 'The SELENIUM_DRIVER_LOG_DIR environment variable was not set; not saving logs' ) return elif not os . path . exists ( log_dir ) : os . makedirs ( log_dir ) if browser_name == 'firefox' : log_path = os . path . join ( os . getcwd ( ) , 'geckodriver.log' ) if os . path . exists ( log_path ) : dest_path = os . path . join ( log_dir , '{}_geckodriver.log' . format ( prefix ) ) copyfile ( log_path , dest_path ) return log_types = driver . log_types for log_type in log_types : try : log = driver . get_log ( log_type ) file_name = os . path . join ( log_dir , '{}_{}.log' . format ( prefix , log_type ) ) with open ( file_name , 'w' ) as output_file : for line in log : output_file . write ( "{}{}" . format ( dumps ( line ) , '\n' ) ) except : msg = ( u"Could not save browser log of type '{log_type}'. " u"It may be that the browser does not support it." ) . format ( log_type = log_type ) LOGGER . warning ( msg , exc_info = True ) | Save the selenium driver logs . |
16,981 | def browser ( tags = None , proxy = None , other_caps = None ) : browser_name = os . environ . get ( 'SELENIUM_BROWSER' , 'firefox' ) def browser_check_func ( ) : try : if _use_remote_browser ( SAUCE_ENV_VARS ) : browser_class , browser_args , browser_kwargs = _remote_browser_class ( SAUCE_ENV_VARS , tags ) elif _use_remote_browser ( REMOTE_ENV_VARS ) : browser_class , browser_args , browser_kwargs = _remote_browser_class ( REMOTE_ENV_VARS , tags ) else : browser_class , browser_args , browser_kwargs = _local_browser_class ( browser_name ) if proxy : browser_kwargs = _proxy_kwargs ( browser_name , proxy , browser_kwargs ) if browser_class == webdriver . Remote : desired_caps = other_caps or { } desired_caps . update ( browser_kwargs . get ( 'desired_capabilities' , { } ) ) browser_kwargs [ 'desired_capabilities' ] = desired_caps return True , browser_class ( * browser_args , ** browser_kwargs ) except ( socket . error , WebDriverException ) as err : msg = str ( err ) LOGGER . debug ( 'Failed to instantiate browser: ' + msg ) return False , None browser_instance = Promise ( browser_check_func , "Browser is instantiated successfully." , try_limit = 3 , timeout = 95 ) . fulfill ( ) return browser_instance | Interpret environment variables to configure Selenium . Performs validation logging and sensible defaults . |
16,982 | def _firefox_profile ( ) : profile_dir = os . environ . get ( FIREFOX_PROFILE_ENV_VAR ) if profile_dir : LOGGER . info ( u"Using firefox profile: %s" , profile_dir ) try : firefox_profile = webdriver . FirefoxProfile ( profile_dir ) except OSError as err : if err . errno == errno . ENOENT : raise BrowserConfigError ( u"Firefox profile directory {env_var}={profile_dir} does not exist" . format ( env_var = FIREFOX_PROFILE_ENV_VAR , profile_dir = profile_dir ) ) elif err . errno == errno . EACCES : raise BrowserConfigError ( u"Firefox profile directory {env_var}={profile_dir} has incorrect permissions. It must be \ readable and executable." . format ( env_var = FIREFOX_PROFILE_ENV_VAR , profile_dir = profile_dir ) ) else : raise BrowserConfigError ( u"Problem with firefox profile directory {env_var}={profile_dir}: {msg}" . format ( env_var = FIREFOX_PROFILE_ENV_VAR , profile_dir = profile_dir , msg = str ( err ) ) ) else : LOGGER . info ( "Using default firefox profile" ) firefox_profile = webdriver . FirefoxProfile ( ) firefox_profile . set_preference ( 'media.navigator.permission.disabled' , True ) firefox_profile . set_preference ( 'browser.startup.homepage' , 'about:blank' ) firefox_profile . set_preference ( 'startup.homepage_welcome_url' , 'about:blank' ) firefox_profile . set_preference ( 'startup.homepage_welcome_url.additional' , 'about:blank' ) firefox_profile . set_preference ( 'app.update.enabled' , False ) firefox_profile . set_preference ( 'plugins.hide_infobar_for_outdated_plugin' , True ) firefox_profile . set_preference ( 'datareporting.healthreport.service.enabled' , False ) firefox_profile . set_preference ( 'datareporting.policy.dataSubmissionEnabled' , False ) firefox_profile . set_preference ( 'toolkit.crashreporter.enabled' , False ) firefox_profile . set_preference ( 'devtools.jsonview.enabled' , False ) firefox_profile . set_preference ( 'focusmanager.testmode' , True ) for function in FIREFOX_PROFILE_CUSTOMIZERS : function ( firefox_profile ) return firefox_profile | Configure the Firefox profile respecting FIREFOX_PROFILE_PATH if set |
16,983 | def _local_browser_class ( browser_name ) : LOGGER . info ( u"Using local browser: %s [Default is firefox]" , browser_name ) browser_class = BROWSERS . get ( browser_name ) headless = os . environ . get ( 'BOKCHOY_HEADLESS' , 'false' ) . lower ( ) == 'true' if browser_class is None : raise BrowserConfigError ( u"Invalid browser name {name}. Options are: {options}" . format ( name = browser_name , options = ", " . join ( list ( BROWSERS . keys ( ) ) ) ) ) else : if browser_name == 'firefox' : log_path = os . path . join ( os . getcwd ( ) , 'geckodriver.log' ) if os . path . exists ( log_path ) : os . remove ( log_path ) firefox_options = FirefoxOptions ( ) firefox_options . log . level = 'trace' if headless : firefox_options . headless = True browser_args = [ ] browser_kwargs = { 'firefox_profile' : _firefox_profile ( ) , 'options' : firefox_options , } firefox_path = os . environ . get ( 'SELENIUM_FIREFOX_PATH' ) firefox_log = os . environ . get ( 'SELENIUM_FIREFOX_LOG' ) if firefox_path and firefox_log : browser_kwargs . update ( { 'firefox_binary' : FirefoxBinary ( firefox_path = firefox_path , log_file = firefox_log ) } ) elif firefox_path : browser_kwargs . update ( { 'firefox_binary' : FirefoxBinary ( firefox_path = firefox_path ) } ) elif firefox_log : browser_kwargs . update ( { 'firefox_binary' : FirefoxBinary ( log_file = firefox_log ) } ) elif browser_name == 'chrome' : chrome_options = ChromeOptions ( ) if headless : chrome_options . headless = True chrome_options . add_argument ( '--use-fake-device-for-media-stream' ) chrome_options . add_argument ( '--use-fake-ui-for-media-stream' ) browser_args = [ ] browser_kwargs = { 'options' : chrome_options , } else : browser_args , browser_kwargs = [ ] , { } return browser_class , browser_args , browser_kwargs | Returns class kwargs and args needed to instantiate the local browser . |
16,984 | def _remote_browser_class ( env_vars , tags = None ) : if tags is None : tags = [ ] envs = _required_envs ( env_vars ) envs . update ( _optional_envs ( ) ) caps = _capabilities_dict ( envs , tags ) if 'accessKey' in caps : LOGGER . info ( u"Using SauceLabs: %s %s %s" , caps [ 'platform' ] , caps [ 'browserName' ] , caps [ 'version' ] ) else : LOGGER . info ( u"Using Remote Browser: %s" , caps [ 'browserName' ] ) url = u"http://{0}:{1}/wd/hub" . format ( envs [ 'SELENIUM_HOST' ] , envs [ 'SELENIUM_PORT' ] ) browser_args = [ ] browser_kwargs = { 'command_executor' : url , 'desired_capabilities' : caps , } if caps [ 'browserName' ] == 'firefox' : browser_kwargs [ 'browser_profile' ] = _firefox_profile ( ) return webdriver . Remote , browser_args , browser_kwargs | Returns class kwargs and args needed to instantiate the remote browser . |
16,985 | def _proxy_kwargs ( browser_name , proxy , browser_kwargs = { } ) : proxy_dict = { "httpProxy" : proxy . proxy , "proxyType" : 'manual' , } if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs : wd_proxy = webdriver . common . proxy . Proxy ( proxy_dict ) browser_kwargs [ 'proxy' ] = wd_proxy else : if 'desired_capabilities' not in browser_kwargs : browser_kwargs [ 'desired_capabilities' ] = { } browser_kwargs [ 'desired_capabilities' ] [ 'proxy' ] = proxy_dict return browser_kwargs | Determines the kwargs needed to set up a proxy based on the browser type . |
16,986 | def _required_envs ( env_vars ) : envs = { key : os . environ . get ( key ) for key in env_vars } missing = [ key for key , val in list ( envs . items ( ) ) if val is None ] if missing : msg = ( u"These environment variables must be set: " + u", " . join ( missing ) ) raise BrowserConfigError ( msg ) if envs [ 'SELENIUM_BROWSER' ] not in BROWSERS : msg = u"Unsuppported browser: {0}" . format ( envs [ 'SELENIUM_BROWSER' ] ) raise BrowserConfigError ( msg ) return envs | Parse environment variables for required values raising a BrowserConfig error if they are not found . |
16,987 | def _optional_envs ( ) : envs = { key : os . environ . get ( key ) for key in OPTIONAL_ENV_VARS if key in os . environ } if 'JOB_NAME' in envs and 'BUILD_NUMBER' not in envs : raise BrowserConfigError ( "Missing BUILD_NUMBER environment var" ) if 'BUILD_NUMBER' in envs and 'JOB_NAME' not in envs : raise BrowserConfigError ( "Missing JOB_NAME environment var" ) return envs | Parse environment variables for optional values raising a BrowserConfig error if they are insufficiently specified . |
16,988 | def _capabilities_dict ( envs , tags ) : capabilities = { 'browserName' : envs [ 'SELENIUM_BROWSER' ] , 'acceptInsecureCerts' : bool ( envs . get ( 'SELENIUM_INSECURE_CERTS' , False ) ) , 'video-upload-on-pass' : False , 'sauce-advisor' : False , 'capture-html' : True , 'record-screenshots' : True , 'max-duration' : 600 , 'public' : 'public restricted' , 'tags' : tags , } if _use_remote_browser ( SAUCE_ENV_VARS ) : sauce_capabilities = { 'platform' : envs [ 'SELENIUM_PLATFORM' ] , 'version' : envs [ 'SELENIUM_VERSION' ] , 'username' : envs [ 'SAUCE_USER_NAME' ] , 'accessKey' : envs [ 'SAUCE_API_KEY' ] , } capabilities . update ( sauce_capabilities ) if 'JOB_NAME' in envs : jenkins_vars = { 'build' : envs [ 'BUILD_NUMBER' ] , 'name' : envs [ 'JOB_NAME' ] , } capabilities . update ( jenkins_vars ) return capabilities | Convert the dictionary of environment variables to a dictionary of desired capabilities to send to the Remote WebDriver . |
16,989 | def replace ( self , ** kwargs ) : clone = copy ( self ) clone . transforms = list ( clone . transforms ) for key , value in kwargs . items ( ) : if not hasattr ( clone , key ) : raise TypeError ( u'replace() got an unexpected keyword argument {!r}' . format ( key ) ) setattr ( clone , key , value ) return clone | Return a copy of this Query but with attributes specified as keyword arguments replaced by the keyword values . |
16,990 | def transform ( self , transform , desc = None ) : if desc is None : desc = u'transform({})' . format ( getattr ( transform , '__name__' , '' ) ) return self . replace ( transforms = self . transforms + [ transform ] , desc_stack = self . desc_stack + [ desc ] ) | Create a copy of this query transformed by transform . |
16,991 | def map ( self , map_fn , desc = None ) : if desc is None : desc = getattr ( map_fn , '__name__' , '' ) desc = u'map({})' . format ( desc ) return self . transform ( lambda xs : ( map_fn ( x ) for x in xs ) , desc = desc ) | Return a copy of this query with the values mapped through map_fn . |
16,992 | def filter ( self , filter_fn = None , desc = None , ** kwargs ) : if filter_fn is not None and kwargs : raise TypeError ( 'Must supply either a filter_fn or attribute filter parameters to filter(), but not both.' ) if filter_fn is None and not kwargs : raise TypeError ( 'Must supply one of filter_fn or one or more attribute filter parameters to filter().' ) if desc is None : if filter_fn is not None : desc = getattr ( filter_fn , '__name__' , '' ) elif kwargs : desc = u", " . join ( [ u"{}={!r}" . format ( key , value ) for key , value in kwargs . items ( ) ] ) desc = u"filter({})" . format ( desc ) if kwargs : def filter_fn ( elem ) : return all ( getattr ( elem , filter_key ) == filter_value for filter_key , filter_value in kwargs . items ( ) ) return self . transform ( lambda xs : ( x for x in xs if filter_fn ( x ) ) , desc = desc ) | Return a copy of this query with some values removed . |
16,993 | def _execute ( self ) : data = self . seed_fn ( ) for transform in self . transforms : data = transform ( data ) return list ( data ) | Run the query generating data from the seed_fn and performing transforms on the results . |
16,994 | def execute ( self , try_limit = 5 , try_interval = 0.5 , timeout = 30 ) : return Promise ( no_error ( self . _execute ) , u"Executing {!r}" . format ( self ) , try_limit = try_limit , try_interval = try_interval , timeout = timeout , ) . fulfill ( ) | Execute this query retrying based on the supplied parameters . |
16,995 | def first ( self ) : def _transform ( xs ) : try : return [ six . next ( iter ( xs ) ) ] except StopIteration : return [ ] return self . transform ( _transform , 'first' ) | Return a Query that selects only the first element of this Query . If no elements are available returns a query with no results . |
16,996 | def attrs ( self , attribute_name ) : desc = u'attrs({!r})' . format ( attribute_name ) return self . map ( lambda el : el . get_attribute ( attribute_name ) , desc ) . results | Retrieve HTML attribute values from the elements matched by the query . |
16,997 | def selected ( self ) : query_results = self . map ( lambda el : el . is_selected ( ) , 'selected' ) . results if query_results : return all ( query_results ) return False | Check whether all the matched elements are selected . |
16,998 | def visible ( self ) : query_results = self . map ( lambda el : el . is_displayed ( ) , 'visible' ) . results if query_results : return all ( query_results ) return False | Check whether all matched elements are visible . |
16,999 | def fill ( self , text ) : def _fill ( elem ) : elem . clear ( ) elem . send_keys ( text ) self . map ( _fill , u'fill({!r})' . format ( text ) ) . execute ( ) | Set the text value of each matched element to text . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.