idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
48,400
def _format_alignment ( self , a1 , a2 ) : html = [ ] for index , char in enumerate ( a1 ) : output = self . _substitutes . get ( char , char ) if a2 [ index ] == char : html . append ( '<span class="match">{}</span>' . format ( output ) ) elif char != '-' : html . append ( output ) return '' . join ( html )
Returns a1 marked up with HTML spans around characters that are also at the same index in a2 .
48,401
def render(self):
    """Return a pair of HTML fragments, one per aligned sequence."""
    first, second = self._alignment[0], self._alignment[1]
    return (self._format_alignment(first, second),
            self._format_alignment(second, first))
Returns a tuple of HTML fragments rendering each element of the sequence .
48,402
def generate(self, output_dir, minimum_size):
    """Generate sequence reports and write them to output_dir.

    Only n-grams of at least minimum_size are used, longest first; a
    report is produced for each unordered pair of distinct labels.
    """
    self._output_dir = output_dir
    labels = list(self._matches.groupby(
        [constants.LABEL_FIELDNAME])[
            constants.WORK_FIELDNAME].nunique().index)
    original_ngrams = self._matches[
        self._matches[constants.SIZE_FIELDNAME] >= minimum_size].sort_values(
            by=constants.SIZE_FIELDNAME,
            ascending=False)[constants.NGRAM_FIELDNAME].unique()
    # Apply the single-character token substitutions to each n-gram so
    # they can be matched against the substituted witness texts.
    ngrams = [self._get_text(Text(original_ngram, self._tokenizer))
              for original_ngram in original_ngrams]
    for index, primary_label in enumerate(labels):
        for secondary_label in labels[index + 1:]:
            self._generate_sequences(primary_label, secondary_label, ngrams)
Generates sequence reports and writes them to the output directory .
48,403
def _generate_sequences(self, primary_label, secondary_label, ngrams):
    """Generate aligned sequences between every witness labelled
    primary_label and every witness labelled secondary_label, based
    around ngrams."""
    cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME]
    primary_works = self._matches[
        self._matches[constants.LABEL_FIELDNAME] == primary_label][
            cols].drop_duplicates()
    secondary_works = self._matches[
        self._matches[constants.LABEL_FIELDNAME] == secondary_label][
            cols].drop_duplicates()
    for _, (work1, siglum1) in primary_works.iterrows():
        text1 = self._get_text(self._corpus.get_witness(work1, siglum1))
        label1 = '{}_{}'.format(work1, siglum1)
        for _, (work2, siglum2) in secondary_works.iterrows():
            text2 = self._get_text(self._corpus.get_witness(work2, siglum2))
            label2 = '{}_{}'.format(work2, siglum2)
            self._generate_sequences_for_texts(
                label1, text1, label2, text2, ngrams)
Generates aligned sequences between each witness labelled primary_label and each witness labelled secondary_label based around ngrams .
48,404
def _generate_sequences_for_ngram ( self , t1 , t2 , ngram , covered_spans ) : self . _logger . debug ( 'Generating sequences for n-gram "{}"' . format ( ngram ) ) pattern = re . compile ( re . escape ( ngram ) ) context_length = len ( ngram ) t1_spans = [ match . span ( ) for match in pattern . finditer ( t1 ) ] t2_spans = [ match . span ( ) for match in pattern . finditer ( t2 ) ] sequences = [ ] self . _logger . debug ( t1 ) for t1_span in t1_spans : for t2_span in t2_spans : if self . _is_inside ( t1_span , t2_span , covered_spans ) : self . _logger . debug ( 'Skipping match due to existing coverage' ) continue sequence = self . _generate_sequence ( t1 , t1_span , t2 , t2_span , context_length , covered_spans ) if sequence : sequences . append ( sequence ) return sequences
Generates aligned sequences for the texts t1 and t2 based around ngram .
48,405
def _generate_sequences_for_texts(self, l1, t1, l2, t2, ngrams):
    """Generate and write a report of aligned sequences between the
    texts t1 (labelled l1) and t2 (labelled l2), based on ngrams.

    No report is written if no sequences are found.
    """
    self._reverse_substitutes = {
        value: key for key, value in self._substitutes.items()}
    sequences = []
    covered_spans = [[], []]
    for ngram in ngrams:
        sequences.extend(self._generate_sequences_for_ngram(
            t1, t2, ngram, covered_spans))
    if sequences:
        sequences.sort(key=lambda sequence: sequence.start_index)
        context = {'l1': l1, 'l2': l2, 'sequences': sequences}
        report_name = '{}-{}.html'.format(l1, l2)
        os.makedirs(self._output_dir, exist_ok=True)
        self._write(context, self._output_dir, report_name)
Generates and outputs aligned sequences for the texts t1 and t2 from ngrams .
48,406
def _get_text ( self , text ) : tokens = text . get_tokens ( ) for i , token in enumerate ( tokens ) : if len ( token ) > 1 : char = chr ( self . _char_code ) substitute = self . _substitutes . setdefault ( token , char ) if substitute == char : self . _char_code += 1 tokens [ i ] = substitute return self . _tokenizer . joiner . join ( tokens )
Returns the text content of text with all multi - character tokens replaced with a single character . Substitutions are recorded in self . _substitutes .
48,407
def _get_text_sequence ( self , text , span , context_length ) : start = max ( 0 , span [ 0 ] - context_length ) end = min ( len ( text ) , span [ 1 ] + context_length ) return text [ start : end ] , ( start , end )
Returns the subset of text encompassed by span plus context_length characters before and after .
48,408
def _is_inside ( self , span1 , span2 , covered_spans ) : if self . _is_span_inside ( span1 , covered_spans [ 0 ] ) and self . _is_span_inside ( span2 , covered_spans [ 1 ] ) : return True return False
Returns True if both span1 and span2 fall within covered_spans .
48,409
def _is_span_inside ( self , span , covered_spans ) : start = span [ 0 ] end = span [ 1 ] for c_start , c_end in covered_spans : if start >= c_start and end <= c_end : return True return False
Returns True if span falls within covered_spans .
48,410
def add_corpus_arguments(parser):
    """Add the arguments common to corpus-using commands to parser."""
    add_tokenizer_argument(parser)
    parser.add_argument('corpus', help=constants.DB_CORPUS_HELP,
                        metavar='CORPUS')
Adds common arguments for commands making use of a corpus to parser .
48,411
def add_db_arguments(parser, db_option=False):
    """Add the arguments common to database sub-commands to parser.

    If db_option is True the database is supplied via the optional
    -d/--db flag; otherwise it is a required positional argument.
    """
    parser.add_argument('-m', '--memory', action='store_true',
                        help=constants.DB_MEMORY_HELP)
    parser.add_argument('-r', '--ram', default=3, type=int,
                        help=constants.DB_RAM_HELP)
    if db_option:
        parser.add_argument('-d', '--db', help=constants.DB_DATABASE_HELP,
                            metavar='DATABASE', required=True)
    else:
        parser.add_argument('db', help=constants.DB_DATABASE_HELP,
                            metavar='DATABASE')
Adds common arguments for the database sub - commands to parser .
48,412
def add_supplied_query_arguments(parser):
    """Add the arguments common to supplied-query sub-commands to
    parser."""
    parser.add_argument('-l', '--labels', nargs='+', required=True,
                        help=constants.SUPPLIED_LABELS_HELP)
    parser.add_argument('-s', '--supplied', metavar='RESULTS', nargs='+',
                        required=True, help=constants.SUPPLIED_RESULTS_HELP)
Adds common arguments for supplied query sub - commands to parser .
48,413
def configure_logging(verbose, logger):
    """Attach a coloured stream handler to logger at a level derived
    from verbose (0: WARNING, 1: INFO, 2+: DEBUG)."""
    if not verbose:
        log_level = logging.WARNING
    elif verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logger.setLevel(log_level)
    handler = colorlog.StreamHandler()
    handler.setLevel(log_level)
    handler.setFormatter(colorlog.ColoredFormatter(
        '%(log_color)s%(asctime)s %(name)s %(levelname)s: %(message)s'))
    logger.addHandler(handler)
Configures the logging used .
48,414
def get_catalogue(args):
    """Return a tacl.Catalogue loaded from args.catalogue."""
    catalogue = tacl.Catalogue()
    catalogue.load(args.catalogue)
    return catalogue
Returns a tacl . Catalogue .
48,415
def get_corpus(args):
    """Return a tacl.Corpus for args.corpus using the tokenizer
    configured in args."""
    return tacl.Corpus(args.corpus, get_tokenizer(args))
Returns a tacl . Corpus .
48,416
def get_data_store(args):
    """Return a tacl.DataStore for the database configured in args."""
    return tacl.DataStore(args.db, args.memory, args.ram)
Returns a tacl . DataStore .
48,417
def get_ngrams(path):
    """Return a list of n-grams read from the file at path, one per
    line, with surrounding whitespace stripped."""
    with open(path, encoding='utf-8') as fh:
        return [line.strip() for line in fh]
Returns a list of n - grams read from the file at path .
48,418
def excise(self, ngrams, replacement):
    """Return the token content of this text with every occurrence of
    each n-gram in ngrams replaced by replacement.

    Longer n-grams are replaced first so shorter ones cannot break up
    occurrences of longer ones. The supplied ngrams list is no longer
    modified (the previous implementation sorted it in place, a
    surprising side effect for callers).
    """
    content = self.get_token_content()
    for ngram in sorted(ngrams, key=len, reverse=True):
        content = content.replace(ngram, replacement)
    return content
Returns the token content of this text with every occurrence of each n - gram in ngrams replaced with replacement .
48,419
def _ngrams ( self , sequence , degree ) : count = max ( 0 , len ( sequence ) - degree + 1 ) return [ self . _tokenizer . joiner . join ( self . _tokenizer . joiner . join ( sequence [ i : i + degree ] ) . split ( ) ) for i in range ( count ) ]
Returns the n - grams generated from sequence .
48,420
def get_filter_ngrams_pattern(filter_ngrams):
    """Return a compiled regular expression matching any of the
    n-grams in filter_ngrams (each escaped literally).

    Note: an empty filter_ngrams yields a pattern that matches the
    empty string, and therefore matches everywhere.
    """
    escaped = (re.escape(ngram) for ngram in filter_ngrams)
    return re.compile('|'.join(escaped))
Returns a compiled regular expression matching on any of the n - grams in filter_ngrams .
48,421
def set_relation_many_to_many(self, obj, field_name, objs):
    """Set a many-to-many field on an object.

    Uses the relation's own set() method when available; otherwise
    falls back to plain attribute assignment.
    """
    relation = getattr(obj, field_name)
    if not hasattr(relation, 'set'):
        setattr(obj, field_name, objs)
    else:
        relation.set(objs)
Set a many - to - many field on an object
48,422
def detect_uniqueness_error(self, exc):
    """Return the fields named in a uniqueness-constraint violation
    described by exc, or None if there are none.

    IntegrityErrors are delegated to _detect_integrity_error; any
    other exception must be a ValidationError.

    Raises TypeError for unsupported exception types. (Previously this
    was an assert, which is stripped under ``python -O`` and used the
    TypeError class itself as the assertion message.)
    """
    if isinstance(exc, IntegrityError):
        return self._detect_integrity_error(exc)
    if not isinstance(exc, ValidationError):
        raise TypeError(
            'Expected IntegrityError or ValidationError, got {}'.format(
                type(exc).__name__))
    pattern = r"(\w+) with this (\w+) already exists"
    fields = [name for name, err in exc.error_dict.items()
              if re.search(pattern, str(err))]
    return fields or None
Parse error and if it describes any violations of a uniqueness constraint return the corresponding fields else None
48,423
def tls_meta_data(self):
    """Pull out the TLS metadata for each TCP flow in the input
    stream.

    For each TCP flow the payload is parsed as a series of TLS
    records; on success flow['tls'] holds the records (tagged by
    direction), on parse failure it is set to None. Non-TCP flows are
    skipped entirely.

    (The previous implementation duplicated the whole parse block for
    each direction; the branches differed only in the type tag.)
    """
    for flow in self.input_stream:
        if flow['protocol'] != 'TCP':
            continue
        # The two directions are parsed identically; only the type
        # tag attached to the result differs.
        tls_type = 'TLS_CTS' if flow['direction'] == 'CTS' else 'TLS_STC'
        try:
            tls_records, bytes_consumed = dpkt.ssl.tls_multi_factory(
                flow['payload'])
            if bytes_consumed != len(flow['payload']):
                logger.warning('Incomplete TLS record at the end...')
            flow['tls'] = {'type': tls_type,
                           'data': {'tls_records': tls_records,
                                    'uri': None, 'headers': None}}
        except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError,
                dpkt.ssl.SSL3Exception):
            flow['tls'] = None
        yield flow
Pull out the TLS metadata for each flow in the input_stream
48,424
def make_dict(obj):
    """Recursively convert a non-builtin object into a dictionary of
    its public, non-callable attributes.

    Builtins and OrderedDicts are returned unchanged; list attributes
    are converted element by element.
    """
    if is_builtin(obj) or isinstance(obj, OrderedDict):
        return obj
    output_dict = {}
    for key in dir(obj):
        if key.startswith('__'):
            continue
        attr = getattr(obj, key)
        if callable(attr):
            continue
        if isinstance(attr, list):
            output_dict[key] = [make_dict(item) for item in attr]
        else:
            output_dict[key] = make_dict(attr)
    return output_dict
This method creates a dictionary out of a non - builtin object
48,425
def get_value(data, key):
    """Follow the dot-notation key through nested dictionaries in data
    and return the value found, or None when a step is missing or
    lands on a non-dictionary."""
    current = data
    try:
        for part in key.split('.'):
            if not isinstance(current, dict):
                print('CRITICAL: Cannot use subkey %s on non-dictionary '
                      'element' % part)
                return None
            current = current[part]
    except KeyError:
        return None
    return current
Follow the dot notation to get the proper field then perform the action
48,426
def csv(self, fh):
    """Write the report statistics to fh in CSV format (UTF-8, no
    index column) and return fh."""
    self._stats.to_csv(fh, encoding='utf-8', index=False)
    return fh
Writes the report data to fh in CSV format and returns it .
48,427
def generate_statistics(self):
    """Replace the result rows with per-witness summary statistics.

    For each witness appearing in the matches, counts its total
    tokens and the tokens participating in matches, and stores a
    DataFrame of these statistics in self._stats.
    """
    matches = self._matches
    witness_fields = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME,
                      constants.LABEL_FIELDNAME]
    witnesses = matches[witness_fields].drop_duplicates()
    rows = []
    for index, (work, siglum, label) in witnesses.iterrows():
        witness = self._corpus.get_witness(work, siglum)
        witness_matches = matches[
            (matches[constants.WORK_FIELDNAME] == work) &
            (matches[constants.SIGLUM_FIELDNAME] == siglum)]
        total_count, matching_count = self._process_witness(
            witness, witness_matches)
        # NOTE(review): a witness with zero tokens would raise
        # ZeroDivisionError here — confirm that cannot occur upstream.
        percentage = matching_count / total_count * 100
        rows.append({constants.WORK_FIELDNAME: work,
                     constants.SIGLUM_FIELDNAME: siglum,
                     constants.COUNT_TOKENS_FIELDNAME: matching_count,
                     constants.TOTAL_TOKENS_FIELDNAME: total_count,
                     constants.PERCENTAGE_FIELDNAME: percentage,
                     constants.LABEL_FIELDNAME: label})
    self._stats = pd.DataFrame(rows, columns=constants.STATISTICS_FIELDNAMES)
Replaces result rows with summary statistics about the results .
48,428
def _generate_text_from_slices ( self , full_text , slices ) : sliced_text = [ ] for start , end in slices : sliced_text . append ( full_text [ start : end ] ) return self . _tokenizer . joiner . join ( sliced_text )
Return a single string consisting of the parts specified in slices joined together by the tokenizer's joining string.
48,429
def _merge_slices ( match_slices ) : match_slices . sort ( key = lambda x : ( x [ 0 ] , - x [ 1 ] ) ) merged_slices = [ match_slices . pop ( 0 ) ] for slice_indices in match_slices : last_end = merged_slices [ - 1 ] [ 1 ] if slice_indices [ 0 ] <= last_end : if slice_indices [ 1 ] > last_end : merged_slices [ - 1 ] [ 1 ] = slice_indices [ 1 ] else : merged_slices . append ( slice_indices ) return merged_slices
Return a list of slice indices lists derived from match_slices with no overlaps .
48,430
def _process_witness(self, witness, matches):
    """Return (total token count, matching token count) for witness.

    Finds every occurrence (including overlapping ones) of each
    matched n-gram in the witness text, merges the matched regions,
    and counts the tokens of the merged match content.
    """
    tokens = witness.get_tokens()
    full_text = witness.get_token_content()
    fields = [constants.NGRAM_FIELDNAME, constants.SIZE_FIELDNAME]
    match_slices = []
    for index, (ngram, size) in matches[fields].iterrows():
        pattern = re.compile(re.escape(ngram))
        start = 0
        while True:
            match = pattern.search(full_text, start)
            if match is None:
                break
            match_slices.append([match.start(), match.end()])
            # Advance by one character only, so overlapping
            # occurrences are all found.
            start = match.start() + 1
    merged_slices = self._merge_slices(match_slices)
    match_content = self._generate_text_from_slices(full_text, merged_slices)
    match_text = Text(match_content, self._tokenizer)
    return len(tokens), len(match_text.get_tokens())
Return the counts of total tokens and matching tokens in witness .
48,431
def get_witnesses(self, source_tree):
    """Return a list of (text, xml:id) pairs for every witness listed
    in source_tree's TEI header, falling back to the base witness when
    none are listed."""
    witness_elements = source_tree.xpath(
        '/tei:*/tei:teiHeader/tei:fileDesc/tei:sourceDesc/'
        'tei:listWit/tei:witness', namespaces=constants.NAMESPACES)
    witnesses = [(element.text, element.get(constants.XML + 'id'))
                 for element in witness_elements]
    if not witnesses:
        witnesses = [(constants.BASE_WITNESS, constants.BASE_WITNESS_ID)]
    return witnesses
Returns a list of all witnesses of variant readings in source_tree along with their XML ids .
48,432
def run(iface_name=None, bpf=None, summary=None, max_packets=50):
    """Run the simple packet printer example.

    Builds a pipeline of streamer -> metadata -> reverse DNS -> tags
    -> transport metadata -> summary printer, then pulls packets
    through it. Note: the summary parameter is currently unused.
    """
    streamer = packet_streamer.PacketStreamer(
        iface_name=iface_name, bpf=bpf, max_packets=max_packets)
    meta = packet_meta.PacketMeta()
    rdns = reverse_dns.ReverseDNS()
    tags = packet_tags.PacketTags()
    tmeta = transport_meta.TransportMeta()
    printer = packet_summary.PacketSummary()
    # Chain the stages: each consumes the previous stage's output.
    meta.link(streamer)
    rdns.link(meta)
    tags.link(rdns)
    tmeta.link(tags)
    printer.link(tmeta)
    printer.pull()
Run the Simple Packet Printer Example
48,433
def generate(self, output_dir, catalogue, results, label):
    """Generate the lifetime report for label and write it to
    output_dir."""
    data = results.get_raw_data()
    labels = catalogue.ordered_labels
    ngrams = self._generate_results(output_dir, labels, data)
    ngram_table = self._generate_ngram_table(output_dir, labels, data)
    corpus_table = self._generate_corpus_table(labels, ngrams)
    context = {'corpus_table': corpus_table, 'focus_label': label,
               'labels': labels, 'ngram_table': ngram_table,
               'sep': os.sep}
    report_name = 'lifetime-{}.html'.format(label)
    self._write(context, output_dir, report_name)
Generates the report writing it to output_dir .
48,434
def _generate_corpus_table ( self , labels , ngrams ) : html = [ ] for label in labels : html . append ( self . _render_corpus_row ( label , ngrams ) ) return '\n' . join ( html )
Returns an HTML table containing data on each corpus n - grams .
48,435
def _generate_ngram_table(self, output_dir, labels, results):
    """Return the HTML rows of the n-gram table, one per n-gram in
    results."""
    rows = []
    grouped = results.groupby(constants.NGRAM_FIELDNAME)
    row_template = self._generate_ngram_row_template(labels)
    for name, group in grouped:
        rows.append(self._render_ngram_row(name, group, row_template,
                                           labels))
    return '\n'.join(rows)
Returns an HTML table containing data on each n - gram in results .
48,436
def _generate_ngram_row_template ( self , labels ) : cells = [ '<td>{ngram}</td>' ] for label in labels : cells . append ( '<td>{{{}}}</td>' . format ( label ) ) return '\n' . join ( cells )
Returns the HTML template for a row in the n - gram table .
48,437
def _generate_results(self, output_dir, labels, results):
    """Create per-label first/only/last results files in output_dir.

    For each label (in order), partitions its n-grams into those first
    seen under that label, those seen only under it, and those last
    seen under it. Returns a dict mapping each label to its three
    n-gram lists.
    """
    ngrams = {}
    for idx, label in enumerate(labels):
        now_results = results[results[constants.LABEL_FIELDNAME] == label]
        earlier_labels = labels[:idx]
        later_labels = labels[idx + 1:]
        # Sets give O(1) membership tests in the loop below;
        # previously these were numpy arrays with O(n) lookups.
        earlier_ngrams = set(results[results[
            constants.LABEL_FIELDNAME].isin(earlier_labels)][
                constants.NGRAM_FIELDNAME].values)
        later_ngrams = set(results[results[
            constants.LABEL_FIELDNAME].isin(later_labels)][
                constants.NGRAM_FIELDNAME].values)
        first_ngrams = []
        only_ngrams = []
        last_ngrams = []
        for ngram in now_results[constants.NGRAM_FIELDNAME].unique():
            if ngram in earlier_ngrams:
                if ngram not in later_ngrams:
                    last_ngrams.append(ngram)
            elif ngram in later_ngrams:
                first_ngrams.append(ngram)
            else:
                only_ngrams.append(ngram)
        self._save_results(output_dir, label, now_results, first_ngrams,
                           'first')
        self._save_results(output_dir, label, now_results, only_ngrams,
                           'only')
        self._save_results(output_dir, label, now_results, last_ngrams,
                           'last')
        ngrams[label] = {'first': first_ngrams, 'last': last_ngrams,
                         'only': only_ngrams}
    return ngrams
Creates multiple results files in output_dir based on results .
48,438
def _render_corpus_row ( self , label , ngrams ) : row = ( '<tr>\n<td>{label}</td>\n<td>{first}</td>\n<td>{only}</td>\n' '<td>{last}</td>\n</tr>' ) cell_data = { 'label' : label } for period in ( 'first' , 'only' , 'last' ) : cell_data [ period ] = ', ' . join ( ngrams [ label ] [ period ] ) return row . format ( ** cell_data )
Returns the HTML for a corpus row .
48,439
def _render_ngram_row(self, ngram, ngram_group, row_template, labels):
    """Return the HTML row for ngram, listing per label the works it
    occurs in with its count (or count range across witnesses)."""
    cell_data = {'ngram': ngram}
    label_data = {label: [] for label in labels}
    work_grouped = ngram_group.groupby(constants.WORK_FIELDNAME)
    for work, group in work_grouped:
        min_count = group[constants.COUNT_FIELDNAME].min()
        max_count = group[constants.COUNT_FIELDNAME].max()
        if min_count == max_count:
            count = min_count
        else:
            count = '{}\N{EN DASH}{}'.format(min_count, max_count)
        label = group[constants.LABEL_FIELDNAME].iloc[0]
        label_data[label].append('{} ({})'.format(work, count))
    for label, data in label_data.items():
        label_data[label] = '; '.join(data)
    cell_data.update(label_data)
    return '<tr>\n{}\n</tr>'.format(row_template.format(**cell_data))
Returns the HTML for an n - gram row .
48,440
def _save_results(self, output_dir, label, results, ngrams, type_label):
    """Save the rows of results whose n-gram is in ngrams to a CSV
    file named after label and type_label in output_dir."""
    path = os.path.join(output_dir, '{}-{}.csv'.format(label, type_label))
    selected = results[results[constants.NGRAM_FIELDNAME].isin(ngrams)]
    selected.to_csv(path, encoding='utf-8', float_format='%d', index=False)
Saves results filtered by label and ngram to output_dir .
48,441
def link(self, stream_instance):
    """Set this stage's input stream.

    stream_instance may be any iterable, or another pipeline stage
    exposing a truthy output_stream attribute.

    Raises RuntimeError for anything else.
    """
    # collections.Iterable was removed in Python 3.10; the ABCs live
    # in collections.abc (imported locally to avoid relying on the
    # module's top-level imports, which are outside this block).
    from collections.abc import Iterable
    if isinstance(stream_instance, Iterable):
        self.input_stream = stream_instance
    elif getattr(stream_instance, 'output_stream', None):
        self.input_stream = stream_instance.output_stream
    else:
        raise RuntimeError('Calling link() with unknown instance type %s'
                           % type(stream_instance))
Set my input stream
48,442
def generate(self, path, label):
    """Create default catalogue data from the corpus at path, marking
    every work (directory entry) with label."""
    for work in os.listdir(path):
        self[work] = label
Creates default data from the corpus at path marking all works with label .
48,443
def get_works_by_label(self, label):
    """Return a list of the works in this catalogue carrying label."""
    return [work for work, work_label in self.items()
            if work_label == label]
Returns a list of works associated with label .
48,444
def load(self, path):
    """Load the space-delimited work/label data at path into this
    catalogue, recording label order as labels are first seen.

    Raises MalformedCatalogueError if a work appears more than once.
    """
    fieldnames = ['work', 'label']
    with open(path, 'r', encoding='utf-8', newline='') as fh:
        reader = csv.DictReader(fh, delimiter=' ', fieldnames=fieldnames,
                                skipinitialspace=True)
        for row in reader:
            work, label = row['work'], row['label']
            # Unlabelled works are simply skipped.
            if label:
                if label not in self._ordered_labels:
                    self._ordered_labels.append(label)
                if work in self:
                    raise MalformedCatalogueError(
                        CATALOGUE_WORK_RELABELLED_ERROR.format(work))
                self[work] = label
Loads the data from path into the catalogue .
48,445
def relabel(self, label_map):
    """Return a copy of this catalogue with labels remapped according
    to label_map; works whose label has no mapping are removed."""
    catalogue = copy.deepcopy(self)
    unmapped_labels = set()
    for work, old_label in catalogue.items():
        if old_label in label_map:
            catalogue[work] = label_map[old_label]
        else:
            unmapped_labels.add(catalogue[work])
    for label in unmapped_labels:
        catalogue.remove_label(label)
    return catalogue
Returns a copy of the catalogue with the labels remapped according to label_map .
48,446
def remove_label(self, label):
    """Remove label from the catalogue, deleting every work that
    carries it and dropping it from the ordered label list."""
    works_to_delete = [work for work, work_label in self.items()
                       if work_label == label]
    for work in works_to_delete:
        del self[work]
    # Guard by membership: list.remove raises ValueError when the
    # label was never recorded in the ordered list (the previous
    # truthiness check only guarded against an empty list).
    if label in self._ordered_labels:
        self._ordered_labels.remove(label)
Removes label from the catalogue by removing all works carrying it .
48,447
def save(self, path):
    """Save this catalogue's work/label pairs to path as
    space-delimited CSV, sorted by work.

    The file is now opened in a with-block so the handle is closed
    deterministically (it was previously leaked), and written as
    UTF-8 for consistency with load().
    """
    rows = sorted(self.items(), key=lambda item: item[0])
    with open(path, 'w', encoding='utf-8', newline='') as fh:
        writer = csv.writer(fh, delimiter=' ')
        writer.writerows(rows)
Saves this catalogue's data to path.
48,448
def _dns_weird ( self , record ) : weird = { } if record [ 'flags' ] [ 'zero' ] != 0 : weird [ 'zero' ] = record [ 'flags' ] [ 'zero' ] if record [ 'flags' ] [ 'truncated' ] : weird [ 'trucnated' ] = True weird_types = set ( [ 'DNS_NULL' , 'DNS_HINFO' , 'DNS_TXT' , 'UNKNOWN' ] ) for query in record [ 'queries' ] : if query [ 'type' ] in weird_types : weird [ 'query_type' ] = query [ 'type' ] weird_classes = set ( [ 'DNS_CHAOS' , 'DNS_HESIOD' , 'DNS_NONE' , 'DNS_ANY' ] ) for query in record [ 'queries' ] : if query [ 'class' ] in weird_classes : weird [ 'query_class' ] = query [ 'class' ] for section_name in [ 'answers' , 'name_servers' , 'additional' ] : for answer in record [ 'answers' ] [ section_name ] : if answer [ 'type' ] in weird_types : weird [ 'answer_type' ] = answer [ 'type' ] for section_name in [ 'answers' , 'name_servers' , 'additional' ] : for answer in record [ 'answers' ] [ section_name ] : if answer [ 'class' ] in weird_classes : weird [ 'answer_class' ] = answer [ 'class' ] for query in record [ 'queries' ] : subdomain = '.' . join ( query [ 'name' ] . split ( '.' ) [ : - 2 ] ) length = len ( subdomain ) entropy = self . entropy ( subdomain ) if length > 35 and entropy > 3.5 : weird [ 'subdomain_length' ] = length weird [ 'subdomain' ] = subdomain weird [ 'subdomain_entropy' ] = entropy weird [ 'subdomain' ] = subdomain return weird
Look for weird stuff in dns record using a set of criteria to mark the weird stuff
48,449
def get_sigla(self, work):
    """Return the sigla of all witnesses to work, derived from the
    names of its .txt files."""
    pattern = os.path.join(self._path, work, '*.txt')
    return [os.path.splitext(os.path.basename(path))[0]
            for path in glob.glob(pattern)]
Returns a list of all of the sigla for work .
48,450
def get_witness(self, work, siglum, text_class=WitnessText):
    """Return a text_class instance for the witness identified by work
    and siglum, read (as UTF-8) from the corpus directory."""
    filename = os.path.join(work, siglum + '.txt')
    self._logger.debug(
        'Creating WitnessText object from {}'.format(filename))
    with open(os.path.join(self._path, filename), encoding='utf-8') as fh:
        content = fh.read()
    return text_class(work, siglum, content, self._tokenizer)
Returns a WitnessText representing the file associated with work and siglum .
48,451
def get_witnesses(self, name='*'):
    """Generate WitnessText objects for every witness file matching
    name (a glob pattern; by default every work) in the corpus.

    The loop previously reassigned the name parameter for each file;
    that only worked because glob.glob materialises its results
    before the loop runs. The derived work name now uses its own
    variable.
    """
    for filepath in glob.glob(os.path.join(self._path, name, '*.txt')):
        if os.path.isfile(filepath):
            work = os.path.split(os.path.split(filepath)[0])[1]
            siglum = os.path.splitext(os.path.basename(filepath))[0]
            yield self.get_witness(work, siglum)
Returns a generator supplying WitnessText objects for each work in the corpus .
48,452
def get_works(self):
    """Return the names of all works (subdirectories) in the
    corpus."""
    paths = glob.glob(os.path.join(self._path, '*'))
    return [os.path.split(path)[1] for path in paths
            if os.path.isdir(path)]
Returns a list of the names of all works in the corpus .
48,453
def requires_columns(required_cols):
    """Decorator factory: raise MalformedResultsError when any of
    required_cols is missing from the matches of the Results object
    bearing the decorated method (args[0]._matches)."""
    def dec(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            actual_cols = list(args[0]._matches.columns)
            missing_cols = ['"{}"'.format(col) for col in required_cols
                            if col not in actual_cols]
            if missing_cols:
                raise MalformedResultsError(
                    constants.MISSING_REQUIRED_COLUMNS_ERROR.format(
                        ', '.join(missing_cols)))
            return f(*args, **kwargs)
        return decorated_function
    return dec
Decorator that raises a MalformedResultsError if any of required_cols is not present as a column in the matches of the Results object bearing the decorated method .
48,454
def process_for_rdns(self):
    """Annotate each packet in the input stream with reverse-DNS
    domains for its src and dst addresses.

    Lookups go through a cache; internal and special address ranges
    are short-circuited without a DNS query.
    """
    for item in self.input_stream:
        for endpoint in ['src', 'dst']:
            key = endpoint + self.domain_postfix
            if endpoint not in item['packet']:
                item['packet'][key] = None
                continue
            ip_address = net_utils.inet_to_str(item['packet'][endpoint])
            if self.ip_lookup_cache.get(ip_address):
                domain = self.ip_lookup_cache.get(ip_address)
            elif net_utils.is_internal(ip_address):
                domain = 'internal'
            elif net_utils.is_special(ip_address):
                domain = net_utils.is_special(ip_address)
            else:
                domain = self._reverse_dns_lookup(ip_address)
            item['packet'][key] = domain
            self.ip_lookup_cache.set(ip_address, domain)
        yield item
Look through my input stream for the fields in ip_field_list and try to do a reverse dns lookup on those fields .
48,455
def generate_colours(n):
    """Return a list of n visually distinct colours, produced by
    stepping the hue by the golden-ratio conjugate at a fixed
    saturation and value."""
    golden_ratio_conjugate = 0.618033988749895
    hue, saturation, value = 0.8, 0.7, 0.95
    colours = []
    for _ in range(n):
        hue = (hue + golden_ratio_conjugate) % 1
        colours.append(hsv_to_rgb(hue, saturation, value))
    return colours
Return a list of n distinct colours each represented as an RGB string suitable for use in CSS .
48,456
def tag_stuff(self):
    """Apply every registered tag method to each item in the input
    stream, collecting the results in the item's 'tags' set."""
    for item in self.input_stream:
        if 'tags' not in item:
            item['tags'] = set()
        item['tags'].update(method(item) for method in self.tag_methods)
        # Tag methods signal "no tag" by returning None.
        item['tags'].discard(None)
        yield item
Look through my input stream for the fields to be tagged
48,457
def _tag_net_direction ( data ) : src = data [ 'packet' ] [ 'src_domain' ] dst = data [ 'packet' ] [ 'dst_domain' ] if src == 'internal' : if dst == 'internal' or 'multicast' in dst or 'broadcast' in dst : return 'internal' else : return 'outgoing' elif dst == 'internal' : return 'incoming' else : return None
Create a tag based on the direction of the traffic
48,458
def _output_work(self, work, root):
    """Serialise the TEI XML document root to the output directory
    under the filename work."""
    output_filename = os.path.join(self._output_dir, work)
    etree.ElementTree(root).write(output_filename, encoding='utf-8',
                                  pretty_print=True)
Saves the TEI XML document root at the path work .
48,459
def _populate_header(self, root):
    """Copy the title and author from the first TEI part's header
    into the teiCorpus header, where present, and return root."""
    title_stmt = root.xpath(
        'tei:teiHeader/tei:fileDesc/tei:titleStmt',
        namespaces=constants.NAMESPACES)[0]
    try:
        title_stmt[0].text = root.xpath(
            'tei:TEI[1]/tei:teiHeader/tei:fileDesc/tei:titleStmt/'
            'tei:title', namespaces=constants.NAMESPACES)[0].text
    except IndexError:
        # The first part has no title; leave the corpus title as is.
        pass
    try:
        title_stmt[1].text = root.xpath(
            'tei:TEI[1]/tei:teiHeader/tei:fileDesc/tei:titleStmt/'
            'tei:author', namespaces=constants.NAMESPACES)[0].text
    except IndexError:
        # Likewise for a missing author.
        pass
    return root
Populate the teiHeader of the teiCorpus with useful information from the teiHeader of the first TEI part .
48,460
def _extract_work ( self , filename ) : basename = os . path . splitext ( os . path . basename ( filename ) ) [ 0 ] match = self . work_pattern . search ( basename ) if match is None : self . _logger . warning ( 'Found an anomalous filename "{}"' . format ( filename ) ) return None , None work = '{}{}' . format ( match . group ( 'prefix' ) , match . group ( 'work' ) ) return work , match . group ( 'part' )
Returns the name of the work in filename .
48,461
def _handle_resps(self, root):
    """Add a respStmt list built from the resps found in root to the
    TEI corpus header, update all resp references to point at it, and
    return root (unchanged if no resps were found)."""
    resps, bearers = self.get_resps(root)
    if not resps:
        return root
    file_desc = root.xpath('/tei:teiCorpus/tei:teiHeader/tei:fileDesc',
                           namespaces=constants.NAMESPACES)[0]
    edition_stmt = etree.Element(TEI + 'editionStmt')
    file_desc.insert(1, edition_stmt)
    for index, (resp_resp, resp_name) in enumerate(resps):
        resp_stmt = etree.SubElement(edition_stmt, TEI + 'respStmt')
        xml_id = 'resp{}'.format(index + 1)
        resp_stmt.set(constants.XML + 'id', xml_id)
        resp = etree.SubElement(resp_stmt, TEI + 'resp')
        resp.text = resp_resp
        name = etree.SubElement(resp_stmt, TEI + 'name')
        name.text = resp_name
        resp_data = '{{{}|{}}}'.format(resp_resp, resp_name)
        self._update_refs(root, bearers, 'resp', resp_data, xml_id)
    return root
Returns root with a resp list added to the TEI header and the resp references in the body updated to point to it.
48,462
def _tidy(self, work, file_path):
    """Transform the XML file at file_path into the simplified form
    and return the resulting root element.

    Re-raises etree.XMLSyntaxError (after logging) if the file is not
    valid XML.
    """
    output_file = os.path.join(self._output_dir, work)
    self._logger.info('Tidying file {} into {}'.format(
        file_path, output_file))
    try:
        tei_doc = etree.parse(file_path)
    except etree.XMLSyntaxError as err:
        self._logger.error('XML file "{}" is invalid: {}'.format(
            file_path, err))
        raise
    return self.transform(tei_doc).getroot()
Transforms the file at file_path into simpler XML and returns that .
48,463
def add_packet(self, packet):
    """Add a packet to this flow and update flow metadata (endpoints,
    direction, start/end timestamps, TCP state, timeout).

    NOTE(review): block structure reconstructed from a collapsed
    source line; the initial metadata assignments are presumed to sit
    inside the first-packet guard — confirm against the original file.
    """
    if not self.meta['flow_id']:
        # First packet seen: establish flow identity and endpoints.
        self.meta['flow_id'] = flow_tuple(packet)
        self.meta['src'] = self.meta['flow_id'][0]
        self.meta['dst'] = self.meta['flow_id'][1]
        self.meta['src_domain'] = packet['packet']['src_domain']
        self.meta['dst_domain'] = packet['packet']['dst_domain']
        self.meta['sport'] = self.meta['flow_id'][2]
        self.meta['dport'] = self.meta['flow_id'][3]
        self.meta['protocol'] = self.meta['flow_id'][4]
        self.meta['direction'] = self._cts_or_stc(packet)
        self.meta['start'] = packet['timestamp']
        self.meta['end'] = packet['timestamp']
    self.meta['packet_list'].append(packet)
    # Widen the observed time window if this packet falls outside it.
    if packet['timestamp'] < self.meta['start']:
        self.meta['start'] = packet['timestamp']
    if packet['timestamp'] > self.meta['end']:
        self.meta['end'] = packet['timestamp']
    if self.meta['protocol'] == 'TCP':
        # Track TCP handshake/teardown state from the flags.
        flags = packet['transport']['flags']
        if 'syn' in flags:
            self.meta['state'] = 'partial_syn'
            self.meta['direction'] = 'CTS'
        elif 'fin' in flags:
            # Only flows whose SYN was observed count as complete.
            self.meta['state'] = 'complete' if self.meta['state'] == 'partial_syn' else 'partial'
            self.meta['timeout'] = datetime.now() + timedelta(seconds=1)
        elif 'syn_ack' in flags:
            self.meta['state'] = 'partial_syn'
            self.meta['direction'] = 'STC'
        elif 'fin_ack' in flags:
            self.meta['state'] = 'complete' if self.meta['state'] == 'partial_syn' else 'partial'
            self.meta['timeout'] = datetime.now() + timedelta(seconds=1)
        elif 'rst' in flags:
            self.meta['state'] = 'partial'
            self.meta['timeout'] = datetime.now() + timedelta(seconds=1)
    if self.meta['protocol'] not in ['UDP', 'TCP']:
        # Non-TCP/UDP flows are expired immediately.
        self.meta['timeout'] = datetime.now()
Add a packet to this flow
48,464
def _generate_tokens(self, text):
    """Generate Token objects for the given source *text*, augmenting
    each tokenize token with its index and start/end text offsets."""
    readline = io.StringIO(text).readline
    for index, tok in enumerate(tokenize.generate_tokens(readline)):
        tok_type, tok_str, start, end, line = tok
        start_offset = self._line_numbers.line_to_offset(start[0], start[1])
        end_offset = self._line_numbers.line_to_offset(end[0], end[1])
        yield Token(tok_type, tok_str, start, end, line, index,
                    start_offset, end_offset)
Generates tokens for the given code .
48,465
def next_token(self, tok, include_extra=False):
    """Return the token following *tok*.

    Non-coding tokens (tokenize NL, COMMENT) are skipped unless
    *include_extra* is true.
    """
    index = tok.index + 1
    if not include_extra:
        while is_non_coding_token(self._tokens[index].type):
            index += 1
    return self._tokens[index]
Returns the next token after the given one . If include_extra is True includes non - coding tokens from the tokenize module such as NL and COMMENT .
48,466
def token_range(self, first_token, last_token, include_extra=False):
    """Yield tokens from *first_token* through *last_token* inclusive.

    Non-coding tokens (tokenize NL, COMMENT) are omitted unless
    *include_extra* is true.
    """
    # xrange comes from the module's py2/py3 compatibility imports.
    for index in xrange(first_token.index, last_token.index + 1):
        token = self._tokens[index]
        if include_extra or not is_non_coding_token(token.type):
            yield token
Yields all tokens in order from first_token through and including last_token . If include_extra is True includes non - coding tokens such as tokenize . NL and . COMMENT .
48,467
def get_tokens(self, node, include_extra=False):
    """Yield all tokens making up *node*, per its first_token and
    last_token markers; include non-coding tokens (tokenize NL,
    COMMENT) when *include_extra* is true."""
    first, last = node.first_token, node.last_token
    return self.token_range(first, last, include_extra=include_extra)
Yields all tokens making up the given node . If include_extra is True includes non - coding tokens such as tokenize . NL and . COMMENT .
48,468
def from_utf8_col(self, line, utf8_column):
    """Convert a 0-based utf8 byte column on 1-based *line* to a
    0-based unicode (character) column; out-of-range columns are
    clamped to the line."""
    offsets = self._utf8_offset_cache.get(line)
    if offsets is None:
        if line < len(self._line_offsets):
            end_offset = self._line_offsets[line]
        else:
            end_offset = self._text_len
        line_text = self._text[self._line_offsets[line - 1]:end_offset]
        # One entry per utf8 byte, holding the character index that
        # byte belongs to; a trailing entry marks end-of-line.
        offsets = [i for i, c in enumerate(line_text)
                   for byte in c.encode('utf8')]
        offsets.append(len(line_text))
        self._utf8_offset_cache[line] = offsets
    return offsets[max(0, min(len(offsets) - 1, utf8_column))]
Given a 1 - based line number and 0 - based utf8 column returns a 0 - based unicode column .
48,469
def line_to_offset(self, line, column):
    """Convert a 1-based *line* and 0-based *column* to a 0-based
    character offset into the text; out-of-range values are clamped."""
    index = line - 1
    if index >= len(self._line_offsets):
        return self._text_len
    if index < 0:
        return 0
    offset = self._line_offsets[index] + max(0, column)
    return min(offset, self._text_len)
Converts 1 - based line number and 0 - based column to 0 - based character offset into text .
48,470
def match_token(token, tok_type, tok_str=None):
    """Return True if *token* has type *tok_type* and, when *tok_str*
    is given, exactly that string."""
    if token.type != tok_type:
        return False
    return tok_str is None or token.string == tok_str
Returns true if token is of the given type and if a string is given has that string .
48,471
def expect_token(token, tok_type, tok_str=None):
    """Verify *token* is of type *tok_type* (and has string *tok_str*,
    when given); raise an informative ValueError otherwise."""
    if match_token(token, tok_type, tok_str):
        return
    raise ValueError(
        "Expected token %s, got %s on line %s col %s" % (
            token_repr(tok_type, tok_str), str(token),
            token.start[0], token.start[1] + 1))
Verifies that the given token is of the expected type. If tok_str is given, the token string is verified too. If the token doesn't match, raises an informative ValueError.
48,472
def visit_tree(node, previsit, postvisit):
    """Scan the tree under *node* depth-first using an explicit stack,
    avoiding Python's recursion limit on deep trees.

    :param previsit: callable(node, par_value) -> (pvalue, post_value);
        pvalue is passed down to the node's children, post_value is
        handed back to postvisit for this node. May be falsy.
    :param postvisit: callable(node, par_value, value) -> result, run
        after all the node's children. May be falsy.
    :returns: the last postvisit result, i.e. the root's.
    """
    if not previsit:
        previsit = lambda node, pvalue: (None, None)
    if not postvisit:
        postvisit = lambda node, pvalue, value: None
    iter_children = iter_children_func(node)
    done = set()  # guards against visiting any node twice
    ret = None
    # Stack entries are (node, parent value, marker): the _PREVISIT
    # sentinel for a pending previsit, or the post_value for a pending
    # postvisit.
    stack = [(node, None, _PREVISIT)]
    while stack:
        current, par_value, value = stack.pop()
        if value is _PREVISIT:
            assert current not in done
            done.add(current)
            pvalue, post_value = previsit(current, par_value)
            # Re-push current so its postvisit runs after the children.
            stack.append((current, par_value, post_value))
            # Insert children above it at a fixed index so that pop()
            # yields them in their original order.
            ins = len(stack)
            for n in iter_children(current):
                stack.insert(ins, (n, pvalue, _PREVISIT))
        else:
            ret = postvisit(current, par_value, value)
    return ret
Scans the tree under the node depth-first using an explicit stack. It avoids implicit recursion via the function call stack to avoid hitting Python's "maximum recursion depth exceeded" error.
48,473
def _cellrepr ( value , allow_formulas ) : if pd . isnull ( value ) is True : return "" if isinstance ( value , float ) : value = repr ( value ) else : value = str ( value ) if ( not allow_formulas ) and value . startswith ( '=' ) : value = "'%s" % value return value
Get a string representation of dataframe value .
48,474
def _resize_to_minimum ( worksheet , rows = None , cols = None ) : current_cols , current_rows = ( worksheet . col_count , worksheet . row_count ) if rows is not None and rows <= current_rows : rows = None if cols is not None and cols <= current_cols : cols = None if cols is not None or rows is not None : worksheet . resize ( rows , cols )
Resize the worksheet to guarantee a minimum size either in rows or columns or both .
48,475
def get_as_dataframe(worksheet, evaluate_formulas=False, **options):
    """Return the contents of *worksheet* as a pandas DataFrame.

    :param evaluate_formulas: fetch computed cell values rather than
        the formulas themselves
    :param options: keyword arguments passed through to pandas'
        TextParser
    """
    values = _get_all_values(worksheet, evaluate_formulas)
    parser = TextParser(values, **options)
    return parser.read()
Returns the worksheet contents as a DataFrame .
48,476
def timestamp(self, timestamp):
    """Set the entry timestamp from a datetime, an `fmt`-formatted
    string, or None (meaning now, UTC)."""
    if isinstance(timestamp, datetime):
        self._timestamp = timestamp
    elif timestamp is None:
        self._timestamp = datetime.utcnow()
    else:
        # fmt is the module-level timestamp format string.
        self._timestamp = datetime.strptime(timestamp, fmt)
Entry timestamp as datetime .
48,477
def primitive(self):
    """Entry as a Python primitive (dict); unset entry-number and
    item-hash fields are omitted."""
    result = {}
    if self.entry_number is not None:
        result['entry-number'] = self.entry_number
    if self.item_hash is not None:
        result['item-hash'] = self.item_hash
    # fmt is the module-level timestamp format string.
    result['timestamp'] = self.timestamp.strftime(fmt)
    return result
Entry as Python primitive .
48,478
def primitive(self, primitive):
    """Set entry fields from a Python primitive (dict)."""
    for attr, key in (('entry_number', 'entry-number'),
                      ('item_hash', 'item-hash'),
                      ('timestamp', 'timestamp')):
        setattr(self, attr, primitive[key])
Entry from Python primitive .
48,479
def config(self, name, suffix):
    """Return config variable value, defaulting to environment."""
    key = ('%s_%s' % (name, suffix)).upper().replace('-', '_')
    try:
        return self._config[key]
    except KeyError:
        # Fall back to the environment; raises KeyError if unset there
        # too, matching the original behaviour.
        return os.environ[key]
Return config variable value defaulting to environment
48,480
def index(self, index, field, value):
    """Search for records matching *value* in *field* of an index
    service.

    Underscores in returned field names are converted back to hyphens.

    :returns: list of result dicts, one per hit
    """
    params = {
        "q": value,
        "q.options": "{fields:['%s']}" % (field.replace('-', '_')),
    }
    response = self.get(self.config(index, 'search_url'), params=params)
    hits = response.json()['hits']['hit']
    # BUG FIX: build fresh dicts instead of popping and re-adding keys
    # while iterating the same dict, which mutates it mid-iteration
    # (RuntimeError / skipped keys in Python 3).
    return [{key.replace('_', '-'): val
             for key, val in hit['fields'].items()}
            for hit in hits]
Search for records matching a value in an index service
48,481
def primitive(self):
    """Python primitive representation of the item.

    Private attributes (leading underscore) are skipped; sets are
    converted to sorted lists so the result is deterministic and
    JSON-serializable; other values are shallow-copied.
    """
    # IMPROVED: no longer shadows the builtin `dict`, and sets are
    # converted directly instead of being copied and then replaced.
    result = {}
    for key, val in self.__dict__.items():
        if key.startswith('_'):
            continue
        result[key] = sorted(val) if isinstance(val, set) else copy(val)
    return result
Python primitive representation .
48,482
def primitive(self, dictionary):
    """Set item state from a Python primitive, dropping entries whose
    values are falsy (None, empty string, 0, ...)."""
    cleaned = {}
    for key, val in dictionary.items():
        if val:
            cleaned[key] = val
    self.__dict__ = cleaned
Item from Python primitive .
48,483
def primitive(self):
    """Record as a Python primitive: the item's fields overlaid with
    the entry's fields (entry values win on collision)."""
    merged = dict(self.item.primitive)
    merged.update(self.entry.primitive)
    return merged
Record as Python primitive .
48,484
def primitive(self, primitive):
    """Set record state from a Python primitive: the entry fields go
    to a new Entry, the remainder to a new Item."""
    self.entry = Entry()
    self.entry.primitive = primitive
    remainder = copy(primitive)
    for field in self.entry.fields:
        del remainder[field]
    self.item = Item()
    self.item.primitive = remainder
Record from Python primitive .
48,485
def load(self, text, fieldnames=None):
    """Set item state from a TSV representation.

    When *fieldnames* is None the first line of *text* holds the field
    names and the second line the values; otherwise the first line
    holds the values for the given *fieldnames*.
    """
    lines = text.split('\n')
    if fieldnames is None:
        fieldnames = load_line(lines[0])
        values = load_line(lines[1])
    else:
        # BUG FIX: the fieldnames argument used to be silently ignored
        # and overwritten from the first line.
        values = load_line(lines[0])
    self.__dict__ = dict(zip(fieldnames, values))
Item from TSV representation .
48,486
def reader(stream, fieldnames=None):
    """Yield Items read from *stream* containing TSV; the first line
    supplies the field names unless *fieldnames* is given."""
    names = fieldnames if fieldnames else load_line(stream.readline())
    for line in stream:
        item = Item()
        item.__dict__ = dict(zip(names, load_line(line)))
        yield item
Read Items from a stream containing TSV .
48,487
def dump(self):
    """TSV representation: a header line of keys followed by a line of
    values, or the empty string for an empty item."""
    primitive = self.primitive
    if not primitive:
        return ''
    return dump_line(self.keys) + dump_line(self.values)
TSV representation .
48,488
def load(self, text, lineterminator='\r\n', quotechar='"', delimiter=",",
         escapechar=escapechar, quoting=csv.QUOTE_MINIMAL):
    """Set item state from a single CSV record in *text*.

    An empty/falsy *quotechar* switches to QUOTE_NONE. An empty input
    yields an empty primitive.
    """
    # NOTE(review): the escapechar parameter is accepted but not
    # passed to DictReader — confirm whether that is intentional.
    if not quotechar:
        quoting = csv.QUOTE_NONE
    stream = io.StringIO(text)
    rows = csv.DictReader(stream, delimiter=delimiter,
                          quotechar=quotechar, quoting=quoting,
                          lineterminator=lineterminator)
    if rows.fieldnames:
        # Strip stray whitespace from the header names.
        rows.fieldnames = [name.strip() for name in rows.fieldnames]
    try:
        self.primitive = next(rows)
    except StopIteration:
        self.primitive = {}
Item from CSV representation .
48,489
def dump(self, **kwargs):
    """CSV representation of an item, with any leading whitespace
    stripped; *kwargs* are passed through to the Writer."""
    buffer = io.StringIO()
    writer = Writer(buffer, self.keys, **kwargs)
    writer.write(self)
    text = buffer.getvalue().lstrip()
    buffer.close()
    return text
CSV representation of an item.
48,490
def git_hash(blob):
    """Return the git-compatible SHA-1 hex digest for *blob* (bytes):
    sha1 of the ``blob <len>\\0`` header followed by the data."""
    header = 'blob {}\0'.format(len(blob)).encode('utf-8')
    return sha1(header + blob).hexdigest()
Return git - hash compatible SHA - 1 hexdigits for a blob of data .
48,491
def dump(self):
    """Compact, key-sorted JSON representation of the item (non-ASCII
    characters are kept as-is)."""
    options = {'sort_keys': True, 'ensure_ascii': False,
               'separators': (',', ':')}
    return json.dumps(self.primitive, **options)
Item as a JSON representation .
48,492
def reader(stream):
    """Yield Items from *stream* containing a JSON array, decoding one
    object at a time with raw_decode and stepping over the array
    delimiters via the module-level START/END patterns."""
    text = stream.read()
    decode = json.JSONDecoder().raw_decode
    position = START.match(text, 0).end()
    while position < len(text):
        obj, end = decode(text, position)
        item = Item()
        item.primitive = obj
        yield item
        position = END.match(text, end).end()
Read Items from a stream containing a JSON array .
48,493
def reader(stream):
    """Yield Items from *stream*, one JSON document per line."""
    for json_line in stream:
        item = Item()
        item.json = json_line
        yield item
Read Items from a stream containing lines of JSON .
48,494
def tag_id(self, name):
    """Return the unique identifier for tag *name*, generating a fresh
    one via reset_tag when the store holds none (or a falsy value)."""
    existing = self._store.get(self.tag_key(name))
    if existing:
        return existing
    return self.reset_tag(name)
Get the unique tag identifier for a given tag .
48,495
def reset_tag(self, name):
    """Generate a fresh identifier for tag *name*, store it without
    expiry, and return it."""
    # uuid4().hex is the 32-char hex form, i.e. the uuid with its
    # hyphens removed.
    identifier = uuid.uuid4().hex
    self._store.forever(self.tag_key(name), identifier)
    return identifier
Reset the tag and return the new tag identifier .
48,496
def init_config(self, app):
    """Initialize the app's logging configuration.

    Defaults are filled in from the config module (keys starting with
    ``LOGGING_FS``), and the logfile path template is expanded with
    the instance path and sys.prefix.
    """
    default_level = 'DEBUG' if app.debug else 'WARNING'
    app.config.setdefault('LOGGING_FS_LEVEL', default_level)
    for key in dir(config):
        if key.startswith('LOGGING_FS'):
            app.config.setdefault(key, getattr(config, key))
    logfile = app.config['LOGGING_FS_LOGFILE']
    if logfile is not None:
        app.config['LOGGING_FS_LOGFILE'] = logfile.format(
            instance_path=app.instance_path,
            sys_prefix=sys.prefix,
        )
Initialize config .
48,497
def install_handler(self, app):
    """Install a rotating-file log handler on the Flask application.

    :raises ValueError: if the configured logfile's directory does not
        exist
    """
    basedir = dirname(app.config['LOGGING_FS_LOGFILE'])
    if not exists(basedir):
        # NOTE(review): message has a grammar slip ("does not exists")
        # — left untouched here as it is a runtime string.
        raise ValueError(
            'Log directory {0} does not exists.'.format(basedir))
    # delay=True postpones opening the file until the first record.
    handler = RotatingFileHandler(
        app.config['LOGGING_FS_LOGFILE'],
        backupCount=app.config['LOGGING_FS_BACKUPCOUNT'],
        maxBytes=app.config['LOGGING_FS_MAXBYTES'],
        delay=True,
    )
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'))
    handler.setLevel(app.config['LOGGING_FS_LEVEL'])
    app.logger.addHandler(handler)
    if app.config['LOGGING_FS_PYWARNINGS']:
        # Route Python warnings through the same handler.
        self.capture_pywarnings(handler)
    app.logger.addFilter(add_request_id_filter)
Install log handler on Flask application .
48,498
def remember(self, key, minutes, callback):
    """Return the cached value for *key*; on a miss, compute it via
    *callback*, cache it for *minutes*, and return it."""
    cached = self.get(key)
    if cached is not None:
        return cached
    fresh = value(callback)
    self.put(key, fresh, minutes)
    return fresh
Get an item from the cache or store the default value .
48,499
def remember_forever(self, key, callback):
    """Return the cached value for *key*; on a miss, compute it via
    *callback*, cache it permanently, and return it."""
    cached = self.get(key)
    if cached is not None:
        return cached
    fresh = value(callback)
    self.forever(key, fresh)
    return fresh
Get an item from the cache or store the default value forever .