idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
48,300
def _get_template(self):
    """Returns a jinja2 template for this report.

    Templates are loaded from the package's assets/templates directory
    and named after the report (``<report_name>.html``).
    """
    # NOTE(review): 'jinja2.ext.with_' was removed in Jinja2 3.x; this
    # implies a jinja2<3 dependency — confirm against requirements.
    template_loader = PackageLoader(self._package_name, 'assets/templates')
    environment = Environment(loader=template_loader,
                              extensions=['jinja2.ext.with_'])
    return environment.get_template('{}.html'.format(self._report_name))
Returns a template for this report .
48,301
def _write(self, context, report_dir, report_name, assets_dir=None,
           template=None):
    """Writes the data in `context`, rendered via `template`, to
    `report_name` within `report_dir`.

    :param context: data to render into the template
    :param report_dir: directory to write the report file into
    :param report_name: filename of the report
    :param assets_dir: optional directory to copy static assets into
    :param template: optional template; defaults to this report's own
    """
    if template is None:
        template = self._get_template()
    rendered = template.render(context)
    output_path = os.path.join(report_dir, report_name)
    with open(output_path, 'w', encoding='utf-8') as out:
        out.write(rendered)
    if assets_dir:
        self._copy_static_assets(assets_dir)
Writes the data in context, rendered with the report's template, to report_name in report_dir.
48,302
def pull(self):
    """Print out summary information about each packet from the
    input_stream.

    One line per packet: timestamp, transport type, packet type,
    addresses (with resolved domains when present) and application info.
    """
    for item in self.input_stream:
        print('%s -' % item['timestamp'], end='')
        if item['transport']:
            print(item['transport']['type'], end='')
        packet_type = item['packet']['type']
        print(packet_type, end='')
        packet = item['packet']
        if packet_type in ['IP', 'IP6']:
            # BUG FIX: both format strings below were truncated in the
            # source (unterminated literals); reconstructed as
            # 'src(domain) --> dst(domain)' / 'src --> dst'.
            if 'src_domain' in packet:
                print('%s(%s) --> %s(%s)' % (
                    net_utils.inet_to_str(packet['src']),
                    packet['src_domain'],
                    net_utils.inet_to_str(packet['dst']),
                    packet['dst_domain']), end='')
            else:
                print('%s --> %s' % (
                    net_utils.inet_to_str(packet['src']),
                    net_utils.inet_to_str(packet['dst'])), end='')
        else:
            print(str(packet))
        if item['application']:
            print('Application: %s' % item['application']['type'], end='')
            print(str(item['application']), end='')
        print()
Print out summary information about each packet from the input_stream
48,303
def pull(self):
    """Print out detailed information about each packet from the
    input_stream.

    For each packet prints timestamp, Ethernet frame, network layer,
    fragmentation flags (IPv4), transport fields, payload size,
    application info, resolved domains and tags.
    """
    for item in self.input_stream:
        print('Timestamp: %s' % item['timestamp'])
        # BUG FIX: the three multi-value format strings below were
        # truncated in the source (unterminated literals); reconstructed.
        print('Ethernet Frame: %s --> %s (type: %d)' % (
            net_utils.mac_to_str(item['eth']['src']),
            net_utils.mac_to_str(item['eth']['dst']),
            item['eth']['type']))
        packet_type = item['packet']['type']
        print('Packet: %s ' % packet_type, end='')
        packet = item['packet']
        if packet_type in ['IP', 'IP6']:
            print('%s --> %s (len:%d ttl:%d)' % (
                net_utils.inet_to_str(packet['src']),
                net_utils.inet_to_str(packet['dst']),
                packet['len'], packet['ttl']), end='')
            if packet_type == 'IP':
                # IPv4 only: fragmentation information.
                print('-- Frag(df:%d mf:%d offset:%d)' % (
                    packet['df'], packet['mf'], packet['offset']))
            else:
                print()
        else:
            print(str(packet))
        if item['transport']:
            transport_info = item['transport']
            print('Transport: %s ' % transport_info['type'], end='')
            for key, value in compat.iteritems(transport_info):
                if key != 'data':
                    print(key + ':' + repr(value), end=' ')
            data = transport_info['data']
            print('\nData: %d bytes' % len(data), end='')
            if data:
                print('(%s...)' % repr(data)[:30])
            else:
                print()
        if item['application']:
            print('Application: %s' % item['application']['type'], end='')
            print(str(item['application']))
        if 'src_domain' in packet:
            print('Domains: %s --> %s' % (
                packet['src_domain'], packet['dst_domain']))
        if 'tags' in item:
            print(list(item['tags']))
        print()
Print out information about each packet from the input_stream
48,304
def load_backend(**orm_config):
    """Load the client adaptor module of django_peeringdb.

    Assumes config is valid.

    :param orm_config: ORM configuration; must contain a 'database' key,
        may contain 'secret_key' and 'migrate'
    :return: the django_peeringdb client_adaptor backend module
    """
    settings = {'SECRET_KEY': orm_config.get('secret_key', '')}
    db_config = orm_config['database']
    if db_config:
        settings['DATABASES'] = {'default': database_settings(db_config)}
    # Imports are deferred: configure() must run before the backend
    # module is imported.
    from django_peeringdb.client_adaptor.setup import configure
    configure(**settings)
    from django_peeringdb.client_adaptor import backend
    if orm_config.get('migrate'):
        # Improvement: instantiate Backend once instead of twice.
        backend_instance = backend.Backend()
        if not backend_instance.is_database_migrated():
            backend_instance.migrate_database()
    return backend
Load the client adaptor module of django_peeringdb Assumes config is valid .
48,305
def add_label_count(self):
    """Adds to each result row a count of the number of occurrences of
    that n-gram across all works within the label.

    Operates in place on self._matches.
    """
    self._logger.info('Adding label count')

    def add_label_count(df):
        # Per (label, ngram) group: take each work's maximum count and
        # sum across works to obtain the label-wide occurrence count.
        work_maxima = df.groupby(constants.WORK_FIELDNAME, sort=False).max()
        df.loc[:, constants.LABEL_COUNT_FIELDNAME] = work_maxima[
            constants.COUNT_FIELDNAME].sum()
        return df

    if self._matches.empty:
        # Still create the column so downstream consumers find it.
        self._matches[constants.LABEL_COUNT_FIELDNAME] = 0
    else:
        self._matches.loc[:, constants.LABEL_COUNT_FIELDNAME] = 0
        self._matches = self._matches.groupby(
            [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME],
            sort=False).apply(add_label_count)
    self._logger.info('Finished adding label count')
Adds to each result row a count of the number of occurrences of that n - gram across all works within the label .
48,306
def add_label_work_count(self):
    """Adds to each result row a count of the number of works within the
    label that contain that n-gram.

    Operates in place on self._matches.
    """
    self._logger.info('Adding label work count')

    def add_label_text_count(df):
        # Per (label, ngram) group: .any() gives a boolean per work
        # (True where the work has any non-zero count); summing the
        # booleans yields the number of works containing the n-gram.
        work_maxima = df.groupby(constants.WORK_FIELDNAME, sort=False).any()
        df.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = work_maxima[
            constants.COUNT_FIELDNAME].sum()
        return df

    if self._matches.empty:
        # Still create the column so downstream consumers find it.
        self._matches[constants.LABEL_WORK_COUNT_FIELDNAME] = 0
    else:
        self._matches.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = 0
        self._matches = self._matches.groupby(
            [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME],
            sort=False).apply(add_label_text_count)
    self._logger.info('Finished adding label work count')
Adds to each result row a count of the number of works within the label that contain that n-gram.
48,307
def _annotate_bifurcated_extend_data(self, row, smaller, larger, tokenize,
                                     join):
    """Returns row annotated with whether it should be deleted or not.

    :param row: result row to annotate
    :param smaller: results for n-grams one size smaller than row's
    :param larger: results for n-grams one size larger than row's
        (presumably — confirm against caller)
    :param tokenize: callable splitting an n-gram into tokens
    :param join: callable joining tokens into an n-gram
    """
    lcf = constants.LABEL_COUNT_FIELDNAME
    nf = constants.NGRAM_FIELDNAME
    ngram = row[constants.NGRAM_FIELDNAME]
    label_count = row[constants.LABEL_COUNT_FIELDNAME]
    if label_count == 1 and not smaller.empty:
        # The n-gram occurs once in the label: mark it for deletion if
        # either of its two (n-1)-gram constituents also has a maximum
        # label count of 1.
        ngram_tokens = tokenize(ngram)
        sub_ngram1 = join(ngram_tokens[:-1])
        sub_ngram2 = join(ngram_tokens[1:])
        pattern = FilteredWitnessText.get_filter_ngrams_pattern(
            [sub_ngram1, sub_ngram2])
        if smaller[smaller[constants.NGRAM_FIELDNAME].str.match(pattern)][
                constants.LABEL_COUNT_FIELDNAME].max() == 1:
            row[DELETE_FIELDNAME] = True
    elif not larger.empty and larger[larger[nf].str.contains(
            ngram, regex=False)][lcf].max() == label_count:
        # Some larger n-gram containing this one has the same label
        # count, so this row is redundant.
        row[DELETE_FIELDNAME] = True
    return row
Returns row annotated with whether it should be deleted or not .
48,308
def bifurcated_extend(self, corpus, max_size):
    """Replaces the results with those n-grams that contain any of the
    original n-grams and that represent points at which an n-gram is a
    constituent of multiple larger n-grams with a lower label count.

    :param corpus: corpus of works to extend within
    :param max_size: maximum n-gram size to extend to
    """
    # Unreduced intermediate results are staged in a temporary file that
    # is always cleaned up, even if preparation fails.
    fd, path = tempfile.mkstemp(text=True)
    try:
        self._prepare_bifurcated_extend_data(corpus, max_size, path, fd)
    finally:
        try:
            os.remove(path)
        except OSError as err:
            msg = ('Failed to remove temporary file containing unreduced '
                   'results: {}')
            self._logger.error(msg.format(err))
    self._bifurcated_extend()
Replaces the results with those n - grams that contain any of the original n - grams and that represent points at which an n - gram is a constituent of multiple larger n - grams with a lower label count .
48,309
def collapse_witnesses(self):
    """Groups together witnesses for the same n-gram and work that have
    the same count, and outputs a single row for each group.

    The SIGLUM column is replaced by a SIGLA column holding a sorted,
    comma-separated list of the collapsed witnesses' sigla.
    """
    if self._matches.empty:
        # Nothing to collapse; just rename the column for a consistent
        # output schema.
        self._matches.rename(
            columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME},
            inplace=True)
        return
    # Temporarily copy sigla so the original SIGLUM column can hold the
    # merged value inside merge_sigla.
    self._matches.loc[:, constants.SIGLA_FIELDNAME] = self._matches[
        constants.SIGLUM_FIELDNAME]
    grouped = self._matches.groupby(
        [constants.WORK_FIELDNAME, constants.NGRAM_FIELDNAME,
         constants.COUNT_FIELDNAME], sort=False)

    def merge_sigla(df):
        # Keep the first row of the group and overwrite its siglum with
        # the sorted, comma-separated sigla of all rows in the group.
        merged = df[0:1]
        sigla = list(df[constants.SIGLA_FIELDNAME])
        sigla.sort()
        merged[constants.SIGLUM_FIELDNAME] = ', '.join(sigla)
        return merged

    self._matches = grouped.apply(merge_sigla)
    del self._matches[constants.SIGLA_FIELDNAME]
    self._matches.rename(
        columns={constants.SIGLUM_FIELDNAME: constants.SIGLA_FIELDNAME},
        inplace=True)
Groups together witnesses for the same n-gram and work that have the same count, and outputs a single row for each group.
48,310
def csv(self, fh):
    """Writes the results data to `fh` in CSV format and returns `fh`.

    :param fh: file-like object to write to
    :return: `fh`
    """
    self._matches.to_csv(fh, index=False, float_format='%d',
                         encoding='utf-8')
    return fh
Writes the results data to fh in CSV format and returns fh .
48,311
def excise(self, ngram):
    """Removes all rows whose n-gram contains `ngram`.

    :param ngram: substring to excise on
    """
    self._logger.info('Excising results containing "{}"'.format(ngram))
    if not ngram:
        # An empty string would match every row; do nothing.
        return
    containing = self._matches[constants.NGRAM_FIELDNAME].str.contains(
        ngram, regex=False)
    self._matches = self._matches[~containing]
Removes all rows whose n - gram contains ngram .
48,312
def extend(self, corpus):
    """Adds rows for all longer forms of n-grams in the results that are
    present in the witnesses.

    :param corpus: corpus the results derive from
    """
    self._logger.info('Extending results')
    if self._matches.empty:
        return
    highest_n = self._matches[constants.SIZE_FIELDNAME].max()
    if highest_n == 1:
        self._logger.warning(
            'Extending results that contain only 1-grams is unsupported; '
            'the original results will be used')
        return
    # Intersection-derived results must be reciprocally filtered again
    # after extension.
    is_intersect = self._is_intersect_results(self._matches)
    # Only the longest n-grams can be usefully extended.
    matches = self._matches[
        self._matches[constants.SIZE_FIELDNAME] == highest_n]
    extended_matches = pd.DataFrame(columns=constants.QUERY_FIELDNAMES)
    cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME,
            constants.LABEL_FIELDNAME]
    for index, (work, siglum, label) in \
            matches[cols].drop_duplicates().iterrows():
        extended_ngrams = self._generate_extended_ngrams(
            matches, work, siglum, label, corpus, highest_n)
        extended_matches = pd.concat(
            [extended_matches,
             self._generate_extended_matches(
                 extended_ngrams, highest_n, work, siglum, label)],
            sort=False)
        extended_ngrams = None
    if is_intersect:
        extended_matches = self._reciprocal_remove(extended_matches)
    # BUG FIX: DataFrame.append was deprecated and removed in pandas
    # 2.0; pd.concat is the supported equivalent.
    self._matches = pd.concat(
        [self._matches, extended_matches], ignore_index=True).reindex(
            columns=constants.QUERY_FIELDNAMES)
Adds rows for all longer forms of n - grams in the results that are present in the witnesses .
48,313
def _generate_extended_matches(self, extended_ngrams, highest_n, work,
                               siglum, label):
    """Returns extended match data derived from `extended_ngrams`.

    Every n-gram of size > `highest_n` within each extended n-gram is
    turned into a result row for the given work/siglum/label.
    """
    rows_list = []
    for extended_ngram in extended_ngrams:
        text = Text(extended_ngram, self._tokenizer)
        # All n-grams from highest_n+1 up to the full token length of
        # this extended n-gram.
        for size, ngrams in text.get_ngrams(highest_n + 1,
                                            len(text.get_tokens())):
            data = [{constants.WORK_FIELDNAME: work,
                     constants.SIGLUM_FIELDNAME: siglum,
                     constants.LABEL_FIELDNAME: label,
                     constants.SIZE_FIELDNAME: size,
                     constants.NGRAM_FIELDNAME: ngram,
                     constants.COUNT_FIELDNAME: count}
                    for ngram, count in ngrams.items()]
            rows_list.extend(data)
    self._logger.debug('Number of extended results: {}'.format(
        len(rows_list)))
    extended_matches = pd.DataFrame(rows_list)
    # Free the intermediate list before the groupby below.
    rows_list = None
    self._logger.debug('Finished generating intermediate extended matches')
    groupby_fields = [constants.NGRAM_FIELDNAME, constants.WORK_FIELDNAME,
                      constants.SIGLUM_FIELDNAME, constants.SIZE_FIELDNAME,
                      constants.LABEL_FIELDNAME]
    # The column check guards against an empty DataFrame (no rows means
    # no columns to group on).
    if constants.NGRAM_FIELDNAME in extended_matches:
        extended_matches = extended_matches.groupby(
            groupby_fields, sort=False).sum().reset_index()
    return extended_matches
Returns extended match data derived from extended_ngrams .
48,314
def _generate_extended_ngrams(self, matches, work, siglum, label, corpus,
                              highest_n):
    """Returns the n-grams of the largest size that exist in the
    `siglum` witness to `work` under `label`, generated by repeatedly
    joining overlapping n-grams in `matches`.

    The returned list contains each extended n-gram once per occurrence
    in the witness text, longest first.
    """
    t_join = self._tokenizer.joiner.join
    witness_matches = matches[
        (matches[constants.WORK_FIELDNAME] == work) &
        (matches[constants.SIGLUM_FIELDNAME] == siglum) &
        (matches[constants.LABEL_FIELDNAME] == label)]
    text = corpus.get_witness(work, siglum).get_token_content()
    ngrams = [tuple(self._tokenizer.tokenize(ngram)) for ngram in
              list(witness_matches[constants.NGRAM_FIELDNAME])]
    working_ngrams = ngrams[:]
    extended_ngrams = set(ngrams)
    new_working_ngrams = []
    overlap = highest_n - 1
    # Index from each n-gram's leading (n-1)-token prefix to the final
    # tokens of the n-grams sharing that prefix; built once from the
    # original n-grams only (extensions reuse this index).
    ngram_index = {}
    for ngram in ngrams:
        values = ngram_index.setdefault(ngram[:-1], [])
        values.append(ngram[-1:])
    # Bind methods locally; hot loop below.
    extended_add = extended_ngrams.add
    new_working_append = new_working_ngrams.append
    ngram_size = highest_n
    while working_ngrams:
        removals = set()
        ngram_size += 1
        self._logger.debug(
            'Iterating over {} n-grams to produce {}-grams'.format(
                len(working_ngrams), ngram_size))
        for base in working_ngrams:
            remove_base = False
            # An n-gram whose trailing overlap matches another n-gram's
            # leading overlap may combine with it into a longer n-gram,
            # kept only if it actually occurs in the witness text.
            base_overlap = base[-overlap:]
            for next_token in ngram_index.get(base_overlap, []):
                extension = base + next_token
                if t_join(extension) in text:
                    extended_add(extension)
                    new_working_append(extension)
                    remove_base = True
            if remove_base:
                # The base is subsumed by its extension(s).
                removals.add(base)
        extended_ngrams -= removals
        working_ngrams = new_working_ngrams[:]
        new_working_ngrams = []
        new_working_append = new_working_ngrams.append
    extended_ngrams = sorted(extended_ngrams, key=len, reverse=True)
    extended_ngrams = [t_join(ngram) for ngram in extended_ngrams]
    self._logger.debug('Generated {} extended n-grams'.format(
        len(extended_ngrams)))
    self._logger.debug('Longest generated n-gram: {}'.format(
        extended_ngrams[0]))
    # Align with the text longest-first, blanking out each occurrence so
    # shorter n-grams cannot re-match inside longer ones.
    ngrams = []
    for ngram in extended_ngrams:
        text, count = re.subn(re.escape(ngram), ' ', text)
        ngrams.extend([ngram] * count)
    self._logger.debug('Aligned extended n-grams with the text; '
                       '{} distinct n-grams exist'.format(len(ngrams)))
    return ngrams
Returns the n - grams of the largest size that exist in siglum witness to work under label generated from adding together overlapping n - grams in matches .
48,315
def _generate_filter_ngrams(self, data, min_size):
    """Returns the n-grams in `data` that do not contain any other
    n-gram in `data`.

    :param data: results data to filter
    :param min_size: smallest n-gram size present in `data`
    :return: list of n-grams
    """
    size_col = data[constants.SIZE_FIELDNAME]
    # All minimum-size n-grams survive by definition.
    survivors = list(data[size_col == min_size][constants.NGRAM_FIELDNAME])
    for size in range(min_size + 1, size_col.max() + 1):
        # A larger n-gram survives only if no survivor occurs within it.
        pattern = FilteredWitnessText.get_filter_ngrams_pattern(survivors)
        candidates = data[size_col == size][constants.NGRAM_FIELDNAME]
        survivors.extend(ngram for ngram in candidates
                         if pattern.search(ngram) is None)
    return survivors
Returns the n - grams in data that do not contain any other n - gram in data .
48,316
def _generate_substrings(self, ngram, size):
    """Returns a list of all substrings of `ngram`.

    Each substring appears as many times as it occurs within `ngram`.
    """
    text = Text(ngram, self._tokenizer)
    result = []
    for _, sub_ngrams in text.get_ngrams(1, size - 1):
        for sub_ngram, occurrences in sub_ngrams.items():
            result += [sub_ngram] * occurrences
    return result
Returns a list of all substrings of ngram .
48,317
def group_by_witness(self):
    """Groups results by witness, providing a single summary field
    giving the n-grams found in it, a count of their number, and the
    count of their combined occurrences.

    Operates in place on self._matches.
    """
    if self._matches.empty:
        # Produce an empty frame with the grouped output schema.
        self._matches = pd.DataFrame(
            {}, columns=[constants.WORK_FIELDNAME,
                         constants.SIGLUM_FIELDNAME,
                         constants.LABEL_FIELDNAME,
                         constants.NGRAMS_FIELDNAME,
                         constants.NUMBER_FIELDNAME,
                         constants.TOTAL_COUNT_FIELDNAME])
        return

    def witness_summary(group):
        # Sort n-grams alphabetically and collapse the group to its
        # first row, annotated with the summary fields.
        matches = group.sort_values(by=[constants.NGRAM_FIELDNAME],
                                    ascending=[True])
        match = matches.iloc[0]
        ngrams = ', '.join(list(matches[constants.NGRAM_FIELDNAME]))
        match[constants.NGRAMS_FIELDNAME] = ngrams
        match[constants.NUMBER_FIELDNAME] = len(matches)
        match[constants.TOTAL_COUNT_FIELDNAME] = matches[
            constants.COUNT_FIELDNAME].sum()
        return match

    group_cols = [constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME]
    # Zero-count (filled) rows are excluded from the summaries.
    self._matches = self._matches[
        self._matches[constants.COUNT_FIELDNAME] != 0]
    self._matches = self._matches.groupby(
        group_cols, sort=False).apply(witness_summary)
    del self._matches[constants.NGRAM_FIELDNAME]
    del self._matches[constants.SIZE_FIELDNAME]
    del self._matches[constants.COUNT_FIELDNAME]
Groups results by witness, providing a single summary field giving the n-grams found in it, a count of their number, and the count of their combined occurrences.
48,318
def _is_intersect_results(results):
    """Returns False if `results` has an n-gram that exists in only one
    label, True otherwise.

    Only the first row's n-gram is sampled; intersect results carry
    every n-gram under every label, so one sample suffices.
    """
    first = results.iloc[0]
    sample_ngram = first[constants.NGRAM_FIELDNAME]
    sample_label = first[constants.LABEL_FIELDNAME]
    other_labels = results[
        (results[constants.NGRAM_FIELDNAME] == sample_ngram) &
        (results[constants.LABEL_FIELDNAME] != sample_label)]
    return not other_labels.empty
Returns False if results has an n-gram that exists in only one label, True otherwise.
48,319
def prune_by_ngram(self, ngrams):
    """Removes results rows whose n-gram is in `ngrams`.

    :param ngrams: n-grams to remove
    """
    self._logger.info('Pruning results by n-gram')
    unwanted = self._matches[constants.NGRAM_FIELDNAME].isin(ngrams)
    self._matches = self._matches[~unwanted]
Removes results rows whose n - gram is in ngrams .
48,320
def prune_by_ngram_count_per_work(self, minimum=None, maximum=None,
                                  label=None):
    """Removes results rows if the n-gram count for all works bearing
    that n-gram is outside the range specified by `minimum` and
    `maximum`.

    :param minimum: minimum per-work count (inclusive)
    :param maximum: maximum per-work count (inclusive)
    :param label: optional label to restrict the count test to
    """
    self._logger.info('Pruning results by n-gram count per work')
    matches = self._matches
    # With no bounds given, every n-gram is kept.
    keep_ngrams = matches[constants.NGRAM_FIELDNAME].unique()
    if label is not None:
        matches = matches[matches[constants.LABEL_FIELDNAME] == label]
    counts = matches[constants.COUNT_FIELDNAME]
    if minimum and maximum:
        mask = (counts >= minimum) & (counts <= maximum)
    elif minimum:
        mask = counts >= minimum
    elif maximum:
        # BUG FIX: the original built this mask from self._matches
        # rather than the (possibly label-filtered) `matches`, causing
        # index misalignment whenever a label was supplied.
        mask = counts <= maximum
    else:
        mask = None
    if mask is not None:
        keep_ngrams = matches[mask][constants.NGRAM_FIELDNAME].unique()
    self._matches = self._matches[
        self._matches[constants.NGRAM_FIELDNAME].isin(keep_ngrams)]
Removes results rows if the n - gram count for all works bearing that n - gram is outside the range specified by minimum and maximum .
48,321
def prune_by_ngram_size(self, minimum=None, maximum=None):
    """Removes results rows whose n-gram size is outside the range
    specified by `minimum` and `maximum`.

    :param minimum: minimum size (inclusive)
    :param maximum: maximum size (inclusive)
    """
    self._logger.info('Pruning results by n-gram size')
    if minimum:
        keep = self._matches[constants.SIZE_FIELDNAME] >= minimum
        self._matches = self._matches[keep]
    if maximum:
        keep = self._matches[constants.SIZE_FIELDNAME] <= maximum
        self._matches = self._matches[keep]
Removes results rows whose n - gram size is outside the range specified by minimum and maximum .
48,322
def prune_by_work_count(self, minimum=None, maximum=None, label=None):
    """Removes results rows for n-grams that are not attested in a
    number of works in the range specified by `minimum` and `maximum`.

    :param minimum: minimum number of works (inclusive)
    :param maximum: maximum number of works (inclusive)
    :param label: optional label to restrict the work count to
    """
    self._logger.info('Pruning results by work count')
    count_fieldname = 'tmp_count'
    matches = self._matches
    if label is not None:
        matches = matches[matches[constants.LABEL_FIELDNAME] == label]
    # Zero-count (filled) rows do not attest the n-gram.
    filtered = matches[matches[constants.COUNT_FIELDNAME] > 0]
    grouped = filtered.groupby(constants.NGRAM_FIELDNAME, sort=False)
    # Number of distinct works per n-gram.
    counts = pd.DataFrame(grouped[constants.WORK_FIELDNAME].nunique())
    counts.rename(columns={constants.WORK_FIELDNAME: count_fieldname},
                  inplace=True)
    if minimum:
        counts = counts[counts[count_fieldname] >= minimum]
    if maximum:
        counts = counts[counts[count_fieldname] <= maximum]
    # Inner merge keeps only rows whose n-gram survived the count filter.
    self._matches = pd.merge(self._matches, counts,
                             left_on=constants.NGRAM_FIELDNAME,
                             right_index=True)
    del self._matches[count_fieldname]
Removes results rows for n - grams that are not attested in a number of works in the range specified by minimum and maximum .
48,323
def reciprocal_remove(self):
    """Removes results rows for which the n-gram is not present in at
    least one text in each labelled set of texts."""
    self._logger.info(
        'Removing n-grams that are not attested in all labels')
    reduced = self._reciprocal_remove(self._matches)
    self._matches = reduced
Removes results rows for which the n - gram is not present in at least one text in each labelled set of texts .
48,324
def reduce(self):
    """Removes results rows whose n-grams are contained in larger
    n-grams.

    Counts of contained n-grams are decremented by the count of each
    containing n-gram; rows whose count drops to zero are dropped.
    """
    self._logger.info('Reducing the n-grams')
    # data maps (work, siglum) -> {ngram: {'count': int, 'size': int}};
    # labels maps work -> label (last row's label wins per work).
    data = {}
    labels = {}
    for row_index, row in self._matches.iterrows():
        work = row[constants.WORK_FIELDNAME]
        siglum = row[constants.SIGLUM_FIELDNAME]
        labels[work] = row[constants.LABEL_FIELDNAME]
        witness_data = data.setdefault((work, siglum), {})
        witness_data[row[constants.NGRAM_FIELDNAME]] = {
            'count': int(row[constants.COUNT_FIELDNAME]),
            'size': int(row[constants.SIZE_FIELDNAME])}
    for witness_data in data.values():
        # Process largest n-grams first so their counts propagate down
        # into their substrings.
        ngrams = list(witness_data.keys())
        ngrams.sort(key=lambda ngram: witness_data[ngram]['size'],
                    reverse=True)
        for ngram in ngrams:
            if witness_data[ngram]['count'] > 0:
                self._reduce_by_ngram(witness_data, ngram)
    # Rebuild the matches from the surviving (positive-count) n-grams.
    rows = []
    for (work, siglum), witness_data in data.items():
        for ngram, ngram_data in witness_data.items():
            count = ngram_data['count']
            if count > 0:
                rows.append({
                    constants.NGRAM_FIELDNAME: ngram,
                    constants.SIZE_FIELDNAME: ngram_data['size'],
                    constants.WORK_FIELDNAME: work,
                    constants.SIGLUM_FIELDNAME: siglum,
                    constants.COUNT_FIELDNAME: count,
                    constants.LABEL_FIELDNAME: labels[work]})
    self._matches = pd.DataFrame(rows, columns=constants.QUERY_FIELDNAMES)
Removes results rows whose n - grams are contained in larger n - grams .
48,325
def _reduce_by_ngram(self, data, ngram):
    """Lowers the counts of all n-grams in `data` that are substrings of
    `ngram` by `ngram`'s count.

    Substrings not present in `data` are ignored.

    :param data: mapping of ngram -> {'count': int, 'size': int}
    :param ngram: the containing n-gram
    """
    ngram_count = data[ngram]['count']
    ngram_size = data[ngram]['size']
    for sub in self._generate_substrings(ngram, ngram_size):
        entry = data.get(sub)
        if entry is not None:
            entry['count'] -= ngram_count
Lowers the counts of all n-grams in data that are substrings of ngram by ngram's count.
48,326
def relabel(self, catalogue):
    """Relabels results rows according to `catalogue`.

    :param catalogue: mapping of work name to label
    """
    for work, label in catalogue.items():
        work_rows = self._matches[constants.WORK_FIELDNAME] == work
        self._matches.loc[work_rows, constants.LABEL_FIELDNAME] = label
Relabels results rows according to catalogue .
48,327
def remove_label(self, label):
    """Removes all results rows associated with `label`.

    :param label: label whose rows are removed
    """
    self._logger.info('Removing label "{}"'.format(label))
    label_col = self._matches[constants.LABEL_FIELDNAME]
    count = label_col.value_counts().get(label, 0)
    self._matches = self._matches[label_col != label]
    self._logger.info('Removed {} labelled results'.format(count))
Removes all results rows associated with label .
48,328
def sort(self):
    """Sorts all results rows.

    Descending by size and count, ascending by n-gram, label, work and
    siglum.
    """
    sort_fields = [constants.SIZE_FIELDNAME, constants.NGRAM_FIELDNAME,
                   constants.COUNT_FIELDNAME, constants.LABEL_FIELDNAME,
                   constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME]
    directions = [False, True, False, True, True, True]
    self._matches.sort_values(by=sort_fields, ascending=directions,
                              inplace=True)
Sorts all results rows .
48,329
def zero_fill(self, corpus):
    """Adds rows to the results to ensure that, for every n-gram that is
    attested in at least one witness, every witness for that work has a
    row, with added rows having a count of zero.

    :param corpus: corpus supplying the sigla for each work
    """
    self._logger.info('Zero-filling results')
    zero_rows = []
    # Cache sigla per work to avoid repeated corpus lookups.
    work_sigla = {}
    grouping_cols = [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME,
                     constants.SIZE_FIELDNAME, constants.WORK_FIELDNAME]
    grouped = self._matches.groupby(grouping_cols, sort=False)
    for (label, ngram, size, work), group in grouped:
        row_data = {
            constants.NGRAM_FIELDNAME: ngram,
            constants.LABEL_FIELDNAME: label,
            constants.SIZE_FIELDNAME: size,
            constants.COUNT_FIELDNAME: 0,
            constants.WORK_FIELDNAME: work,
        }
        if work not in work_sigla:
            work_sigla[work] = corpus.get_sigla(work)
        for siglum in work_sigla[work]:
            if group[group[constants.SIGLUM_FIELDNAME] == siglum].empty:
                # BUG FIX: the original appended the same row_data dict
                # for every missing siglum, so a later siglum silently
                # overwrote earlier appended rows; append a copy instead.
                filled_row = dict(row_data)
                filled_row[constants.SIGLUM_FIELDNAME] = siglum
                zero_rows.append(filled_row)
    zero_df = pd.DataFrame(zero_rows, columns=constants.QUERY_FIELDNAMES)
    self._matches = pd.concat([self._matches, zero_df],
                              ignore_index=True, sort=False)
Adds rows to the results to ensure that, for every n-gram attested in at least one witness, every witness for that work has a row, with added rows having a count of zero.
48,330
def get_logger():
    """Returns the shared 'chains' logger, configuring logging defaults
    on first use.

    The logger is memoised as an attribute on the function itself.
    """
    if not hasattr(get_logger, 'logger'):
        format_str = '%(asctime)s [%(levelname)s] - %(module)s: %(message)s'
        logging.basicConfig(datefmt='%Y-%m-%d %H:%M:%S',
                            level=logging.INFO, format=format_str)
        get_logger.logger = logging.getLogger('chains')
    return get_logger.logger
Setup logging output defaults
48,331
def read_interface(self):
    """Read packets from the packet capture interface.

    Generator yielding, per packet, a dict with 'timestamp' (float
    seconds since epoch), 'raw_buf' (raw packet bytes) and
    'packet_num'. Stops after self.max_packets packets when that limit
    is set, then prints capture statistics.
    """
    if self._iface_is_file():
        self.pcap = pcapy.open_offline(self.iface_name)
    else:
        try:
            # snaplen 65536, promiscuous mode on, no read timeout.
            self.pcap = pcapy.open_live(self.iface_name, 65536, 1, 0)
        except OSError:
            try:
                logger.warning(
                    'Could not get promisc mode, turning flag off')
                self.pcap = pcapy.open_live(self.iface_name, 65536, 0, 0)
            except OSError:
                log_utils.panic(
                    'Could no open interface with any options (may need to be sudo)')
    if self.bpf:
        self.pcap.setfilter(self.bpf)
    print('listening on %s: %s' % (self.iface_name, self.bpf))
    _packets = 0
    while True:
        header, raw_buf = self.pcap.next()
        if not header:
            break
        seconds, micro_sec = header.getts()
        timestamp = seconds + micro_sec * 10 ** -6
        yield {'timestamp': timestamp, 'raw_buf': raw_buf,
               'packet_num': _packets}
        _packets += 1
        if self.max_packets and _packets >= self.max_packets:
            break
    try:
        print('Packet stats: %d received, %d dropped, %d dropped by interface'
              % self.pcap.stats())
    except pcapy.PcapError:
        print('No stats available...')
    # BUG FIX: the original ended with `raise StopIteration`, which
    # inside a generator is converted to RuntimeError under PEP 479
    # (Python 3.7+); a plain return ends the generator correctly.
    return
Read Packets from the packet capture interface
48,332
def generate_parser():
    """Returns a parser configured with sub-commands and arguments."""
    parser = argparse.ArgumentParser(
        description=constants.TACL_DESCRIPTION,
        formatter_class=ParagraphFormatter)
    subparsers = parser.add_subparsers(title='subcommands')
    # Registration order matters for help output; keep it stable.
    for add_subparser in (generate_align_subparser,
                          generate_catalogue_subparser,
                          generate_counts_subparser,
                          generate_diff_subparser,
                          generate_excise_subparser,
                          generate_highlight_subparser,
                          generate_intersect_subparser,
                          generate_lifetime_subparser,
                          generate_ngrams_subparser,
                          generate_prepare_subparser,
                          generate_results_subparser,
                          generate_supplied_diff_subparser,
                          generate_search_subparser,
                          generate_supplied_intersect_subparser,
                          generate_statistics_subparser,
                          generate_strip_subparser):
        add_subparser(subparsers)
    return parser
Returns a parser configured with sub - commands and arguments .
48,333
def generate_align_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to generate aligned
    sequences from a set of results."""
    parser = subparsers.add_parser(
        'align', description=constants.ALIGN_DESCRIPTION,
        epilog=constants.ALIGN_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.ALIGN_HELP)
    parser.set_defaults(func=align_results)
    utils.add_common_arguments(parser)
    parser.add_argument('-m', '--minimum', default=20, type=int,
                        help=constants.ALIGN_MINIMUM_SIZE_HELP)
    utils.add_corpus_arguments(parser)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.ALIGN_OUTPUT_HELP)
    parser.add_argument('results', metavar='RESULTS',
                        help=constants.RESULTS_RESULTS_HELP)
Adds a sub - command parser to subparsers to generate aligned sequences from a set of results .
48,334
def generate_catalogue(args, parser):
    """Generates and saves a catalogue file.

    :param args: parsed command-line arguments (corpus, label, catalogue)
    :param parser: the argument parser (unused; uniform command signature)
    """
    cat = tacl.Catalogue()
    cat.generate(args.corpus, args.label)
    cat.save(args.catalogue)
Generates and saves a catalogue file .
48,335
def generate_catalogue_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to generate and save a
    catalogue file."""
    parser = subparsers.add_parser(
        'catalogue', description=constants.CATALOGUE_DESCRIPTION,
        epilog=constants.CATALOGUE_EPILOG,
        formatter_class=ParagraphFormatter, help=constants.CATALOGUE_HELP)
    utils.add_common_arguments(parser)
    parser.set_defaults(func=generate_catalogue)
    parser.add_argument('corpus', metavar='CORPUS',
                        help=constants.DB_CORPUS_HELP)
    utils.add_query_arguments(parser)
    parser.add_argument('-l', '--label', default='',
                        help=constants.CATALOGUE_LABEL_HELP)
Adds a sub - command parser to subparsers to generate and save a catalogue file .
48,336
def generate_counts_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to make a counts
    query."""
    parser = subparsers.add_parser(
        'counts', description=constants.COUNTS_DESCRIPTION,
        epilog=constants.COUNTS_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.COUNTS_HELP)
    parser.set_defaults(func=ngram_counts)
    for add_arguments in (utils.add_common_arguments,
                          utils.add_db_arguments,
                          utils.add_corpus_arguments,
                          utils.add_query_arguments):
        add_arguments(parser)
Adds a sub - command parser to subparsers to make a counts query .
48,337
def generate_diff_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to make a diff query."""
    parser = subparsers.add_parser(
        'diff', description=constants.DIFF_DESCRIPTION,
        epilog=constants.DIFF_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.DIFF_HELP)
    parser.set_defaults(func=ngram_diff)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-a', '--asymmetric', metavar='LABEL',
                       help=constants.ASYMMETRIC_HELP)
    for add_arguments in (utils.add_common_arguments,
                          utils.add_db_arguments,
                          utils.add_corpus_arguments,
                          utils.add_query_arguments):
        add_arguments(parser)
Adds a sub - command parser to subparsers to make a diff query .
48,338
def generate_excise_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to excise n-grams from
    witnesses."""
    parser = subparsers.add_parser(
        'excise', description=constants.EXCISE_DESCRIPTION,
        help=constants.EXCISE_HELP)
    parser.set_defaults(func=excise)
    utils.add_common_arguments(parser)
    parser.add_argument('ngrams', metavar='NGRAMS',
                        help=constants.EXCISE_NGRAMS_HELP)
    parser.add_argument('replacement', metavar='REPLACEMENT',
                        help=constants.EXCISE_REPLACEMENT_HELP)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.EXCISE_OUTPUT_HELP)
    utils.add_corpus_arguments(parser)
    parser.add_argument('works', metavar='WORK', nargs='+',
                        help=constants.EXCISE_WORKS_HELP)
Adds a sub - command parser to subparsers to excise n - grams from witnesses .
48,339
def generate_highlight_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to highlight a witness
    text with its matches in a result."""
    parser = subparsers.add_parser(
        'highlight', description=constants.HIGHLIGHT_DESCRIPTION,
        epilog=constants.HIGHLIGHT_EPILOG,
        formatter_class=ParagraphFormatter, help=constants.HIGHLIGHT_HELP)
    parser.set_defaults(func=highlight_text)
    utils.add_common_arguments(parser)
    parser.add_argument('-m', '--minus-ngrams', metavar='NGRAMS',
                        help=constants.HIGHLIGHT_MINUS_NGRAMS_HELP)
    # Exactly one of --ngrams / --results must be supplied.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-n', '--ngrams', action='append', metavar='NGRAMS',
                       help=constants.HIGHLIGHT_NGRAMS_HELP)
    group.add_argument('-r', '--results', metavar='RESULTS',
                       help=constants.HIGHLIGHT_RESULTS_HELP)
    parser.add_argument('-l', '--label', action='append', metavar='LABEL',
                        help=constants.HIGHLIGHT_LABEL_HELP)
    utils.add_corpus_arguments(parser)
    parser.add_argument('base_name', metavar='BASE_NAME',
                        help=constants.HIGHLIGHT_BASE_NAME_HELP)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.REPORT_OUTPUT_HELP)
Adds a sub - command parser to subparsers to highlight a witness text with its matches in a result .
48,340
def generate_intersect_subparser(subparsers):
    """Adds a sub-command parser to `subparsers` to make an intersection
    query."""
    parser = subparsers.add_parser(
        'intersect', description=constants.INTERSECT_DESCRIPTION,
        epilog=constants.INTERSECT_EPILOG,
        formatter_class=ParagraphFormatter, help=constants.INTERSECT_HELP)
    parser.set_defaults(func=ngram_intersection)
    for add_arguments in (utils.add_common_arguments,
                          utils.add_db_arguments,
                          utils.add_corpus_arguments,
                          utils.add_query_arguments):
        add_arguments(parser)
Adds a sub - command parser to subparsers to make an intersection query .
48,341
def generate_lifetime_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to make a lifetime report."""
    parser = subparsers.add_parser(
        'lifetime', description=constants.LIFETIME_DESCRIPTION,
        epilog=constants.LIFETIME_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.LIFETIME_HELP)
    parser.set_defaults(func=lifetime_report)
    utils.add_tokenizer_argument(parser)
    utils.add_common_arguments(parser)
    utils.add_query_arguments(parser)
    parser.add_argument('results', metavar='RESULTS',
                        help=constants.LIFETIME_RESULTS_HELP)
    parser.add_argument('label', metavar='LABEL',
                        help=constants.LIFETIME_LABEL_HELP)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.REPORT_OUTPUT_HELP)
Adds a sub - command parser to subparsers to make a lifetime report .
48,342
def generate_ngrams(args, parser):
    """Add n-gram data to the data store."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    # A catalogue is optional; without one every witness is processed.
    catalogue = utils.get_catalogue(args) if args.catalogue else None
    store.add_ngrams(corpus, args.min_size, args.max_size, catalogue)
Adds n - grams data to the data store .
48,343
def generate_ngrams_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to add n-gram data to the
    data store."""
    parser = subparsers.add_parser(
        'ngrams', description=constants.NGRAMS_DESCRIPTION,
        epilog=constants.NGRAMS_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.NGRAMS_HELP)
    parser.set_defaults(func=generate_ngrams)
    utils.add_common_arguments(parser)
    parser.add_argument('-c', '--catalogue', dest='catalogue',
                        metavar='CATALOGUE',
                        help=constants.NGRAMS_CATALOGUE_HELP)
    utils.add_db_arguments(parser)
    utils.add_corpus_arguments(parser)
    parser.add_argument('min_size', metavar='MINIMUM', type=int,
                        help=constants.NGRAMS_MINIMUM_HELP)
    parser.add_argument('max_size', metavar='MAXIMUM', type=int,
                        help=constants.NGRAMS_MAXIMUM_HELP)
Adds a sub - command parser to subparsers to add n - grams data to the data store .
48,344
def generate_prepare_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to prepare source XML files
    for stripping."""
    parser = subparsers.add_parser(
        'prepare', description=constants.PREPARE_DESCRIPTION,
        epilog=constants.PREPARE_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.PREPARE_HELP)
    parser.set_defaults(func=prepare_xml)
    utils.add_common_arguments(parser)
    parser.add_argument('-s', '--source', dest='source',
                        choices=constants.TEI_SOURCE_CHOICES,
                        default=constants.TEI_SOURCE_CBETA_GITHUB,
                        metavar='SOURCE', help=constants.PREPARE_SOURCE_HELP)
    parser.add_argument('input', metavar='INPUT',
                        help=constants.PREPARE_INPUT_HELP)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.PREPARE_OUTPUT_HELP)
Adds a sub - command parser to subparsers to prepare source XML files for stripping .
48,345
def generate_search_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to generate search results
    for a set of n-grams."""
    parser = subparsers.add_parser(
        'search', description=constants.SEARCH_DESCRIPTION,
        epilog=constants.SEARCH_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.SEARCH_HELP)
    parser.set_defaults(func=search_texts)
    utils.add_common_arguments(parser)
    utils.add_db_arguments(parser)
    utils.add_corpus_arguments(parser)
    utils.add_query_arguments(parser)
    # Zero n-gram files means "search for every n-gram in the store".
    parser.add_argument('ngrams', metavar='NGRAMS', nargs='*',
                        help=constants.SEARCH_NGRAMS_HELP)
Adds a sub - command parser to subparsers to generate search results for a set of n - grams .
48,346
def generate_statistics_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to generate statistics from
    a set of results."""
    parser = subparsers.add_parser(
        'stats', description=constants.STATISTICS_DESCRIPTION,
        formatter_class=ParagraphFormatter, help=constants.STATISTICS_HELP)
    parser.set_defaults(func=generate_statistics)
    utils.add_common_arguments(parser)
    utils.add_corpus_arguments(parser)
    parser.add_argument('results', metavar='RESULTS',
                        help=constants.STATISTICS_RESULTS_HELP)
Adds a sub - command parser to subparsers to generate statistics from a set of results .
48,347
def generate_strip_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to process prepared files
    for use with the tacl ngrams command."""
    parser = subparsers.add_parser(
        'strip', description=constants.STRIP_DESCRIPTION,
        epilog=constants.STRIP_EPILOG, formatter_class=ParagraphFormatter,
        help=constants.STRIP_HELP)
    parser.set_defaults(func=strip_files)
    utils.add_common_arguments(parser)
    parser.add_argument('input', metavar='INPUT',
                        help=constants.STRIP_INPUT_HELP)
    parser.add_argument('output', metavar='OUTPUT',
                        help=constants.STRIP_OUTPUT_HELP)
Adds a sub - command parser to subparsers to process prepared files for use with the tacl ngrams command .
48,348
def generate_supplied_diff_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to run a diff query over
    supplied results sets."""
    parser = subparsers.add_parser(
        'sdiff', description=constants.SUPPLIED_DIFF_DESCRIPTION,
        epilog=constants.SUPPLIED_DIFF_EPILOG,
        formatter_class=ParagraphFormatter, help=constants.SUPPLIED_DIFF_HELP)
    parser.set_defaults(func=supplied_diff)
    utils.add_common_arguments(parser)
    utils.add_tokenizer_argument(parser)
    utils.add_db_arguments(parser, True)
    utils.add_supplied_query_arguments(parser)
Adds a sub - command parser to subparsers to run a diff query using the supplied results sets .
48,349
def generate_supplied_intersect_subparser(subparsers):
    """Add a sub-command parser to `subparsers` to run an intersect query
    over supplied results sets."""
    parser = subparsers.add_parser(
        'sintersect', description=constants.SUPPLIED_INTERSECT_DESCRIPTION,
        epilog=constants.SUPPLIED_INTERSECT_EPILOG,
        formatter_class=ParagraphFormatter,
        help=constants.SUPPLIED_INTERSECT_HELP)
    parser.set_defaults(func=supplied_intersect)
    utils.add_common_arguments(parser)
    utils.add_db_arguments(parser, True)
    utils.add_supplied_query_arguments(parser)
Adds a sub - command parser to subparsers to run an intersect query using the supplied results sets .
48,350
def highlight_text(args, parser):
    """Output the result of highlighting a text."""
    tokenizer = utils.get_tokenizer(args)
    corpus = utils.get_corpus(args)
    output_dir = os.path.abspath(args.output)
    # Refuse to clobber an existing output directory.
    if os.path.exists(output_dir):
        parser.exit(status=3, message='Output directory already exists, '
                    'aborting.\n')
    os.makedirs(output_dir, exist_ok=True)
    if args.ngrams:
        # Each file of n-grams must carry its own label.
        if args.label is None or len(args.label) != len(args.ngrams):
            parser.error('There must be as many labels as there are files '
                         'of n-grams')
        report = tacl.NgramHighlightReport(corpus, tokenizer)
        ngrams = [utils.get_ngrams(path) for path in args.ngrams]
        if args.minus_ngrams:
            minus_ngrams = utils.get_ngrams(args.minus_ngrams)
        else:
            minus_ngrams = []
        report.generate(args.output, args.base_name, ngrams, args.label,
                        minus_ngrams)
    else:
        report = tacl.ResultsHighlightReport(corpus, tokenizer)
        report.generate(args.output, args.base_name, args.results)
Outputs the result of highlighting a text .
48,351
def lifetime_report(args, parser):
    """Generate a lifetime report."""
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    results = tacl.Results(args.results, tokenizer)
    output_dir = os.path.abspath(args.output)
    os.makedirs(output_dir, exist_ok=True)
    tacl.LifetimeReport().generate(output_dir, catalogue, results,
                                   args.label)
Generates a lifetime report .
48,352
def ngram_counts(args, parser):
    """Output the results of performing a counts query."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    # Ensure the data store matches the corpus before querying it.
    store.validate(corpus, catalogue)
    store.counts(catalogue, sys.stdout)
Outputs the results of performing a counts query .
48,353
def ngram_diff(args, parser):
    """Output the results of performing a diff query."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    store.validate(corpus, catalogue)
    if not args.asymmetric:
        store.diff(catalogue, tokenizer, sys.stdout)
    else:
        store.diff_asymmetric(catalogue, args.asymmetric, tokenizer,
                              sys.stdout)
Outputs the results of performing a diff query .
48,354
def ngram_intersection(args, parser):
    """Output the results of performing an intersection query."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    store.validate(corpus, catalogue)
    store.intersection(catalogue, sys.stdout)
Outputs the results of performing an intersection query .
48,355
def prepare_xml(args, parser):
    """Prepare XML files for stripping.

    Only the CBETA GitHub TEI source is currently supported.
    """
    if args.source != constants.TEI_SOURCE_CBETA_GITHUB:
        raise Exception('Unsupported TEI source option provided')
    corpus = tacl.TEICorpusCBETAGitHub(args.input, args.output)
    corpus.tidy()
Prepares XML files for stripping .
48,356
def search_texts(args, parser):
    """Search texts for the presence of n-grams."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    store.validate(corpus, catalogue)
    # Flatten the n-grams from every supplied file into one list.
    ngrams = [ngram for path in args.ngrams
              for ngram in utils.get_ngrams(path)]
    store.search(catalogue, ngrams, sys.stdout)
Searches texts for presence of n - grams .
48,357
def strip_files(args, parser):
    """Process prepared XML files for use with the tacl ngrams command."""
    tacl.Stripper(args.input, args.output).strip_files()
Processes prepared XML files for use with the tacl ngrams command .
48,358
def http_meta_data(self):
    """Yield each flow from ``self.input_stream`` with HTTP metadata
    attached under ``flow['http']``.

    Client-to-server payloads are parsed as HTTP requests, all others as
    responses; unparseable payloads set ``flow['http']`` to None.  HTTP
    seen over a non-TCP transport is tagged as weird.
    """
    for flow in self.input_stream:
        http_info = None
        try:
            if flow['direction'] == 'CTS':
                req = dpkt.http.Request(flow['payload'])
                req_data = data_utils.make_dict(req)
                req_data['uri'] = self._clean_uri(req['uri'])
                http_info = {'type': 'HTTP_REQUEST', 'data': req_data}
            else:
                resp = dpkt.http.Response(flow['payload'])
                http_info = {'type': 'HTTP_RESPONSE',
                             'data': data_utils.make_dict(resp)}
        except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError):
            http_info = None
        flow['http'] = http_info
        if http_info and flow['protocol'] != 'TCP':
            http_info.update({'weird': 'UDP-HTTP'})
        yield flow
Pull out the application metadata for each flow in the input_stream
48,359
def transport_meta_data(self):
    """Yield each packet from ``self.input_stream`` with transport
    metadata attached under ``item['transport']`` (only when a transport
    type and payload are present)."""
    for item in self.input_stream:
        payload = item['packet']['data']
        protocol = self._get_transport_type(payload)
        if protocol and payload:
            transport = data_utils.make_dict(payload)
            transport['type'] = protocol
            transport['flags'] = self._readable_flags(transport)
            transport['data'] = payload['data']
            item['transport'] = transport
        yield item
Pull out the transport metadata for each packet in the input_stream
48,360
def _readable_flags(transport):
    """Turn the TCP flag bits in ``transport`` into a human-readable list.

    Returns None when no 'flags' key is present.  Only the highest
    priority flag (syn > fin > rst > psh) is reported, with syn/fin
    combined with ack when both are set.
    """
    if 'flags' not in transport:
        return None
    flags = transport['flags']
    readable = []
    if flags & dpkt.tcp.TH_SYN:
        readable.append('syn_ack' if flags & dpkt.tcp.TH_ACK else 'syn')
    elif flags & dpkt.tcp.TH_FIN:
        readable.append('fin_ack' if flags & dpkt.tcp.TH_ACK else 'fin')
    elif flags & dpkt.tcp.TH_RST:
        readable.append('rst')
    elif flags & dpkt.tcp.TH_PUSH:
        readable.append('psh')
    return readable
Method that turns bit flags into a human readable list
48,361
def _create_breakdown_chart(self, data, work, output_dir):
    """Write to ``output_dir`` the CSV data backing a stacked bar chart
    for ``work``."""
    columns = [SHARED, UNIQUE, COMMON]
    chart_data = data.loc[work].sort_values(by=SHARED,
                                            ascending=False)[columns]
    chart_data.to_csv(os.path.join(output_dir,
                                   'breakdown_{}.csv'.format(work)))
Generates and writes to a file in output_dir the data used to display a stacked bar chart .
48,362
def _create_chord_chart(self, data, works, output_dir):
    """Write to ``output_dir`` the JavaScript data backing a chord chart."""
    chord_data = data.unstack(BASE_WORK)[SHARED]
    # Percentages are scaled to the 0-1 range the chart expects.
    matrix = [[value / 100 for value in row]
              for _, row in chord_data.fillna(value=0).iterrows()]
    colours = generate_colours(len(works))
    colour_works = [{'work': work, 'colour': colour}
                    for work, colour in zip(chord_data, colours)]
    json_data = json.dumps({'works': colour_works, 'matrix': matrix})
    with open(os.path.join(output_dir, 'chord_data.js'), 'w') as fh:
        fh.write('var chordData = {}'.format(json_data))
Generates and writes to a file in output_dir the data used to display a chord chart .
48,363
def _create_matrix_chart(self, data, works, output_dir):
    """Write to ``output_dir`` the JavaScript data backing a matrix chart."""
    nodes = [{'work': work, 'group': 1} for work in works]
    weights = data.stack().unstack(RELATED_WORK).max()
    links = []
    seen = []
    for (source, target), weight in weights.iteritems():
        # NOTE(review): the source is recorded as seen while the target
        # is tested, which skips the mirror of an already-linked pair —
        # confirm this asymmetry is intentional.
        if target not in seen and target != source:
            seen.append(source)
            links.append({'source': works.index(source),
                          'target': works.index(target),
                          'value': weight})
    json_data = json.dumps({'nodes': nodes, 'links': links})
    with open(os.path.join(output_dir, 'matrix_data.js'), 'w') as fh:
        fh.write('var matrixData = {}'.format(json_data))
Generates and writes to a file in output_dir the data used to display a matrix chart .
48,364
def _create_related_chart(self, data, work, output_dir):
    """Write to ``output_dir`` the CSV data backing a grouped bar chart
    for ``work``."""
    chart_data = data[work].dropna().sort_values(
        by=SHARED_RELATED_WORK, ascending=False)
    chart_data.to_csv(os.path.join(output_dir,
                                   'related_{}.csv'.format(work)))
Generates and writes to a file in output_dir the data used to display a grouped bar chart .
48,365
def _drop_no_label_results(self, results, fh):
    """Write ``results`` to ``fh``, minus rows carrying the "no" label."""
    results.seek(0)
    filtered = Results(results, self._tokenizer)
    filtered.remove_label(self._no_label)
    filtered.csv(fh)
Writes results to fh minus those results associated with the no label .
48,366
def _generate_statistics(self, out_path, results_path):
    """Write a statistics report for the results at ``results_path`` to
    ``out_path``.

    Does nothing when ``out_path`` already exists.
    """
    if os.path.exists(out_path):
        return
    report = StatisticsReport(self._corpus, self._tokenizer, results_path)
    report.generate_statistics()
    with open(out_path, mode='w', encoding='utf-8', newline='') as fh:
        report.csv(fh)
Writes a statistics report for the results at results_path to out_path .
48,367
def _process_diff(self, yes_work, maybe_work, work_dir, ym_results_path,
                  yn_results_path, stats):
    """Return ``stats`` updated with the difference between the yes/maybe
    intersection and the yes/no intersection."""
    distinct_results_path = os.path.join(
        work_dir, 'distinct_{}.csv'.format(maybe_work))
    self._run_query(distinct_results_path, self._store.diff_supplied,
                    [[yn_results_path, ym_results_path],
                     [self._no_label, self._maybe_label],
                     self._tokenizer])
    return self._update_stats('diff', work_dir, distinct_results_path,
                              yes_work, maybe_work, stats, SHARED, COMMON)
Returns statistics on the difference between the intersection of yes_work and maybe_work and the intersection of yes_work and no works .
48,368
def _process_intersection(self, yes_work, maybe_work, work_dir,
                          ym_results_path, stats):
    """Return ``stats`` updated with the intersection between ``yes_work``
    and ``maybe_work``."""
    catalogue = {yes_work: self._no_label, maybe_work: self._maybe_label}
    self._run_query(ym_results_path, self._store.intersection,
                    [catalogue], False)
    return self._update_stats('intersect', work_dir, ym_results_path,
                              yes_work, maybe_work, stats, COMMON, UNIQUE)
Returns statistics on the intersection between yes_work and maybe_work .
48,369
def _process_maybe_work(self, yes_work, maybe_work, work_dir,
                        yn_results_path, stats):
    """Return ``stats`` extended with how ``yes_work`` compares with
    ``maybe_work`` (a no-op when they are the same work)."""
    if maybe_work == yes_work:
        return stats
    self._logger.info('Processing "maybe" work {} against "yes" work '
                      '{}.'.format(maybe_work, yes_work))
    # Start each witness at 100% unique; the queries below adjust these.
    for siglum in self._corpus.get_sigla(maybe_work):
        witness = (maybe_work, siglum)
        stats[COMMON][witness] = 0
        stats[SHARED][witness] = 0
        stats[UNIQUE][witness] = 100
    works = sorted([yes_work, maybe_work])
    ym_results_path = os.path.join(
        self._ym_intersects_dir, '{}_intersect_{}.csv'.format(*works))
    stats = self._process_intersection(yes_work, maybe_work, work_dir,
                                       ym_results_path, stats)
    return self._process_diff(yes_work, maybe_work, work_dir,
                              ym_results_path, yn_results_path, stats)
Returns statistics of how yes_work compares with maybe_work .
48,370
def _process_works(self, maybe_works, no_works, output_dir):
    """Collect and return a DataFrame of how each work in ``maybe_works``
    relates to each other work."""
    output_data_dir = os.path.join(output_dir, 'data')
    no_catalogue = {work: self._no_label for work in no_works}
    self._ym_intersects_dir = os.path.join(output_data_dir,
                                           'ym_intersects')
    os.makedirs(self._ym_intersects_dir, exist_ok=True)
    data = {}
    for yes_work in maybe_works:
        # Temporarily treat this work as the "yes" work.
        no_catalogue[yes_work] = self._maybe_label
        stats = self._process_yes_work(yes_work, no_catalogue,
                                       maybe_works, output_data_dir)
        no_catalogue.pop(yes_work)
        for scope in (SHARED, COMMON, UNIQUE):
            work_data = stats[scope]
            index = pd.MultiIndex.from_tuples(
                list(work_data.keys()), names=[RELATED_WORK, SIGLUM])
            data[(yes_work, scope)] = pd.Series(
                list(work_data.values()), index=index)
    df = pd.DataFrame(data)
    df.columns.names = [BASE_WORK, SCOPE]
    return df.stack(BASE_WORK).swaplevel(BASE_WORK, SIGLUM).swaplevel(
        RELATED_WORK, BASE_WORK)
Collect and return the data of how each work in maybe_works relates to each other work .
48,371
def _process_yes_work(self, yes_work, no_catalogue, maybe_works,
                      output_dir):
    """Return statistics of how ``yes_work`` compares with the other works
    in ``no_catalogue`` and the maybe works."""
    self._logger.info('Processing "maybe" work {} as "yes".'.format(
        yes_work))
    stats = {COMMON: {}, SHARED: {}, UNIQUE: {}}
    yes_work_dir = os.path.join(output_dir, yes_work)
    os.makedirs(yes_work_dir, exist_ok=True)
    results_path = os.path.join(yes_work_dir, 'intersect_with_no.csv')
    self._run_query(results_path, self._store.intersection,
                    [no_catalogue])
    for maybe_work in maybe_works:
        stats = self._process_maybe_work(yes_work, maybe_work,
                                         yes_work_dir, results_path,
                                         stats)
    return stats
Returns statistics of how yes_work compares with the other works in no_catalogue and the maybe works .
48,372
def _run_query(self, path, query, query_args, drop_no=True):
    """Run ``query`` with ``query_args`` and write its results to ``path``.

    Does nothing when ``path`` already exists.  When ``drop_no`` is True,
    results carrying the "no" label are removed before writing.
    """
    if os.path.exists(path):
        return
    buffered = io.StringIO(newline='')
    query(*query_args, output_fh=buffered)
    with open(path, mode='w', encoding='utf-8', newline='') as fh:
        if drop_no:
            self._drop_no_label_results(buffered, fh)
        else:
            fh.write(buffered.getvalue())
Runs query and outputs results to a file at path .
48,373
def _add_indices(self):
    """Add the database indices relating to n-grams."""
    self._logger.info('Adding database indices')
    self._conn.execute(constants.CREATE_INDEX_TEXTNGRAM_SQL)
    self._logger.info('Indices added')
Adds the database indices relating to n - grams .
48,374
def add_ngrams(self, corpus, minimum, maximum, catalogue=None):
    """Add n-gram data from ``corpus`` to the data store.

    When ``catalogue`` is supplied, only the witnesses of its works are
    processed; otherwise every witness in the corpus is.
    """
    self._initialise_database()
    if catalogue:
        witnesses = (witness for work in catalogue
                     for witness in corpus.get_witnesses(work))
    else:
        witnesses = corpus.get_witnesses()
    for witness in witnesses:
        self._add_text_ngrams(witness, minimum, maximum)
    self._add_indices()
    self._analyse()
Adds n - gram data from corpus to the data store .
48,375
def _add_temporary_ngrams(self, ngrams):
    """Add ``ngrams`` to a temporary table.

    Empty and non-string entries are dropped and duplicates removed,
    preserving the order of first occurrence.
    """
    cleaned = [ngram for ngram in ngrams
               if ngram and isinstance(ngram, str)]
    # dict preserves insertion order, giving an order-stable dedup
    # without the side-effecting setdefault comprehension.
    unique_ngrams = list(dict.fromkeys(cleaned))
    self._conn.execute(constants.DROP_TEMPORARY_NGRAMS_TABLE_SQL)
    self._conn.execute(constants.CREATE_TEMPORARY_NGRAMS_TABLE_SQL)
    self._conn.executemany(constants.INSERT_TEMPORARY_NGRAM_SQL,
                           [(ngram,) for ngram in unique_ngrams])
Adds ngrams to a temporary table .
48,376
def _add_temporary_results(self, results, label):
    """Add ``results`` (a CSV file object) to a temporary table, with
    every row labelled ``label``."""
    NGRAM, SIZE, NAME, SIGLUM, COUNT, LABEL = constants.QUERY_FIELDNAMES
    rows = ((row[NGRAM], row[SIZE], row[NAME], row[SIGLUM], row[COUNT],
             label) for row in csv.DictReader(results))
    self._conn.executemany(constants.INSERT_TEMPORARY_RESULTS_SQL, rows)
Adds results to a temporary table with label .
48,377
def _add_text_ngrams(self, witness, minimum, maximum):
    """Add n-gram data from ``witness`` to the data store, skipping sizes
    that are already recorded."""
    text_id = self._get_text_id(witness)
    self._logger.info('Adding n-grams ({} <= n <= {}) for {}'.format(
        minimum, maximum, witness.get_filename()))
    skip_sizes = []
    for size in range(minimum, maximum + 1):
        if self._has_ngrams(text_id, size):
            skip_sizes.append(size)
            self._logger.info(
                '{}-grams are already in the database'.format(size))
    for size, ngrams in witness.get_ngrams(minimum, maximum, skip_sizes):
        self._add_text_size_ngrams(text_id, size, ngrams)
Adds n - gram data from witness to the data store .
48,378
def _add_text_record(self, witness):
    """Add a Text record for ``witness`` and return its database id."""
    filename = witness.get_filename()
    name, siglum = witness.get_names()
    self._logger.info('Adding record for text {}'.format(filename))
    checksum = witness.get_checksum()
    token_count = len(witness.get_tokens())
    # The connection context manager commits the insert on exit.
    with self._conn:
        cursor = self._conn.execute(
            constants.INSERT_TEXT_SQL,
            [name, siglum, checksum, token_count, ''])
    return cursor.lastrowid
Adds a Text record for witness .
48,379
def _add_text_size_ngrams(self, text_id, size, ngrams):
    """Add ``ngrams`` (a mapping of n-gram to count) of size ``size`` for
    text ``text_id`` to the data store."""
    unique_ngrams = len(ngrams)
    self._logger.info('Adding {} unique {}-grams'.format(
        unique_ngrams, size))
    parameters = [[text_id, ngram, size, count]
                  for ngram, count in ngrams.items()]
    # Both inserts share one transaction so a failure leaves no partial
    # size record behind.
    with self._conn:
        self._conn.execute(constants.INSERT_TEXT_HAS_NGRAM_SQL,
                           [text_id, size, unique_ngrams])
        self._conn.executemany(constants.INSERT_NGRAM_SQL, parameters)
Adds ngrams that are of size size to the data store .
48,380
def _analyse(self, table=''):
    """Analyse the database, or only ``table`` if it is supplied."""
    self._logger.info('Starting analysis of database')
    self._conn.execute(constants.ANALYSE_SQL.format(table))
    self._logger.info('Analysis of database complete')
Analyses the database or table if it is supplied .
48,381
def _check_diff_result(row, matches, tokenize, join):
    """Return ``row``, zeroing its count depending on the status in
    ``matches`` of the two (n-1)-grams that compose it.

    The count is zeroed when either sub-n-gram has a recorded count of 0,
    or when exactly one of the two is absent from ``matches``.
    """
    tokens = tokenize(row[constants.NGRAM_FIELDNAME])
    status1 = matches.get(join(tokens[:-1]))
    status2 = matches.get(join(tokens[1:]))
    if (status1 == 0 or status2 == 0
            or ((status1 is None) ^ (status2 is None))):
        row[constants.COUNT_FIELDNAME] = 0
    return row
Returns row, possibly with its count changed to 0, depending on the status of the n-grams that compose it.
48,382
def counts(self, catalogue, output_fh):
    """Populate ``output_fh`` with CSV results giving n-gram counts of
    the witnesses of the works in ``catalogue`` and return it."""
    labels = list(self._set_labels(catalogue))
    query = constants.SELECT_COUNTS_SQL.format(
        self._get_placeholders(labels))
    self._logger.info('Running counts query')
    self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
    cursor = self._conn.execute(query, labels)
    return self._csv(cursor, constants.COUNTS_FIELDNAMES, output_fh)
Returns output_fh populated with CSV results giving n - gram counts of the witnesses of the works in catalogue .
48,383
def _csv(self, cursor, fieldnames, output_fh):
    """Write the rows of ``cursor`` in CSV format to ``output_fh`` and
    return it."""
    self._logger.info('Finished query; outputting results in CSV format')
    # NOTE(review): the special case presumably avoids doubled line
    # endings when stdout on Windows translates \n — confirm.
    if sys.platform in ('win32', 'cygwin') and output_fh is sys.stdout:
        writer = csv.writer(output_fh, lineterminator='\n')
    else:
        writer = csv.writer(output_fh)
    writer.writerow(fieldnames)
    writer.writerows(cursor)
    self._logger.info('Finished outputting results')
    return output_fh
Writes the rows of cursor in CSV format to output_fh and returns it .
48,384
def _csv_temp(self, cursor, fieldnames):
    """Write the rows of ``cursor`` in CSV format to a temporary file and
    return that file's path.  The caller is responsible for removing the
    file."""
    fd, path = tempfile.mkstemp(text=True)
    with open(fd, 'w', encoding='utf-8', newline='') as fh:
        self._csv(cursor, fieldnames, fh)
    return path
Writes the rows of cursor in CSV format to a temporary file and returns the path to that file .
48,385
def _delete_text_ngrams(self, text_id):
    """Delete all n-grams associated with ``text_id`` from the data
    store, in a single transaction."""
    with self._conn:
        self._conn.execute(constants.DELETE_TEXT_NGRAMS_SQL, [text_id])
        self._conn.execute(constants.DELETE_TEXT_HAS_NGRAMS_SQL,
                           [text_id])
Deletes all n - grams associated with text_id from the data store .
48,386
def _diff(self, cursor, tokenizer, output_fh):
    """Return ``output_fh`` populated with reduced diff results from
    ``cursor``.

    The raw results are spooled to a temporary file so the reduction can
    stream over them; the file is removed afterwards even if the
    reduction fails (the original leaked it on error).
    """
    temp_path = self._csv_temp(cursor, constants.QUERY_FIELDNAMES)
    try:
        return self._reduce_diff_results(temp_path, tokenizer, output_fh)
    finally:
        try:
            os.remove(temp_path)
        except OSError as e:
            self._logger.error('Failed to remove temporary file '
                               'containing unreduced results: '
                               '{}'.format(e))
Returns output_fh with diff results that have been reduced .
48,387
def diff(self, catalogue, tokenizer, output_fh):
    """Populate ``output_fh`` with CSV results giving the n-grams unique
    to the witnesses of each labelled set of works in ``catalogue``.

    Raises MalformedQueryError when fewer than two labels are defined.
    """
    labels = self._sort_labels(self._set_labels(catalogue))
    if len(labels) < 2:
        raise MalformedQueryError(
            constants.INSUFFICIENT_LABELS_QUERY_ERROR)
    placeholders = self._get_placeholders(labels)
    query = constants.SELECT_DIFF_SQL.format(placeholders, placeholders)
    parameters = labels * 2
    self._logger.info('Running diff query')
    self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
    self._log_query_plan(query, parameters)
    cursor = self._conn.execute(query, parameters)
    return self._diff(cursor, tokenizer, output_fh)
Returns output_fh populated with CSV results giving the n - grams that are unique to the witnesses of each labelled set of works in catalogue .
48,388
def diff_asymmetric(self, catalogue, prime_label, tokenizer, output_fh):
    """Populate ``output_fh`` with CSV results giving the n-grams unique
    to works labelled ``prime_label`` against the other labelled sets of
    works in ``catalogue``.

    Raises MalformedQueryError when fewer than two labels are defined or
    ``prime_label`` is not among them.
    """
    labels = list(self._set_labels(catalogue))
    if len(labels) < 2:
        raise MalformedQueryError(
            constants.INSUFFICIENT_LABELS_QUERY_ERROR)
    try:
        labels.remove(prime_label)
    except ValueError:
        raise MalformedQueryError(constants.LABEL_NOT_IN_CATALOGUE_ERROR)
    query = constants.SELECT_DIFF_ASYMMETRIC_SQL.format(
        self._get_placeholders(labels))
    parameters = [prime_label, prime_label] + labels
    self._logger.info('Running asymmetric diff query')
    self._logger.debug('Query: {}\nLabels: {}\nPrime label: {}'.format(
        query, labels, prime_label))
    self._log_query_plan(query, parameters)
    cursor = self._conn.execute(query, parameters)
    return self._diff(cursor, tokenizer, output_fh)
Returns output_fh populated with CSV results giving the difference in n - grams between the witnesses of labelled sets of works in catalogue limited to those works labelled with prime_label .
48,389
def diff_supplied(self, results_filenames, labels, tokenizer, output_fh):
    """Populate ``output_fh`` with CSV results giving the n-grams unique
    to the witnesses in each supplied results set, labelled per
    ``labels``."""
    self._add_temporary_results_sets(results_filenames, labels)
    query = constants.SELECT_DIFF_SUPPLIED_SQL
    self._logger.info('Running supplied diff query')
    self._logger.debug('Query: {}'.format(query))
    self._log_query_plan(query, [])
    cursor = self._conn.execute(query)
    return self._diff(cursor, tokenizer, output_fh)
Returns output_fh populated with CSV results giving the n - grams that are unique to the witnesses in each set of works in results_sets using the labels in labels .
48,390
def _drop_indices(self):
    """Drop the database indices relating to n-grams."""
    self._logger.info('Dropping database indices')
    self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
    self._logger.info('Finished dropping database indices')
Drops the database indices relating to n - grams .
48,391
def _get_text_id(self, witness):
    """Return the database ID of the Text record for ``witness``,
    creating the record if it is missing.

    A changed checksum means the witness has been modified since it was
    added, so its record is refreshed and its stored n-grams deleted as
    potentially stale.
    """
    name, siglum = witness.get_names()
    record = self._conn.execute(constants.SELECT_TEXT_SQL,
                                [name, siglum]).fetchone()
    if record is None:
        return self._add_text_record(witness)
    text_id = record['id']
    if record['checksum'] != witness.get_checksum():
        filename = witness.get_filename()
        self._logger.info('Text {} has changed since it was added to '
                          'the database'.format(filename))
        self._update_text_record(witness, text_id)
        self._logger.info('Deleting potentially out-of-date n-grams')
        self._delete_text_ngrams(text_id)
    return text_id
Returns the database ID of the Text record for witness .
48,392
def _has_ngrams(self, text_id, size):
    """Return True if text ``text_id`` has existing records for n-grams
    of size ``size``."""
    row = self._conn.execute(constants.SELECT_HAS_NGRAMS_SQL,
                             [text_id, size]).fetchone()
    # Direct boolean return instead of the if/return-True/False chain.
    return row is not None
Returns True if a text has existing records for n - grams of size size .
48,393
def intersection(self, catalogue, output_fh):
    """Populate ``output_fh`` with CSV results giving the n-grams common
    to the witnesses of every labelled set of works in ``catalogue``.

    Raises MalformedQueryError when fewer than two labels are defined.
    """
    labels = self._sort_labels(self._set_labels(catalogue))
    if len(labels) < 2:
        raise MalformedQueryError(
            constants.INSUFFICIENT_LABELS_QUERY_ERROR)
    query = constants.SELECT_INTERSECT_SQL.format(
        self._get_placeholders(labels),
        self._get_intersection_subquery(labels))
    parameters = labels * 2
    self._logger.info('Running intersection query')
    self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
    self._log_query_plan(query, parameters)
    cursor = self._conn.execute(query, parameters)
    return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
Returns output_fh populated with CSV results giving the intersection in n - grams of the witnesses of labelled sets of works in catalogue .
48,394
def intersection_supplied(self, results_filenames, labels, output_fh):
    """Populate ``output_fh`` with CSV results giving the n-grams common
    to witnesses in every supplied results set, labelled per ``labels``."""
    self._add_temporary_results_sets(results_filenames, labels)
    query = constants.SELECT_INTERSECT_SUPPLIED_SQL
    # The query needs to know how many labelled sets must all match.
    parameters = [len(labels)]
    self._logger.info('Running supplied intersect query')
    self._logger.debug('Query: {}\nNumber of labels: {}'.format(
        query, parameters[0]))
    self._log_query_plan(query, parameters)
    cursor = self._conn.execute(query, parameters)
    return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
Returns output_fh populated with CSV results giving the n - grams that are common to witnesses in every set of works in results_sets using the labels in labels .
48,395
def _reduce_diff_results(self, matches_path, tokenizer, output_fh):
    """Populate `output_fh` with a reduced set of the results read from
    `matches_path`, with filler results removed, and return it.

    Matches are grouped per witness and n-gram size; each group is
    checked against the n-gram counts built from the previous group
    for the same witness (via `_check_diff_result`), and rows whose
    count becomes zero are dropped.
    """
    self._logger.info('Removing filler results')
    tokenize = tokenizer.tokenize
    join = tokenizer.joiner.join
    kept = []
    prev_witness = (None, None)
    prev_data = {}
    # itertuples() prepends the index, hence the +1 offsets.
    ngram_idx = constants.QUERY_FIELDNAMES.index(
        constants.NGRAM_FIELDNAME) + 1
    count_idx = constants.QUERY_FIELDNAMES.index(
        constants.COUNT_FIELDNAME) + 1
    matches = pd.read_csv(matches_path, encoding='utf-8', na_filter=False)
    grouped = matches.groupby([constants.WORK_FIELDNAME,
                               constants.SIGLUM_FIELDNAME,
                               constants.SIZE_FIELDNAME])
    for (work, siglum, size), group in grouped:
        if (work, siglum) != prev_witness:
            # First size group for this witness; nothing to reduce
            # against yet.
            prev_matches = group
            prev_witness = (work, siglum)
        else:
            self._logger.debug('Reducing down {} {}-grams for {} {}'.format(
                len(group.index), size, work, siglum))
            if prev_matches.empty:
                reduced_count = 0
            else:
                prev_matches = group.apply(
                    self._check_diff_result, axis=1,
                    args=(prev_data, tokenize, join))
                reduced_count = len(prev_matches[
                    prev_matches[constants.COUNT_FIELDNAME] != 0].index)
            self._logger.debug('Reduced down to {} grams'.format(
                reduced_count))
        # Rebuild the n-gram -> count mapping for the next iteration.
        prev_data = {row[ngram_idx]: row[count_idx]
                     for row in prev_matches.itertuples()}
        if not prev_matches.empty:
            kept.append(prev_matches[
                prev_matches[constants.COUNT_FIELDNAME] != 0])
    reduced = pd.concat(kept, ignore_index=True).reindex(
        columns=constants.QUERY_FIELDNAMES)
    reduced.to_csv(output_fh, encoding='utf-8', float_format='%d',
                   index=False)
    return output_fh
Returns output_fh populated with a reduced set of data from matches_path .
48,396
def search(self, catalogue, ngrams, output_fh):
    """Populate `output_fh` with CSV results for each n-gram in
    `ngrams` that occurs within labelled witnesses in `catalogue`,
    and return it.

    If `ngrams` is empty or None, all n-grams of the labelled
    witnesses are returned.
    """
    labels = list(self._set_labels(catalogue))
    label_placeholders = self._get_placeholders(labels)
    if ngrams:
        self._add_temporary_ngrams(ngrams)
        query = constants.SELECT_SEARCH_SQL.format(label_placeholders)
    else:
        query = constants.SELECT_SEARCH_ALL_SQL.format(label_placeholders)
    self._logger.info('Running search query')
    # Guard against `ngrams` being None: the join is evaluated
    # unconditionally when building the debug message and
    # ', '.join(None) would raise a TypeError.
    self._logger.debug('Query: {}\nN-grams: {}'.format(
        query, ', '.join(ngrams or [])))
    self._log_query_plan(query, labels)
    cursor = self._conn.execute(query, labels)
    return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
Returns output_fh populated with CSV results for each n - gram in ngrams that occurs within labelled witnesses in catalogue .
48,397
def _set_labels(self, catalogue):
    """Return a dictionary mapping each unique label in `catalogue` to
    the total count of tokens in the works bearing that label, and set
    each Text record's label accordingly.
    """
    labels = {}
    with self._conn:
        # Clear all existing labels before applying the new ones.
        self._conn.execute(constants.UPDATE_LABELS_SQL, [''])
        for work, label in catalogue.items():
            self._conn.execute(constants.UPDATE_LABEL_SQL, [label, work])
            row = self._conn.execute(
                constants.SELECT_TEXT_TOKEN_COUNT_SQL, [work]).fetchone()
            labels[label] = labels.get(label, 0) + row['token_count']
    return labels
Returns a dictionary of the unique labels in catalogue and the count of all tokens associated with each and sets the record of each Text to its corresponding label .
48,398
def _update_text_record(self, witness, text_id):
    """Update the Text record identified by `text_id` with the checksum
    and token count of `witness`.
    """
    checksum = witness.get_checksum()
    count = len(witness.get_tokens())
    with self._conn:
        self._conn.execute(constants.UPDATE_TEXT_SQL,
                           [checksum, count, text_id])
Updates the record with text_id with witness's checksum and token count .
48,399
def validate(self, corpus, catalogue):
    """Return True if all of the witnesses of the works labelled in
    `catalogue` have up-to-date records in the database.

    Raises FileNotFoundError if a catalogued work has no witnesses in
    `corpus`.
    """
    is_valid = True
    for work in catalogue:
        witness_count = 0
        for witness in corpus.get_witnesses(work):
            witness_count += 1
            # Renamed from `name` to avoid shadowing the outer loop
            # variable used in the error message below.
            record_name, siglum = witness.get_names()
            filename = witness.get_filename()
            row = self._conn.execute(
                constants.SELECT_TEXT_SQL, [record_name, siglum]).fetchone()
            if row is None:
                is_valid = False
                self._logger.warning('No record (or n-grams) exists for {} in '
                                     'the database'.format(filename))
            elif row['checksum'] != witness.get_checksum():
                is_valid = False
                self._logger.warning('{} has changed since its n-grams were '
                                     'added to the database'.format(filename))
        if witness_count == 0:
            raise FileNotFoundError(
                constants.CATALOGUE_WORK_NOT_IN_CORPUS_ERROR.format(work))
    return is_valid
Returns True if all of the files labelled in catalogue are up - to - date in the database .