code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
if isinstance(value, six.string_types): return Rules(aes) else: rules = Rules() for idx, (pattern, css_value) in enumerate(value): rules.add_rule(pattern, '{0}_{1}'.format(aes, idx)) return rules
def create_rules(aes, value)
Create a Rules instance for a single aesthetic value. Parameters ---------- aes: str The name of the aesthetic. value: str or list The value associated with the aesthetic.
4.452141
4.61798
0.964088
#print('adding rule <{0}> <{1}>'.format(pattern, css_class)) self.__patterns.append(re.compile(pattern, flags=re.U | re.M)) self.__css_classes.append(css_class)
def add_rule(self, pattern, css_class)
Add a new rule. Parameters ---------- pattern: str Pattern that is compiled to a regular expression. css_class: str The class that will correspond to the given pattern.
3.078273
3.17448
0.969694
#print ('get_css_class for {0}'.format(value)) for idx, pattern in enumerate(self.__patterns): if pattern.match(value) is not None: #print ('matched rule {0} and returning {1}'.format(idx, self.__css_classes[idx])) return self.__css_classes[idx] return self.__default
def get_css_class(self, value)
Return the css class of the first pattern that matches the given value. If no rule matches, the default css class is returned (see the constructor).
3.310467
2.74565
1.205713
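A minimal usage sketch for the three Rules helpers above; the aesthetic name, the patterns, and the behaviour of the Rules constructor are assumptions based on the code shown, not a documented API.
rules = create_rules('color', [('^N', 'red'), ('^V', 'blue')])  # hypothetical aesthetic and patterns
print(rules.get_css_class('N1'))  # 'color_0' -- the first pattern '^N' matches
print(rules.get_css_class('V'))   # 'color_1'
print(rules.get_css_class('X'))   # no pattern matches, falls back to the default css class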
es = elasticsearch.Elasticsearch(**kwargs) es.indices.create(index=index_name, body=mapping) return connect(index_name, **kwargs)
def create_index(index_name, **kwargs)
Parameters ---------- index_name : str Name of the index to be created **kwargs Arguments to pass to Elasticsearch instance. Returns ------- Index
4.172684
4.656913
0.896019
def unroll_lists(list_of_lists): for i in itertools.product(*[set(j) for j in list_of_lists]): yield ' '.join(i) sents = document.split_by_sentences() for order, sent in enumerate(sents): postags = list(unroll_lists(sent.postag_lists)) lemmas = list(unroll_lists(sent.lemma_lists)) text = sent.text words = copy.deepcopy(sent.words) for i in words: del i['start'] del i['end'] sentence = { 'estnltk_text_object': json.dumps(sent), 'meta': { 'order_in_parent': order }, 'text': text, 'words': words, 'postags': postags, 'lemmas': lemmas } yield json.dumps(sentence)
def _get_indexable_sentences(document)
Parameters ---------- document : Text Article, book, paragraph, chapter, etc. Anything that is considered a document on its own. Yields ------ str json representation of elasticsearch type sentence
3.983593
3.740589
1.064964
for template in templates: name = '|'.join(['%s[%d]' % (f, o) for f, o in template]) for t in range(len(toks)): values_list = [] for field, offset in template: p = t + offset if p < 0 or p >= len(toks): values_list = [] break if field in toks[p]: value = toks[p][field] values_list.append(value if isinstance(value, (set, list)) else [value]) if len(template) == len(values_list): for values in product(*values_list): toks[t]['F'].append('%s=%s' % (name, '|'.join(values)))
def apply_templates(toks, templates)
Generate features for an item sequence by applying feature templates. A feature template consists of a tuple of (name, offset) pairs, where name and offset specify a field name and offset from which the template extracts a feature value. Generated features are stored in the 'F' field of each item in the sequence. Parameters ---------- toks: list of tokens A list of processed tokens. templates: list of template tuples (str, int) A feature template consists of a tuple of (name, offset) pairs, where name and offset specify a field name and offset from which the template extracts a feature value.
3.067123
2.836363
1.081358
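A small self-contained sketch of how the templates expand into features; the field name 'w' and the tokens are made up for illustration.
toks = [{'w': 'Tere', 'F': []}, {'w': 'Eesti', 'F': []}]
templates = [(('w', 0),), (('w', -1), ('w', 0))]
apply_templates(toks, templates)
print(toks[1]['F'])  # ['w[0]=Eesti', 'w[-1]|w[0]=Tere|Eesti']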
sentences = [] for json_sent in jsondoc.split_by_sentences(): snt = Sentence() zipped = list(zip( json_sent.word_texts, json_sent.lemmas, json_sent.root_tokens, json_sent.forms, json_sent.endings, json_sent.postags)) json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens, FORM: form, ENDING: ending, POSTAG: postag} for text, lemma, root_tokens, form, ending, postag in zipped] # add labels, if they are present for tok, word in zip(json_toks, json_sent.words): if LABEL in word: tok[LABEL] = word[LABEL] for json_tok in json_toks: token = json_token_to_estner_token(json_tok) snt.append(token) if snt: for i in range(1, len(snt)): snt[i - 1].next = snt[i] snt[i].prew = snt[i - 1] sentences.append(snt) return Document(sentences=sentences)
def json_document_to_estner_document(jsondoc)
Convert an estnltk document to an estner document. Parameters ---------- jsondoc: dict Estnltk JSON-style document. Returns ------- estnltk.estner.ner.Document A ner document.
2.838017
2.994071
0.947879
word = json_token[TEXT] lemma = word morph = '' label = 'O' ending = json_token[ENDING] root_toks = json_token[ROOT_TOKENS] if isinstance(root_toks[0], list): root_toks = root_toks[0] lemma = '_'.join(root_toks) + ('+' + ending if ending else '') if not lemma: lemma = word morph = '_%s_' % json_token[POSTAG] morph += ' ' + json_token[FORM] if LABEL in json_token: label = json_token[LABEL] return Token(word, lemma, morph, label)
def json_token_to_estner_token(json_token)
Convert a JSON-style word token to an estner token. Parameters ---------- json_token: dict JSON-style (vabamorf) word token representing a single word. Returns ------- estnltk.estner.ner.Token
3.916793
4.331248
0.90431
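An illustrative call, assuming the key constants (TEXT, ENDING, ROOT_TOKENS, FORM, POSTAG) and the Token class are importable alongside the function; the concrete word is made up.
json_tok = {TEXT: 'kassile', ENDING: 'le', ROOT_TOKENS: ['kass'], FORM: 'sg all', POSTAG: 'S'}
tok = json_token_to_estner_token(json_tok)
# builds Token('kassile', 'kass+le', '_S_ sg all', 'O')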
try: os.makedirs(self.model_dir) except OSError as exception: if exception.errno != errno.EEXIST: raise
def makedir(self)
Create model_dir directory
2.389967
1.799776
1.327925
source = inspect.getsourcefile(settings_module) dest = os.path.join(self.model_dir, 'settings.py') shutil.copyfile(source, dest)
def copy_settings(self, settings_module)
Copy settings module to the model_dir directory
2.844137
2.145866
1.325403
mname = 'loaded_module' if six.PY2: import imp return imp.load_source(mname, self.settings_filename) else: import importlib.machinery loader = importlib.machinery.SourceFileLoader(mname, self.settings_filename) return loader.load_module(mname)
def load_settings(self)
Load settings module from the model_dir directory.
3.056885
2.900355
1.053969
modelUtil = ModelStorageUtil(model_dir) modelUtil.makedir() modelUtil.copy_settings(self.settings) # Convert json documents to ner documents nerdocs = [json_document_to_estner_document(jsondoc) for jsondoc in jsondocs] self.fex.prepare(nerdocs) self.fex.process(nerdocs) self.trainer.train(nerdocs, modelUtil.model_filename)
def train(self, jsondocs, model_dir)
Train a NER model using the given documents. Each word in the documents must have a "label" attribute, which denotes the named entities in the documents. Parameters ---------- jsondocs: list of JSON-style documents The documents used for training the CRF model. model_dir: str A directory where the model will be saved.
6.136604
6.269899
0.978741
''' Checks whether the given vislcg_cmd1 is in the system's PATH. Returns True if there is a file named vislcg_cmd1 in the path, otherwise returns False; The idea is borrowed from: http://stackoverflow.com/a/377028 ''' for path in os.environ["PATH"].split( os.pathsep ): path1 = path.strip('"') file1 = os.path.join(path1, vislcg_cmd1) if os.path.isfile(file1) or os.path.isfile(file1+'.exe'): return True return False
def check_if_vislcg_is_in_path( self, vislcg_cmd1 )
Checks whether the given vislcg_cmd1 is in the system's PATH. Returns True if there is a file named vislcg_cmd1 in the path, otherwise returns False; The idea is borrowed from: http://stackoverflow.com/a/377028
3.831624
1.672263
2.29128
''' Creates a copy from given analysis dict. ''' assert isinstance(analysis, dict), "(!) Input 'analysis' should be a dict!" new_dict = { POSTAG: analysis[POSTAG],\ ROOT: analysis[ROOT],\ FORM: analysis[FORM],\ CLITIC: analysis[CLITIC],\ ENDING: analysis[ENDING] } if LEMMA in analysis: new_dict[LEMMA] = analysis[LEMMA] if ROOT_TOKENS in analysis: new_dict[ROOT_TOKENS] = analysis[ROOT_TOKENS] return new_dict
def copy_analysis_dict( analysis )
Creates a copy from given analysis dict.
3.875493
3.637058
1.065557
''' Returns a list of clause indices for the whole text. For each token in text, the list contains index of the clause the word belongs to, and the indices are unique over the whole text. ''' # Add clause boundary annotation (if missing) if not text.is_tagged( CLAUSES ): text.tag_clauses() # Collect (unique) clause indices over the whole text clause_indices = [] sent_id = 0 for sub_text in text.split_by( SENTENCES ): for word, cl_index in zip( sub_text.words, sub_text.clause_indices ): clause_indices.append( sent_id+cl_index ) nr_of_clauses = len(set(sub_text.clause_indices)) sent_id += nr_of_clauses assert len(clause_indices) == len(text.words), '(!) Number of clause indices should match nr of words!' return clause_indices
def get_unique_clause_indices( text )
Returns a list of clause indices for the whole text. For each token in text, the list contains index of the clause the word belongs to, and the indices are unique over the whole text.
5.412458
3.626537
1.492459
''' Returns a list of sentence indices for the whole text. For each token in text, the list contains index of the sentence the word belongs to, and the indices are unique over the whole text. ''' # Add sentence annotation (if missing) if not text.is_tagged( SENTENCES ): text.tokenize_sentences() # Collect (unique) sent indices over the whole text sent_indices = [] sent_id = 0 for sub_text in text.split_by( SENTENCES ): for word in sub_text.words: sent_indices.append( sent_id ) sent_id += 1 assert len(sent_indices) == len(text.words), '(!) Number of sent indices should match nr of words!' return sent_indices
def get_unique_sentence_indices( text )
Returns a list of sentence indices for the whole text. For each token in text, the list contains index of the sentence the word belongs to, and the indices are unique over the whole text.
5.623417
3.532459
1.591927
''' Converts nominal categories of the input analysis. Performs one-to-one conversions only. ''' assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.' for idx, pattern_items in enumerate(_noun_conversion_rules): pattern_str, replacement = pattern_items if pattern_str in analysis[FORM]: analysis[FORM] = analysis[FORM].replace( pattern_str, replacement ) return analysis
def _convert_nominal_form( analysis )
Converts nominal categories of the input analysis. Performs one-to-one conversions only.
8.529145
5.709472
1.493859
''' Converts ambiguous verbal categories of the input analysis. Performs one-to-many conversions. ''' assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.' results = [] for root_pat, pos, form_pat, replacements in _amb_verb_conversion_rules: if analysis[POSTAG] == pos and re.match(root_pat, analysis[ROOT]) and \ re.match(form_pat, analysis[FORM]): for replacement in replacements: new_analysis = copy_analysis_dict( analysis ) new_form = re.sub(form_pat, replacement, analysis[FORM]) new_analysis[FORM] = new_form results.append( new_analysis ) # break after the replacement has been made # ( to avoid over-generation ) break if not results: results.append( analysis ) return results
def _convert_amb_verbal_form( analysis )
Converts ambiguous verbal categories of the input analysis. Performs one-to-many conversions.
5.623919
4.650939
1.209201
''' Converts ordinary verbal categories of the input analysis. Performs one-to-one conversions. ''' assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.' for form, replacement in _verb_conversion_rules: # Exact match if analysis[FORM] == form: assert analysis[POSTAG] == 'V', \ '(!) Expected analysis of verb, but got analysis of "'+str(analysis[POSTAG])+'" instead.' analysis[FORM] = replacement # Inclusion : the case of some_prefix+' '+form ; elif analysis[FORM].endswith(' '+form): parts = analysis[FORM].split() prefix = ' '.join( parts[:len(parts)-1] ) analysis[FORM] = prefix+' '+replacement return analysis
def _convert_verbal_form( analysis )
Converts ordinary verbal categories of the input analysis. Performs one-to-one conversions.
8.195963
6.35325
1.290043
''' Provides some post-fixes. ''' assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.' if 'neg' in analysis[FORM]: analysis[FORM] = re.sub( '^\s*neg ([^,]*)$', '\\1 Neg', analysis[FORM] ) analysis[FORM] = re.sub( ' Neg Neg$', ' Neg', analysis[FORM] ) analysis[FORM] = re.sub( ' Aff Neg$', ' Neg', analysis[FORM] ) analysis[FORM] = re.sub( 'neg', 'Neg', analysis[FORM] ) analysis[FORM] = analysis[FORM].rstrip().lstrip() assert 'neg' not in analysis[FORM], \ '(!) The label "neg" should be removed by now.' assert 'Neg' not in analysis[FORM] or ('Neg' in analysis[FORM] and analysis[FORM].endswith('Neg')), \ '(!) The label "Neg" should end the analysis line: '+str(analysis[FORM]) return analysis
def _make_postfixes_1( analysis )
Provides some post-fixes.
4.200206
3.957735
1.061265
''' Filters the given list of *analyses* by morphological forms: deletes analyses that are listed in *target_forms*, but not in *keep_forms*. ''' to_delete = [] for aid, analysis in enumerate(analyses): delete = False for target in target_forms: if (target == analysis[FORM] and not analysis[FORM] in keep_forms): delete = True if delete: to_delete.append( aid ) if to_delete: to_delete.reverse() for aid in to_delete: del analyses[aid]
def _keep_analyses( analyses, keep_forms, target_forms )
Filters the given list of *analyses* by morphological forms: deletes analyses that are listed in *target_forms*, but not in *keep_forms*.
4.41684
2.585347
1.708413
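A minimal sketch of the filtering behaviour, assuming the FORM key constant is importable; the analyses are made up.
analyses = [{FORM: 'Pers Prs Imprt Sg2'}, {FORM: 'Pers Prs Ind Neg'}]
_keep_analyses(analyses, ['Pers Prs Ind Neg'], ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'])
print(analyses)  # only the 'Pers Prs Ind Neg' analysis is kept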
''' Disambiguates forms ambiguous between multiword negation and some other form; ''' prev_word_lemma = '' for word_dict in words_layer: forms = [ a[FORM] for a in word_dict[ANALYSIS] ] if ('Pers Prs Imprt Sg2' in forms and 'Pers Prs Ind Neg' in forms): if (prev_word_lemma == "ei" or prev_word_lemma == "ega"): # ei saa, ei tee _keep_analyses( word_dict[ANALYSIS], ['Pers Prs Ind Neg'], ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'] ) else: # saa! tee! _keep_analyses( word_dict[ANALYSIS], ['Pers Prs Imprt Sg2'], ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'] ) if ('Pers Prt Imprt' in forms and 'Pers Prt Ind Neg' in forms and 'Pers Prt Prc' in forms): if (prev_word_lemma == "ei" or prev_word_lemma == "ega"): # ei saanud, ei teinud _keep_analyses( word_dict[ANALYSIS], ['Pers Prt Ind Neg'], ['Pers Prt Imprt','Pers Prt Ind Neg','Pers Prt Prc'] ) else: # on, oli saanud teinud; kukkunud õun; ... _keep_analyses( word_dict[ANALYSIS], ['Pers Prt Prc'], ['Pers Prt Imprt','Pers Prt Ind Neg','Pers Prt Prc'] ) if ('Impers Prt Ind Neg' in forms and 'Impers Prt Prc' in forms): if (prev_word_lemma == "ei" or prev_word_lemma == "ega"): # ei saadud, ei tehtud _keep_analyses( word_dict[ANALYSIS], ['Impers Prt Ind Neg'], ['Impers Prt Ind Neg','Impers Prt Prc'] ) else: # on, oli saadud tehtud; saadud õun; ... _keep_analyses( word_dict[ANALYSIS], ['Impers Prt Prc'], ['Impers Prt Ind Neg','Impers Prt Prc'] ) prev_word_lemma = word_dict[ANALYSIS][0][ROOT]
def _disambiguate_neg( words_layer )
Disambiguates forms ambiguous between multiword negation and some other form;
3.025528
2.774066
1.090647
''' Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope. The scope could be either CLAUSES or SENTENCES. ''' assert scope in [CLAUSES, SENTENCES], '(!) The scope should be either "clauses" or "sentences".' group_indices = get_unique_clause_indices( text ) if scope==CLAUSES else get_unique_sentence_indices( text ) i = 0 gr_2nd_person_pron = {} while i < len( words_layer ): gr_index = group_indices[i] if gr_index not in gr_2nd_person_pron: # 1) Find out whether the current group (clause or sentence) contains "sina" j = i gr_2nd_person_pron_found = False while j < len( words_layer ): if group_indices[j] == gr_index: forms = [ a[FORM] for a in words_layer[j][ANALYSIS] ] lemmas = [ a[ROOT] for a in words_layer[j][ANALYSIS] ] if 'sina' in lemmas and 'Sg Nom' in forms: gr_2nd_person_pron_found = True break if group_indices[j] >= gr_index+10: # do not venture too far ... break j += 1 gr_2nd_person_pron[gr_index] = gr_2nd_person_pron_found forms = [ a[FORM] for a in words_layer[i][ANALYSIS] ] # 2) Disambiguate verb forms based on existence of 'sina' in the clause if ('Pers Prt Ind Pl3 Aff' in forms and 'Pers Prt Ind Sg2 Aff' in forms): # -sid if not gr_2nd_person_pron[ gr_index ]: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Pl3 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] ) else: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Ind Sg2 Aff'], ['Pers Prt Ind Pl3 Aff', 'Pers Prt Ind Sg2 Aff'] ) if ('Pers Prs Cond Pl3 Aff' in forms and 'Pers Prs Cond Sg2 Aff' in forms): # -ksid if not gr_2nd_person_pron[ gr_index ]: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Pl3 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] ) else: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prs Cond Sg2 Aff'], ['Pers Prs Cond Pl3 Aff', 'Pers Prs Cond Sg2 Aff'] ) if ('Pers Prt Cond Pl3 Aff' in forms and 'Pers Prt Cond Sg2 Aff' in forms): # -nuksid if not gr_2nd_person_pron[ gr_index ]: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Pl3 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] ) else: _keep_analyses( words_layer[i][ANALYSIS], ['Pers Prt Cond Sg2 Aff'], ['Pers Prt Cond Pl3 Aff', 'Pers Prt Cond Sg2 Aff'] ) i += 1
def _disambiguate_sid_ksid( words_layer, text, scope=CLAUSES )
Disambiguates verb forms based on existence of 2nd person pronoun ('sina') in given scope. The scope could be either CLAUSES or SENTENCES.
2.463654
2.218529
1.11049
''' Provides some post-fixes after the disambiguation. ''' for word_dict in words_layer: for analysis in word_dict[ANALYSIS]: analysis[FORM] = re.sub( '(Sg|Pl)([123])', '\\1 \\2', analysis[FORM] ) return words_layer
def _make_postfixes_2( words_layer )
Provides some post-fixes after the disambiguation.
8.279872
5.51331
1.501797
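The single rewrite rule applied above can be checked with plain re, independently of the layer structure.
import re
print(re.sub('(Sg|Pl)([123])', '\\1 \\2', 'Pers Prs Ind Sg2'))  # 'Pers Prs Ind Sg 2'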
''' Converts a list of analyses (list of dict objects) from FS's vabamorf format to giellatekno (GT) format. Due to one-to-many conversion rules, the number of analyses returned by this method can be greater than the number of analyses in the input list. ''' resulting_analyses = [] for analysis in analyses: # Make a copy of the analysis new_analyses = [ copy_analysis_dict( analysis ) ] # Convert noun categories new_analyses[0] = _convert_nominal_form( new_analyses[0] ) # Convert ambiguous verb categories new_analyses = _convert_amb_verbal_form( new_analyses[0] ) # Convert remaining verbal categories new_analyses = [_convert_verbal_form( a ) for a in new_analyses] # Make postfixes new_analyses = [_make_postfixes_1( a ) for a in new_analyses] resulting_analyses.extend( new_analyses ) return resulting_analyses
def convert_analysis( analyses )
Converts a list of analyses (list of dict objects) from FS's vabamorf format to giellatekno (GT) format. Due to one-to-many conversion rules, the number of analyses returned by this method can be greater than the number of analyses in the input list.
6.036762
2.684758
2.248531
''' Converts all words in a morphologically analysed Text from FS format to giellatekno (GT) format, and stores in a new layer named GT_WORDS. If the keyword argument *layer_name=='words'* , overwrites the old 'words' layer with the new layer containing GT format annotations. Parameters ----------- text : estnltk.text.Text Morphologically annotated text that needs to be converted from FS format to GT format; layer_name : str Name of the Text's layer in which GT format morphological annotations are stored; Defaults to GT_WORDS; ''' assert WORDS in text, \ '(!) The input text should contain "'+str(WORDS)+'" layer.' assert len(text[WORDS])==0 or (len(text[WORDS])>0 and ANALYSIS in text[WORDS][0]), \ '(!) Words in the input text should contain "'+str(ANALYSIS)+'" layer.' new_words_layer = [] # 1) Perform the conversion for word in text[WORDS]: new_analysis = [] new_analysis.extend( convert_analysis( word[ANALYSIS] ) ) new_words_layer.append( {TEXT:word[TEXT], ANALYSIS:new_analysis, START:word[START], END:word[END]} ) # 2) Perform some context-specific disambiguation _disambiguate_neg( new_words_layer ) _disambiguate_sid_ksid( new_words_layer, text, scope=CLAUSES ) _disambiguate_sid_ksid( new_words_layer, text, scope=SENTENCES ) _make_postfixes_2( new_words_layer ) # 3) Attach the layer if layer_name != WORDS: # Simply attach the new layer text[layer_name] = new_words_layer else: # Perform word-by-word replacements # (because simple attaching won't work here) for wid, new_word in enumerate( new_words_layer ): text[WORDS][wid] = new_word return text
def convert_to_gt( text, layer_name=GT_WORDS )
Converts all words in a morphologically analysed Text from FS format to giellatekno (GT) format, and stores in a new layer named GT_WORDS. If the keyword argument *layer_name=='words'* , overwrites the old 'words' layer with the new layer containing GT format annotations. Parameters ----------- text : estnltk.text.Text Morphologically annotated text that needs to be converted from FS format to GT format; layer_name : str Name of the Text's layer in which GT format morphological annotations are stored; Defaults to GT_WORDS;
5.581524
2.881181
1.937235
''' Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as EstNLTK's analysis dict: { "clitic": string, "ending": string, "form": string, "partofspeech": string, "root": string }, Returns the dict; ''' import sys result = { CLITIC:"", ENDING:"", FORM:form, POSTAG:pos, ROOT:"" } breakpoint = -1 for i in range(len(root)-1, -1, -1): if root[i] == '+': breakpoint = i break if breakpoint == -1: result[ROOT] = root result[ENDING] = "0" if not re.match("^\W+$", root): try: print( " No breakpoint found from: ", root, pos, form, file=sys.stderr ) except UnicodeEncodeError: print( " No breakpoint found from input *root*!", file=sys.stderr ) else: result[ROOT] = root[0:breakpoint] result[ENDING] = root[breakpoint+1:] if result[ENDING].endswith('ki') and len(result[ENDING]) > 2: result[CLITIC] = 'ki' result[ENDING] = re.sub('ki$', '', result[ENDING]) if result[ENDING].endswith('gi') and len(result[ENDING]) > 2: result[CLITIC] = 'gi' result[ENDING] = re.sub('gi$', '', result[ENDING]) return result
def get_analysis_dict( root, pos, form )
Takes *root*, *pos* and *form* from Filosoft's mrf input and reformats as EstNLTK's analysis dict: { "clitic": string, "ending": string, "form": string, "partofspeech": string, "root": string }, Returns the dict;
4.41959
2.571469
1.718703
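An illustrative call showing the '+' breakpoint and the clitic split; the dict keys follow the docstring above, the word is made up.
a = get_analysis_dict('kass+igi', 'S', 'sg n')
# expected: {'root': 'kass', 'ending': 'i', 'clitic': 'gi', 'partofspeech': 'S', 'form': 'sg n'}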
''' Compares the *original* text to *converted* text, and detects changes/differences in morphological annotations. The method constructs line-by-line comparison string, where lines are separated by newline, and '***' at the beginning of the line indicates the difference. Returns a pair: results of the line-by-line comparison as a string, and boolean value indicating whether there were any differences. ''' from estnltk.syntax.syntax_preprocessing import convert_Text_to_mrf old_layer_mrf = convert_Text_to_mrf( original ) new_layer_mrf = convert_Text_to_mrf( converted ) max_len_1 = max([len(l) for l in old_layer_mrf ]) max_len_2 = max([len(l) for l in new_layer_mrf ]) max_len = max( max_len_1, max_len_2 ) format_str = '{:<'+str(max_len+1)+'}' i = 0 j = 0 comp_lines = [] diff_found = False while(i < len(old_layer_mrf) or j < len(new_layer_mrf)): l1 = old_layer_mrf[i] l2 = new_layer_mrf[j] # 1) Output line containing tokens if not l1.startswith(' ') and not l2.startswith(' '): diff = '*** ' if format_str.format(l1) != format_str.format(l2) else ' ' comp_lines.append( diff+format_str.format(l1)+format_str.format(l2) ) if diff == '*** ': diff_found = True i += 1 j += 1 else: # 2) Output analysis line(s) while(i < len(old_layer_mrf) or j < len(new_layer_mrf)): l1 = old_layer_mrf[i] l2 = new_layer_mrf[j] if l1.startswith(' ') and l2.startswith(' '): diff = '*** ' if format_str.format(l1) != format_str.format(l2) else ' ' comp_lines.append( diff+format_str.format(l1)+format_str.format(l2) ) if diff == '*** ': diff_found = True i += 1 j += 1 elif l1.startswith(' ') and not l2.startswith(' '): diff = '*** ' comp_lines.append( diff+format_str.format(l1)+format_str.format(' ') ) diff_found = True i += 1 elif not l1.startswith(' ') and l2.startswith(' '): diff = '*** ' comp_lines.append( diff+format_str.format(' ')+format_str.format(l2) ) diff_found = True j += 1 else: break return '\n'.join( comp_lines ), diff_found
def get_original_vs_converted_diff( original, converted )
Compares the *original* text to *converted* text, and detects changes/differences in morphological annotations. The method constructs line-by-line comparison string, where lines are separated by newline, and '***' at the beginning of the line indicates the difference. Returns a pair: results of the line-by-line comparison as a string, and boolean value indicating whether there were any differences.
2.479235
1.846709
1.342516
openPat = '|'.join([re.escape(x) for x in openDelim]) # pattern for delimiters expected after each opening delimiter afterPat = {o: re.compile(openPat+'|'+c, re.DOTALL) for o,c in zip(openDelim, closeDelim)} stack = [] start = 0 cur = 0 end = len(text) startSet = False startPat = re.compile(openPat) nextPat = startPat while True: next = nextPat.search(text, cur) if not next: return if not startSet: start = next.start() startSet = True delim = next.group(0) if delim in openDelim: stack.append(delim) nextPat = afterPat[delim] else: opening = stack.pop() # assert opening == openDelim[closeDelim.index(next.group(0))] if stack: nextPat = afterPat[stack[-1]] else: yield start, next.end() nextPat = startPat start = next.end() startSet = False cur = next.end()
def findBalanced(text, openDelim, closeDelim)
Assuming that text contains a properly balanced expression, using :param openDelim: as opening delimiters and :param closeDelim: as closing delimiters. :return: an iterator producing pairs (start, end) of start and end positions in text containing a balanced expression.
2.715925
2.792399
0.972613
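A quick check of the generator, using doubled curly braces (wiki-template style) as delimiters.
text = 'a {{b {{c}} }} d'
for start, end in findBalanced(text, ['{{'], ['}}']):
    print(text[start:end])  # prints: {{b {{c}} }}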
if isinstance(s, six.text_type): return s elif isinstance(s, six.binary_type): return s.decode(encoding) else: raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
def as_unicode(s, encoding='utf-8')
Force conversion of given string to unicode type. Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x . If the string is already in unicode, then no conversion is done and the same string is returned. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to unicode. encoding: str The encoding of the input string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``str`` for Python3 or ``unicode`` for Python 2.
2.030559
2.310403
0.878876
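A short sketch of both accepted input types.
print(as_unicode(b'tere'))  # -> 'tere' (bytes are decoded, utf-8 by default)
print(as_unicode('tere'))   # -> 'tere' (already unicode, returned unchanged)
# as_unicode(42) raises ValueError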
if isinstance(s, six.text_type): return s.encode(encoding) elif isinstance(s, six.binary_type): # make sure the binary is in required encoding return s.decode(encoding).encode(encoding) else: raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
def as_binary(s, encoding='utf-8')
Force conversion of given string to binary type. Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x . If the string is already in binary, then no conversion is done and the same string is returned and ``encoding`` argument is ignored. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to binary. encoding: str The encoding of the resulting binary string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``bytes`` for Python3 or ``str`` for Python 2.
2.809711
2.958096
0.949837
return [fnm for fnm in os.listdir(root) if fnm.startswith(prefix) and fnm.endswith(suffix)]
def get_filenames(root, prefix=u'', suffix=u'')
Function for listing filenames with given prefix and suffix in the root directory. Parameters ---------- prefix: str The prefix of the required files. suffix: str The suffix of the required files Returns ------- list of str List of filenames matching the prefix and suffix criteria.
2.811144
3.326007
0.845201
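For example, to list the JSON corpus files of a directory (the path and naming scheme here are hypothetical):
for fnm in get_filenames('corpora', prefix='ee_', suffix='.json'):
    print(fnm)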
if self.search_method == 'ahocorasick': events = self._find_keywords_ahocorasick(text.text) elif self.search_method == 'naive': events = self._find_keywords_naive(text.text) events = self._resolve_conflicts(events) if self.mapping: for item in events: item['type'] = self.map[ text.text[item['start']:item['end']] ] if self.return_layer: return events else: text[self.layer_name] = events
def tag(self, text)
Retrieves list of keywords in text. Parameters ---------- text: Text The text to search for events. Returns ------- list of events sorted by start, end
4.18583
3.921015
1.067537
matches = self._match(text.text) matches = self._resolve_conflicts(matches) if self.return_layer: return matches else: text[self.layer_name] = matches
def tag(self, text)
Retrieves list of regex_matches in text. Parameters ---------- text: Text The estnltk Text object to search for matches. Returns ------- list of matches
8.967097
8.327634
1.076788
if self.search_method == 'ahocorasick': events = self._find_events_ahocorasick(text.text) elif self.search_method == 'naive': events = self._find_events_naive(text.text) events = self._resolve_conflicts(events) self._event_intervals(events, text) if self.return_layer: return events else: text[self.layer_name] = events
def tag(self, text)
Retrieves list of events in the text. Parameters ---------- text: Text The text to search for events. Returns ------- list of events sorted by start, end
4.304007
4.08944
1.052468
ph_start = m['start'] ph_end = m['end'] start_index = None for ind, word in enumerate(doc['words']): if word['start'] == ph_start: start_index = ind break end_index = None for ind, word in enumerate(doc['words']): if word['end'] == ph_end: end_index = ind break if start_index is not None and end_index is not None: lem = [] for i in doc['words'][start_index:end_index + 1]: word_lem = [] for idx, j in enumerate(i['analysis']): if i['analysis'][idx]['partofspeech'] in ['A', 'D', 'C', 'J']: if i['analysis'][idx]['lemma'] not in word_lem: word_lem.append(i['analysis'][idx]['lemma']) word_lem_str = '|'.join(word_lem) lem.append(word_lem_str) else: raise Exception('Something went really wrong') return lem
def __extract_lemmas(self, doc, m, phrase)
:param doc: the document from which the match was found :param m: the found match :param phrase: name of the phrase :return: list of the lemmas in the match
2.584174
2.606655
0.991376
with codecs.open(fnm, 'rb', 'ascii') as f: line = f.readline() while line != '': yield Text(json.loads(line)) line = f.readline()
def yield_json_corpus(fnm)
Function to read a JSON corpus from a file. A JSON corpus contains one document per line, encoded in JSON. Each line is yielded after it is read. Parameters ---------- fnm: str The filename of the corpus. Returns ------- generator of Text
3.083744
3.387262
0.910394
with codecs.open(fnm, 'wb', 'ascii') as f: for document in documents: f.write(json.dumps(document) + '\n') return documents
def write_json_corpus(documents, fnm)
Write a list of Text instances as a JSON corpus on disk. A JSON corpus contains one document per line, encoded in JSON. Parameters ---------- documents: iterable of estnltk.text.Text The documents of the corpus. fnm: str The path to save the corpus.
2.673369
3.408329
0.784364
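A sketch of the round trip between the two corpus helpers above, assuming estnltk's Text is importable; the file name is made up.
docs = [Text('Tere!'), Text('Head aega!')]
write_json_corpus(docs, 'corpus.jsonl')
for text in yield_json_corpus('corpus.jsonl'):
    print(text.text)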
with codecs.open(fnm, 'rb', 'ascii') as f: return Text(json.loads(f.read()))
def read_document(fnm)
Read a document that is stored in a text file as JSON. Parameters ---------- fnm: str The path of the document. Returns ------- Text
4.849573
5.953674
0.814551
with codecs.open(fnm, 'wb', 'ascii') as f: f.write(json.dumps(doc, indent=2))
def write_document(doc, fnm)
Write a Text document to file. Parameters ---------- doc: Text The document to save. fnm: str The filename to save the document
2.951206
4.894928
0.602911
if not isinstance(sourceSynset, Synset): raise TypeError("sourceSynset not Synset instance") elif not isinstance(targetSynset, Synset): raise TypeError("targetSynset not Synset instance") elif relationName not in RELATION_NAMES: raise TypeError("relationName not in RELATION_NAMES") else: sourceSynset.addRelation( Relation(relationName,targetSynset) ) return sourceSynset
def addRelation(sourceSynset,relationName,targetSynset)
Adds a relation named <relationName> from <sourceSynset> to <targetSynset> and returns <sourceSynset>.
2.259686
2.352179
0.960678
def fget(self): _out = '' _n = '\n' if len(self): if self.parent: _out = '%s%s%s' % (_out, PolarisText( *self.parent).out,_n) _out = _out + _n.join( map(lambda x: x.polarisText, self) ) else: _out = '' return _out return locals()
def polarisText()
polarisText part of _TypedList objects
5.953743
5.130526
1.160455
'''Appends Feature''' if isinstance(feature, Feature): self.features.append(feature) else: raise TypeError( 'feature Type should be Feature, not %s' % type(feature))
def addFeature(self, feature)
Appends Feature
4.897577
4.781457
1.024285
'''Adds SourceId to External_Info ''' if isinstance(value, Source_Id): self.source_ids.append(value) else: raise TypeError('value Type should be Source_Id, not %s' % type(value))
def addSourceId(self, value)
Adds SourceId to External_Info
6.469787
5.036607
1.284553
'''Adds CorpusId to External_Info ''' if isinstance(value, Corpus_Id): self.corpus_ids.append(value) else: raise TypeError('value Type should be Corpus_Id, not %s' % type(value))
def addCorpusId(self, value)
Adds CorpusId to External_Info
7.939017
5.303902
1.496826
self.levelNumber = None self.DRN = None self.fieldTag = None self.fieldValue = None self.noQuotes = None if iStr and not(iStr.strip().startswith('#')): iList = iStr.strip().split(' ') self.levelNumber = int(iList.pop(0)) if iList[0].startswith('@') and self.levelNumber != 3: self.DRN = int(iList.pop(0).strip('@')) else: self.DRN = None self.fieldTag = iList.pop(0) if iList and ( iList[0].startswith('"') or iList[0].startswith('@') ): fv = ' '.join(iList) self.fieldValue = fv[1:-1] elif iList: if len(iList) == 1: self.fieldValue = iList.pop(0) else: self.fieldValue = ' '.join(iList) try: self.fieldValue = int(self.fieldValue) except ValueError: self.noQuotes = True
def parse_line(self,iStr)
Parses ewn file line
2.599671
2.557303
1.016567
'''Parses wordnet from <self.file> ''' synList = [] self.milestone = 0 # to start from beginning of file while self.milestone < os.path.getsize(self.fileName) - 5: if debug: print ('self.milestone', self.milestone) a = self.parse_synset(offset=self.milestone) synList.append(a) self.milestone = self.file.tell() return synList
def parse_wordnet(self,debug=False)
Parses wordnet from <self.file>
6.159416
4.855588
1.268521
'''Appends one Translation to translations ''' if isinstance(translation, Translation): self.translations.append(translation) else: raise TranslationError('translation Type should be Translation, not %s' % type(translation))
def addTranslation(self,translation)
Appends one Translation to translations
6.913588
5.662667
1.220907
'''Appends one VariantFeature to variantFeatures ''' if isinstance(variantFeature, Feature): self.features.append(variantFeature) else: raise TypeError('variantFeature Type should be Feature, not %s' % type(variantFeature))
def addVariantFeature(self,variantFeature)
Appends one VariantFeature to variantFeatures
5.706336
5.001872
1.14084
'''Appends one Usage_Label to usage_labels ''' if isinstance(usage_label, Usage_Label): self.usage_labels.append(usage_label) else: raise Usage_LabelError('usage_label Type should be Usage_Label, not %s' % type(usage_label))
def addUsage_Label(self,usage_label)
Appends one Usage_Label to usage_labels
4.120368
3.653173
1.127887
'''Appends one Example to examples ''' if isinstance(example, Example): self.examples.append(example) else: raise ExampleError('example Type should be Example, not %s' % type(example))
def addExample(self,example)
Appends one Example to examples
6.274019
5.152168
1.217743
def fget(self): if self.variants: return self.variants[0] else: variant = Variant() return variant return locals()
def firstVariant()
first variant of Variants Read-only
5.507567
5.512815
0.999048
'''Returns a list of literals in the Synset read-only ''' def fget(self): if self.variants: return map(lambda x: x.literal, self.variants) else: return None return locals()
def literals()
Returns a list of literals in the Synset read-only
8.317914
4.753856
1.749719
'''Appends a variant. TODO: make it possible to add a Variant object directly. ''' var = Variant(literal=literal, sense=sense, gloss=gloss, examples=examples) self.variants.append(var)
def addVariantOld(self, literal='', sense=0, gloss='', examples=[])
Appends a variant. TODO: make it possible to add a Variant object directly.
8.411115
2.663411
3.158024
'''Appends one Variant to variants ''' if isinstance(variant, Variant): self.variants.append(variant) else: raise VariantError('variant Type should be Variant, not %s' % type(variant))
def addVariant(self,variant)
Appends one Variant to variants
6.145786
5.065448
1.213276
'''Appends InternalLink ''' if isinstance(link, InternalLink): self.internalLinks.append(link) else: raise InternalLinkError( 'link Type should be InternalLink, not %s' % type(link))
def addInternalLink(self, link)
Appends InternalLink
5.951478
5.07453
1.172814
'''Appends Relation ''' if isinstance(link, Relation): self.internalLinks.append(link) else: raise TypeError( 'link Type should be Relation, not %s' % type(link))
def addRelation(self, link)
Appends Relation
7.70744
6.550112
1.176688
'''Appends EqLink ''' if isinstance(link, EqLink): self.eqLinks.append(link) else: raise TypeError( 'link Type should be EqLink, not %s' % type(link))
def addEqLink(self, link)
Appends EqLink
6.474079
5.490228
1.1792
'''Returns list of named Relations. <name> may be string or list. ''' if self.internalLinks and not neg: if isinstance(name, six.string_types): return filter(lambda x: x.name == name, self.internalLinks) elif isinstance(name, list): return filter(lambda x: x.name in name, self.internalLinks) else: return None #should rise error elif self.internalLinks and neg: if isinstance(name, six.string_types): return filter(lambda x: x.name != name, self.internalLinks) elif isinstance(name, list): return filter(lambda x: x.name not in name, self.internalLinks) else: return None #should rise error else: return []
def named_relations(self, name, neg=False)
Returns list of named Relations. <name> may be string or list.
2.631409
2.148254
1.224906
'''Returns list of named eqLinks. <name> may be string or list. ''' if self.eqLinks and not neg: if isinstance(name, six.string_types): return filter(lambda x: x.relation.name == name, self.eqLinks) elif isinstance(name, list): return filter(lambda x: x.relation.name in name, self.eqLinks) else: return None #should rise error elif self.eqLinks and neg: if isinstance(name, six.string_types): return filter(lambda x: x.relation.name != name, self.eqLinks) elif isinstance(name, list): return filter(lambda x: x.relation.name not in name, self.eqLinks) else: return None #should rise error else: return None
def named_eq_relations(self, name, neg=False)
Returns list of named eqLinks. <name> may be string or list.
2.377719
1.861894
1.277043
'''Parses synset from file <fileName> from offset <offset> ''' p = Parser() p.file = open(fileName, 'rb') a = p.parse_synset(offset=offset) p.file.close() self.__dict__.update(a.__dict__)
def parse(self,fileName,offset)
Parses synset from file <fileName> from offset <offset>
5.349188
3.331004
1.605879
'''Appends synset to Polaris IO file <fileName> ''' f = open(fileName, 'ab') f.write('%s%s' % (self.polarisText, Synset.linebreak) ) f.close()
def write(self,fileName)
Appends synset to Polaris IO file <fileName>
13.127562
5.018932
2.615608
''' Fixes out-of-the-sentence links in the given sentence. The sentence is a sublist of *alignments*, starting from *sent_start* and ending one token before *sent_end*; ''' sent_len = sent_end - sent_start j = sent_start while j < sent_start + sent_len: for rel_id, rel in enumerate( alignments[j][PARSER_OUT] ): if int( rel[1] ) >= sent_len: # If the link points out-of-the-sentence, fix # the link so that it points inside the sentence # boundaries: wid = j - sent_start if sent_len == 1: # a single word becomes a root rel[1] = -1 elif wid-1 > -1: # word at the middle/end is linked to the previous rel[1] = wid - 1 elif wid-1 == -1: # word at the beginning is linked to the next rel[1] = wid + 1 alignments[j][PARSER_OUT][rel_id] = rel j += 1
def _fix_out_of_sentence_links( alignments, sent_start, sent_end )
Fixes out-of-the-sentence links in the given sentence. The sentence is a sublist of *alignments*, starting from *sent_start* and ending one token before *sent_end*;
4.715448
3.642029
1.294731
''' Reads the CONLL format syntactic analysis from given file, and returns as a Text object. The Text object has been tokenized for paragraphs, sentences, words, and it contains syntactic analyses aligned with word spans, in the layer *layer_name* (by default: LAYER_CONLL); Attached syntactic analyses are in the format as is the output of utils.normalise_alignments(); Parameters ----------- file_name : str Name of the input file; Should contain syntactically analysed text, following the CONLL format; layer_name : str Name of the Text's layer in which syntactic analyses are stored; Defaults to 'conll_syntax'; For other parameters, see optional parameters of the methods: utils.normalise_alignments(): "rep_miss_w_dummy", "fix_selfrefs", "keep_old", "mark_root"; maltparser_support.align_CONLL_with_Text(): "check_tokens", "add_word_ids"; ''' # 1) Load conll analysed text from file conll_lines = [] in_f = codecs.open(file_name, mode='r', encoding='utf-8') for line in in_f: # Skip comment lines if line.startswith('#'): continue conll_lines.append( line.rstrip() ) in_f.close() # 2) Extract sentences and word tokens sentences = [] sentence = [] for i, line in enumerate( conll_lines ): if len(line) > 0 and '\t' in line: features = line.split('\t') if len(features) != 10: raise Exception(' In file '+in_file+', line '+str(i)+\ ' with unexpected format: "'+line+'" ') word_id = features[0] token = features[1] sentence.append( token ) elif len(line)==0 or re.match('^\s+$', line): # End of a sentence if sentence: # (!) Use double space instead of single space in order to distinguish # word-tokenizing space from the single space in the multiwords # (e.g. 'Rio de Janeiro' as a single word); sentences.append( ' '.join(sentence) ) sentence = [] if sentence: sentences.append( ' '.join(sentence) ) # 3) Construct the estnltk's Text kwargs4text = { # Use custom tokenization utils in order to preserve exactly the same # tokenization as was in the input; "word_tokenizer": RegexpTokenizer(" ", gaps=True), "sentence_tokenizer": LineTokenizer() } from estnltk.text import Text text = Text( '\n'.join(sentences), **kwargs4text ) # Tokenize up to the words layer text.tokenize_words() # 4) Align syntactic analyses with the Text alignments = align_CONLL_with_Text( conll_lines, text, None, **kwargs ) normalise_alignments( alignments, data_type=CONLL_DATA, **kwargs ) # Attach alignments to the text text[ layer_name ] = alignments return text
def read_text_from_conll_file( file_name, layer_name=LAYER_CONLL, **kwargs )
Reads the CONLL format syntactic analysis from given file, and returns as a Text object. The Text object has been tokenized for paragraphs, sentences, words, and it contains syntactic analyses aligned with word spans, in the layer *layer_name* (by default: LAYER_CONLL); Attached syntactic analyses are in the format as is the output of utils.normalise_alignments(); Parameters ----------- file_name : str Name of the input file; Should contain syntactically analysed text, following the CONLL format; layer_name : str Name of the Text's layer in which syntactic analyses are stored; Defaults to 'conll_syntax'; For other parameters, see optional parameters of the methods: utils.normalise_alignments(): "rep_miss_w_dummy", "fix_selfrefs", "keep_old", "mark_root"; maltparser_support.align_CONLL_with_Text(): "check_tokens", "add_word_ids";
7.215507
3.114278
2.316911
''' Given a sentence ( a list of EstNLTK's word tokens ), and a list of dependency syntactic relations ( output of normalise_alignments() ), builds trees ( estnltk.syntax.utils.Tree objects ) from the sentence, and returns as a list of Trees (roots of trees). Note that there is one-to-many correspondence between EstNLTK's sentences and dependency syntactic trees, so the resulting list can contain more than one tree (root); ''' trees_of_sentence = [] nodes = [ -1 ] while( len(nodes) > 0 ): node = nodes.pop(0) # Find tokens in the sentence that take this node as their parent for i, syntax_token in enumerate( syntactic_relations ): parents = [ o[1] for o in syntax_token[PARSER_OUT] ] # There should be only one parent node; If there is more than one, take the # first node; parent = parents[0] if parent == node: labels = [ o[0] for o in syntax_token[PARSER_OUT] ] estnltk_token = sentence[i] tree1 = Tree( estnltk_token, i, sentence_id, labels, parser=layer ) if INIT_PARSER_OUT in syntax_token: tree1.parser_output = syntax_token[INIT_PARSER_OUT] tree1.syntax_token = syntax_token if parent == -1: # Add the root node trees_of_sentence.append( tree1 ) elif parent == i: # If, for some strange reason, the node is unnormalised and is still # linked to itself, add it as a singleton tree trees_of_sentence.append( tree1 ) else: # For each root node, attempt to add the child for root_node in trees_of_sentence: root_node.add_child_to_subtree( parent, tree1 ) if parent != i: # Add the current node as a future parent to be examined nodes.append( i ) return trees_of_sentence
def build_trees_from_sentence( sentence, syntactic_relations, layer=LAYER_VISLCG3, sentence_id=0, **kwargs )
Given a sentence ( a list of EstNLTK's word tokens ), and a list of dependency syntactic relations ( output of normalise_alignments() ), builds trees ( estnltk.syntax.utils.Tree objects ) from the sentence, and returns as a list of Trees (roots of trees). Note that there is one-to-many correspondence between EstNLTK's sentences and dependency syntactic trees, so the resulting list can contain more than one tree (root);
6.486589
3.562469
1.820813
''' Given a text object and the name of the layer where dependency syntactic relations are stored, builds trees ( estnltk.syntax.utils.Tree objects ) from all the sentences of the text and returns as a list of Trees. Uses the method build_trees_from_sentence() for acquiring trees of each sentence; Note that there is one-to-many correspondence between EstNLTK's sentences and dependency syntactic trees: one sentence can evoke multiple trees; ''' from estnltk.text import Text assert isinstance(text, Text), \ '(!) Unexpected text argument! Should be Estnltk\'s Text object.' assert layer in text, \ '(!) The layer '+str(layer)+' is missing from the input text.' text_sentences = list( text.divide( layer=WORDS, by=SENTENCES ) ) all_sentence_trees = [] # Collected sentence trees prev_sent_id = -1 # (!) Note: if the Text object has been split into smaller Texts with split_by(), # SENT_ID-s still refer to old text, and thus are not useful as indices # anymore; # Therefore, we also use another variable -- norm_prev_sent_id -- that always # counts sentences starting from 0, and use SENT_ID / prev_sent_id only for # deciding whether one sentence ends and another begins; norm_prev_sent_id = -1 current_sentence = [] k = 0 while k < len( text[layer] ): node_desc = text[layer][k] if prev_sent_id != node_desc[SENT_ID] and current_sentence: norm_prev_sent_id += 1 # If the index of the sentence has changed, and we have collected a sentence, # then build tree(s) from this sentence assert norm_prev_sent_id<len(text_sentences), '(!) Sentence with the index '+str(norm_prev_sent_id)+\ ' not found from the input text.' sentence = text_sentences[norm_prev_sent_id] trees_of_sentence = \ build_trees_from_sentence( sentence, current_sentence, layer, sentence_id=norm_prev_sent_id, \ **kwargs ) # Record trees constructed from this sentence all_sentence_trees.extend( trees_of_sentence ) # Reset the sentence collector current_sentence = [] # Collect sentence current_sentence.append( node_desc ) prev_sent_id = node_desc[SENT_ID] k += 1 if current_sentence: norm_prev_sent_id += 1 assert norm_prev_sent_id<len(text_sentences), '(!) Sentence with the index '+str(norm_prev_sent_id)+\ ' not found from the input text.' sentence = text_sentences[norm_prev_sent_id] # If we have collected a sentence, then build tree(s) from this sentence trees_of_sentence = \ build_trees_from_sentence( sentence, current_sentence, layer, sentence_id=norm_prev_sent_id, \ **kwargs ) # Record trees constructed from this sentence all_sentence_trees.extend( trees_of_sentence ) return all_sentence_trees
def build_trees_from_text( text, layer, **kwargs )
Given a text object and the name of the layer where dependency syntactic relations are stored, builds trees ( estnltk.syntax.utils.Tree objects ) from all the sentences of the text and returns as a list of Trees. Uses the method build_trees_from_sentence() for acquiring trees of each sentence; Note that there is one-to-many correspondence between EstNLTK's sentences and dependency syntactic trees: one sentence can evoke multiple trees;
4.543316
3.092649
1.469069
''' Adds given *tree* as a child of the current tree. ''' assert isinstance(tree, Tree), \ '(!) Unexpected type of argument for *tree*! Should be Tree.' if (not self.children): self.children = [] tree.parent = self self.children.append(tree)
def add_child_to_self( self, tree )
Adds given *tree* as a child of the current tree.
7.250054
6.010985
1.206134
''' Searches for the tree with *parent_word_id* from the current subtree (from this tree and from all of its subtrees). If the parent tree is found, attaches the given *tree* as its child. If the parent tree is not found, the current tree is not changed. ''' if (self.word_id == parent_word_id): self.add_child_to_self( tree ) elif (self.children): for child in self.children: child.add_child_to_subtree(parent_word_id, tree)
def add_child_to_subtree( self, parent_word_id, tree )
Searches for the tree with *parent_word_id* from the current subtree (from this tree and from all of its subtrees). If the parent tree is found, attaches the given *tree* as its child. If the parent tree is not found, the current tree is not changed.
4.277397
1.683344
2.541012
''' Returns this tree if it has no parents, or, alternatively, moves up via the parent links of this tree until reaching the tree with no parents, and returns the parentless tree as the root. ''' if self.parent == None: return self else: return self.parent.get_root( **kwargs )
def get_root( self, **kwargs )
Returns this tree if it has no parents, or, alternatively, moves up via the parent links of this tree until reaching the tree with no parents, and returns the parentless tree as the root.
9.690489
1.793669
5.402607
''' Check whether given *tree_node* satisfies the conditions given as arguments in *kwargs*. By default (if no conditions are given in *kwargs*), returns True. If there are multiple conditions listed (e.g. 'label_regexp' and 'word_template'), *True* is returned only when the node satisfies all the conditions. Following conditions are supported: ----------------------------------- label : str Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node must have within its analysis; If the node does not have the label, the node will be discarded; label_regexp : str A regular expression pattern (as string) describing the syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node must have within its analysis; If none of the node's labels matches the pattern, the node will be discarded; word_template : estnltk.mw_verbs.utils.WordTemplate A WordTemplate describing morphological constraints imposed to the word of the node; If the word's morphological features do not match the template, the node will be discarded; ''' matches = [] # A) Check syntactic label by matching a string syntactic_label = kwargs.get('label', None) if syntactic_label: matches.append( bool(tree_node.labels and syntactic_label in tree_node.labels) ) # B) Check syntactic label by matching a regular expression synt_label_regexp = kwargs.get('label_regexp', None) if synt_label_regexp: if isinstance(synt_label_regexp, basestring): # Compile the regexp (if it hasn't been compiled yet) synt_label_regexp = re.compile(synt_label_regexp) kwargs['label_regexp'] = synt_label_regexp if isinstance(synt_label_regexp, RE_TYPE): # Apply the pre-compiled regexp if tree_node.labels: matches.append( any([synt_label_regexp.match(label) != None for label in tree_node.labels]) ) else: matches.append( False ) # C) Check whether the word token of the node matches a word template word_template = kwargs.get('word_template', None) if word_template: if isinstance(word_template, WordTemplate): matches.append( word_template.matches( tree_node.token ) ) else: raise Exception('(!) Unexpected word_template. Should be from class WordTemplate.') return len(matches) == 0 or all(matches)
def _satisfies_conditions( self, tree_node, **kwargs )
Check whether given *tree_node* satisfies the conditions given as arguments in *kwargs*. By default (if no conditions are given in *kwargs*), returns True. If there are multiple conditions listed (e.g. 'label_regexp' and 'word_template'), *True* is returned only when the node satisfies all the conditions. Following conditions are supported: ----------------------------------- label : str Syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node must have within its analysis; If the node does not have the label, the node will be discarded; label_regexp : str A regular expression pattern (as string) describing the syntactic label (e.g. '@SUBJ', '@OBJ' etc.) that the node must have within its analysis; If none of the node's labels matches the pattern, the node will be discarded; word_template : estnltk.mw_verbs.utils.WordTemplate A WordTemplate describing morphological constraints imposed to the word of the node; If the word's morphological features do not match the template, the node will be discarded;
4.08478
1.801988
2.266819
''' Finds depth of this tree. ''' if (self.children): depth = 1 childDepths = [] for child in self.children: childDepths.append( child.get_tree_depth() ) return depth + max(childDepths) else: return 0
def get_tree_depth( self )
Finds depth of this tree.
3.183215
2.860409
1.112853
''' *Debug only* method for outputting the tree. ''' print (spacing+" "+str(self.word_id)+" "+str(self.text)) if (self.children): spacing=spacing+" " for child in self.children: child.debug_print_tree(spacing)
def debug_print_tree( self, spacing='' )
*Debug only* method for outputting the tree.
5.644898
3.646008
1.548241
def addRule(self, field, regExpPattern): '''Adds new rule for checking whether a value of the field matches given regular expression regExpPattern; Parameters ---------- field: str keyword, e.g. 'partofspeech', 'root', 'text' etc regExpPattern: str a regular expression that the value of the field must match (using method re.match( regExpPattern, token[field]) ). ''' compiled = re.compile( regExpPattern ) if field in self.analysisFields: if self.analysisRules == None: self.analysisRules = dict() self.analysisRules[field] = compiled else: if self.otherRules == None: self.otherRules = dict() self.otherRules[field] = compiled
Adds new rule for checking whether a value of the field matches given regular expression regExpPattern; Parameters ---------- field: str keyword, e.g. 'partofspeech', 'root', 'text' etc regExpPattern: str a regular expression that the value of the field must match (using method re.match( regExpPattern, token[field]) ).
null
null
null
def matches(self, tokenJson): '''Determines whether given token (tokenJson) satisfies all the rules listed in the WordTemplate. If the rules describe tokenJson[ANALYSIS], it is required that at least one item in the list tokenJson[ANALYSIS] satisfies all the rules (but it is not required that all the items should satisfy). Returns a boolean value. Parameters ---------- tokenJson: pyvabamorf's analysis of a single word token; ''' if self.otherRules != None: otherMatches = [] for field in self.otherRules: match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None) otherMatches.append( match ) if not otherMatches or not all(otherMatches): return False elif self.analysisRules == None and all(otherMatches): return True if self.analysisRules != None: assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson) totalMatches = [] for analysis in tokenJson[ANALYSIS]: # Check whether this analysis satisfies all the rules # (if not, discard the analysis) matches = [] for field in self.analysisRules: value = analysis[field] if field in analysis else "" match = (self.analysisRules[field]).match(value) != None matches.append( match ) if not match: break totalMatches.append( all(matches) ) # Return True iff there was at least one analysis that # satisfied all the rules; return any(totalMatches) return False
Determines whether given token (tokenJson) satisfies all the rules listed in the
WordTemplate. If the rules describe tokenJson[ANALYSIS], it is required that at
least one item in the list tokenJson[ANALYSIS] satisfies all the rules (but it is
not required that all the items should satisfy). Returns a boolean value.

    Parameters
    ----------
    tokenJson: pyvabamorf's analysis of a single word token;
null
null
null
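The "at least one analysis satisfies all rules" semantics of matches() in miniature. The token dict below is made-up sample data in the pyvabamorf-style layout the method assumes; the rule dict mirrors what addRule compiles.

import re

token = {'text': 'jooksis',
         'analysis': [{'partofspeech': 'V', 'root': 'jooks'},
                      {'partofspeech': 'S', 'root': 'jooks'}]}

rules = {'partofspeech': re.compile('V'), 'root': re.compile('jooks')}

# One analysis satisfying every rule is enough for the whole token to match.
match = any(all(rule.match(analysis.get(field, ''))
                for field, rule in rules.items())
            for analysis in token['analysis'])
print(match)   # True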
def matchingAnalyses(self, tokenJson):
        '''Determines whether given token (tokenJson) satisfies all the rules
           listed in the WordTemplate and returns a list of analyses (elements
           of tokenJson[ANALYSIS]) that are matching all the rules. An empty
           list is returned if none of the analyses match (all the rules), or
           (!) if none of the rules are describing the ANALYSIS part of the
           token;

        Parameters
        ----------
        tokenJson: pyvabamorf's analysis of a single word token;
        '''
        matchingResults = []
        if self.otherRules != None:
            otherMatches = []
            for field in self.otherRules:
                match = field in tokenJson and ((self.otherRules[field]).match(tokenJson[field]) != None)
                otherMatches.append( match )
            if not otherMatches or not all(otherMatches):
                return matchingResults
        if self.analysisRules != None:
            assert ANALYSIS in tokenJson, "No ANALYSIS found within token: "+str(tokenJson)
            for analysis in tokenJson[ANALYSIS]:
                # Check whether this analysis satisfies all the rules
                # (if not, discard the analysis)
                matches = []
                for field in self.analysisRules:
                    value = analysis[field] if field in analysis else ""
                    match = (self.analysisRules[field]).match(value) != None
                    matches.append( match )
                if matches and all(matches):
                    matchingResults.append( analysis )
            # Return the analyses that satisfied all the rules;
            return matchingResults
        return matchingResults
Determines whether given token (tokenJson) satisfies all the rules listed in the
WordTemplate and returns a list of analyses (elements of tokenJson[ANALYSIS]) that
are matching all the rules. An empty list is returned if none of the analyses match
(all the rules), or (!) if none of the rules are describing the ANALYSIS part of
the token;

    Parameters
    ----------
    tokenJson: pyvabamorf's analysis of a single word token;
null
null
null
def matchingAnalyseIndexes(self, tokenJson):
        '''Determines whether given token (tokenJson) satisfies all the rules
           listed in the WordTemplate and returns a list of analyse indexes that
           correspond to tokenJson[ANALYSIS] elements that are matching all the
           rules. An empty list is returned if none of the analyses match (all
           the rules), or (!) if none of the rules are describing the ANALYSIS
           part of the token;

        Parameters
        ----------
        tokenJson: pyvabamorf's analysis of a single word token;
        '''
        matchingResults = self.matchingAnalyses(tokenJson)
        if matchingResults:
            indexes = [ tokenJson[ANALYSIS].index(analysis) for analysis in matchingResults ]
            return indexes
        return matchingResults
Determines whether given token (tokenJson) satisfies all the rules listed in the
WordTemplate and returns a list of analyse indexes that correspond to
tokenJson[ANALYSIS] elements that are matching all the rules. An empty list is
returned if none of the analyses match (all the rules), or (!) if none of the
rules are describing the ANALYSIS part of the token;

    Parameters
    ----------
    tokenJson: pyvabamorf's analysis of a single word token;
null
null
null
def matchingPositions(self, tokenArray):
        '''Returns a list of positions (indexes) in the tokenArray where this
           WordTemplate matches (the method self.matches(token) returns True).
           Returns an empty list if no matching tokens appear in the input list.

        Parameters
        ----------
        tokenArray: list of word tokens;
            A list of word tokens along with their pyvabamorf's analyses;
        '''
        assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
        matchingPos = []
        for i in range( len(tokenArray) ):
            token = tokenArray[i]
            if self.matches(token):
                matchingPos.append( i )
        return matchingPos
Returns a list of positions (indexes) in the tokenArray where this WordTemplate
matches (the method self.matches(token) returns True). Returns an empty list if
no matching tokens appear in the input list.

    Parameters
    ----------
    tokenArray: list of word tokens;
        A list of word tokens along with their pyvabamorf's analyses;
null
null
null
def matchingTokens(self, tokenArray):
        '''Returns a list of tokens in the tokenArray that match this
           WordTemplate (the method self.matches(token) returns True).
           Returns an empty list if no matching tokens appear in the input list.

        Parameters
        ----------
        tokenArray: list of word tokens;
            A list of word tokens along with their pyvabamorf's analyses;
        '''
        assert isinstance(tokenArray, list), "tokenArray should be list "+str(tokenArray)
        matchingTok = []
        for i in range( len(tokenArray) ):
            token = tokenArray[i]
            if self.matches(token):
                matchingTok.append( token )
        return matchingTok
Returns a list of tokens in the tokenArray that match this WordTemplate (the
method self.matches(token) returns True). Returns an empty list if no matching
tokens appear in the input list.

    Parameters
    ----------
    tokenArray: list of word tokens;
        A list of word tokens along with their pyvabamorf's analyses;
null
null
null
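A standalone illustration of the scan that matchingPositions and matchingTokens perform: walk the token list once and collect the indexes (or the tokens themselves) for which a predicate holds. The `is_verb` predicate stands in for WordTemplate.matches and is purely hypothetical.

tokens = [{'text': 'mees', 'pos': 'S'},
          {'text': 'magab', 'pos': 'V'},
          {'text': 'hästi', 'pos': 'D'}]

def is_verb(token):
    # hypothetical stand-in for WordTemplate.matches
    return token['pos'] == 'V'

positions = [i for i, tok in enumerate(tokens) if is_verb(tok)]
matching  = [tok for tok in tokens if is_verb(tok)]
print(positions)   # [1]
print(matching)    # [{'text': 'magab', 'pos': 'V'}]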
def annotateText(self, text, layer, addEmptyAnnotations = True):
        ''' Applies this WordTemplate ( more specifically: its method
            self.matchingTokens() ) on all words of given text, and adds
            results of the matching to the text as a new annotation layer.
            Returns the input text (which is augmented with a new layer).

        Parameters
        ----------
        text: Text;
            A text where matching should be performed;
        layer: str;
            name of the new layer;
        addEmptyAnnotations: boolean, optional
            Whether the new layer should be added, if no match has been found?
            default: True
        '''
        from estnltk.text import Text
        assert isinstance(text, Text), "the input should be Text, but it is: "+str(text)
        # 1) Find words in text that match the given pattern
        tokens = self.matchingTokens( text[WORDS] )
        if not addEmptyAnnotations and not tokens:
            # if we are not interested in empty annotations
            return text
        # 2) Annotated given text with found matches
        if tokens:
            # Matches found: add to the layer
            text[layer] = [{START: t[START], END: t[END], TEXT: t[TEXT]} for t in tokens]
        else:
            # No matches found: add an empty layer
            text[layer] = []
        return text
Applies this WordTemplate ( more specifically: its method self.matchingTokens() )
on all words of given text, and adds results of the matching to the text as a new
annotation layer. Returns the input text (which is augmented with a new layer).

    Parameters
    ----------
    text: Text;
        A text where matching should be performed;
    layer: str;
        name of the new layer;
    addEmptyAnnotations: boolean, optional
        Whether the new layer should be added, if no match has been found?
        default: True
null
null
null
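A hedged usage sketch in estnltk 1.x style. The dict-based WordTemplate constructor is an assumption on my part; if it differs, the same rule can be registered with addRule('partofspeech', 'V') as shown earlier. The import paths are the ones mentioned in this record's own docstrings and code.

from estnltk.text import Text
from estnltk.mw_verbs.utils import WordTemplate

wt = WordTemplate({'partofspeech': 'V'})   # match verbs (assumed constructor form)
text = Text('Mees magab. Koer haugub.')
text.tag_analysis()                        # annotateText needs morphological analyses
wt.annotateText(text, 'verbs')
print(text['verbs'])                       # e.g. [{'start': ..., 'end': ..., 'text': 'magab'}, ...]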
is_root_node = False
        if cache is None:
            cache = {}
            is_root_node = True
        if id(self) in cache:
            return cache[id(self)]
        matches = self.get_matches_without_cache(text, cache=cache)
        cache[id(self)] = matches
        # if this is the root node, resolve the matches
        if is_root_node and conflict_resolver is not None:
            return conflict_resolver(matches)
        return matches
def get_matches(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage)
Get the matches of the symbol on given text.
2.698341
2.566883
1.051213
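The caching pattern of get_matches in a standalone sketch: results are memoised per node id(), and only the outermost (root) call, which owns the cache, post-processes the raw matches with a conflict resolver. The Sym class and its compute method are illustrative stand-ins, not the grammar API.

class Sym:
    """Toy symbol with a match step (hypothetical stand-in)."""
    def __init__(self, name):
        self.name = name
    def compute(self, text):
        return [(m, m + len(self.name)) for m in range(len(text)) if text.startswith(self.name, m)]

def get_matches(node, text, cache=None, resolver=None):
    is_root = cache is None          # only the outermost call owns the cache
    if is_root:
        cache = {}
    if id(node) in cache:            # memoised per object identity
        return cache[id(node)]
    matches = node.compute(text)
    cache[id(node)] = matches
    if is_root and resolver is not None:
        return resolver(matches)     # e.g. resolve_using_maximal_coverage in the real code
    return matches

print(get_matches(Sym('ab'), 'abcab', resolver=sorted))   # [(0, 2), (3, 5)]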
text = sectionObj['text']
    reftags = [x for x in refTagRegEx.finditer(text)]
    if reftags:
        references = []
        for tag in reftags:
            references.append(int(tag.group(1)))
        sectionObj['references'] = references
        text = refTagRegEx.sub('', text)
        sectionObj['text'] = text
    return sectionObj
def reffinder(sectionObj)
Add reference indices to sectionObj['references'].

    :param sectionObj:
    :return: a section obj with a references field
3.013332
2.971614
1.014039
references = referencesRegEx.finditer(text)
    count = 0
    refs = []
    spans = []
    for i in references:
        refs.append(i.group())
        spans.append(i.span())
        count += 1
    done = set()
    nameRegEx = re.compile(r)  # NOTE: the regular expression literal is missing in the source record
    for index, obj in enumerate(refs):
        if obj.startswith('<ref name='):
            nameTag = re.escape(nameRegEx.search(obj).group(1))
            if nameTag not in done:
                nameTag = re.escape(nameRegEx.search(obj).group(1))
                indeces = [i for i, x in enumerate(refs) if re.search(nameTag, x)]
                matches = [refs[i] for i in indeces]
                full = max(matches, key=len)
                for i in indeces:
                    refs[i] = full
                done.add(nameTag)
    # eliminate <ref tag or other rudiments from the ref string
    for i in range(len(refs)):
        #print('SIIT', refs[i])
        lastindex = refs[i].rindex('<')
        firstindex = refs[i].index('>')+1
        refs[i] = refs[i][firstindex:lastindex]
    # Handle cite-references
    for i in range(len(refs)):
        if 'cite' in refs[i].lower():
            newText = ''
            values = refs[i].split('|')
            for j in values:
                if '=' in j:
                    first = j.index('=')
                    newText += j[first+1:].strip() + ';'
            refs[i] = newText
    # a ref string : position int dictionary
    refspos = {}
    c = 0
    for i in refs:
        if i not in refspos.keys():
            refspos[i] = c
            c += 1
        else:
            continue
    #print(refspos)
    # eliminate old, bad <ref> tags and insert clean ones <ref 1..2..3/> to the same spot.
    newText = ''
    assert len(spans) == len(refs)  # Could happen... hasn't yet.
    next = 0
    for i in range(len(spans)):
        start = spans[i][0]
        newText += text[next:start] + '<ref ' + str(refspos[refs[i]]) + '/>'
        next = spans[i][1]
    newText += text[next:]
    # switch keys:values in the dictionary for use in sectionsParser
    # position tag : ref
    newDict = {y: x for x, y in refspos.items()}
    return newText, newDict
def referencesFinder(text)
:param text: takes the whole text of an article, searches for references, cleans
    the text, marks the reference indices from zero inside the text.
:return: the tagged text and a tag:reference dictionary to be used in sectionParser
4.484934
4.413943
1.016083
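The final rewrite step of referencesFinder in a standalone sketch: each original tag span is replaced by a numbered '<ref N/>' marker (duplicate references share a number), and a {number: reference_text} dictionary is returned for the section parser. The sample spans and reference strings are made up.

def rewrite(text, spans, refs):
    refspos = {}
    for r in refs:                          # first occurrence wins; duplicates share a number
        refspos.setdefault(r, len(refspos))
    out, prev = '', 0
    for (start, end), ref in zip(spans, refs):
        out += text[prev:start] + '<ref ' + str(refspos[ref]) + '/>'
        prev = end
    out += text[prev:]
    return out, {pos: ref for ref, pos in refspos.items()}

text = 'Väide A.<x> Väide B.<y>'
print(rewrite(text, [(8, 11), (20, 23)], ['allikas 1', 'allikas 2']))
# ('Väide A.<ref 0/> Väide B.<ref 1/>', {0: 'allikas 1', 1: 'allikas 2'})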
text = ''
    lastEnd = 0
    ends = []
    text = sectionObj['text']
    imageStarts = [x.start() for x in imageRegEx.finditer(text)]
    if imageStarts:
        images = []
        for start in imageStarts:
            imgText, end = balancedSlicer(text[start:])
            end = start + end
            ends.append(end)
            #imgText = image.group(0).replace('[[', '').replace(']]', '')
            img = {'text': imgText}
            imgText = imgText.split('|')
            #t = imgText[-1].replace(']]', '')
            t = imgText[-1][:-2]
            url = urlBegin + imgText[0].replace(' ', '_').replace('[[', '')
            img['text'] = t
            img['url'] = url
            if ExtLinkBracketedRegex.search(t):
                img = addExternalLinks(img)
            intlinks = [x for x in findBalanced(t, openDelim='[[', closeDelim=']]')]
            if intlinks:
                img = addIntLinks(img)
            images.append(img)
        sectionObj['images'] = images
        spans = []
        for i, j in zip(imageStarts, ends):
            spans.append((i, j))
        sectionObj['text'] = dropSpans(spans, text)
    return sectionObj
def imageParser(sectionObj)
Return a sectionObj with image data added:

    [
        {
            image_url = "http://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg/1024px-R%C3%B5uge_Suurj%C3%A4rv_2011_10.jpg"
            text: "Rõuge Suurjärv on Eesti sügavaim järv (38 m)."
            links: [ ... ]  // same logic as section-level links
        }
    ]
5.160259
5.091998
1.013405
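An illustrative sketch of the balanced slicing that imageParser relies on: starting at '[[', find the matching ']]' even when the caption itself contains nested '[[...]]' links, and return the sliced block plus the end offset (the code above adds that offset to the image start). This mirrors what a helper named balancedSlicer plausibly does; the real implementation may differ.

def balanced_slice(text, open_delim='[[', close_delim=']]'):
    depth, i = 0, 0
    while i < len(text):
        if text.startswith(open_delim, i):
            depth += 1
            i += len(open_delim)
        elif text.startswith(close_delim, i):
            depth -= 1
            i += len(close_delim)
            if depth == 0:
                return text[:i], i       # whole block plus its end offset
        else:
            i += 1
    return text, len(text)

block, end = balanced_slice('[[Pilt:Järv.jpg|pisi|[[Rõuge]] järv]] ja muu tekst')
print(block)   # carves out the whole image block, nested link included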
# we have a number of special names that are not layers but instead
        # attributes of "words" layer
        if layer == ANALYSIS:
            if WORDS in self and len(self[WORDS]) > 0:
                return ANALYSIS in self[WORDS][0]
        elif layer == LAYER_CONLL:
            if LAYER_CONLL in self and len(self[LAYER_CONLL]) > 0:
                return PARSER_OUT in self[LAYER_CONLL][0]
        elif layer == LAYER_VISLCG3:
            if LAYER_VISLCG3 in self and len(self[LAYER_VISLCG3]) > 0:
                return PARSER_OUT in self[LAYER_VISLCG3][0]
        elif layer == LABEL:
            if WORDS in self and len(self[WORDS]) > 0:
                return LABEL in self[WORDS][0]
        elif layer == CLAUSE_ANNOTATION:
            if WORDS in self and len(self[WORDS]) > 0:
                return CLAUSE_ANNOTATION in self[WORDS][0]
        elif layer == WORDNET:
            if WORDS in self and len(self[WORDS]) > 0:
                if ANALYSIS in self[WORDS][0] and len(self[WORDS][0][ANALYSIS]) > 0:
                    return WORDNET in self[WORDS][0][ANALYSIS][0]
        else:
            return layer in self
        return False
def is_tagged(self, layer)
Is the given element tokenized/tagged?
2.530514
2.46548
1.026378
return self.texts_from_spans(self.spans(layer), sep)
def texts(self, layer, sep=' ')
Retrieve texts for given layer.

    Parameters
    ----------
    sep: str
        Separator for multilayer elements (default: ' ').

    Returns
    -------
    list of str
        List of strings that make up given layer.
7.554005
15.938762
0.473939
text = self.text
        texts = []
        for start, end in spans:
            if isinstance(start, list):
                texts.append(sep.join(text[s:e] for s, e in zip(start, end)))
            else:
                texts.append(text[start:end])
        return texts
def texts_from_spans(self, spans, sep=' ')
Retrieve texts from a list of (start, end) position spans.

    Parameters
    ----------
    sep: str
        Separator for multilayer elements (default: ' ').

    Returns
    -------
    list of str
        List of strings that correspond to given spans.
2.115359
2.448398
0.863977
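The span handling above in miniature: a plain (start, end) pair slices the text directly, while a multilayer span (two parallel lists of starts and ends) is sliced piecewise and joined with the separator. The example text and offsets are made up.

text = 'Sada kakskümmend kolm'
spans = [(0, 4), ([5, 17], [16, 21])]     # 'Sada', then 'kakskümmend' + 'kolm'

out = []
for start, end in spans:
    if isinstance(start, list):
        out.append(' '.join(text[s:e] for s, e in zip(start, end)))
    else:
        out.append(text[start:end])
print(out)   # ['Sada', 'kakskümmend kolm']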
spans = []
        for data in self[layer]:
            spans.append((data[START], data[END]))
        return spans
def spans(self, layer)
Retrieve (start, end) tuples denoting the spans of given layer elements.

    Returns
    -------
    list of (int, int)
        List of (start, end) tuples.
5.23179
7.695096
0.679886
starts = []
        for data in self[layer]:
            starts.append(data[START])
        return starts
def starts(self, layer)
Retrieve start positions of elements of given layer.
5.965541
5.555646
1.07378
ends = []
        for data in self[layer]:
            ends.append(data[END])
        return ends
def ends(self, layer)
Retrieve end positions of elements of given layer.
6.32608
5.979494
1.057962
return {
            PARAGRAPHS: self.tokenize_paragraphs,
            SENTENCES: self.tokenize_sentences,
            WORDS: self.tokenize_words,
            ANALYSIS: self.tag_analysis,
            TIMEXES: self.tag_timexes,
            NAMED_ENTITIES: self.tag_named_entities,
            CLAUSE_ANNOTATION: self.tag_clause_annotations,
            CLAUSES: self.tag_clauses,
            LAYER_CONLL: self.tag_syntax_vislcg3,
            LAYER_VISLCG3: self.tag_syntax_maltparser,
            WORDNET: self.tag_wordnet
        }
def layer_tagger_mapping(self)
Dictionary that maps layer names to taggers that can create that layer.
4.240787
4.152774
1.021194
mapping = self.layer_tagger_mapping
        if layer in mapping:
            mapping[layer]()
        return self
def tag(self, layer)
Tag the annotations of given layer. It can automatically tag any built-in layer type.
10.889855
7.781786
1.399403
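The dispatch pattern behind layer_tagger_mapping and tag in miniature: layer names map to bound methods, and tag() simply looks the method up and calls it. The Doc class here is a toy stand-in, not estnltk's Text.

class Doc(dict):
    def tokenize_words(self):
        # toy tagger: whitespace tokenization
        self['words'] = self['text'].split()
        return self

    @property
    def layer_tagger_mapping(self):
        return {'words': self.tokenize_words}

    def tag(self, layer):
        mapping = self.layer_tagger_mapping
        if layer in mapping:
            mapping[layer]()
        return self

d = Doc(text='Mees magab')
print(d.tag('words')['words'])   # ['Mees', 'magab']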
tok = self.__paragraph_tokenizer
        spans = tok.span_tokenize(self.text)
        dicts = []
        for start, end in spans:
            dicts.append({'start': start, 'end': end})
        self[PARAGRAPHS] = dicts
        return self
def tokenize_paragraphs(self)
Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer.
4.005653
3.2497
1.232622
if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        return self.texts(PARAGRAPHS)
def paragraph_texts(self)
The list of texts representing ``paragraphs`` layer elements.
5.983615
5.359751
1.116398
if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        return self.spans(PARAGRAPHS)
def paragraph_spans(self)
The list of spans representing ``paragraphs`` layer elements.
5.845344
4.906394
1.191373
if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        return self.starts(PARAGRAPHS)
def paragraph_starts(self)
The start positions of ``paragraphs`` layer elements.
6.237454
5.716742
1.091086
if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        return self.ends(PARAGRAPHS)
def paragraph_ends(self)
The end positions of ``paragraphs`` layer elements.
6.371824
5.809525
1.096789
if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        tok = self.__sentence_tokenizer
        text = self.text
        dicts = []
        for paragraph in self[PARAGRAPHS]:
            para_start, para_end = paragraph[START], paragraph[END]
            para_text = text[para_start:para_end]
            if not self.is_tagged(WORDS):
                # Non-hack variant: word tokenization has not been applied yet,
                # so we proceed in natural order (first sentences, then words)
                spans = tok.span_tokenize(para_text)
                for start, end in spans:
                    dicts.append({'start': start+para_start, 'end': end+para_start})
            else:
                # A hack variant: word tokenization has already been made, so
                # we try to use existing word tokenization (first words, then sentences)
                para_words = \
                    [ w for w in self[WORDS] if w[START] >= para_start and w[END] <= para_end ]
                para_word_texts = \
                    [ w[TEXT] for w in para_words ]
                try:
                    # Apply sentences_from_tokens method (if available)
                    sents = tok.sentences_from_tokens( para_word_texts )
                except AttributeError as e:
                    raise
                # Align result of the sentence tokenization with the initial word tokenization
                # in order to determine the sentence boundaries
                i = 0
                for sentence in sents:
                    j = 0
                    firstToken = None
                    lastToken = None
                    while i < len(para_words):
                        if para_words[i][TEXT] != sentence[j]:
                            raise Exception('Error on aligning: ', para_word_texts, ' and ', sentence, ' at positions ', i, j)
                        if j == 0:
                            firstToken = para_words[i]
                        if j == len(sentence) - 1:
                            lastToken = para_words[i]
                            i += 1
                            break
                        j += 1
                        i += 1
                    sentenceDict = \
                        {'start': firstToken[START], 'end': lastToken[END]}
                    dicts.append( sentenceDict )
        # Note: We also need to invalidate the cached properties providing the
        #       sentence information, as otherwise, if the properties have been
        #       called already, new calls would return the old state of sentence
        #       tokenization;
        for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans', \
                                'sentence_starts', 'sentence_ends']:
            try:
                # invalidate the cache
                delattr(self, sentence_attrib)
            except AttributeError:
                # it's ok, if the cached property has not been called yet
                pass
        self[SENTENCES] = dicts
        return self
def tokenize_sentences(self)
Apply sentence tokenization to this Text instance. Creates ``sentences`` layer. Automatically tokenizes paragraphs, if they are not already tokenized. Also, if word tokenization has already been performed, tries to fit the sentence tokenization into the existing word tokenization;
4.256732
4.042247
1.053061
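The word/sentence alignment step of tokenize_sentences in miniature: given word records with spans and the same words regrouped into sentences, a sentence span runs from its first word's start to its last word's end. The word records and sentences below are made-up sample data for the text 'Mees magab. Koer haugub.'.

words = [{'text': 'Mees', 'start': 0, 'end': 4},
         {'text': 'magab', 'start': 5, 'end': 10},
         {'text': '.', 'start': 10, 'end': 11},
         {'text': 'Koer', 'start': 12, 'end': 16},
         {'text': 'haugub', 'start': 17, 'end': 23},
         {'text': '.', 'start': 23, 'end': 24}]
sents = [['Mees', 'magab', '.'], ['Koer', 'haugub', '.']]

dicts, i = [], 0
for sentence in sents:
    # the words of each sentence appear consecutively in the word layer
    assert [w['text'] for w in words[i:i + len(sentence)]] == sentence
    first, last = words[i], words[i + len(sentence) - 1]
    dicts.append({'start': first['start'], 'end': last['end']})
    i += len(sentence)
print(dicts)   # [{'start': 0, 'end': 11}, {'start': 12, 'end': 24}]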
if not self.is_tagged(SENTENCES):
            self.tokenize_sentences()
        return self.texts(SENTENCES)
def sentence_texts(self)
The list of texts representing ``sentences`` layer elements.
8.470325
7.463499
1.1349
if not self.is_tagged(SENTENCES):
            self.tokenize_sentences()
        return self.spans(SENTENCES)
def sentence_spans(self)
The list of spans representing ``sentences`` layer elements.
8.433593
7.080935
1.191028
if not self.is_tagged(SENTENCES):
            self.tokenize_sentences()
        return self.starts(SENTENCES)
def sentence_starts(self)
The list of start positions representing ``sentences`` layer elements.
9.32409
8.375211
1.113296