Dataset schema:
_id: string (length 2–7)
title: string (length 1–88)
partition: string (3 classes)
text: string (length 75–19.8k)
language: string (1 class)
meta_information: dict
q7700
copy_analysis_dict
train
def copy_analysis_dict( analysis ):
    ''' Creates a copy of the given analysis dict. '''
    assert isinstance(analysis, dict), "(!) Input 'analysis' should be a dict!"
    new_dict = { POSTAG: analysis[POSTAG],
                 ROOT:   analysis[ROOT],
                 FORM:   analysis[FORM],
                 CLITIC: analysis[CLITIC],
                 ENDING: analysis[ENDING] }
    if LEMMA in analysis:
        new_dict[LEMMA] = analysis[LEMMA]
    if ROOT_TOKENS in analysis:
        new_dict[ROOT_TOKENS] = analysis[ROOT_TOKENS]
    return new_dict
python
{ "resource": "" }
q7701
get_unique_clause_indices
train
def get_unique_clause_indices( text ):
    ''' Returns a list of clause indices for the whole text.
        For each token in the text, the list contains the index of the clause
        the word belongs to, and the indices are unique over the whole text.
    '''
    # Add clause boundary annotation (if missing)
    if not text.is_tagged( CLAUSES ):
        text.tag_clauses()
    # Collect (unique) clause indices over the whole text
    clause_indices = []
    sent_id = 0
    for sub_text in text.split_by( SENTENCES ):
        for word, cl_index in zip( sub_text.words, sub_text.clause_indices ):
            clause_indices.append( sent_id + cl_index )
        nr_of_clauses = len(set(sub_text.clause_indices))
        sent_id += nr_of_clauses
    assert len(clause_indices) == len(text.words), \
        '(!) Number of clause indices should match nr of words!'
    return clause_indices
python
{ "resource": "" }
q7702
get_unique_sentence_indices
train
def get_unique_sentence_indices( text ):
    ''' Returns a list of sentence indices for the whole text.
        For each token in the text, the list contains the index of the sentence
        the word belongs to, and the indices are unique over the whole text.
    '''
    # Add sentence annotation (if missing)
    if not text.is_tagged( SENTENCES ):
        text.tokenize_sentences()
    # Collect (unique) sentence indices over the whole text
    sent_indices = []
    sent_id = 0
    for sub_text in text.split_by( SENTENCES ):
        for word in sub_text.words:
            sent_indices.append( sent_id )
        sent_id += 1
    assert len(sent_indices) == len(text.words), \
        '(!) Number of sent indices should match nr of words!'
    return sent_indices
python
{ "resource": "" }
q7703
_convert_nominal_form
train
def _convert_nominal_form( analysis ):
    ''' Converts nominal categories of the input analysis.
        Performs one-to-one conversions only.
    '''
    assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.'
    for pattern_str, replacement in _noun_conversion_rules:
        if pattern_str in analysis[FORM]:
            analysis[FORM] = analysis[FORM].replace( pattern_str, replacement )
    return analysis
python
{ "resource": "" }
q7704
_convert_amb_verbal_form
train
def _convert_amb_verbal_form( analysis ):
    ''' Converts ambiguous verbal categories of the input analysis.
        Performs one-to-many conversions.
    '''
    assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.'
    results = []
    for root_pat, pos, form_pat, replacements in _amb_verb_conversion_rules:
        if analysis[POSTAG] == pos and re.match(root_pat, analysis[ROOT]) and \
           re.match(form_pat, analysis[FORM]):
            for replacement in replacements:
                new_analysis = copy_analysis_dict( analysis )
                new_form = re.sub(form_pat, replacement, analysis[FORM])
                new_analysis[FORM] = new_form
                results.append( new_analysis )
            # break after the replacement has been made
            # ( to avoid over-generation )
            break
    if not results:
        results.append( analysis )
    return results
python
{ "resource": "" }
q7705
_convert_verbal_form
train
def _convert_verbal_form( analysis ):
    ''' Converts ordinary verbal categories of the input analysis.
        Performs one-to-one conversions.
    '''
    assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.'
    for form, replacement in _verb_conversion_rules:
        # Exact match
        if analysis[FORM] == form:
            assert analysis[POSTAG] == 'V', \
                '(!) Expected analysis of verb, but got analysis of "'+str(analysis[POSTAG])+'" instead.'
            analysis[FORM] = replacement
        # Inclusion: the case of some_prefix+' '+form
        elif analysis[FORM].endswith(' '+form):
            parts  = analysis[FORM].split()
            prefix = ' '.join( parts[:len(parts)-1] )
            analysis[FORM] = prefix+' '+replacement
    return analysis
python
{ "resource": "" }
q7706
_make_postfixes_1
train
def _make_postfixes_1( analysis ):
    ''' Provides some post-fixes. '''
    assert FORM in analysis, '(!) The input analysis does not contain "'+FORM+'" key.'
    if 'neg' in analysis[FORM]:
        analysis[FORM] = re.sub( r'^\s*neg ([^,]*)$', '\\1 Neg', analysis[FORM] )
    analysis[FORM] = re.sub( ' Neg Neg$', ' Neg', analysis[FORM] )
    analysis[FORM] = re.sub( ' Aff Neg$', ' Neg', analysis[FORM] )
    analysis[FORM] = re.sub( 'neg', 'Neg', analysis[FORM] )
    analysis[FORM] = analysis[FORM].rstrip().lstrip()
    assert 'neg' not in analysis[FORM], \
        '(!) The label "neg" should be removed by now.'
    assert 'Neg' not in analysis[FORM] or analysis[FORM].endswith('Neg'), \
        '(!) The label "Neg" should end the analysis line: '+str(analysis[FORM])
    return analysis
python
{ "resource": "" }
q7707
_disambiguate_neg
train
def _disambiguate_neg( words_layer ):
    ''' Disambiguates forms that are ambiguous between multiword negation
        and some other form;
    '''
    prev_word_lemma = ''
    for word_dict in words_layer:
        forms = [ a[FORM] for a in word_dict[ANALYSIS] ]
        if ('Pers Prs Imprt Sg2' in forms and 'Pers Prs Ind Neg' in forms):
            if (prev_word_lemma == "ei" or prev_word_lemma == "ega"):
                # ei saa, ei tee
                _keep_analyses( word_dict[ANALYSIS], ['Pers Prs Ind Neg'],
                                ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'] )
            else:
                # saa! tee!
                _keep_analyses( word_dict[ANALYSIS], ['Pers Prs Imprt Sg2'],
                                ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'] )
        if ('Pers Prt Imprt' in forms and 'Pers Prt Ind Neg' in forms and 'Pers Prt Prc' in forms):
            if (prev_word_lemma == "ei" or prev_word_lemma == "ega"):
                # ei saanud, ei teinud
                _keep_analyses( word_dict[ANALYSIS], ['Pers Prt Ind Neg'],
                                ['Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'] )
            else:
                # on, oli saanud teinud; kukkunud õun; ...
                _keep_analyses( word_dict[ANALYSIS], ['Pers Prt Prc'],
                                ['Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'] )
        if ('Impers Prt Ind Neg' in forms and 'Impers Prt Prc' in forms):
            if (prev_word_lemma == "ei" or prev_word_lemma == "ega"):
                # ei saadud, ei tehtud
                _keep_analyses( word_dict[ANALYSIS], ['Impers Prt Ind Neg'],
                                ['Impers Prt Ind Neg', 'Impers Prt Prc'] )
            else:
                # on, oli saadud tehtud; saadud õun; ...
                _keep_analyses( word_dict[ANALYSIS], ['Impers Prt Prc'],
                                ['Impers Prt Ind Neg', 'Impers Prt Prc'] )
        prev_word_lemma = word_dict[ANALYSIS][0][ROOT]
python
{ "resource": "" }
q7708
_make_postfixes_2
train
def _make_postfixes_2( words_layer ):
    ''' Provides some post-fixes after the disambiguation. '''
    for word_dict in words_layer:
        for analysis in word_dict[ANALYSIS]:
            analysis[FORM] = re.sub( '(Sg|Pl)([123])', '\\1 \\2', analysis[FORM] )
    return words_layer
python
{ "resource": "" }
q7709
as_unicode
train
def as_unicode(s, encoding='utf-8'):
    """Force conversion of given string to unicode type.
    Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x.

    If the string is already in unicode, then no conversion is done
    and the same string is returned.

    Parameters
    ----------
    s: str or bytes (Python3), str or unicode (Python2)
        The string to convert to unicode.
    encoding: str
        The encoding of the input string (default: utf-8)

    Raises
    ------
    ValueError
        In case an input of invalid type was passed to the function.

    Returns
    -------
    ``str`` for Python3 or ``unicode`` for Python 2.
    """
    if isinstance(s, six.text_type):
        return s
    elif isinstance(s, six.binary_type):
        return s.decode(encoding)
    else:
        raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
python
{ "resource": "" }
q7710
as_binary
train
def as_binary(s, encoding='utf-8'):
    """Force conversion of given string to binary type.
    Binary is ``bytes`` type for Python 3.x and ``str`` for Python 2.x.

    If the string is already in binary, then no conversion is done and the
    same string is returned, and the ``encoding`` argument is ignored.

    Parameters
    ----------
    s: str or bytes (Python3), str or unicode (Python2)
        The string to convert to binary.
    encoding: str
        The encoding of the resulting binary string (default: utf-8)

    Raises
    ------
    ValueError
        In case an input of invalid type was passed to the function.

    Returns
    -------
    ``bytes`` for Python3 or ``str`` for Python 2.
    """
    if isinstance(s, six.text_type):
        return s.encode(encoding)
    elif isinstance(s, six.binary_type):
        # make sure the binary is in required encoding
        return s.decode(encoding).encode(encoding)
    else:
        raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
python
{ "resource": "" }
q7711
KeywordTagger.tag
train
def tag(self, text):
    """Retrieves list of keywords in text.

    Parameters
    ----------
    text: Text
        The text to search for events.

    Returns
    -------
    list of events
        sorted by start, end
    """
    if self.search_method == 'ahocorasick':
        events = self._find_keywords_ahocorasick(text.text)
    elif self.search_method == 'naive':
        events = self._find_keywords_naive(text.text)
    events = self._resolve_conflicts(events)

    if self.mapping:
        for item in events:
            item['type'] = self.map[ text.text[item['start']:item['end']] ]

    if self.return_layer:
        return events
    else:
        text[self.layer_name] = events
python
{ "resource": "" }
q7712
RegexTagger.tag
train
def tag(self, text):
    """Retrieves list of regex_matches in text.

    Parameters
    ----------
    text: Text
        The estnltk text object to search for events.

    Returns
    -------
    list of matches
    """
    matches = self._match(text.text)
    matches = self._resolve_conflicts(matches)

    if self.return_layer:
        return matches
    else:
        text[self.layer_name] = matches
python
{ "resource": "" }
q7713
yield_json_corpus
train
def yield_json_corpus(fnm):
    """Function to read a JSON corpus from a file.
    A JSON corpus contains one document per line, encoded in JSON.
    Each line is yielded after it is read.

    Parameters
    ----------
    fnm: str
        The filename of the corpus.

    Returns
    -------
    generator of Text
    """
    with codecs.open(fnm, 'rb', 'ascii') as f:
        line = f.readline()
        while line != '':
            yield Text(json.loads(line))
            line = f.readline()
python
{ "resource": "" }
q7714
write_json_corpus
train
def write_json_corpus(documents, fnm):
    """Write a list of Text instances as a JSON corpus on disk.
    A JSON corpus contains one document per line, encoded in JSON.

    Parameters
    ----------
    documents: iterable of estnltk.text.Text
        The documents of the corpus
    fnm: str
        The path to save the corpus.
    """
    with codecs.open(fnm, 'wb', 'ascii') as f:
        for document in documents:
            f.write(json.dumps(document) + '\n')
    return documents
python
{ "resource": "" }
q7715
read_document
train
def read_document(fnm):
    """Read a document that is stored in a text file as JSON.

    Parameters
    ----------
    fnm: str
        The path of the document.

    Returns
    -------
    Text
    """
    with codecs.open(fnm, 'rb', 'ascii') as f:
        return Text(json.loads(f.read()))
python
{ "resource": "" }
q7716
write_document
train
def write_document(doc, fnm):
    """Write a Text document to file.

    Parameters
    ----------
    doc: Text
        The document to save.
    fnm: str
        The filename to save the document.
    """
    with codecs.open(fnm, 'wb', 'ascii') as f:
        f.write(json.dumps(doc, indent=2))
python
{ "resource": "" }
q7717
_TypedList.polarisText
train
def polarisText():
    """polarisText part of _TypedList objects"""
    def fget(self):
        _out = ''
        _n = '\n'
        if len(self):
            if self.parent:
                _out = '%s%s%s' % (_out, PolarisText(*self.parent).out, _n)
            _out = _out + _n.join( map(lambda x: x.polarisText, self) )
        else:
            _out = ''
        return _out
    return locals()
python
{ "resource": "" }
q7718
Parser.parse_line
train
def parse_line(self, iStr):
    """Parses ewn file line"""
    self.levelNumber = None
    self.DRN = None
    self.fieldTag = None
    self.fieldValue = None
    self.noQuotes = None
    if iStr and not(iStr.strip().startswith('#')):
        iList = iStr.strip().split(' ')
        self.levelNumber = int(iList.pop(0))
        if iList[0].startswith('@') and self.levelNumber != 3:
            self.DRN = int(iList.pop(0).strip('@'))
        else:
            self.DRN = None
        self.fieldTag = iList.pop(0)
        if iList and ( iList[0].startswith('"') or iList[0].startswith('@') ):
            fv = ' '.join(iList)
            self.fieldValue = fv[1:-1]
        elif iList:
            if len(iList) == 1:
                self.fieldValue = iList.pop(0)
            else:
                self.fieldValue = ' '.join(iList)
            try:
                self.fieldValue = int(self.fieldValue)
            except ValueError:
                self.noQuotes = True
python
{ "resource": "" }
q7719
Variant.addTranslation
train
def addTranslation(self, translation):
    '''Appends one Translation to translations'''
    if isinstance(translation, Translation):
        self.translations.append(translation)
    else:
        raise TranslationError(
            'translation Type should be Translation, not %s' % type(translation))
python
{ "resource": "" }
q7720
Variant.addVariantFeature
train
def addVariantFeature(self, variantFeature):
    '''Appends one VariantFeature to variantFeatures'''
    if isinstance(variantFeature, Feature):
        self.features.append(variantFeature)
    else:
        raise TypeError(
            'variantFeature Type should be Feature, not %s' % type(variantFeature))
python
{ "resource": "" }
q7721
Variant.addUsage_Label
train
def addUsage_Label(self, usage_label):
    '''Appends one Usage_Label to usage_labels'''
    if isinstance(usage_label, Usage_Label):
        self.usage_labels.append(usage_label)
    else:
        raise Usage_LabelError(
            'usage_label Type should be Usage_Label, not %s' % type(usage_label))
python
{ "resource": "" }
q7722
Variant.addExample
train
def addExample(self, example):
    '''Appends one Example to examples'''
    if isinstance(example, Example):
        self.examples.append(example)
    else:
        raise ExampleError(
            'example Type should be Example, not %s' % type(example))
python
{ "resource": "" }
q7723
Synset.firstVariant
train
def firstVariant():
    """First variant of variants. Read-only."""
    def fget(self):
        if self.variants:
            return self.variants[0]
        else:
            variant = Variant()
            return variant
    return locals()
python
{ "resource": "" }
q7724
Synset.literals
train
def literals():
    '''Returns a list of literals in the Synset. Read-only.'''
    def fget(self):
        if self.variants:
            # a list comprehension keeps the documented list return type on Python 3
            return [variant.literal for variant in self.variants]
        else:
            return None
    return locals()
python
{ "resource": "" }
q7725
Synset.addVariantOld
train
def addVariantOld(self, literal='', sense=0, gloss='', examples=None):
    '''Appends a variant.
    TODO: make it possible to add a Variant object directly.
    '''
    # use None as default to avoid a shared mutable default argument
    if examples is None:
        examples = []
    var = Variant(literal=literal, sense=sense, gloss=gloss, examples=examples)
    self.variants.append(var)
python
{ "resource": "" }
q7726
Synset.addVariant
train
def addVariant(self, variant):
    '''Appends one Variant to variants'''
    if isinstance(variant, Variant):
        self.variants.append(variant)
    else:
        raise VariantError(
            'variant Type should be Variant, not %s' % type(variant))
python
{ "resource": "" }
q7727
Synset.named_eq_relations
train
def named_eq_relations(self, name, neg=False):
    '''Returns a list of named eqLinks. <name> may be a string or a list.'''
    if self.eqLinks and not neg:
        if isinstance(name, six.string_types):
            return [x for x in self.eqLinks if x.relation.name == name]
        elif isinstance(name, list):
            return [x for x in self.eqLinks if x.relation.name in name]
        else:
            return None  # should raise an error
    elif self.eqLinks and neg:
        if isinstance(name, six.string_types):
            return [x for x in self.eqLinks if x.relation.name != name]
        elif isinstance(name, list):
            return [x for x in self.eqLinks if x.relation.name not in name]
        else:
            return None  # should raise an error
    else:
        return None
python
{ "resource": "" }
q7728
Tree.get_root
train
def get_root( self, **kwargs ):
    ''' Returns this tree if it has no parents, or, alternatively, moves
        up via the parent links of this tree until reaching the tree with
        no parents, and returns the parentless tree as the root.
    '''
    if self.parent is None:
        return self
    else:
        return self.parent.get_root( **kwargs )
python
{ "resource": "" }
q7729
Tree.get_tree_depth
train
def get_tree_depth( self ):
    ''' Finds depth of this tree. '''
    if self.children:
        depth = 1
        childDepths = []
        for child in self.children:
            childDepths.append( child.get_tree_depth() )
        return depth + max(childDepths)
    else:
        return 0
python
{ "resource": "" }
q7730
Symbol.get_matches
train
def get_matches(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage):
    """Get the matches of the symbol on given text."""
    is_root_node = False
    if cache is None:
        cache = {}
        is_root_node = True
    if id(self) in cache:
        return cache[id(self)]
    matches = self.get_matches_without_cache(text, cache=cache)
    cache[id(self)] = matches
    # if this is the root node, resolve the matches
    if is_root_node and conflict_resolver is not None:
        return conflict_resolver(matches)
    return matches
python
{ "resource": "" }
q7731
Text.texts
train
def texts(self, layer, sep=' '):
    """Retrieve texts for given layer.

    Parameters
    ----------
    layer: str
        The name of the layer.
    sep: str
        Separator for multilayer elements (default: ' ').

    Returns
    -------
    list of str
        List of strings that make up given layer.
    """
    return self.texts_from_spans(self.spans(layer), sep)
python
{ "resource": "" }
q7732
Text.starts
train
def starts(self, layer):
    """Retrieve start positions of elements of given layer."""
    starts = []
    for data in self[layer]:
        starts.append(data[START])
    return starts
python
{ "resource": "" }
q7733
Text.ends
train
def ends(self, layer):
    """Retrieve end positions of elements of given layer."""
    ends = []
    for data in self[layer]:
        ends.append(data[END])
    return ends
python
{ "resource": "" }
q7734
Text.layer_tagger_mapping
train
def layer_tagger_mapping(self):
    """Dictionary that maps layer names to taggers that can create that layer."""
    return {
        PARAGRAPHS: self.tokenize_paragraphs,
        SENTENCES: self.tokenize_sentences,
        WORDS: self.tokenize_words,
        ANALYSIS: self.tag_analysis,
        TIMEXES: self.tag_timexes,
        NAMED_ENTITIES: self.tag_named_entities,
        CLAUSE_ANNOTATION: self.tag_clause_annotations,
        CLAUSES: self.tag_clauses,
        # per the tagger docstrings below, MaltParser fills LAYER_CONLL and
        # VISLCG3Parser fills LAYER_VISLCG3 (the original mapping had them swapped)
        LAYER_CONLL: self.tag_syntax_maltparser,
        LAYER_VISLCG3: self.tag_syntax_vislcg3,
        WORDNET: self.tag_wordnet
    }
python
{ "resource": "" }
q7735
Text.tag
train
def tag(self, layer):
    """Tag the annotations of given layer.
    It can automatically tag any built-in layer type."""
    mapping = self.layer_tagger_mapping
    if layer in mapping:
        mapping[layer]()
    return self
python
{ "resource": "" }
q7736
Text.tokenize_paragraphs
train
def tokenize_paragraphs(self):
    """Apply paragraph tokenization to this Text instance. Creates ``paragraphs`` layer."""
    tok = self.__paragraph_tokenizer
    spans = tok.span_tokenize(self.text)
    dicts = []
    for start, end in spans:
        dicts.append({'start': start, 'end': end})
    self[PARAGRAPHS] = dicts
    return self
python
{ "resource": "" }
q7737
Text.paragraph_texts
train
def paragraph_texts(self):
    """The list of texts representing ``paragraphs`` layer elements."""
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.texts(PARAGRAPHS)
python
{ "resource": "" }
q7738
Text.paragraph_spans
train
def paragraph_spans(self):
    """The list of spans representing ``paragraphs`` layer elements."""
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.spans(PARAGRAPHS)
python
{ "resource": "" }
q7739
Text.paragraph_starts
train
def paragraph_starts(self):
    """The start positions of ``paragraphs`` layer elements."""
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.starts(PARAGRAPHS)
python
{ "resource": "" }
q7740
Text.paragraph_ends
train
def paragraph_ends(self):
    """The end positions of ``paragraphs`` layer elements."""
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.ends(PARAGRAPHS)
python
{ "resource": "" }
q7741
Text.tokenize_sentences
train
def tokenize_sentences(self):
    """Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
       Automatically tokenizes paragraphs, if they are not already tokenized.
       Also, if word tokenization has already been performed, tries to fit
       the sentence tokenization into the existing word tokenization;
    """
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    tok = self.__sentence_tokenizer
    text = self.text
    dicts = []
    for paragraph in self[PARAGRAPHS]:
        para_start, para_end = paragraph[START], paragraph[END]
        para_text = text[para_start:para_end]
        if not self.is_tagged(WORDS):
            # Non-hack variant: word tokenization has not been applied yet,
            # so we proceed in natural order (first sentences, then words)
            spans = tok.span_tokenize(para_text)
            for start, end in spans:
                dicts.append({'start': start+para_start, 'end': end+para_start})
        else:
            # A hack variant: word tokenization has already been made, so
            # we try to use existing word tokenization (first words, then sentences)
            para_words = \
                [ w for w in self[WORDS] if w[START] >= para_start and w[END] <= para_end ]
            para_word_texts = \
                [ w[TEXT] for w in para_words ]
            try:
                # Apply sentences_from_tokens method (if available)
                sents = tok.sentences_from_tokens( para_word_texts )
            except AttributeError as e:
                raise
            # Align result of the sentence tokenization with the initial word
            # tokenization in order to determine the sentence boundaries
            i = 0
            for sentence in sents:
                j = 0
                firstToken = None
                lastToken  = None
                while i < len(para_words):
                    if para_words[i][TEXT] != sentence[j]:
                        raise Exception('Error on aligning: ', para_word_texts,
                                        ' and ', sentence, ' at positions ', i, j)
                    if j == 0:
                        firstToken = para_words[i]
                    if j == len(sentence) - 1:
                        lastToken = para_words[i]
                        i += 1
                        break
                    j += 1
                    i += 1
                sentenceDict = \
                    {'start': firstToken[START], 'end': lastToken[END]}
                dicts.append( sentenceDict )
    # Note: We also need to invalidate the cached properties providing the
    # sentence information, as otherwise, if the properties have been
    # called already, new calls would return the old state of sentence
    # tokenization;
    for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans',
                            'sentence_starts', 'sentence_ends']:
        try:
            # invalidate the cache
            delattr(self, sentence_attrib)
        except AttributeError:
            # it's ok, if the cached property has not been called yet
            pass
    self[SENTENCES] = dicts
    return self
python
{ "resource": "" }
q7742
Text.sentence_texts
train
def sentence_texts(self):
    """The list of texts representing ``sentences`` layer elements."""
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.texts(SENTENCES)
python
{ "resource": "" }
q7743
Text.sentence_spans
train
def sentence_spans(self):
    """The list of spans representing ``sentences`` layer elements."""
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.spans(SENTENCES)
python
{ "resource": "" }
q7744
Text.sentence_starts
train
def sentence_starts(self):
    """The list of start positions representing ``sentences`` layer elements."""
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.starts(SENTENCES)
python
{ "resource": "" }
q7745
Text.sentence_ends
train
def sentence_ends(self):
    """The list of end positions representing ``sentences`` layer elements."""
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.ends(SENTENCES)
python
{ "resource": "" }
q7746
Text.tokenize_words
train
def tokenize_words(self):
    """Apply word tokenization and create ``words`` layer.

    Automatically creates ``paragraphs`` and ``sentences`` layers.
    """
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    tok = self.__word_tokenizer
    text = self.text
    dicts = []
    for sentence in self[SENTENCES]:
        sent_start, sent_end = sentence[START], sentence[END]
        sent_text = text[sent_start:sent_end]
        spans = tok.span_tokenize(sent_text)
        for start, end in spans:
            dicts.append({START: start+sent_start, END: end+sent_start, TEXT: sent_text[start:end]})
    self[WORDS] = dicts
    return self
python
{ "resource": "" }
q7747
Text.tag_analysis
train
def tag_analysis(self):
    """Tag ``words`` layer with morphological analysis attributes."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    sentences = self.divide(WORDS, SENTENCES)
    for sentence in sentences:
        texts = [word[TEXT] for word in sentence]
        all_analysis = vabamorf.analyze(texts, **self.__kwargs)
        for word, analysis in zip(sentence, all_analysis):
            word[ANALYSIS] = analysis[ANALYSIS]
            word[TEXT] = analysis[TEXT]
    return self
python
{ "resource": "" }
q7748
Text.word_texts
train
def word_texts(self):
    """The list of words representing ``words`` layer elements."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [word[TEXT] for word in self[WORDS]]
python
{ "resource": "" }
q7749
Text.word_spans
train
def word_spans(self):
    """The list of spans representing ``words`` layer elements."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.spans(WORDS)
python
{ "resource": "" }
q7750
Text.word_starts
train
def word_starts(self):
    """The list of start positions representing ``words`` layer elements."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.starts(WORDS)
python
{ "resource": "" }
q7751
Text.word_ends
train
def word_ends(self):
    """The list of end positions representing ``words`` layer elements."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.ends(WORDS)
python
{ "resource": "" }
q7752
Text.analysis
train
def analysis(self):
    """The list of analyses of ``words`` layer elements."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [word[ANALYSIS] for word in self.words]
python
{ "resource": "" }
q7753
Text.get_analysis_element
train
def get_analysis_element(self, element, sep='|'):
    """The list of analysis elements of ``words`` layer.

    Parameters
    ----------
    element: str
        The name of the element, for example "lemma", "postag".
    sep: str
        The separator for ambiguous analysis (default: "|").
        As morphological analysis cannot always yield unambiguous results,
        we return ambiguous values separated by the pipe character as default.
    """
    return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words]
python
{ "resource": "" }
q7754
Text.roots
train
def roots(self):
    """The list of word roots.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element`
    to specify custom separator for ambiguous entries.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ROOT)
python
{ "resource": "" }
q7755
Text.lemmas
train
def lemmas(self):
    """The list of lemmas.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element`
    to specify custom separator for ambiguous entries.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(LEMMA)
python
{ "resource": "" }
q7756
Text.lemma_lists
train
def lemma_lists(self):
    """Lemma lists.

    Ambiguous cases are separate list elements.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]]
python
{ "resource": "" }
q7757
Text.endings
train
def endings(self):
    """The list of word endings.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element`
    to specify custom separator for ambiguous entries.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ENDING)
python
{ "resource": "" }
q7758
Text.forms
train
def forms(self):
    """The list of word forms.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element`
    to specify custom separator for ambiguous entries.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(FORM)
python
{ "resource": "" }
q7759
Text.postags
train
def postags(self):
    """The list of word part-of-speech tags.

    Ambiguous cases are separated with pipe character by default.
    Use :py:meth:`~estnltk.text.Text.get_analysis_element`
    to specify custom separator for ambiguous entries.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(POSTAG)
python
{ "resource": "" }
q7760
Text.postag_descriptions
train
def postag_descriptions(self):
    """Human-readable POS-tag descriptions."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [POSTAG_DESCRIPTIONS.get(tag, '') for tag in self.get_analysis_element(POSTAG)]
python
{ "resource": "" }
q7761
Text.root_tokens
train
def root_tokens(self):
    """Root tokens of word roots."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ROOT_TOKENS)
python
{ "resource": "" }
q7762
Text.descriptions
train
def descriptions(self):
    """Human readable word descriptions."""
    descs = []
    for postag, form in zip(self.postags, self.forms):
        desc = VERB_TYPES.get(form, '')
        if len(desc) == 0:
            toks = form.split(' ')
            if len(toks) == 2:
                plur_desc = PLURALITY.get(toks[0], None)
                case_desc = CASES.get(toks[1], None)
                toks = []
                if plur_desc is not None:
                    toks.append(plur_desc)
                if case_desc is not None:
                    toks.append(case_desc)
                desc = ' '.join(toks)
        descs.append(desc)
    return descs
python
{ "resource": "" }
q7763
Text.tag_syntax_vislcg3
train
def tag_syntax_vislcg3(self):
    """Changes the default syntactic parser to VISLCG3Parser, performs
    syntactic analysis, and stores the results in the layer named LAYER_VISLCG3."""
    if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, VISLCG3Parser):
        self.__syntactic_parser = VISLCG3Parser()
    return self.tag_syntax()
python
{ "resource": "" }
q7764
Text.tag_syntax_maltparser
train
def tag_syntax_maltparser(self):
    """Changes the default syntactic parser to MaltParser, performs
    syntactic analysis, and stores the results in the layer named LAYER_CONLL."""
    if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, MaltParser):
        self.__syntactic_parser = MaltParser()
    return self.tag_syntax()
python
{ "resource": "" }
q7765
Text.tag_labels
train
def tag_labels(self):
    """Tag named entity labels in the ``words`` layer."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__ner_tagger is None:
        self.__ner_tagger = load_default_ner_tagger()
    self.__ner_tagger.tag_document(self)
    return self
python
{ "resource": "" }
q7766
Text.labels
train
def labels(self):
    """Named entity labels."""
    if not self.is_tagged(LABEL):
        self.tag_labels()
    return [word[LABEL] for word in self.words]
python
{ "resource": "" }
q7767
Text.tag_named_entities
train
def tag_named_entities(self):
    """Tag ``named_entities`` layer.

    This automatically performs morphological analysis along with all
    dependencies.
    """
    if not self.is_tagged(LABEL):
        self.tag_labels()
    nes = []
    word_start = -1
    labels = self.labels + ['O']  # last is sentinel
    words = self.words
    label = 'O'
    for i, l in enumerate(labels):
        if l.startswith('B-') or l == 'O':
            if word_start != -1:
                nes.append({START: words[word_start][START],
                            END: words[i-1][END],
                            LABEL: label})
            if l.startswith('B-'):
                word_start = i
                label = l[2:]
            else:
                word_start = -1
    self[NAMED_ENTITIES] = nes
    return self
python
{ "resource": "" }
q7768
Text.named_entities
train
def named_entities(self):
    """The elements of ``named_entities`` layer."""
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    phrases = self.split_by(NAMED_ENTITIES)
    return [' '.join(phrase.lemmas) for phrase in phrases]
python
{ "resource": "" }
q7769
Text.named_entity_texts
train
def named_entity_texts(self):
    """The texts representing named entities."""
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return self.texts(NAMED_ENTITIES)
python
{ "resource": "" }
q7770
Text.named_entity_spans
train
def named_entity_spans(self):
    """The spans of named entities."""
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return self.spans(NAMED_ENTITIES)
python
{ "resource": "" }
q7771
Text.named_entity_labels
train
def named_entity_labels(self):
    """The named entity labels without BIO prefixes."""
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return [ne[LABEL] for ne in self[NAMED_ENTITIES]]
python
{ "resource": "" }
q7772
Text.tag_timexes
train
def tag_timexes(self):
    """Create ``timexes`` layer.
    Depends on morphological analysis data in ``words`` layer
    and tags it automatically, if it is not present."""
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if not self.is_tagged(TIMEXES):
        if self.__timex_tagger is None:
            self.__timex_tagger = load_default_timex_tagger()
        self.__timex_tagger.tag_document(self, **self.__kwargs)
    return self
python
{ "resource": "" }
q7773
Text.timex_starts
train
def timex_starts(self):
    """The list of start positions of ``timexes`` layer elements."""
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.starts(TIMEXES)
python
{ "resource": "" }
q7774
Text.timex_ends
train
def timex_ends(self):
    """The list of end positions of ``timexes`` layer elements."""
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.ends(TIMEXES)
python
{ "resource": "" }
q7775
Text.timex_spans
train
def timex_spans(self):
    """The list of spans of ``timexes`` layer elements."""
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.spans(TIMEXES)
python
{ "resource": "" }
q7776
Text.tag_clause_annotations
train
def tag_clause_annotations(self):
    """Tag clause annotations in ``words`` layer.
    Depends on morphological analysis.
    """
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__clause_segmenter is None:
        self.__clause_segmenter = load_default_clausesegmenter()
    return self.__clause_segmenter.tag(self)
python
{ "resource": "" }
q7777
Text.clause_annotations
train
def clause_annotations(self):
    """The list of clause annotations in ``words`` layer."""
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]]
python
{ "resource": "" }
q7778
Text.clause_indices
train
def clause_indices(self):
    """The list of clause indices in ``words`` layer.
    The indices are unique only within the boundaries of a single sentence.
    """
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
python
{ "resource": "" }
q7779
Text.tag_clauses
train
def tag_clauses(self):
    """Create ``clauses`` multilayer. Depends on clause annotations."""
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()

    def from_sentence(words):
        """Function that extracts clauses from a single sentence."""
        clauses = defaultdict(list)
        start = words[0][START]
        end = words[0][END]
        clause = words[0][CLAUSE_IDX]
        for word in words:
            if word[CLAUSE_IDX] != clause:
                clauses[clause].append((start, end))
                start, clause = word[START], word[CLAUSE_IDX]
            end = word[END]
        clauses[clause].append((start, words[-1][END]))
        clauses = [(key, {START: [s for s, e in clause], END: [e for s, e in clause]})
                   for key, clause in clauses.items()]
        return [v for k, v in sorted(clauses)]

    clauses = []
    sentences = self.divide()
    for sentence in sentences:
        clauses.extend(from_sentence(sentence))

    self[CLAUSES] = clauses
    return self
python
{ "resource": "" }
q7780
Text.tag_verb_chains
train
def tag_verb_chains(self):
    """Create ``verb_chains`` layer. Depends on ``clauses`` layer."""
    if not self.is_tagged(CLAUSES):
        self.tag_clauses()
    if self.__verbchain_detector is None:
        self.__verbchain_detector = load_default_verbchain_detector()
    sentences = self.divide()
    verbchains = []
    for sentence in sentences:
        chains = self.__verbchain_detector.detectVerbChainsFromSent( sentence )
        for chain in chains:
            # 1) Get spans for all words of the phrase
            word_spans = [ ( sentence[idx][START], sentence[idx][END] )
                           for idx in sorted( chain[PHRASE] ) ]
            # 2) Assign to the chain
            chain[START] = [ span[0] for span in word_spans ]
            chain[END]   = [ span[1] for span in word_spans ]
        verbchains.extend(chains)
    self[VERB_CHAINS] = verbchains
    return self
python
{ "resource": "" }
q7781
Text.verb_chain_texts
train
def verb_chain_texts(self):
    """The list of texts of ``verb_chains`` layer elements."""
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.texts(VERB_CHAINS)
python
{ "resource": "" }
q7782
Text.verb_chain_starts
train
def verb_chain_starts(self):
    """The start positions of ``verb_chains`` elements."""
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.starts(VERB_CHAINS)
python
{ "resource": "" }
q7783
Text.verb_chain_ends
train
def verb_chain_ends(self):
    """The end positions of ``verb_chains`` elements."""
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.ends(VERB_CHAINS)
python
{ "resource": "" }
q7784
Text.tag_wordnet
train
def tag_wordnet(self, **kwargs):
    """Create wordnet attribute in ``words`` layer.
    See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text`
    method for applicable keyword arguments.
    """
    global wordnet_tagger
    if wordnet_tagger is None:
        # cached wn tagger
        wordnet_tagger = WordnetTagger()
    self.__wordnet_tagger = wordnet_tagger
    if len(kwargs) > 0:
        return self.__wordnet_tagger.tag_text(self, **kwargs)
    return self.__wordnet_tagger.tag_text(self, **self.__kwargs)
python
{ "resource": "" }
q7785
Text.wordnet_annotations
train
def wordnet_annotations(self):
    """The list of wordnet annotations of ``words`` layer."""
    if not self.is_tagged(WORDNET):
        self.tag_wordnet()
    return [[a[WORDNET] for a in analysis] for analysis in self.analysis]
python
{ "resource": "" }
q7786
Text.synsets
train
def synsets(self):
    """The list of annotated synsets of ``words`` layer."""
    synsets = []
    for wn_annots in self.wordnet_annotations:
        word_synsets = []
        for wn_annot in wn_annots:
            for synset in wn_annot.get(SYNSETS, []):
                word_synsets.append(deepcopy(synset))
        synsets.append(word_synsets)
    return synsets
python
{ "resource": "" }
q7787
Text.word_literals
train
def word_literals(self):
    """The list of literals per word in ``words`` layer."""
    literals = []
    for word_synsets in self.synsets:
        word_literals = set()
        for synset in word_synsets:
            for variant in synset.get(SYN_VARIANTS):
                if LITERAL in variant:
                    word_literals.add(variant[LITERAL])
        literals.append(list(sorted(word_literals)))
    return literals
python
{ "resource": "" }
q7788
Text.spelling
train
def spelling(self):
    """Flag incorrectly spelled words.
    Returns a list of booleans, where the element at each position denotes
    whether the word at the same position is spelled correctly.
    """
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts, suggestions=False)]
python
{ "resource": "" }
q7789
Text.spelling_suggestions
train
def spelling_suggestions(self):
    """The list of spelling suggestions per misspelled word."""
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [data[SUGGESTIONS] for data in vabamorf.spellcheck(self.word_texts, suggestions=True)]
python
{ "resource": "" }
q7790
Text.fix_spelling
train
def fix_spelling(self):
    """Fix spelling of the text.

    Note that this method uses the first suggestion that is given for each
    misspelled word. It does not perform any sophisticated analysis to
    determine which one of the suggestions fits best into the context.

    Returns
    -------
    Text
        A copy of this instance with automatically fixed spelling.
    """
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    text = self.text
    fixed = vabamorf.fix_spelling(self.word_texts, join=False)
    spans = self.word_spans
    assert len(fixed) == len(spans)
    if len(spans) > 0:
        newtoks = []
        lastend = 0
        for fix, (start, end) in zip(fixed, spans):
            newtoks.append(text[lastend:start])
            newtoks.append(fix)
            lastend = end
        newtoks.append(text[lastend:])
        return Text(''.join(newtoks), **self.__kwargs)
    return self
python
{ "resource": "" }
q7791
Text.clean
train
def clean(self):
    """Return a copy of this Text instance with invalid characters removed."""
    return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs)
python
{ "resource": "" }
q7792
Text.split_given_spans
train
def split_given_spans(self, spans, sep=' '):
    """Split the text into several pieces.

    Resulting texts have all the layers that are present in the text instance
    being split. The elements are copied to the resulting pieces that are
    covered by their spans. However, this can result in empty layers if no
    element of a split layer fits into a span of a particular output piece.

    The positions of copied layer elements are translated according to the
    container span, so they are consistent with returned text lengths.

    Parameters
    ----------
    spans: list of spans
        The positions determining the regions that will end up as individual
        pieces. Spans themselves can be lists of spans, which denote
        multilayer-style text regions.
    sep: str
        The separator that is used to join together text pieces of multilayer
        spans.

    Returns
    -------
    list of Text
        One instance of Text per span.
    """
    N = len(spans)
    results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
    for elem in self:
        if isinstance(self[elem], list):
            splits = divide_by_spans(self[elem], spans, translate=True, sep=sep)
            for idx in range(N):
                results[idx][elem] = splits[idx]
    return [Text(res) for res in results]
python
{ "resource": "" }
q7793
Text.split_by
train
def split_by(self, layer, sep=' '):
    """Split the text into multiple instances defined by elements of given layer.

    The spans of the layer elements are extracted and fed to the
    :py:meth:`~estnltk.text.Text.split_given_spans` method.

    Parameters
    ----------
    layer: str
        String determining the layer that is used to define the start and end
        positions of resulting splits.
    sep: str (default: ' ')
        The separator to use to join texts of multilayer elements.

    Returns
    -------
    list of Text
    """
    if not self.is_tagged(layer):
        self.tag(layer)
    return self.split_given_spans(self.spans(layer), sep=sep)
python
{ "resource": "" }
q7794
Text.split_by_regex
train
def split_by_regex(self, regex_or_pattern, flags=re.U, gaps=True):
    """Split the text into multiple instances using a regex.

    Parameters
    ----------
    regex_or_pattern: str or compiled pattern
        The regular expression to use for splitting.
    flags: int (default: re.U)
        The regular expression flags (only used, when user has not supplied
        compiled regex).
    gaps: boolean (default: True)
        If True, then regions matched by the regex are not included in the
        resulting Text instances, which is the expected behaviour.
        If False, then only regions matched by the regex are included in the
        result.

    Returns
    -------
    list of Text
        The Text instances obtained by splitting.
    """
    text = self[TEXT]
    regex = regex_or_pattern
    if isinstance(regex, six.string_types):
        regex = re.compile(regex_or_pattern, flags=flags)
    # otherwise the argument is assumed to be a compiled pattern
    last_end = 0
    spans = []
    if gaps:
        # tag gap spans
        for mo in regex.finditer(text):
            start, end = mo.start(), mo.end()
            if start > last_end:
                spans.append((last_end, start))
            last_end = end
        if last_end < len(text):
            spans.append((last_end, len(text)))
    else:
        # use matched regions
        spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]
    return self.split_given_spans(spans)
python
{ "resource": "" }
q7795
Text.divide
train
def divide(self, layer=WORDS, by=SENTENCES):
    """Divide the Text into pieces by keeping references to original elements,
    when possible. This is only impossible if the divided layer element is a
    multispan.

    Parameters
    ----------
    layer: str
        The layer whose elements are collected and distributed into resulting
        bins.
    by: str
        Each resulting bin is defined by spans of this layer.

    Returns
    -------
    list of (list of dict)
    """
    if not self.is_tagged(layer):
        self.tag(layer)
    if not self.is_tagged(by):
        self.tag(by)
    return divide(self[layer], self[by])
python
{ "resource": "" }
q7796
resolve_using_maximal_coverage
train
def resolve_using_maximal_coverage(matches):
    """Given a list of matches, select a subset of matches such that there are
    no overlaps and the total number of covered characters is maximal.

    Parameters
    ----------
    matches: list of Match

    Returns
    -------
    list of Match
    """
    if len(matches) == 0:
        return matches
    matches.sort()
    N = len(matches)
    scores = [len(match) for match in matches]
    prev = [-1] * N
    for i in range(1, N):
        bestscore = -1
        bestprev = -1
        j = i
        while j >= 0:
            # if matches do not overlap
            if matches[j].is_before(matches[i]):
                l = scores[j] + len(matches[i])
                if l >= bestscore:
                    bestscore = l
                    bestprev = j
            else:
                # in case of overlapping matches
                l = scores[j] - len(matches[j]) + len(matches[i])
                if l >= bestscore:
                    bestscore = l
                    bestprev = prev[j]
            j = j - 1
        scores[i] = bestscore
        prev[i] = bestprev
    # first find the matching with the highest combined score
    bestscore = max(scores)
    bestidx = len(scores) - scores[-1::-1].index(bestscore) - 1
    # then backtrack the non-conflicting matchings that should be kept
    keepidxs = [bestidx]
    bestidx = prev[bestidx]
    while bestidx != -1:
        keepidxs.append(bestidx)
        bestidx = prev[bestidx]
    # filter the matches
    return [matches[idx] for idx in reversed(keepidxs)]
python
{ "resource": "" }
q7797
_isFollowedByComma
train
def _isFollowedByComma( wordID, clauseTokens ):
    ''' Determines whether the word with the given ID is immediately followed
        by a comma; returns True if so, otherwise False.
    '''
    koma = WordTemplate({ROOT: '^,+$', POSTAG: 'Z'})
    for i in range(len(clauseTokens)):
        token = clauseTokens[i]
        if token[WORD_ID] == wordID:
            if re.match('^.*,$', token[TEXT]):
                return True
            elif i+1 < len(clauseTokens) and koma.matches(clauseTokens[i+1]):
                return True
            break
    return False
python
{ "resource": "" }
q7798
is_valid_regex
train
def is_valid_regex(regex):
    """Function for checking a valid regex."""
    if len(regex) == 0:
        return False
    try:
        re.compile(regex)
        return True
    except sre_constants.error:
        return False
python
{ "resource": "" }
q7799
iterate_intersecting_pairs
train
def iterate_intersecting_pairs(layer):
    """Given a layer of estnltk objects, yields pairwise intersecting elements.
    Breaks when the layer is changed or deleted after initializing the iterator.
    """
    yielded = set()
    ri = layer[:]  # shallow copy of the layer
    for i1, elem1 in enumerate(ri):
        for i2, elem2 in enumerate(ri):
            if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:
                inds = (i1, i2) if i1 < i2 else (i2, i1)
                if inds not in yielded and in_by_identity(layer, elem1) and in_by_identity(layer, elem2):
                    yielded.add(inds)
                    yield elem1, elem2
python
{ "resource": "" }