| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
return self.ends(SENTENCES)
|
def sentence_ends(self)
|
The list of end positions representing ``sentences`` layer elements.
| 9.602919
| 8.326162
| 1.153343
|
if not self.is_tagged(SENTENCES):
self.tokenize_sentences()
tok = self.__word_tokenizer
text = self.text
dicts = []
for sentence in self[SENTENCES]:
sent_start, sent_end = sentence[START], sentence[END]
sent_text = text[sent_start:sent_end]
spans = tok.span_tokenize(sent_text)
for start, end in spans:
dicts.append({START: start+sent_start, END: end+sent_start, TEXT: sent_text[start:end]})
self[WORDS] = dicts
return self
|
def tokenize_words(self)
|
Apply word tokenization and create ``words`` layer.
Automatically creates ``paragraphs`` and ``sentences`` layers.
| 2.905329
| 3.083292
| 0.942282
|
if not self.is_tagged(WORDS):
self.tokenize_words()
sentences = self.divide(WORDS, SENTENCES)
for sentence in sentences:
texts = [word[TEXT] for word in sentence]
all_analysis = vabamorf.analyze(texts, **self.__kwargs)
for word, analysis in zip(sentence, all_analysis):
word[ANALYSIS] = analysis[ANALYSIS]
word[TEXT] = analysis[TEXT]
return self
|
def tag_analysis(self)
|
Tag ``words`` layer with morphological analysis attributes.
| 5.60114
| 4.981396
| 1.124412
|
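A minimal usage sketch of the morphological analysis entry point shown above (assuming estnltk 1.x and its bundled Estonian models are installed; the sentence and the printed output are illustrative only):

    from estnltk import Text

    text = Text('Tere, maailm!')
    text.tag_analysis()      # runs tokenization and vabamorf-based morphological analysis
    print(text.lemmas)       # e.g. ['tere', ',', 'maailm', '!']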
if not self.is_tagged(WORDS):
self.tokenize_words()
return [word[TEXT] for word in self[WORDS]]
|
def word_texts(self)
|
The list of words representing ``words`` layer elements.
| 7.440989
| 7.067214
| 1.052889
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.spans(WORDS)
|
def word_spans(self)
|
The list of spans representing ``words`` layer elements.
| 10.214702
| 8.396372
| 1.216561
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.starts(WORDS)
|
def word_starts(self)
|
The list of start positions representing ``words`` layer elements.
| 11.414009
| 9.312495
| 1.225666
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return self.ends(WORDS)
|
def word_ends(self)
|
The list of end positions representing ``words`` layer elements.
| 11.688253
| 9.339711
| 1.251458
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [word[ANALYSIS] for word in self.words]
|
def analysis(self)
|
The list of analyses of ``words`` layer elements.
| 8.028769
| 6.093833
| 1.317524
|
return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words]
|
def get_analysis_element(self, element, sep='|')
|
The list of analysis elements of ``words`` layer.
Parameters
----------
element: str
The name of the element, for example "lemma", "postag".
sep: str
The separator for ambiguous analysis (default: "|").
As morphological analysis cannot always yield unambiguous results, ambiguous
values are returned separated by the pipe character by default.
| 13.524803
| 13.621579
| 0.992895
|
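A short, hedged example of the separator parameter described above (assumes estnltk 1.x; the element name strings 'lemma' and 'partofspeech' mirror the constants used in the listing):

    from estnltk import Text

    text = Text('Ta läks koju.')
    text.tag_analysis()   # make sure the analysis layer is present
    # ambiguous analyses are joined with the chosen separator instead of the default '|'
    print(text.get_analysis_element('lemma', sep='/'))
    print(text.get_analysis_element('partofspeech'))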
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT)
|
def roots(self)
|
The list of word roots.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
| 9.868089
| 7.885109
| 1.251484
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(LEMMA)
|
def lemmas(self)
|
The list of lemmas.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
| 8.198957
| 7.163317
| 1.144576
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]]
|
def lemma_lists(self)
|
Lemma lists.
Ambiguous cases are separate list elements.
| 7.932904
| 7.888155
| 1.005673
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING)
|
def endings(self)
|
The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
| 10.91272
| 8.58129
| 1.271688
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(FORM)
|
def forms(self)
|
The list of word forms.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
| 10.575577
| 8.01328
| 1.319756
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(POSTAG)
|
def postags(self)
|
The list of word part-of-speech tags.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
| 9.45598
| 7.880378
| 1.19994
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return [POSTAG_DESCRIPTIONS.get(tag, '') for tag in self.get_analysis_element(POSTAG)]
|
def postag_descriptions(self)
|
Human-readable POS-tag descriptions.
| 6.727025
| 6.288078
| 1.069806
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ROOT_TOKENS)
|
def root_tokens(self)
|
Root tokens of word roots.
| 8.7062
| 6.97389
| 1.248399
|
descs = []
for postag, form in zip(self.postags, self.forms):
desc = VERB_TYPES.get(form, '')
if len(desc) == 0:
toks = form.split(' ')
if len(toks) == 2:
plur_desc = PLURALITY.get(toks[0], None)
case_desc = CASES.get(toks[1], None)
toks = []
if plur_desc is not None:
toks.append(plur_desc)
if case_desc is not None:
toks.append(case_desc)
desc = ' '.join(toks)
descs.append(desc)
return descs
|
def descriptions(self)
|
Human readable word descriptions.
| 2.673469
| 2.619952
| 1.020427
|
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, VISLCG3Parser):
self.__syntactic_parser = VISLCG3Parser()
return self.tag_syntax()
|
def tag_syntax_vislcg3(self)
|
Changes default syntactic parser to VISLCG3Parser, performs syntactic analysis,
and stores the results in the layer named LAYER_VISLCG3.
| 4.206241
| 3.095227
| 1.358944
|
if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, MaltParser):
self.__syntactic_parser = MaltParser()
return self.tag_syntax()
|
def tag_syntax_maltparser(self)
|
Changes default syntactic parser to MaltParser, performs syntactic analysis,
and stores the results in the layer named LAYER_CONLL.
| 3.968974
| 3.583783
| 1.107482
|
# Load default Syntactic tagger:
if self.__syntactic_parser is None:
self.__syntactic_parser = load_default_syntactic_parser()
if not self.is_tagged(ANALYSIS):
if isinstance(self.__syntactic_parser, MaltParser):
# By default: Use disambiguation for MaltParser's input
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = True
self.tag_analysis()
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
# By default: Do not use disambiguation for VISLCG3Parser's input
# (VISLCG3 already does its own rule-based disambiguation)
if 'disambiguate' not in self.__kwargs:
self.__kwargs['disambiguate'] = False
self.tag_analysis()
return self.__syntactic_parser.parse_text( self, **self.__kwargs )
|
def tag_syntax(self)
|
Parses this text with the syntactic analyzer (``self.__syntactic_parser``) and stores the
resulting syntactic analyses in the layer LAYER_CONLL (if MaltParser is used, the default)
or in the layer LAYER_VISLCG3 (if VISLCG3Parser is used).
| 4.137531
| 3.152065
| 1.312642
|
# If no layer specified, decide the layer based on the type of syntactic
# analyzer used:
if not layer and self.__syntactic_parser:
if isinstance(self.__syntactic_parser, MaltParser):
layer = LAYER_CONLL
elif isinstance(self.__syntactic_parser, VISLCG3Parser):
layer = LAYER_VISLCG3
# If no syntactic analyzer available, pick the layer as the first syntactic
# layer available:
if not layer and self.is_tagged(LAYER_CONLL):
layer = LAYER_CONLL
elif not layer and self.is_tagged(LAYER_VISLCG3):
layer = LAYER_VISLCG3
# Based on the chosen layer, perform the syntactic analysis (if necessary)
# and return the results packaged as tree objects;
if layer:
if layer==LAYER_CONLL:
if not self.is_tagged(layer):
self.tag_syntax_maltparser()
return self.syntax_trees_conll
elif layer==LAYER_VISLCG3:
if not self.is_tagged(layer):
self.tag_syntax_vislcg3()
return self.syntax_trees_vislcg3
else:
raise ValueError('(!) Unexpected layer name: '+str(layer))
else:
raise ValueError('(!) Missing layer name! ')
|
def syntax_trees( self, layer=None )
|
Builds syntactic trees (estnltk.syntax.utils.Tree objects) from
syntactic annotations and returns them as a list.
If the input argument *layer* is not specified, the type of the
syntactic parser is used to decide which syntactic analysis layer
should be produced and taken as the basis for building syntactic trees.
If a syntactic parser is not available, a missing *layer* name
is replaced by the first syntactic layer available (first LAYER_CONLL,
then LAYER_VISLCG3).
Otherwise, the *layer* must be provided by the user and it must be
either LAYER_CONLL or LAYER_VISLCG3.
| 3.69506
| 2.961025
| 1.247899
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__ner_tagger is None:
self.__ner_tagger = load_default_ner_tagger()
self.__ner_tagger.tag_document(self)
return self
|
def tag_labels(self)
|
Tag named entity labels in the ``words`` layer.
| 4.737818
| 4.932122
| 0.960604
|
if not self.is_tagged(LABEL):
self.tag_labels()
return [word[LABEL] for word in self.words]
|
def labels(self)
|
Named entity labels.
| 7.946763
| 7.098159
| 1.119553
|
if not self.is_tagged(LABEL):
self.tag_labels()
nes = []
word_start = -1
labels = self.labels + ['O'] # last is sentinel
words = self.words
label = 'O'
for i, l in enumerate(labels):
if l.startswith('B-') or l == 'O':
if word_start != -1:
nes.append({START: words[word_start][START],
END: words[i-1][END],
LABEL: label})
if l.startswith('B-'):
word_start = i
label = l[2:]
else:
word_start = -1
self[NAMED_ENTITIES] = nes
return self
|
def tag_named_entities(self)
|
Tag ``named_entities`` layer.
This automatically performs morphological analysis along with all dependencies.
| 3.413675
| 3.574474
| 0.955015
|
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
phrases = self.split_by(NAMED_ENTITIES)
return [' '.join(phrase.lemmas) for phrase in phrases]
|
def named_entities(self)
|
The elements of ``named_entities`` layer.
| 5.189067
| 5.773529
| 0.898769
|
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.texts(NAMED_ENTITIES)
|
def named_entity_texts(self)
|
The texts representing named entities.
| 6.399909
| 5.396507
| 1.185935
|
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return self.spans(NAMED_ENTITIES)
|
def named_entity_spans(self)
|
The spans of named entities.
| 5.778366
| 4.866386
| 1.187404
|
if not self.is_tagged(NAMED_ENTITIES):
self.tag_named_entities()
return [ne[LABEL] for ne in self[NAMED_ENTITIES]]
|
def named_entity_labels(self)
|
The named entity labels without BIO prefixes.
| 5.44577
| 5.41212
| 1.006218
|
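A hedged end-to-end sketch of the named-entity accessors listed above (assumes estnltk 1.x with its default NER model available; the example sentence and the printed labels are illustrative):

    from estnltk import Text

    text = Text('Barack Obama külastas Tallinna.')
    text.tag_named_entities()
    print(text.named_entities)        # lemmatized entity phrases
    print(text.named_entity_labels)   # e.g. ['PER', 'LOC']
    print(text.named_entity_spans)    # (start, end) character positions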
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if not self.is_tagged(TIMEXES):
if self.__timex_tagger is None:
self.__timex_tagger = load_default_timex_tagger()
self.__timex_tagger.tag_document(self, **self.__kwargs)
return self
|
def tag_timexes(self)
|
Create ``timexes`` layer.
Depends on morphological analysis data in ``words`` layer
and tags it automatically, if it is not present.
| 3.65522
| 3.356699
| 1.088933
|
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.starts(TIMEXES)
|
def timex_starts(self)
|
The list of start positions of ``timexes`` layer elements.
| 6.712165
| 5.669748
| 1.183856
|
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.ends(TIMEXES)
|
def timex_ends(self)
|
The list of end positions of ``timexes`` layer elements.
| 6.832896
| 5.495977
| 1.243254
|
if not self.is_tagged(TIMEXES):
self.tag_timexes()
return self.spans(TIMEXES)
|
def timex_spans(self)
|
The list of spans of ``timexes`` layer elements.
| 6.587449
| 5.109755
| 1.289191
|
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
if self.__clause_segmenter is None:
self.__clause_segmenter = load_default_clausesegmenter()
return self.__clause_segmenter.tag(self)
|
def tag_clause_annotations(self)
|
Tag clause annotations in ``words`` layer.
Depends on morphological analysis.
| 5.847927
| 5.43726
| 1.075528
|
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]]
|
def clause_annotations(self)
|
The list of clause annotations in ``words`` layer.
| 5.865542
| 4.300431
| 1.363943
|
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
|
def clause_indices(self)
|
The list of clause indices in ``words`` layer.
The indices are unique only within a single sentence.
| 8.523927
| 7.530911
| 1.131859
|
if not self.is_tagged(CLAUSE_ANNOTATION):
self.tag_clause_annotations()
def from_sentence(words):
clauses = defaultdict(list)
start = words[0][START]
end = words[0][END]
clause = words[0][CLAUSE_IDX]
for word in words:
if word[CLAUSE_IDX] != clause:
clauses[clause].append((start, end))
start, clause = word[START], word[CLAUSE_IDX]
end = word[END]
clauses[clause].append((start, words[-1][END]))
clauses = [(key, {START: [s for s, e in clause], END: [e for s, e in clause]}) for key, clause in clauses.items()]
return [v for k, v in sorted(clauses)]
clauses = []
sentences = self.divide()
for sentence in sentences:
clauses.extend(from_sentence(sentence))
self[CLAUSES] = clauses
return self
|
def tag_clauses(self)
|
Create ``clauses`` multilayer.
Depends on clause annotations.
| 3.337173
| 3.227042
| 1.034128
|
if not self.is_tagged(CLAUSES):
self.tag_clauses()
return self.texts(CLAUSES)
|
def clause_texts(self)
|
The texts of ``clauses`` multilayer elements.
Non-consecutive spans are concatenated with a space character by default.
Use :py:meth:`~estnltk.text.Text.texts` method to supply custom separators.
| 7.811337
| 6.917614
| 1.129195
|
if not self.is_tagged(CLAUSES):
self.tag_clauses()
if self.__verbchain_detector is None:
self.__verbchain_detector = load_default_verbchain_detector()
sentences = self.divide()
verbchains = []
for sentence in sentences:
chains = self.__verbchain_detector.detectVerbChainsFromSent( sentence )
for chain in chains:
# 1) Get spans for all words of the phrase
word_spans = [ ( sentence[idx][START], sentence[idx][END] ) \
for idx in sorted( chain[PHRASE] ) ]
# 2) Assign to the chain
chain[START] = [ span[0] for span in word_spans ]
chain[END] = [ span[1] for span in word_spans ]
verbchains.extend(chains)
self[VERB_CHAINS] = verbchains
return self
|
def tag_verb_chains(self)
|
Create ``verb_chains`` layer.
Depends on ``clauses`` layer.
| 4.373527
| 4.162538
| 1.050688
|
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.texts(VERB_CHAINS)
|
def verb_chain_texts(self)
|
The list of texts of ``verb_chains`` layer elements.
| 5.602717
| 4.80489
| 1.166045
|
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.starts(VERB_CHAINS)
|
def verb_chain_starts(self)
|
The start positions of ``verb_chains`` elements.
| 5.668495
| 4.960574
| 1.142709
|
if not self.is_tagged(VERB_CHAINS):
self.tag_verb_chains()
return self.ends(VERB_CHAINS)
|
def verb_chain_ends(self)
|
The end positions of ``verb_chains`` elements.
| 6.018543
| 5.06424
| 1.188439
|
global wordnet_tagger
if wordnet_tagger is None: # cached wn tagger
wordnet_tagger = WordnetTagger()
self.__wordnet_tagger = wordnet_tagger
if len(kwargs) > 0:
return self.__wordnet_tagger.tag_text(self, **kwargs)
return self.__wordnet_tagger.tag_text(self, **self.__kwargs)
|
def tag_wordnet(self, **kwargs)
|
Create wordnet attribute in ``words`` layer.
See :py:meth:`~estnltk.text.wordnet_tagger.WordnetTagger.tag_text` method
for applicable keyword arguments.
| 2.706024
| 2.676852
| 1.010898
|
if not self.is_tagged(WORDNET):
self.tag_wordnet()
return [[a[WORDNET] for a in analysis] for analysis in self.analysis]
|
def wordnet_annotations(self)
|
The list of wordnet annotations of ``words`` layer.
| 7.340646
| 7.761967
| 0.94572
|
synsets = []
for wn_annots in self.wordnet_annotations:
word_synsets = []
for wn_annot in wn_annots:
for synset in wn_annot.get(SYNSETS, []):
word_synsets.append(deepcopy(synset))
synsets.append(word_synsets)
return synsets
|
def synsets(self)
|
The list of annotated synsets of ``words`` layer.
| 2.882998
| 2.728399
| 1.056663
|
literals = []
for word_synsets in self.synsets:
word_literals = set()
for synset in word_synsets:
for variant in synset.get(SYN_VARIANTS):
if LITERAL in variant:
word_literals.add(variant[LITERAL])
literals.append(list(sorted(word_literals)))
return literals
|
def word_literals(self)
|
The list of literals per word in ``words`` layer.
| 3.743751
| 3.576028
| 1.046902
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts, suggestions=False)]
|
def spelling(self)
|
Flag incorrectly spelled words.
Returns a list of booleans, where the element at each position denotes whether the word
at that position is spelled correctly.
| 20.00601
| 19.569744
| 1.022293
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return [data[SUGGESTIONS] for data in vabamorf.spellcheck(self.word_texts, suggestions=True)]
|
def spelling_suggestions(self)
|
The list of spelling suggestions per misspelled word.
| 18.134089
| 15.39106
| 1.178222
|
if not self.is_tagged(WORDS):
self.tokenize_words()
return vabamorf.spellcheck(self.word_texts, suggestions=True)
|
def spellcheck_results(self)
|
The full spellcheck results (spelling flag and suggestions) for each word.
| 21.114262
| 18.922626
| 1.115821
|
if not self.is_tagged(WORDS):
self.tokenize_words()
text = self.text
fixed = vabamorf.fix_spelling(self.word_texts, join=False)
spans = self.word_spans
assert len(fixed) == len(spans)
if len(spans) > 0:
newtoks = []
lastend = 0
for fix, (start, end) in zip(fixed, spans):
newtoks.append(text[lastend:start])
newtoks.append(fix)
lastend = end
newtoks.append(text[lastend:])
return Text(''.join(newtoks), **self.__kwargs)
return self
|
def fix_spelling(self)
|
Fix spelling of the text.
Note that this method uses the first suggestion that is given for each misspelled word.
It does not perform any sophisticated analysis to determine which one of the suggestions
fits best into the context.
Returns
-------
Text
A copy of this instance with automatically fixed spelling.
| 4.017245
| 4.08734
| 0.982851
|
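A small sketch tying together the spelling properties above (assumes estnltk 1.x with vabamorf available; the misspelling and the outputs are illustrative):

    from estnltk import Text

    text = Text('Ma tahhan koju minna')
    print(text.spelling)               # e.g. [True, False, True, True]
    print(text.spelling_suggestions)   # suggestion lists per word
    fixed = text.fix_spelling()        # new Text with the first suggestion applied to each misspelling
    print(fixed.text)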
return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs)
|
def clean(self)
|
Return a copy of this Text instance with invalid characters removed.
| 17.501024
| 10.680278
| 1.63863
|
N = len(spans)
results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
for elem in self:
if isinstance(self[elem], list):
splits = divide_by_spans(self[elem], spans, translate=True, sep=sep)
for idx in range(N):
results[idx][elem] = splits[idx]
return [Text(res) for res in results]
|
def split_given_spans(self, spans, sep=' ')
|
Split the text into several pieces.
The resulting texts have all the layers present in the text instance that is split.
Elements are copied into the resulting pieces whose spans cover them.
However, this can result in empty layers if no element of a split layer fits into
the span of a particular output piece.
The positions of the copied layer elements are translated according to the containing span,
so they are consistent with the returned text lengths.
Parameters
----------
spans: list of spans.
The positions determining the regions that will end up as individual pieces.
Spans themselves can be lists of spans, which denote multilayer-style text regions.
sep: str
The separator that is used to join together text pieces of multilayer spans.
Returns
-------
list of Text
One instance of text per span.
| 4.435038
| 4.639766
| 0.955875
|
if not self.is_tagged(layer):
self.tag(layer)
return self.split_given_spans(self.spans(layer), sep=sep)
|
def split_by(self, layer, sep=' ')
|
Split the text into multiple instances defined by the elements of the given layer.
The spans of the layer elements are extracted and fed to the :py:meth:`~estnltk.text.Text.split_given_spans`
method.
Parameters
----------
layer: str
String determining the layer that is used to define the start and end positions of resulting splits.
sep: str (default: ' ')
The separator to use to join texts of multilayer elements.
Returns
-------
list of Text
| 6.4934
| 5.547598
| 1.170488
|
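A hedged example of splitting by a layer (assumes estnltk 1.x; the layer name 'sentences' mirrors the SENTENCES constant used in the listing):

    from estnltk import Text

    text = Text('Esimene lause. Teine lause.')
    pieces = text.split_by('sentences')     # one Text instance per sentence
    print([p.text for p in pieces])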
text = self[TEXT]
regex = regex_or_pattern
if isinstance(regex, six.string_types):
regex = re.compile(regex_or_pattern, flags=flags)
# else is assumed pattern
last_end = 0
spans = []
if gaps: # tag cap spans
for mo in regex.finditer(text):
start, end = mo.start(), mo.end()
if start > last_end:
spans.append((last_end, start))
last_end = end
if last_end < len(text):
spans.append((last_end, len(text)))
else: # use matched regions
spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]
return self.split_given_spans(spans)
|
def split_by_regex(self, regex_or_pattern, flags=re.U, gaps=True)
|
Split the text into multiple instances using a regex.
Parameters
----------
regex_or_pattern: str or compiled pattern
The regular expression to use for splitting.
flags: int (default: re.U)
The regular expression flags (only used, when user has not supplied compiled regex).
gaps: boolean (default: True)
If True, the regions matched by the regex are excluded from the resulting Text instances,
which is the usual splitting behaviour.
If False, only the regions matched by the regex are included in the result.
Returns
-------
list of Text
The Text instances obtained by splitting.
| 3.002078
| 3.16471
| 0.948611
|
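A sketch of the ``gaps`` parameter described above (assumes estnltk 1.x; outputs are illustrative):

    from estnltk import Text

    text = Text('üks,kaks,kolm')
    print([p.text for p in text.split_by_regex(',', gaps=True)])   # ['üks', 'kaks', 'kolm']
    print([p.text for p in text.split_by_regex(',', gaps=False)])  # [',', ','] (only the matched regions)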
if not self.is_tagged(layer):
self.tag(layer)
if not self.is_tagged(by):
self.tag(by)
return divide(self[layer], self[by])
|
def divide(self, layer=WORDS, by=SENTENCES)
|
Divide the Text into pieces, keeping references to the original elements when possible.
This is impossible only if the _element_ is a multispan.
Parameters
----------
layer: str
The layer whose elements are collected and distributed into the resulting bins.
by: str
Each resulting bin is defined by spans of this element.
Returns
-------
list of (list of dict)
| 3.065629
| 4.333745
| 0.707386
|
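A hedged usage sketch of dividing one layer by another (assumes estnltk 1.x; the strings 'words', 'sentences' and 'text' mirror the WORDS, SENTENCES and TEXT constants used in the listing):

    from estnltk import Text

    text = Text('Esimene lause. Teine lause.')
    for sentence_words in text.divide('words', 'sentences'):
        print([w['text'] for w in sentence_words])   # word dicts grouped per sentence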
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result
|
def create_tags_with_concatenated_css_classes(tags)
|
Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
| 2.423793
| 2.415333
| 1.003503
|
if len(matches) == 0:
return matches
matches.sort()
N = len(matches)
scores = [len(match) for match in matches]
prev = [-1] * N
for i in range(1, N):
bestscore = -1
bestprev = -1
j = i
while j >= 0:
# if matches do not overlap
if matches[j].is_before(matches[i]):
l = scores[j] + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = j
else:
# in case of overlapping matches
l = scores[j] - len(matches[j]) + len(matches[i])
if l >= bestscore:
bestscore = l
bestprev = prev[j]
j = j - 1
scores[i] = bestscore
prev[i] = bestprev
# first find the matching with highest combined score
bestscore = max(scores)
bestidx = len(scores) - scores[-1::-1].index(bestscore) -1
# then backtrack the non-conflicting matchings that should be kept
keepidxs = [bestidx]
bestidx = prev[bestidx]
while bestidx != -1:
keepidxs.append(bestidx)
bestidx = prev[bestidx]
# filter the matches
return [matches[idx] for idx in reversed(keepidxs)]
|
def resolve_using_maximal_coverage(matches)
|
Given a list of matches, select a subset of matches
such that there are no overlaps and the total number of
covered characters is maximal.
Parameters
----------
matches: list of Match
Returns
--------
list of Match
| 3.04951
| 3.134942
| 0.972749
|
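The function above is a dynamic-programming selection over Match objects. Below is a self-contained sketch of the same idea on plain (start, end) tuples, scored by covered length; it is illustrative only and not the estnltk implementation:

    def select_maximal_coverage(spans):
        """Pick non-overlapping (start, end) spans whose total covered length is maximal."""
        spans = sorted(spans, key=lambda s: s[1])        # sort by end position
        n = len(spans)
        best = [0] * (n + 1)                             # best[i]: best coverage using spans[:i]
        choice = [None] * (n + 1)
        for i, (start, end) in enumerate(spans, 1):
            j = i - 1                                    # last span ending at or before this start
            while j > 0 and spans[j - 1][1] > start:
                j -= 1
            take = best[j] + (end - start)
            skip = best[i - 1]
            if take >= skip:
                best[i], choice[i] = take, ('take', j)
            else:
                best[i], choice[i] = skip, ('skip', None)
        chosen, i = [], n                                # backtrack the chosen spans
        while i > 0:
            kind, j = choice[i]
            if kind == 'take':
                chosen.append(spans[i - 1])
                i = j
            else:
                i -= 1
        return list(reversed(chosen))

    # (0, 5) and (3, 9) overlap; the best non-overlapping pick is (0, 5) + (5, 12)
    print(select_maximal_coverage([(0, 5), (3, 9), (5, 12)]))   # [(0, 5), (5, 12)]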
def _isSeparatedByPossibleClauseBreakers( tokens, wordID1, wordID2, punctForbidden = True, \
commaForbidden = True, \
conjWordsForbidden = True ):
'''
Teeb kindlaks, kas j2rjendi tokens s6naindeksite vahemikus [wordID1, wordID2) (vahemiku
algus on inklusiivne) leidub sides6nu (ja/ning/ega/v6i), punktuatsiooni (koma,
sidekriipsud, koolon, kolm j2rjestikkust punkti) v6i adverbe-sidendeid aga/kuid/vaid;
Lippudega saab kontrolli l6dvendada:
*) punctForbidden=False lylitab v2lja punktuatsiooni ( kirjavahem2rgid v.a koma )
kontrolli;
*) commaForbidden=False lylitab v2lja koma kontrolli ( ei puuduta teisi kirjavahem2rke )
kontrolli;
*) conjWordsForbidden=False lylitab v2lja sides6nade ja adverb-sidendite kontrolli;
Tagastab True, kui leidub kasv6i yks eelnimetatud juhtudest, vastasel juhul False;
'''
global _breakerJaNingEgaVoi, _breakerAgaKuidVaid, _breakerKomaLopus, _breakerPunktuats
minWID = min(wordID1, wordID2)
maxWID = max(wordID1, wordID2)
insideCheckArea = False
for i in range(len(tokens)):
token = tokens[i]
if token[WORD_ID] >= minWID:
insideCheckArea = True
if token[WORD_ID] >= maxWID:
insideCheckArea = False
if insideCheckArea:
if punctForbidden and _breakerPunktuats.matches(token):
return True
if commaForbidden and _breakerKomaLopus.matches(token):
return True
if conjWordsForbidden and (_breakerAgaKuidVaid.matches(token) or \
_breakerJaNingEgaVoi.matches(token)):
return True
return False
|
Determines whether the token list ``tokens``, within the word-index range [wordID1, wordID2) (the
start of the range is inclusive), contains conjunctions (ja/ning/ega/või), punctuation (comma,
dashes, colon, three consecutive dots) or the adverb-connectives aga/kuid/vaid;
The check can be relaxed with flags:
*) punctForbidden=False turns off the punctuation check (punctuation marks other than the comma);
*) commaForbidden=False turns off the comma check (does not affect other punctuation marks);
*) conjWordsForbidden=False turns off the check for conjunctions and adverb-connectives;
Returns True if at least one of the cases listed above is found, otherwise False;
| null | null | null |
|
def _isClauseFinal( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6na on osalause l6pus:
-- s6nale ei j2rgne ykski teine s6na;
-- s6nale j2rgnevad vaid punktuatsioonim2rgid ja/v6i sidendid JA/NING/EGA/VÕI;
Tagastab True, kui eeltoodud tingimused on t2idetud, vastasel juhul False;
'''
jaNingEgaVoi = WordTemplate({ROOT:'^(ja|ning|ega|v[\u014D\u00F5]i)$',POSTAG:'[DJ]'})
punktuatsioon = WordTemplate({POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if i+1 == len(clauseTokens):
return True
else:
for j in range(i+1, len(clauseTokens)):
token2 = clauseTokens[j]
if not (jaNingEgaVoi.matches(token2) or punktuatsioon.matches(token2)):
return False
return True
return False
|
Determines whether the word with the given ID is at the end of a clause:
-- no other word follows the word;
-- the word is followed only by punctuation marks and/or the connectives JA/NING/EGA/VÕI;
Returns True if the above conditions are met, otherwise False;
| null | null | null |
|
def _isFollowedByComma( wordID, clauseTokens ):
'''
Teeb kindlaks, kas etteantud ID-ga s6nale j2rgneb vahetult koma;
Tagastab True, kui eeltoodud tingimus on t2idetud, vastasel juhul False;
'''
koma = WordTemplate({ROOT:'^,+$', POSTAG:'Z'})
for i in range(len(clauseTokens)):
token = clauseTokens[i]
if token[WORD_ID] == wordID:
if re.match('^.*,$', token[TEXT]):
return True
elif i+1 < len(clauseTokens) and koma.matches(clauseTokens[i+1]):
return True
break
return False
|
Determines whether the word with the given ID is immediately followed by a comma;
Returns True if the above condition is met, otherwise False;
| null | null | null |
|
def _canFormAraPhrase( araVerb, otherVerb ):
''' Teeb kindlaks, kas etteantud 'ära' verb (araVerb) yhildub teise verbiga;
Arvestab järgimisi ühilduvusi:
ains 2. pööre: ära_neg.o + V_o
ains 3. pööre: ära_neg.gu + V_gu
mitm 1. pööre: ära_neg.me + V_me
ära_neg.me + V_o
ära_neg.gem + V_gem
mitm 2. pööre: ära_neg.ge + V_ge
mitm 3. pööre: ära_neg.gu + V_gu
passiiv: ära_neg.gu + V_tagu
Kui yhildub, tagastab listide listi, vastasel juhul tagastab tyhja listi.
Tagastatava listi esimene liige on 'ära' verbi analüüside indeksite list
(millised analüüsid vastavad 'ära' verbile) ning listi teine liige on yhilduva
verbi analüüside indeksite list (millised analüüsid vastavad ühilduvale verbile);
Indeksite listid on sellised, nagu neid leitakse meetodi
wordtemplate.matchingAnalyseIndexes(token) abil;
'''
global _verbAraAgreements
for i in range(0, len(_verbAraAgreements), 2):
araVerbTemplate = _verbAraAgreements[i]
otherVerbTemplate = _verbAraAgreements[i+1]
matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)
if matchingAraAnalyses:
matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(otherVerb)
if matchingVerbAnalyses:
return [matchingAraAnalyses, matchingVerbAnalyses]
return []
|
Determines whether the given 'ära' verb (araVerb) agrees with the other verb;
Considers the following agreement patterns:
2nd person singular: ära_neg.o + V_o
3rd person singular: ära_neg.gu + V_gu
1st person plural: ära_neg.me + V_me
                   ära_neg.me + V_o
                   ära_neg.gem + V_gem
2nd person plural: ära_neg.ge + V_ge
3rd person plural: ära_neg.gu + V_gu
passive: ära_neg.gu + V_tagu
If they agree, returns a list of lists, otherwise returns an empty list.
The first member of the returned list is the list of analysis indexes of the 'ära' verb
(which analyses correspond to the 'ära' verb) and the second member is the list of
analysis indexes of the agreeing verb (which analyses correspond to the agreeing verb);
The index lists are as found by the method
wordtemplate.matchingAnalyseIndexes(token);
| null | null | null |
|
def _loadVerbSubcatRelations(infile):
'''
Meetod, mis loeb failist sisse verbide rektsiooniseosed infiniitverbidega;
Eeldab, et rektsiooniseosed on tekstifailis, kujul:
häbene da mast
igatse da
St rea alguses on verbilemma ning TAB-iga on sellest eraldatud võimalike
rektsioonide (käändeliste verbide vormitunnuste) loetelu, tähtsuse
järjekorras;
Tagastab rektsiooniseosed sõnastikuna, mille võtmeteks lemmad ning väärtusteks
vastavad vormitunnuste loetelud.
'''
relations = dict()
in_f = codecs.open(infile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
(verb, forms) = line.split('\t')
relations[verb] = forms.split()
in_f.close()
return relations
|
Method that reads verb government (subcategorization) relations with non-finite verbs from a file;
Assumes that the relations are stored in a text file, in the form:
häbene da mast
igatse da
i.e. each line starts with a verb lemma, followed (TAB-separated) by the list of possible
governed forms (form markers of the non-finite verbs), in order of importance;
Returns the relations as a dictionary whose keys are the lemmas and whose values are
the corresponding lists of form markers.
| null | null | null |
|
def _isVerbExpansible( verbObj, clauseTokens, clauseID ):
'''
Kontrollib, kas tavaline verb on laiendatav etteantud osalauses:
*) verbi kontekstis (osalauses) on veel teisi verbe;
*) verb kuulub etteantud osalausesse;
*) tegemist ei ole olema-verbiga (neid vaatame mujal eraldi);
*) tegemist pole maks|mas|mast|mata-verbiga;
*) tegemist pole verbiahelaga, mille l6pus on ja/ning/ega/v6i-fraas;
Tagastab True, kui k6ik tingimused t2idetud;
'''
global _verbInfNonExpansible
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and \
re.match('^(verb)$', verbObj[PATTERN][-1], re.I):
# Leiame viimasele s6nale vastava token'i
lastToken = [token for token in clauseTokens if token[WORD_ID] == verbObj[PHRASE][-1]]
if not lastToken:
raise Exception(' Last token not found for '+str(verbObj)+' in '+str( getJsonAsTextString(clauseTokens) ))
lastToken = lastToken[0]
# Leiame, ega tegu pole maks/mas/mast/mata verbidega (neid esialgu ei laienda edasi)
# NB! Tegelikult peaks v2hemalt -mas verbe saama siiski laiendada:
# Ma ei_0 käinud_0 teda palumas_0 ümber otsustada_0 .
# Aga kuidas seda teha v6imalikult v2heste vigadega, vajab edasist uurimist ...
if not _verbInfNonExpansible.matches(lastToken):
# Kontrollime, et fraasi l6pus poleks ja/ning/ega/v6i fraasi:
# kui on, siis esialgu targu seda fraasi laiendama ei hakka:
if len(verbObj[PATTERN]) >=3 and verbObj[PATTERN][-2] == '&':
return False
return True
#
# TODO: siin tuleks ilmselt keelata ka 'saama + Verb_tud' konstruktsioonide laiendused,
# kuna need kipuvad olema pigem vigased (kuigi haruldased); Nt.
#
# ringi hääletades sai_0 rongidega jänest sõita_0 ja vagunisaatjatest neidudega öösiti napsu võetud_0 .
#
return False
|
Checks whether an ordinary verb can be expanded in the given clause:
*) the verb's context (the clause) contains other verbs;
*) the verb belongs to the given clause;
*) it is not an olema ('to be') verb (those are handled separately elsewhere);
*) it is not a maks|mas|mast|mata verb;
*) it is not a verb chain ending with a ja/ning/ega/või phrase;
Returns True if all conditions are met;
| null | null | null |
|
def _suitableVerbExpansion( foundSubcatChain ):
'''
V6tab etteantud jadast osa, mis sobib:
*) kui liikmeid on 3, keskmine on konjuktsioon ning esimene ja viimane
klapivad, tagastab selle kolmiku;
Nt. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) kui liikmeid on rohkem kui 3, teine on konjuktsioon ning esimene ja
kolmas klapivad, ning l6pus pole verbe, tagastab esikolmiku;
*) kui liikmeid on rohkem kui yks, v6tab liikmeks esimese mitte-
konjunktsiooni (kui selline leidub);
Kui need tingimused pole t2idetud, tagastab tyhis6ne;
'''
markings = []
tokens = []
nonConjTokens = []
for (marking, token) in foundSubcatChain:
markings.append( marking )
tokens.append( token )
if marking != '&':
nonConjTokens.append( token )
if (len(markings) == 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&'):
return tokens
elif (len(markings) > 3 and markings[0]==markings[2] and markings[0]!='&' and markings[1]=='&' and \
all([m == '&' for m in markings[3:]]) ):
return tokens[:3]
elif (len(nonConjTokens) > 0):
return nonConjTokens[:1]
return []
|
Takes the suitable part of the given sequence:
*) if there are 3 members, the middle one is a conjunction and the first and last
match, returns that triple;
E.g. ei_0 saa_0 lihtsalt välja astuda_? ja_? uttu tõmmata_?
=> astuda ja tõmmata
*) if there are more than 3 members, the second is a conjunction, the first and
third match, and there are no verbs at the end, returns the first triple;
*) if there is more than one member, takes the first non-conjunction as the result
(if such a member exists);
If none of these conditions are met, returns an empty result;
| null | null | null |
|
def _expandSaamaWithTud( clauseTokens, clauseID, foundChains ):
'''
Meetod, mis määrab spetsiifilised rektsiooniseosed: täiendab 'saama'-verbiga lõppevaid
verbijadasid, lisades (v6imalusel) nende l6ppu 'tud'-infiniitverbi
(nt. sai tehtud, sai käidud ujumas);
Vastavalt leitud laiendustele t2iendab andmeid sisendlistis foundChains;
'''
verbTud = WordTemplate({POSTAG:'V', FORM:'^(tud|dud)$'})
verb = WordTemplate({POSTAG:'V'})
verbOlema = WordTemplate({POSTAG:'V', ROOT:'^(ole)$'})
for verbObj in foundChains:
# Leiame, kas fraas kuulub antud osalausesse ning on laiendatav
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
lastVerbWID = verbObj[PHRASE][-1]
lastToken = [token for token in clauseTokens if token[WORD_ID] == lastVerbWID]
lastIndex = [i for i in range(len(clauseTokens)) if clauseTokens[i][WORD_ID] == lastVerbWID]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [analysis[ROOT] for analysis in verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0]
# Leiame, kas tegemist on 'saama' verbiga
if mainVerbLemma == 'saa':
#
# Saama + 'tud', lubame eraldada verbiahelana vaid siis, kui:
# *) 'tud' on osalause l6pus ning vahel pole punktuatsioonim2rke, nt:
# Kord sai_0 laadalt isegi aprikoosipuu koduaeda viidud_0 .
# *) 'saama' on osalause l6pus ning vahetult eelneb 'tud', nt:
# Ja et see vajaduse korral avalikustatud_1 saaks_1 .
#
expansion = None
if not _isClauseFinal(lastVerbWID, clauseTokens ):
for i in range(lastIndex + 1, len(clauseTokens)):
token = clauseTokens[i]
tokenWID = token[WORD_ID]
if verbTud.matches(token) and _isClauseFinal(tokenWID, clauseTokens ) and \
not _isSeparatedByPossibleClauseBreakers( clauseTokens, verbObj[PHRASE][-1], tokenWID, True, True, False):
expansion = token
break
elif lastIndex-1 > -1:
if verbTud.matches(clauseTokens[lastIndex-1]):
expansion = clauseTokens[lastIndex-1]
if expansion:
tokenWID = expansion[WORD_ID]
verbObj[PHRASE].append( tokenWID )
verbObj[ANALYSIS_IDS].append( _getMatchingAnalysisIDs( expansion, verbTud ) )
if verbOlema.matches(expansion):
verbObj[PATTERN].append('ole')
else:
verbObj[PATTERN].append('verb')
|
Method that applies specific government relations: extends verb chains that end with the
verb 'saama' by adding (where possible) a 'tud' non-finite verb to their end
(e.g. sai tehtud, sai käidud ujumas);
Updates the data in the input list foundChains according to the expansions found;
| null | null | null |
|
def _getMatchingAnalysisIDs( tokenJson, requiredWordTemplate, discardAnalyses = None ):
''' Tagastab listi tokenJson'i analyysidest, mis sobivad etteantud yksiku
sõnamalli või sõnamallide listi mõne elemendiga (requiredWordTemplate võib
olla üks WordTemplate või list WordTemplate elementidega);
Kui discardAnalyses on defineeritud (ning on WordTemplate), visatakse minema
analyysid, mis vastavad sellele s6namallile;
'''
final_ids = set()
if isinstance(requiredWordTemplate, list):
for wt in requiredWordTemplate:
ids = wt.matchingAnalyseIndexes(tokenJson)
if ids:
final_ids.update(ids)
elif isinstance(requiredWordTemplate, WordTemplate):
ids = requiredWordTemplate.matchingAnalyseIndexes(tokenJson)
final_ids = set(ids)
if discardAnalyses:
if isinstance(discardAnalyses, WordTemplate):
ids2 = discardAnalyses.matchingAnalyseIndexes(tokenJson)
if ids2:
final_ids = final_ids.difference(set(ids2))
else:
raise Exception(' The parameter discardAnalyses should be WordTemplate.')
if len(final_ids) == 0:
raise Exception(' Unable to find matching analyse IDs for: '+str(tokenJson))
return list(final_ids)
|
Returns the list of those analyses of tokenJson that match the given single
word template, or some element of a list of word templates (requiredWordTemplate may
be a single WordTemplate or a list of WordTemplate elements);
If discardAnalyses is defined (and is a WordTemplate), the analyses matching
that word template are discarded;
| null | null | null |
|
if len(regex) == 0:
return False
try:
re.compile(regex)
return True
except sre_constants.error:
return False
|
def is_valid_regex(regex)
|
Checks whether the given string is a valid regular expression.
| 2.441005
| 2.395318
| 1.019074
|
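Quick checks of the helper above (plain Python; note that the empty string is rejected explicitly even though it would compile):

    print(is_valid_regex(r'\d+'))        # True
    print(is_valid_regex('(unclosed'))   # False: unbalanced parenthesis
    print(is_valid_regex(''))            # False: empty patterns are rejected by convention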
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_right(elem1, elem2):
elem1['end'] = elem2['start']
return elem1, elem2
|
def delete_left(elem1, elem2)
|
Trim the right end of elem1 so that it no longer overlaps elem2:
xxxxx
   yyyyy
---------
xxx
   yyyyy
| 5.375616
| 6.537999
| 0.822211
|
# assert not (nested(elem1, elem2) or nested(elem2, elem1)), 'deletion not defined for nested elements'
if overlapping_left(elem1, elem2):
elem2['start'] = elem1['end']
return elem1, elem2
|
def delete_right(elem1, elem2)
|
Trim the left end of elem2 so that it no longer overlaps elem1:
xxxxx
   yyyyy
---------
xxxxx
     yyy
| 5.647915
| 6.559644
| 0.861009
|
yielded = set()
ri = layer[:] # Shallow copy the layer
for i1, elem1 in enumerate(ri):
for i2, elem2 in enumerate(ri):
if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:
inds = (i1, i2) if i1 < i2 else (i2, i1)
if inds not in yielded and in_by_identity(layer, elem1) and in_by_identity(layer, elem2):
yielded.add(inds)
yield elem1, elem2
|
def iterate_intersecting_pairs(layer)
|
Given a layer of estnltk objects, yields pairwise intersecting elements.
Breaks if the layer is modified or deleted after the iterator has been initialized.
| 3.342173
| 3.086248
| 1.082924
|
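An illustrative call on a plain list of span dicts (the in_by_identity helper used inside the generator is assumed to test membership by object identity):

    layer = [{'start': 0, 'end': 5}, {'start': 3, 'end': 9}, {'start': 10, 'end': 12}]
    for a, b in iterate_intersecting_pairs(layer):
        print(a, b)    # only the first two spans intersect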
def _getPOS( self, token, onlyFirst = True ):
''' Returns POS of the current token.
'''
if onlyFirst:
return token[ANALYSIS][0][POSTAG]
else:
return [ a[POSTAG] for a in token[ANALYSIS] ]
|
Returns POS of the current token.
| null | null | null |
|
def _getPhrase( self, i, sentence, NPlabels ):
''' Fetches the full length phrase from the position i
based on the existing NP phrase annotations (from
NPlabels);
Returns list of sentence tokens in the phrase, and
indices of the phrase;
'''
phrase = []
indices = []
if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':
phrase = [ sentence[i] ]
indices = [ i ]
j = i + 1
while ( j < len(sentence) ):
if NPlabels[j] in ['B', '']:
break
else:
phrase.append( sentence[j] )
indices.append( j )
j += 1
return phrase, indices
|
Fetches the full length phrase from the position i
based on the existing NP phrase annotations (from
NPlabels);
Returns list of sentence tokens in the phrase, and
indices of the phrase;
| null | null | null |
|
def _getCaseAgreement(self, token1, token2):
''' Detects whether there is a morphological case agreement
between two consecutive nominals (token1 and token2), and
returns the common case, or None if no agreement exists;
Applies a special set of rules for detecting agreement on
the word in genitive followed by the word in ter, es, ab or
kom.
'''
forms1 = set( [a[FORM] for a in token1[ANALYSIS]] )
forms2 = set( [a[FORM] for a in token2[ANALYSIS]] )
if len(forms1.intersection(forms2))==0:
# Kontrollime ka ni-na-ta-ga k22ndeid:
if 'sg g' in forms1:
if 'sg ter' in forms2:
return 'sg ter'
elif 'sg es' in forms2:
return 'sg es'
elif 'sg ab' in forms2:
return 'sg ab'
elif 'sg kom' in forms2:
return 'sg kom'
elif 'pl g' in forms1:
if 'pl ter' in forms2:
return 'pl ter'
elif 'pl es' in forms2:
return 'pl es'
elif 'pl ab' in forms2:
return 'pl ab'
elif 'pl kom' in forms2:
return 'pl kom'
return None
else:
return list(forms1.intersection(forms2))[0]
|
Detects whether there is morphological case agreement
between two consecutive nominals (token1 and token2) and
returns the common case, or None if no agreement exists;
applies a special set of rules for detecting agreement between
a word in the genitive and a following word in the terminative (ter),
essive (es), abessive (ab) or comitative (kom) case.
| null | null | null |
|
def get_phrases(self, text, np_labels):
''' Given a Text and a BIO labels (one label for each word in Text) ,
extracts phrases and returns as a list of phrases, where each phrase
is a list of word tokens belonging to the phrase;
Parameters
----------
text: estnltk.text.Text
The input Text, or a list consecutive words (dict objects).
The method attempts to automatically determine the type of
the input;
np_labels : list of str
A list of strings, containing a B-I-O label for each word in
*text*;
Returns
-------
list of (list of tokens)
List of phrases, where each phrase is a list of word tokens
belonging to the phrase;
'''
# 1) Take different inputs to common list of words format:
input_words = []
if isinstance(text, Text):
# input is Text
input_words = text.words
elif isinstance(text, list) and len(text)>0 and isinstance(text[0], dict) and \
TEXT in text[0]:
# input is a list of words
input_words = text
elif text:
raise Exception('Unexpected input text:', text)
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# 2) Extract phrases from input words:
phrases = []
for i, word in enumerate(input_words):
label = np_labels[i]
if label == 'B':
phrases.append([])
if label in ['B', 'I']:
phrases[-1].append( word )
return phrases
|
Given a Text and BIO labels (one label for each word in the Text),
extracts phrases and returns them as a list, where each phrase
is a list of word tokens belonging to the phrase;
Parameters
----------
text: estnltk.text.Text
The input Text, or a list of consecutive words (dict objects).
The method attempts to automatically determine the type of
the input;
np_labels : list of str
A list of strings, containing a B-I-O label for each word in
*text*;
Returns
-------
list of (list of tokens)
List of phrases, where each phrase is a list of word tokens
belonging to the phrase;
| null | null | null |
|
def get_phrase_texts(self, text, np_labels):
''' Given a Text, and a list describing text annotations in the
B-I-O format (*np_label*), extracts phrases and returns as a
list of phrase texts;
Assumes that the input is same as the input acceptable for
the method NounPhraseChunker.get_phrases();
Returns
-------
list of string
Returns a list of phrase texts;
'''
phrases = self.get_phrases(text, np_labels)
texts = []
for phrase in phrases:
phrase_str = ' '.join([word[TEXT] for word in phrase])
texts.append( phrase_str )
return texts
|
Given a Text and a list describing text annotations in the
B-I-O format (*np_labels*), extracts phrases and returns them as a
list of phrase texts;
Assumes that the input is the same as the input accepted by
the method NounPhraseChunker.get_phrases();
Returns
-------
list of string
Returns a list of phrase texts;
| null | null | null |
|
def annotateText(self, text, layer, np_labels = None):
''' Applies this chunker on given Text, and adds results of
the chunking as a new annotation layer to the text.
If the NP annotations are provided (via the input list
*np_labels*), uses the given NP annotations, otherwise
produces new NP_LABEL annotations via the method
self.analyze_text();
Parameters
----------
text: estnltk.text.Text
The input text where the new layer of NP chunking
annotations is to be added;
layer: str
Name of the new layer;
np_labels : list of str
Optional: A list of strings, containing a B-I-O label
for each word in *text*; If provided, uses annotations
from *np_labels*, otherwise creates new annotations
with this chunker;
Returns
-------
text
The input text where a new layer (containing NP
annotations) has been added;
'''
input_words = None
if isinstance(text, Text):
# input is Text
input_words = text.words
else:
raise Exception(' Input text should be of type Text, but it is ', text)
phrases = []
# If NP_LABEL-s are not provided, text needs to be analyzed first:
if not np_labels:
np_labels = self.analyze_text( text, return_type="labels" )
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words ('+str(len(input_words))+\
') does not match number of labels '+str(len(np_labels)))
# Fetch NP chunks
phrases = self.get_phrases( text, np_labels )
# Create and attach annotations to the Text object
annotations = []
if phrases:
for phrase in phrases:
phrase_annotation = {}
phrase_annotation[START] = phrase[0][START]
phrase_annotation[END] = phrase[-1][END]
phrase_annotation[TEXT] = ' '.join([word[TEXT] for word in phrase ])
annotations.append( phrase_annotation )
text[layer] = annotations
return text
|
Applies this chunker to the given Text and adds the results of
the chunking as a new annotation layer to the text.
If the NP annotations are provided (via the input list
*np_labels*), uses the given NP annotations, otherwise
produces new NP_LABEL annotations via the method
self.analyze_text();
Parameters
----------
text: estnltk.text.Text
The input text where the new layer of NP chunking
annotations is to be added;
layer: str
Name of the new layer;
np_labels : list of str
Optional: A list of strings, containing a B-I-O label
for each word in *text*; If provided, uses annotations
from *np_labels*, otherwise creates new annotations
with this chunker;
Returns
-------
text
The input text where a new layer (containing NP
annotations) has been added;
| null | null | null |
|
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep)
|
def divide(elements, by, translate=False, sep=' ')
|
Divide the list `elements` into bins defined by the elements of the list `by`.
All elements are grouped into N bins, where N is the number of elements in the `by` list.
Parameters
----------
elements: list of dict
Elements to be grouped into bins.
by: list of dict
Elements defining the bins.
translate: bool (default: False)
When dividing, also translate start and end positions of elements.
sep: str (default ' ')
In case of multispans, what is the default text separator.
This is required in order to tag correct start, end positions of elements.
| 5.881893
| 10.256705
| 0.573468
|
openRE = re.compile(openDelim)
closeRE = re.compile(closeDelim)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -=1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
|
def dropNested(text, openDelim, closeDelim)
|
Drops nested expressions delimited by ``openDelim`` and ``closeDelim`` (e.g. namespaces and tables) from the text.
| 3.569692
| 3.610545
| 0.988685
|
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
|
def dropSpans(spans, text)
|
Drop from text the blocks identified in :param spans:, possibly nested.
| 4.374983
| 4.317253
| 1.013372
|
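A toy check of the two helpers above on a plain string, independent of the wiki cleanup pipeline:

    text = 'keep {{drop {{nested}} this}} and keep this'
    print(dropNested(text, r'\{\{', r'\}\}'))   # -> 'keep  and keep this'
    print(dropSpans([(0, 4)], 'keep this'))     # -> ' this'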
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
text = newlines.sub(r'\n', text)
text = bulletlist.sub(r'', text)
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(u' (,:\.\)\]»)', r'\1', text)
text = re.sub(u'(\[\(«) ', r'\1', text)
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text
|
def clean(text)
|
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
| 3.199694
| 3.184525
| 1.004764
|
return isinstance(docs, list) and \
all(isinstance(d, (basestring, Text)) for d in docs)
|
def __isListOfTexts(self, docs)
|
Checks whether the input is a list of strings or Text-s;
| 3.758347
| 2.987349
| 1.258088
|
return isinstance(docs, list) and \
all(self.__isListOfTexts(ds) for ds in docs)
|
def __isListOfLists(self, docs)
|
Checks whether the input is a list of lists of strings/Text-s;
| 5.317663
| 3.887307
| 1.367955
|
lemmaFreq = dict()
for doc in docs:
for word in doc[WORDS]:
# 1) Leiame k6ik s6naga seotud unikaalsed pärisnimelemmad
# (kui neid on)
uniqLemmas = set()
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
uniqLemmas.add( analysis[ROOT] )
# 2) Jäädvustame lemmade sagedused
for lemma in uniqLemmas:
if lemma not in lemmaFreq:
lemmaFreq[lemma] = 1
else:
lemmaFreq[lemma] += 1
return lemmaFreq
|
def __create_proper_names_lexicon(self, docs)
|
Builds a frequency lexicon of proper names over the document collection
(describing how many times each proper-name lemma occurred);
| 10.3522
| 7.907919
| 1.309093
|
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame kõigi pärisnimede sagedused sagedusleksikonist
highestFreq = 0
properNameAnalyses = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
if analysis[ROOT] in lexicon:
properNameAnalyses.append( analysis )
if lexicon[analysis[ROOT]] > highestFreq:
highestFreq = lexicon[analysis[ROOT]]
else:
raise Exception(' Unable to find lemma ',analysis[ROOT], \
' from the lexicon. ')
# 2) J2tame alles vaid suurima lemmasagedusega pärisnimeanalyysid,
# ylejaanud kustutame maha
if highestFreq > 0:
toDelete = []
for analysis in properNameAnalyses:
if lexicon[analysis[ROOT]] < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
|
def __disambiguate_proper_names_1(self, docs, lexicon)
|
Performs the first pass of deleting redundant analyses: if a word has several
proper-name analyses with different frequencies, only the analysis (or analyses)
with the highest frequency are kept ...
| 8.610538
| 6.213165
| 1.385854
|
certainNames = set()
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid pärisnimeanalüüsidest koosnevaid sõnu
if all([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
# Jäädvustame kõik unikaalsed lemmad kui kindlad pärisnimed
for analysis in word[ANALYSIS]:
certainNames.add( analysis[ROOT] )
return certainNames
|
def __find_certain_proper_names(self, docs)
|
Builds the list of certain proper names: looks at words that have only
proper-name analyses and collects the unique proper names from them
into the list;
| 17.43824
| 9.836555
| 1.772799
|
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
sentencePos = 0 # Tavaline lausealgus
for i in range(len(sentence)):
word = sentence[i]
# Täiendavad heuristikud lausealguspositsioonide leidmiseks:
# 1) kirjavahemärk, mis pole koma ega semikoolon, on lausealgus:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
# 2) potentsiaalne loendi algus (arv, millele järgneb punkt või
# sulg ja mis ei ole kuupäev);
if not re.match('^[1234567890]*$', word[TEXT] ) and \
not re.match('^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$', word[TEXT] ) and \
re.match("^[1234567890.()]*$", word[TEXT]):
sentencePos = 0
#self.__debug_print_word_in_sentence_str(sentence, word)
continue
if sentencePos == 0:
# Vaatame lausealgulisi sõnu, millel on nii pärisnimeanalüüs(e)
# kui ka mitte-pärisnimeanalüüs(e)
h_postags = [ a[POSTAG] == 'H' for a in word[ANALYSIS] ]
if any( h_postags ) and not all( h_postags ):
for analysis in word[ANALYSIS]:
# Jätame meelde kõik unikaalsed pärisnimelemmad
if analysis[POSTAG] == 'H':
sentInitialNames.add( analysis[ROOT] )
sentencePos += 1
return sentInitialNames
|
def __find_sentence_initial_proper_names(self, docs)
|
Builds the list of sentence-initial proper names: looks at words that have both
proper-name analysis(es) and non-proper-name analysis(es) and that occur at the
beginning of a sentence or of a numbered list item, and records the
unique lemmas of such words;
| 7.411397
| 5.791997
| 1.279593
|
for doc in docs:
for word in doc[WORDS]:
# Vaatame vaid s6nu, millele on pakutud rohkem kui yks analyys:
if len(word[ANALYSIS]) > 1:
# 1) Leiame analyysid, mis tuleks loendi järgi eemaldada
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:
toDelete.append( analysis )
            # 2) Remove the redundant analyses
if toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
|
def __remove_redundant_proper_names(self, docs, lemma_set)
|
Removes redundant proper-name analyses based on the given set of
word lemmas;
| 9.316139
| 6.338928
| 1.469671
|
for doc in docs:
for sentence in doc.divide( layer=WORDS, by=SENTENCES ):
        sentencePos = 0 # Ordinary sentence start
for i in range(len(sentence)):
word = sentence[i]
            # Additional heuristics for detecting sentence-start positions:
            # 1) a punctuation mark other than a comma or a semicolon marks a sentence start:
if all([ a[POSTAG] == 'Z' for a in word[ANALYSIS] ]) and \
not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
continue
#
            # Only consider ambiguous words that also have a proper-name analysis
#
if len(word[ANALYSIS]) > 1 and \
any([ a[POSTAG] == 'H' for a in word[ANALYSIS] ]):
if sentencePos != 0:
                    # 1) If we are in the middle of a sentence, always keep only the name analyses
                    #    (assuming that by now only the correct names remain)
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#if toDelete:
# self.__debug_print_word_in_sentence_str(sentence, word)
else:
                    # 2) If we are at the start of a sentence, keep only the name analyses
                    #    if the corresponding lemma also occurred elsewhere (i.e. the lemma's
                    #    frequency is greater than 1);
                    # Does any lemma occur in the proper-name lexicon with a frequency > 1 ?
hasRecurringProperName = False
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[ROOT] in lexicon and lexicon[analysis[ROOT]] > 1:
hasRecurringProperName = True
if analysis[POSTAG] not in ['H', 'G']:
toDelete.append( analysis )
if hasRecurringProperName and toDelete:
                        # If the proper name also occurred elsewhere, keep only the
                        # proper-name analyses:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
#self.__debug_print_word_in_sentence_str(sentence, word)
sentencePos += 1
|
def __disambiguate_proper_names_2(self, docs, lexicon)
|
Deletes the redundant non-proper-name analyses:
-- if a word in the middle of a sentence has a proper-name analysis, keep
only the proper-name analysis (or analyses);
-- if a word at the start of a sentence has a proper-name analysis, and the
proper-name lemma occurs in the corpus with a frequency greater than 1, keep
only the proper-name analysis (or analyses); otherwise delete nothing;
| 8.737791
| 7.224316
| 1.209497
|
# 1) Build the frequency lexicon of proper-name lemmas
lexicon = self.__create_proper_names_lexicon(docs)
# 2) Initial removal: if a word has several proper-name analyses with
#    different corpus frequencies, keep only the most frequent
#    analysis ...
self.__disambiguate_proper_names_1(docs, lexicon)
# 3) Remove redundant sentence-initial proper-name analyses;
#    First collect: the certain proper names, the proper names occurring
#    at sentence starts, and the proper names occurring mid-sentence
certainNames = self.__find_certain_proper_names(docs)
sentInitialNames = self.__find_sentence_initial_proper_names(docs)
sentCentralNames = self.__find_sentence_central_proper_names(docs)
# 3.1) Compare the lemmas occurring at sentence starts and mid-sentence:
#      find the lemmas that occurred only at sentence starts ...
onlySentenceInitial = sentInitialNames.difference(sentCentralNames)
# 3.2) Compare the sentence-initial-only lemmas with the certain proper-name
#      lemmas: if a word occurred only at sentence starts and is not a certain
#      proper-name lemma, it is most likely not a proper name ...
notProperNames = onlySentenceInitial.difference(certainNames)
# 3.3) Remove the redundant proper-name analyses (if any were found)
if len(notProperNames) > 0:
self.__remove_redundant_proper_names(docs, notProperNames)
# 4) Rebuild the frequency lexicon of proper-name lemmas (the frequencies
#    have most likely changed in the meantime);
lexicon = self.__create_proper_names_lexicon(docs)
# 5) Delete redundant non-proper-name analyses from words that have
#    other analyses in addition to proper-name analyses:
#    at the start of a sentence keep only the proper-name analyses if the
#    name also occurs elsewhere in the corpus;
#    mid-sentence always keep only the proper-name analyses;
self.__disambiguate_proper_names_2(docs, lexicon)
return docs
|
def pre_disambiguate(self, docs)
|
Performs pre-disambiguation of proper names. The general goal is to reduce
the ambiguity in the morphological analysis of words starting with a capital
letter, e.g. by removing the proper-name analysis when the capital letter
most likely just marks the start of a sentence. (A usage sketch follows
this row.)
| 9.17086
| 8.257819
| 1.110567
|
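The class hosting pre_disambiguate() is not shown in this excerpt; assuming it is available on a disambiguator object (the handle `disamb` below is hypothetical) and that the documents are morphologically analysed estnltk Text objects, a usage sketch could look like this.

from estnltk import Text

docs = [Text('Mart läks koju. Mart tuli tagasi.').tag_analysis(),
        Text('Järv oli vaikne.').tag_analysis()]
docs = disamb.pre_disambiguate(docs)   # `disamb` is a hypothetical disambiguator instance
for doc in docs:
    print(doc.lemmas)                  # ambiguity of capitalised words should now be reduced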
return POSTAG in analysisA and POSTAG in analysisB and \
analysisA[POSTAG]==analysisB[POSTAG] and \
ROOT in analysisA and ROOT in analysisB and \
analysisA[ROOT]==analysisB[ROOT] and \
FORM in analysisA and FORM in analysisB and \
analysisA[FORM]==analysisB[FORM] and \
CLITIC in analysisA and CLITIC in analysisB and \
analysisA[CLITIC]==analysisB[CLITIC] and \
ENDING in analysisA and ENDING in analysisB and \
analysisA[ENDING]==analysisB[ENDING]
|
def __analyses_match(self, analysisA, analysisB)
|
Determines whether two analyses are duplicates, i.e. whether they contain
exactly the same morphological information.
| 1.993396
| 2.074299
| 0.960997
|
for doc in docs:
for word in doc[WORDS]:
        # 1) Find all duplicate analyses (if there are any)
toDelete = []
for i in range(len(word[ANALYSIS])):
if i+1 < len(word[ANALYSIS]):
for j in range(i+1, len(word[ANALYSIS])):
analysisI = word[ANALYSIS][i]
analysisJ = word[ANALYSIS][j]
if self.__analyses_match(analysisI, analysisJ):
if j not in toDelete:
toDelete.append(j)
        # 2) Delete the redundant analyses
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a]
#
        # *) If a verb has analyses with both the '-tama' and the '-ma'
        #    ending, keep only the '-ma' analyses and delete the rest;
        #    E.g. lõpetama: lõp+tama, lõppe+tama, lõpeta+ma
        #         teatama:  tead+tama, teata+ma
#
if any([ a[POSTAG]=='V' and a[ENDING]=='tama' for a in word[ANALYSIS] ]) and \
any([ a[POSTAG]=='V' and a[ENDING]=='ma' for a in word[ANALYSIS] ]):
toDelete = []
for a in range( len(word[ANALYSIS]) ):
if word[ANALYSIS][a][POSTAG]=='V' and \
word[ANALYSIS][a][ENDING]=='tama':
toDelete.append(a)
if toDelete:
for a in sorted(toDelete, reverse=True):
del word[ANALYSIS][a]
|
def __remove_duplicate_and_problematic_analyses(self, docs)
|
1) Removes duplicates, i.e. repeated analyses, from the morphological
analyses of all words of the input corpus; e.g. the word 'palk' gets two
analyses: 'palk' (inflecting as 'palk\palgi') and 'palk' (inflecting as
'palk\palga'), but after duplicate removal only one remains;
2) If a verb has analyses with both the '-tama' and the '-ma' ending, keep
only the '-ma' analyses; (a small demonstration follows this row)
| 5.499781
| 3.970566
| 1.385138
|
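A small, self-contained demonstration of the duplicate criterion used by __remove_duplicate_and_problematic_analyses above: two analyses count as duplicates when root, part of speech, form, ending and clitic all coincide. Plain string keys are assumed here in place of the library constants.

KEYS = ('root', 'partofspeech', 'form', 'ending', 'clitic')

def analyses_match(a, b):
    # duplicates carry exactly the same values for all five fields
    return all(k in a and k in b and a[k] == b[k] for k in KEYS)

a1 = {'root': 'palk', 'partofspeech': 'S', 'form': 'sg n', 'ending': '0', 'clitic': ''}
a2 = dict(a1)                       # second, identical analysis of 'palk'
analyses = [a1, a2]
kept = []
for a in analyses:
    if not any(analyses_match(a, other) for other in kept):
        kept.append(a)
print(len(kept))                    # 1 -- the duplicate is dropped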
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
        # Skip the so-called hidden words
if (d, w) in hiddenWords:
continue
isAmbiguous = (len(word[ANALYSIS])>1)
        # Record the frequencies; verb lemmas get a distinguishing 'ma' mark:
for a in word[ANALYSIS]:
lemma = a[ROOT]+'ma' if a[POSTAG]=='V' else a[ROOT]
            # 1) Record the overall frequency
if lemma not in lexicon:
lexicon[lemma] = 1
else:
lexicon[lemma] += 1
            # 2) Record that the lemma occurred in an ambiguous word
if isAmbiguous:
amb_lexicon[lemma] = 1
# Copy the frequencies from the general frequency lexicon over to the
# frequency lexicon of ambiguous lemmas
for lemma in amb_lexicon.keys():
amb_lexicon[lemma] = lexicon[lemma]
|
def __supplement_lemma_frequency_lexicon(self, docs, hiddenWords, lexicon, amb_lexicon)
|
Supplements the given frequency lexicons based on the given corpus (docs):
*) the general frequency lexicon, which contains all lemmas, except the
lemmas belonging to the so-called hidden words (hiddenWords);
*) the lexicon of ambiguous words, which contains only the lemmas that
occurred in words with ambiguous analyses, except (hiddenWords) lemmas,
together with their overall frequencies (taken from the first lexicon);
| 10.736981
| 7.124562
| 1.507037
|
for d in range(len(docs)):
for w in range(len(docs[d][WORDS])):
word = docs[d][WORDS][w]
        # Skip the so-called hidden words
if (d, w) in hiddenWords:
continue
        # Only consider words whose analyses are still ambiguous
if len(word[ANALYSIS]) > 1:
            # 1) Find the highest corpus frequency among the ambiguous lemmas
highestFreq = 0
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
if lemma in lexicon and lexicon[lemma] > highestFreq:
highestFreq = lexicon[lemma]
if highestFreq > 0:
                # 2) Discard all analyses whose lemma frequency is lower than
                #    the highest frequency;
toDelete = []
for analysis in word[ANALYSIS]:
lemma = analysis[ROOT]+'ma' if analysis[POSTAG]=='V' else analysis[ROOT]
freq = lexicon[lemma] if lemma in lexicon else 0
if freq < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
|
def __disambiguate_with_lexicon(self, docs, lexicon, hiddenWords)
|
Disambiguates ambiguous morphological analyses according to the lemma
lexicon - removes the redundant analyses;
Relies on the idea of "one sense per text": if a lemma that remained
ambiguous also occurs elsewhere in the text/corpus and, overall, occurs
more frequently than its alternative analyses, then it is most likely
the correct lemma/analysis; (a toy walk-through follows this row)
| 6.168169
| 4.879739
| 1.264036
|
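A toy walk-through of the "one sense per text" filtering performed by __disambiguate_with_lexicon above: among the analyses of an ambiguous word, only those whose lemma reaches the highest corpus frequency survive. The lexicon contents and the plain string keys below are made up for illustration.

lexicon = {'tee': 7, 'tegema': 2}      # made-up corpus frequencies
word = {'analysis': [{'root': 'tee',  'partofspeech': 'S'},
                     {'root': 'tege', 'partofspeech': 'V'}]}

def lemma_key(analysis):
    # verbs are keyed with the '-ma' suffix, mirroring the code above
    return analysis['root'] + 'ma' if analysis['partofspeech'] == 'V' else analysis['root']

highest = max(lexicon.get(lemma_key(a), 0) for a in word['analysis'])
word['analysis'] = [a for a in word['analysis'] if lexicon.get(lemma_key(a), 0) >= highest]
print([a['root'] for a in word['analysis']])   # ['tee'] -- the rarer verb reading is removed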
for analysis_match in text.analysis:
for candidate in analysis_match:
if candidate['partofspeech'] in PYVABAMORF_TO_WORDNET_POS_MAP:
# Wordnet contains data about the given lemma and pos combination - will annotate.
wordnet_obj = {}
tag_synsets(wordnet_obj, candidate, **kwargs)
return text
|
def tag_text(self, text, **kwargs)
|
Annotates `analysis` entries in `text` with a list of the lemmas' synsets and the queried WordNet data in a 'wordnet' entry.
Note
----
Annotates every `analysis` entry with a `wordnet`:{`synsets`:[..]}.
Parameters
----------
text: estnltk.text.Text
Representation of a corpus in a disassembled form for automatic text analysis with word-level `analysis` entry.
E.g. corpus disassembled into paragraphs, sentences, words ({'paragraphs':[{'sentences':[{'words':[{'analysis':{...}},..]},..]},..]}).
pos : boolean, optional
If True, annotates each synset with a corresponding `pos` (part-of-speech) tag.
variants : boolean, optional
If True, annotates each synset with a list of all its variants' (lemmas') literals.
var_sense : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its sense number.
var_definition : boolean, optional
If True and `variants` is True, annotates each variant/lemma with its definition. Definitions often missing in WordNet.
var_examples : boolean, optional
If True and `variants` is True, annotates each variant/lemma with a list of its examples. Examples often missing in WordNet.
relations : list of str, optional
Holds the relations of interest. Legal relations are as follows:
`antonym`, `be_in_state`, `belongs_to_class`, `causes`, `fuzzynym`, `has_holo_location`, `has_holo_madeof`, `has_holo_member`,
`has_holo_part`, `has_holo_portion`, `has_holonym`, `has_hyperonym`, `has_hyponym`, `has_instance`, `has_mero_location`,
`has_mero_madeof`, `has_mero_member`, `has_mero_part`, `has_mero_portion`, `has_meronym`, `has_subevent`, `has_xpos_hyperonym`,
`has_xpos_hyponym`, `involved`, `involved_agent`, `involved_instrument`, `involved_location`, `involved_patient`,
`involved_target_direction`, `is_caused_by`, `is_subevent_of`, `near_antonym`, `near_synonym`, `role`, `role_agent`, `role_instrument`,
`role_location`, `role_patient`, `role_target_direction`, `state_of`, `xpos_fuzzynym`, `xpos_near_antonym`, `xpos_near_synonym`.
Annotates each synset with related synsets' indices with respect to queried relations.
Returns
-------
estnltk.text.Text
In-place annotated `text`.
| 20.492054
| 15.941243
| 1.285474
|
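A hedged usage sketch for tag_text(): the import path of the WordNet tagger class is not shown in this excerpt and is therefore an assumption, as is the exact shape of the resulting 'wordnet' entries.

from estnltk import Text
from estnltk.wordnet_tagger import WordnetTagger   # assumed import path

text = Text('Karu magas koopas.').tag_analysis()
tagger = WordnetTagger()
tagger.tag_text(text, pos=True, variants=True, relations=['has_hyperonym'])
for analyses in text.analysis:
    for candidate in analyses:
        print(candidate.get('wordnet'))             # e.g. {'synsets': [...]} for covered lemmas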
words = sentence_chunk.split('\n')
texts = []
labels = []
for word in words:
word = word.strip()
if len(word) > 0:
toks = word.split('\t')
texts.append(toks[0].strip())
labels.append(toks[-1].strip())
return texts, labels
|
def get_texts_and_labels(sentence_chunk)
|
Given a sentence chunk, extract original texts and labels.
| 1.972983
| 1.877875
| 1.050647
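An input/output example for get_texts_and_labels(), assuming the tab-separated, one-token-per-line format that the implementation above expects (surface form in the first column, label in the last).

chunk = 'Sherlock\tB-PER\nHolmes\tI-PER\nelas\tO\nLondonis\tB-LOC'
texts, labels = get_texts_and_labels(chunk)
print(texts)    # ['Sherlock', 'Holmes', 'elas', 'Londonis']
print(labels)   # ['B-PER', 'I-PER', 'O', 'B-LOC']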
|
word_spans = []
sentence_spans = []
sentence_chunks = doc.split('\n\n')
sentences = []
for chunk in sentence_chunks:
sent_texts, sent_labels = get_texts_and_labels(chunk.strip())
sentences.append(list(zip(sent_texts, sent_labels)))
return sentences
|
def parse_doc(doc)
|
Extract the list of sentences containing (text, label) pairs.
| 4.073728
| 3.565653
| 1.142491
|
raw_tokens = []
curpos = 0
text_spans = []
all_labels = []
sent_spans = []
word_texts = []
for sentence in document:
startpos = curpos
for idx, (text, label) in enumerate(sentence):
raw_tokens.append(text)
word_texts.append(text)
all_labels.append(label)
text_spans.append((curpos, curpos+len(text)))
curpos += len(text)
if idx < len(sentence) - 1:
raw_tokens.append(' ')
curpos += 1
sent_spans.append((startpos, curpos))
raw_tokens.append('\n')
curpos += 1
return {
TEXT: ''.join(raw_tokens),
WORDS: [{TEXT: text, START: start, END: end, LABEL: label} for text, (start, end), label in zip(word_texts, text_spans, all_labels)],
SENTENCES: [{START: start, END:end} for start, end in sent_spans]
}
|
def convert(document)
|
Convert a document to a Text object
| 2.392668
| 2.41974
| 0.988812
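A usage sketch chaining parse_doc() and convert() on a two-sentence document; the printed keys assume the layer constants carry their usual string values ('text', 'words', 'sentences', 'start', 'end', 'label'), which is an assumption here.

doc = 'Sherlock\tB-PER\nHolmes\tI-PER\n\nTema\tO\nelas\tO\nLondonis\tB-LOC'
converted = convert(parse_doc(doc))
print(repr(converted['text']))   # 'Sherlock Holmes\nTema elas Londonis\n'
print(converted['words'][0])     # {'text': 'Sherlock', 'start': 0, 'end': 8, 'label': 'B-PER'}
print(converted['sentences'])    # [{'start': 0, 'end': 15}, {'start': 16, 'end': 34}]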
|
'''
Cleanup all the local data.
'''
self._select_cb = None
self._commit_cb = None
self._rollback_cb = None
super(TransactionClass, self)._cleanup()
|
def _cleanup(self)
|
Cleanup all the local data.
| 9.201926
| 5.346484
| 1.721117
|
'''
Set this channel to use transactions.
'''
if not self._enabled:
self._enabled = True
self.send_frame(MethodFrame(self.channel_id, 90, 10))
self._select_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_select_ok)
|
def select(self, cb=None)
|
Set this channel to use transactions.
| 7.663911
| 5.689945
| 1.346922
|
'''
Commit the current transaction. Caller can specify a callback to use
when the transaction is committed.
'''
# Could call select() but spec 1.9.2.3 says to raise an exception
if not self.enabled:
raise self.TransactionsNotEnabled()
self.send_frame(MethodFrame(self.channel_id, 90, 20))
self._commit_cb.append(cb)
self.channel.add_synchronous_cb(self._recv_commit_ok)
|
def commit(self, cb=None)
|
Commit the current transaction. Caller can specify a callback to use
when the transaction is committed.
| 8.521229
| 6.604863
| 1.290145
|
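The class that owns select() and commit() is not shown here; assuming it is the channel's transaction facade (reachable as channel.tx in haigha-style AMQP clients, which is an assumption), a typical transactional publish could be sketched as follows.

def publish_transactionally(channel, msg):
    """Publish one message inside an AMQP transaction (sketch; `channel` and
    `msg` are hypothetical, already-constructed objects)."""
    def on_committed():
        print('transaction committed')

    channel.tx.select()                                            # enable transactions on the channel
    channel.basic.publish(msg, 'my_exchange', 'my.routing.key')    # hypothetical exchange / routing key
    channel.tx.commit(cb=on_committed)                             # commit-ok triggers the callback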