| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
css_list = [DEFAULT_MARK_CSS]
for aes in self.aesthetics:
css_list.extend(get_mark_css(aes, self.values[aes]))
#print('\n'.join(css_list))
return '\n'.join(css_list)
|
def css(self)
|
Returns
-------
str
The CSS.
| 5.434164
| 6.161584
| 0.881943
|
html = mark_text(text, self.aesthetics, self.rules)
html = html.replace('\n', '<br/>')
if add_header:
html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
#print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
return html
|
def render(self, text, add_header=False)
|
Render the HTML.
Parameters
----------
add_header: boolean (default: False)
If True, add HTML5 header and footer.
Returns
-------
str
The rendered HTML.
| 4.448515
| 4.587989
| 0.9696
|
trainer = pycrfsuite.Trainer(algorithm=self.algorithm,
params={'c2': self.c2},
verbose=self.verbose)
for doc in nerdocs:
for snt in doc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = [t.label for t in snt]
trainer.append(xseq, yseq)
trainer.train(mode_filename)
|
def train(self, nerdocs, mode_filename)
|
Train a CRF model using given documents.
Parameters
----------
nerdocs: list of estnltk.estner.ner.Document.
The documents for model training.
mode_filename: str
The filename where the model will be saved.
| 3.105516
| 3.219633
| 0.964556
|
labels = []
for snt in nerdoc.sentences:
xseq = [t.feature_list() for t in snt]
yseq = self.tagger.tag(xseq)
labels.append(yseq)
return labels
|
def tag(self, nerdoc)
|
Tag the given document.
Parameters
----------
nerdoc: estnltk.estner.Document
The document to be tagged.
Returns
-------
labels: list of lists of str
Predicted token labels for each sentence in the document.
| 4.433222
| 3.92224
| 1.130278
|
openbr = 0
cur = 0
for char in text:
cur +=1
if char == openDelim:
openbr += 1
if char == closeDelim:
openbr -= 1
if openbr == 0:
break
return text[:cur], cur
|
def balancedSlicer(text, openDelim='[', closeDelim=']')
|
Assuming that text contains a properly balanced expression, finds the prefix enclosed by the first balanced pair of delimiters.
:param openDelim: the opening delimiter
:param closeDelim: the closing delimiter
:return: a tuple of the text up to and including the matching closing delimiter, and its end position
| 2.755408
| 3.179015
| 0.866749
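A quick sanity check of balancedSlicer as listed above, assuming the helper has been pasted into the session or is otherwise importable; the walk-through values follow directly from the loop in the code.

```python
# Minimal usage sketch for balancedSlicer as defined above.
text = '[foo [bar]] tail'
sliced, consumed = balancedSlicer(text, openDelim='[', closeDelim=']')
print(sliced)    # '[foo [bar]]'  -- prefix up to and including the matching ']'
print(consumed)  # 11             -- number of characters consumed
```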
|
for root, dirs, filenames in os.walk(inp):
for f in filenames:
log = codecs.open(os.path.join(root, f), 'r')
j_obj = json.load(log)
j_obj = json_format(j_obj)
#not needed, cause the json_format takes care of the right structuring
#text = Text(j_obj)
textWriter(j_obj, out, verbose)
|
def json_2_text(inp, out, verbose = False)
|
Convert a Wikipedia article to a Text object.
Concatenates the sections in the Wikipedia file and rearranges other information so that it
can be interpreted as a Text object.
Links and other elements with start and end positions are annotated
as layers.
Parameters
----------
inp: str
The directory of parsed et.wikipedia articles in JSON format.
out: str
The output directory for the resulting .txt files.
verbose: boolean (default: False)
If True, prints every article title and the total count of converted files;
if False, prints the count after every 50th converted file.
Returns
-------
estnltk.text.Text
The Text object.
| 5.247864
| 4.981635
| 1.053442
|
match = Match(a.start, b.end, text[a.start:b.end], name)
for k, v in a.matches.items():
match.matches[k] = v
for k, v in b.matches.items():
match.matches[k] = v
if a.name is not None:
aa = copy(a)
del aa[MATCHES]
match.matches[a.name] = aa
if b.name is not None:
bb = copy(b)
del bb[MATCHES]
match.matches[b.name] = bb
return match
|
def concatenate_matches(a, b, text, name)
|
Concatenate matches a and b.
All submatches will be copied to result.
| 2.143438
| 2.085861
| 1.027604
|
res = copy(self)
if MATCHES in res:
del res[MATCHES]
if NAME in res:
del res[NAME]
res = {self.name: res}
for k, v in self.matches.items():
res[k] = v
if NAME in res[k]:
del res[k][NAME]
return res
|
def dict(self)
|
Dictionary representing this match and all child symbol matches.
| 3.297526
| 3.023471
| 1.090642
|
return re.compile('|'.join([re.escape(c) for c in markers]))
|
def regex_from_markers(markers)
|
Given a string of characters, construct a regex that matches them.
Parameters
----------
markers: str
A string containing the marker characters.
Returns
-------
regex
The regular expression matching the given markers.
| 4.358694
| 8.682264
| 0.502023
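A short illustration of the marker regex described above; regex_from_markers is assumed to be available in the session, and the marker string here is purely hypothetical.

```python
import re

# Build a pattern that matches any single marker character.
markers = '~?]<'
regex = re.compile('|'.join([re.escape(c) for c in markers]))  # equivalent to regex_from_markers(markers)
print(regex.sub('', 'va~lu<s'))  # 'valus' -- markers stripped from the word
```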
|
if six.PY2:
if isinstance(word, unicode):
return word.encode('utf-8')
else:
return word.decode('utf-8').encode('utf-8') # make sure it is real utf8, otherwise complain
else: # ==> Py3
if isinstance(word, bytes):
return word.decode('utf-8') # bytes must be in utf8
return word
|
def convert(word)
|
This method converts the given `word` to UTF-8 encoding and `bytes` type for the
SWIG wrapper.
| 4.342689
| 4.242505
| 1.023614
|
word, analysis = morphresult
return {
'text': deconvert(word),
'analysis': [postprocess_analysis(a, trim_phonetic, trim_compound) for a in analysis]
}
|
def postprocess_result(morphresult, trim_phonetic, trim_compound)
|
Postprocess vabamorf wrapper output.
| 4.904606
| 4.727903
| 1.037375
|
global phonetic_markers
global phonetic_regex
if root in phonetic_markers:
return root
else:
return phonetic_regex.sub('', root)
|
def trim_phonetics(root)
|
Function that trims phonetic markup from the root.
Parameters
----------
root: str
The string from which to remove the phonetic markup.
Returns
-------
str
The string with phonetic markup removed.
| 3.746546
| 4.194435
| 0.893218
|
global compound_regex
if not phonetic:
root = trim_phonetics(root)
if not compound:
root = trim_compounds(root)
return root
|
def get_root(root, phonetic, compound)
|
Get the root form without markers.
Parameters
----------
root: str
The word root form.
phonetic: boolean
If True, keep phonetic markup in the root form; otherwise it is trimmed.
compound: boolean
If True, keep compound word markers in the root form; otherwise they are trimmed.
| 5.045793
| 5.760799
| 0.875884
|
global all_markers
if root in all_markers or root in ['-', '_']: # special case
return [[root]]
groups = []
for group in root.split('-'):
toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')]
groups.append(toks)
return groups
|
def get_group_tokens(root)
|
Function to extract tokens in hyphenated groups (saunameheks-tallimeheks).
Parameters
----------
root: str
The root form.
Returns
-------
list of (list of str)
List of grouped root tokens.
| 7.419909
| 7.813946
| 0.949573
|
return Vabamorf.instance().fix_spelling(words, join, joinstring)
|
def fix_spelling(words, join=True, joinstring=' ')
|
Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using the
default behaviour of the string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
| 22.180882
| 38.872929
| 0.5706
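A minimal usage sketch for the module-level fix_spelling() wrapper documented above; the import path is an assumption and may differ between estnltk versions, and the sample input is hypothetical.

```python
from estnltk.vabamorf.morf import fix_spelling  # assumed import path

print(fix_spelling('tere maaailm'))              # corrected text as one string
print(fix_spelling('tere maaailm', join=False))  # corrected words as a list
```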
|
return Vabamorf.instance().synthesize(lemma, form, partofspeech, hint, guess, phonetic)
|
def synthesize(lemma, form, partofspeech='', hint='', guess=True, phonetic=False)
|
Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
| 7.3371
| 14.772761
| 0.496664
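A minimal sketch of calling the module-level synthesize() wrapper documented above; the import path and the concrete lemma/form values are assumptions for illustration only.

```python
from estnltk.vabamorf.morf import synthesize  # assumed import path

# Synthesize plural partitive forms for the noun lemma 'pood'.
forms = synthesize('pood', form='pl p', partofspeech='S')
print(forms)  # list of synthesized word forms
```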
|
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf
|
def instance()
|
Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if none exists. It also creates a new instance in case the
process has been forked.
| 4.169577
| 2.477213
| 1.683172
|
# if input is a string, then tokenize it
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
morfresults = self._morf.analyze(
vm.StringVector(words),
kwargs.get('disambiguate', True),
kwargs.get('guess', True),
True, # phonetic and compound information
kwargs.get('propername', True))
trim_phonetic = kwargs.get('phonetic', False)
trim_compound = kwargs.get('compound', True)
return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults]
|
def analyze(self, words, **kwargs)
|
Perform morphological analysis and disambiguation of given text.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using the
default behaviour of the string.split() function.
disambiguate: boolean (default: True)
Disambiguate the output and remove inconsistent analyses.
guess: boolean (default: True)
Use guessing in case of unknown words.
propername: boolean (default: True)
Perform additional analysis of proper names.
compound: boolean (default: True)
Add compound word markers to root forms.
phonetic: boolean (default: False)
Add phonetic information to root forms.
Returns
-------
list of (list of dict)
List of analyses for each word in the input.
| 5.156601
| 4.151871
| 1.241994
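A minimal sketch of running the analyze() method documented above via Vabamorf.instance() (shown earlier in this table); the import path is an assumption, and the result keys follow the postprocess_result() structure listed earlier ('text' and 'analysis').

```python
from estnltk.vabamorf.morf import Vabamorf  # assumed import path

vm_instance = Vabamorf.instance()
result = vm_instance.analyze('Tere maailm', guess=True, propername=True)
for word in result:
    # each entry has the surface form and a list of analysis dicts
    print(word['text'], [a['root'] for a in word['analysis']])
```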
|
words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])
disambiguated = self._morf.disambiguate(words)
return [postprocess_result(mr, False, True) for mr in disambiguated]
|
def disambiguate(self, words)
|
Disambiguate previously analyzed words.
Parameters
----------
words: list of dict
A sentence of words.
Returns
-------
list of dict
Sentence of disambiguated words.
| 9.30879
| 12.761566
| 0.729439
|
if isinstance(words, six.string_types):
words = words.split()
# convert words to native strings
words = [convert(w) for w in words]
spellresults = self._morf.spellcheck(words, suggestions)
results = []
for spellresult in spellresults:
suggestions = [deconvert(s) for s in spellresult.suggestions]
result = {
'text': deconvert(spellresult.word),
'spelling': spellresult.spelling,
'suggestions': suggestions
}
results.append(result)
return results
|
def spellcheck(self, words, suggestions=True)
|
Spellcheck given sentence.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using the
default behaviour of the string.split() function.
suggestions: boolean (default: True)
Add spell suggestions to result.
Returns
-------
list of dict
Each dictionary contains the following values:
'text': the original word
'spelling': True, if the word was spelled correctly
'suggestions': list of suggested strings in case of incorrect spelling
| 2.979122
| 3.390992
| 0.87854
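A sketch of inspecting spellcheck() output, based on the result structure built in the code above ('text', 'spelling', 'suggestions'); the import path and the sample sentence are assumptions.

```python
from estnltk.vabamorf.morf import Vabamorf  # assumed import path

vm_instance = Vabamorf.instance()
for res in vm_instance.spellcheck('üks kaks kolmm', suggestions=True):
    if not res['spelling']:
        # print the misspelled word together with the suggested corrections
        print(res['text'], '->', res['suggestions'])
```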
|
fixed_words = []
for word in self.spellcheck(words, suggestions=True):
if word['spelling']:
fixed_words.append(word['text'])
else:
suggestions = word['suggestions']
if len(suggestions) > 0:
fixed_words.append(suggestions[0])
else:
fixed_words.append(word['text'])
if join:
return joinstring.join(fixed_words)
else:
return fixed_words
|
def fix_spelling(self, words, join=True, joinstring=' ')
|
Simple function for quickly correcting misspelled words.
Parameters
----------
words: list of str or str
Either a list of pretokenized words or a string. In case of a string, it will be split using the
default behaviour of the string.split() function.
join: boolean (default: True)
Should we join the list of words into a single string.
joinstring: str (default: ' ')
The string that will be used to join together the fixed words.
Returns
-------
str
In case join is True
list of str
In case join is False.
| 2.289819
| 2.504337
| 0.914341
|
words = self._morf.synthesize(
convert(lemma.strip()),
convert(form.strip()),
convert(partofspeech.strip()),
convert(hint.strip()),
guess,
phonetic
)
return [deconvert(w) for w in words]
|
def synthesize(self, lemma, form, partofspeech='', hint='', guess=True, phonetic=False)
|
Synthesize a single word based on given morphological attributes.
Note that spellchecker does not respect pre-tokenized words and concatenates
token sequences such as "New York".
Parameters
----------
lemma: str
The lemma of the word(s) to be synthesized.
form: str
The form of the word(s) to be synthesized.
partofspeech: str
Part-of-speech.
hint: str
Hint.
guess: boolean (default: True)
Use heuristics when synthesizing unknown words.
phonetic: boolean (default: False)
Add phonetic markup to synthesized words.
Returns
-------
list
List of synthesized words.
| 3.629281
| 4.549115
| 0.797799
|
# depending on how the morphological analysis was added, there may be
# phonetic markup. Remove it, if it exists.
for word in sentence:
for analysis in word[ANALYSIS]:
analysis[ROOT] = analysis[ROOT].replace('~', '')
analysis[ROOT] = re.sub('[?<\]]([aioueöäõü])', '\\1', analysis[ROOT])
return json.dumps({WORDS: sentence})
|
def prepare_sentence(self, sentence)
|
Prepare the sentence for segment detection.
| 12.255211
| 12.225444
| 1.002435
|
max_index = 0
max_depth = 1
stack_of_indexes = [ max_index ]
for token in sentence:
if CLAUSE_ANNOT not in token:
token[CLAUSE_IDX] = stack_of_indexes[-1]
else:
        # Opening annotations
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KIILU_ALGUS":
                # Go one level deeper, start the next embedded clause (kiil)
max_index += 1
stack_of_indexes.append(max_index)
if (len(stack_of_indexes) > max_depth):
max_depth = len(stack_of_indexes)
token[CLAUSE_IDX] = stack_of_indexes[-1]
        # Closing annotations
for annotation in token[CLAUSE_ANNOT]:
if annotation == "KINDEL_PIIR":
                # Move on at the same level, start the next clause
max_index += 1
stack_of_indexes[-1] = max_index
elif annotation == "KIILU_LOPP":
                # Step back one level, close one embedded clause
stack_of_indexes.pop()
return sentence
|
def annotate_indices(self, sentence)
|
Add clause indexes to already annotated sentence.
| 5.383855
| 5.243688
| 1.026731
|
annotations = []
for token in sentence:
data = {CLAUSE_IDX: token[CLAUSE_IDX]}
if CLAUSE_ANNOT in token:
if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY
elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START
elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:
data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END
annotations.append(data)
return annotations
|
def rename_annotations(self, sentence)
|
Function that renames and restructures clause information.
| 3.78301
| 3.660176
| 1.033559
|
''' Re-formats time duration in seconds (*sec*) into more easily readable
form, where (days,) hours, minutes, and seconds are explicitly shown.
Returns the new duration as a formatted string.
'''
import time
if sec < 86400:
    # Idea from: http://stackoverflow.com/a/1384565
    return time.strftime('%H:%M:%S', time.gmtime(sec))
else:
    days = int(sec / 86400)   # 86400 seconds in a day
    secs = sec % 86400
    return str(days)+'d, '+time.strftime('%H:%M:%S', time.gmtime(secs))
|
def format_time( sec )
|
Re-formats time duration in seconds (*sec*) into more easily readable
form, where (days,) hours, minutes, and seconds are explicitly shown.
Returns the new duration as a formatted string.
| 5.157755
| 2.529081
| 2.039379
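A quick check of format_time() as defined above (assuming the helper is available in the session): 3661 seconds is one hour, one minute and one second.

```python
# Durations under a day are rendered as HH:MM:SS.
print(format_time(3661))  # '01:01:01'
```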
|
''' Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
'''
if verbose:
print(' processing '+file_name+' ... ', end="" )
# Tokenize text into sentences
start = timer()
text = text.tokenize_sentences()
all_sentences = len(text[SENTENCES])
end = timer()
if verbose:
print(' (tok time: '+format_time( end-start )+')', end="" )
if all_sentences > max_sentences:
# Acquire spans of length *max_sentences* from the text
start = timer()
i = 0
spans = []
len_total = 0
while i < all_sentences:
startSent = text[SENTENCES][i]
endSent = text[SENTENCES][min(i+(max_sentences-1), all_sentences-1)]
span = (startSent[START], endSent[END])
len_total += (span[1]-span[0])
spans.append(span)
i += max_sentences
# Divide the text into spans
text_spans = text.texts_from_spans(spans)
assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: '+str(len_total)+'/'+str(len(text.text))
new_texts = []
for i, small_text in enumerate( text_spans ):
newText = Text( small_text )
for key in text.keys():
if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
newText[key] = text[key]
newText['_text_split_id'] = i
newText['_text_split_origin'] = str(spans[i]) # Convert it to string; Otherwise, split_by(*) may mistakenly consider
# it a layer and may run into error while trying to split it;
newText['_text_split_file'] = file_name
#print( json.dumps(newText) )
new_texts.append( newText )
end = timer()
if verbose:
print(' (split time: '+format_time( end-start )+')', end="" )
print(' (sents: '+str(all_sentences)+', new_texts:'+str(len(new_texts))+')', end="")
print()
return new_texts
else:
if verbose:
print(' (sents: '+str(all_sentences)+', no_split)', end=" \n")
return [text]
|
def split_Text( text, file_name, verbose = True )
|
Tokenizes the *text* (from *file_name*) into sentences, and if the number of
sentences exceeds *max_sentences*, splits the text into smaller texts.
Returns a list containing the original text (if no splitting was required),
or a list containing results of the splitting (smaller texts);
| 4.304159
| 3.581969
| 1.201618
|
''' Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
'''
name = os.path.basename( old_file_name )
if '.' in name:
new_name = re.sub('\.([^.]+)$', suffix+'.\\1', name)
else:
new_name = name + suffix
new_path = os.path.join( out_dir, new_name )
start = timer()
#write_document( text, new_path ) # <--- this leaves indent=2 - takes too much extra space ...
o_f = codecs.open( new_path, mode='wb', encoding='ascii' )
o_f.write( json.dumps( text ) )
o_f.close()
end = timer()
timestamp = format_time( end-start )
if verbose:
print(' ==> '+new_path+' (file writing time: '+timestamp+')' )
|
def write_Text_into_file( text, old_file_name, out_dir, suffix='__split', verbose=True )
|
Based on *old_file_name*, *suffix* and *out_dir*, constructs a new file name and
writes *text* (in the ascii normalised JSON format) into the new file.
| 4.944479
| 3.425338
| 1.443501
|
documents = []
for fnm in get_filenames(root, prefix, suffix):
path = os.path.join(root, fnm)
docs = parse_tei_corpus(path, target, encoding)
for doc in docs:
doc[FILE] = fnm
documents.extend(docs)
return documents
|
def parse_tei_corpora(root, prefix='', suffix='.xml', target=['artikkel'], encoding=None)
|
Parse documents from TEI style XML files.
Gives each document FILE attribute that denotes the original filename.
Parameters
----------
root: str
The directory path containing the TEI corpora XML files.
prefix: str
The prefix of filenames to include (default: '')
suffix: str
The suffix of filenames to include (default: '.xml')
target: list of str
List of <div> types that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
Corpus containing parsed documents from all files. The file path
is stored in FILE attribute of the documents.
| 2.87784
| 3.08326
| 0.933376
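A minimal sketch of reading a TEI corpus directory with the function documented above; the import path and the directory name are assumptions for illustration.

```python
from estnltk.teicorpus import parse_tei_corpora  # assumed import path

docs = parse_tei_corpora('corpora/aja_epl', prefix='aja_epl', suffix='.xml',
                         target=['artikkel'])
print(len(docs))  # number of parsed documents across all matching files
```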
|
with open(path, 'rb') as f:
html_doc = f.read()
if encoding:
html_doc = html_doc.decode( encoding )
soup = BeautifulSoup(html_doc, 'html5lib')
title = soup.find_all('title')[0].string
documents = []
for div1 in soup.find_all('div1'):
documents.extend(parse_div(div1, dict(), target))
return tokenize_documents(documents)
|
def parse_tei_corpus(path, target=['artikkel'], encoding=None)
|
Parse documents from a TEI style XML file.
Parameters
----------
path: str
The path of the XML file.
target: list of str
List of <div> types that are considered documents in the XML files (default: ["artikkel"]).
encoding: str
Encoding to be used for decoding the content of the XML file. If not specified (default),
then no separate decoding step is applied.
Returns
-------
list of estnltk.text.Text
| 3.42362
| 3.804889
| 0.899795
|
documents = []
div_type = soup.get('type', None)
div_title = list(soup.children)[0].string.strip()
if div_type in target:
div_authors = soup.find_all('author')
document = {
'type': div_type,
'title': div_title,
'paragraphs': parse_paragraphs(soup)
}
# add author, if it exists
if len(div_authors) > 0:
div_author = div_authors[0].text.strip()
document['author'] = div_author
# add collected metadata
for k, v in metadata.items():
document[k] = v
documents.append(document)
else:
metadata[div_type] = div_title
# recurse subdivs
subdiv_name = get_subdiv(soup.name)
subdivs = []
if subdiv_name is not None:
subdivs = soup.find_all(subdiv_name)
if len(subdivs) > 0:
for subdiv in subdivs:
documents.extend(parse_div(subdiv, deepcopy(metadata), target))
return documents
|
def parse_div(soup, metadata, target)
|
Parse a <div> tag from the file.
The sections in XML files are given in <div1>, <div2> and <div3>
tags. Each such tag has a type and name (plus possibly more extra attributes).
If the div type is found in target variable, the div is parsed
into structured paragraphs, sentences and words.
Otherwise, the type and name are added as metadata and passed on
to the subdivs.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
metadata: dict
The metadata for parent divs.
target: list of str
List of <div> types that are considered documents in the XML files.
| 2.493209
| 2.524111
| 0.987757
|
paragraphs = []
for para in soup.find_all('p'):
sentences = []
for sent in para.find_all('s'):
sentence = sent.text.strip()
if len(sentence) > 0:
sentences.append(sentence)
if len(sentences) > 0:
paragraphs.append({'sentences': sentences})
return paragraphs
|
def parse_paragraphs(soup)
|
Parse sentences and paragraphs in the section.
Parameters
----------
soup: bs4.BeautifulSoup
The parsed XML data.
Returns
-------
list of (list of str)
List of paragraphs given as list of sentences.
| 1.80231
| 2.173346
| 0.829279
|
sep = '\n\n'
texts = []
for doc in docs:
text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
doc[TEXT] = text
del doc[PARAGRAPHS]
texts.append(Text(doc))
return texts
|
def tokenize_documents(docs)
|
Convert the imported documents to :py:class:`~estnltk.text.Text` instances.
| 3.616071
| 3.225907
| 1.120947
|
docs = read_json_corpus(DEFAULT_NER_DATASET)
trainer = NerTrainer(default_nersettings)
trainer.train(docs, DEFAULT_NER_MODEL_DIR)
|
def train_default_model()
|
Function for training the default NER model.
NB! It overwrites the default model, so do not use it unless
you know what you are doing.
The training data is in the file estnltk/corpora/estner.json.bz2.
The resulting model will be saved to estnltk/estner/models/default.bin.
| 9.554861
| 8.669349
| 1.102143
|
def process_line(self, line):
assert isinstance(line, str)
try:
self._process.stdin.write(as_binary(line))
self._process.stdin.write(as_binary('\n'))
self._process.stdin.flush()
result = as_unicode(self._process.stdout.readline())
if result == '':
stderr = as_unicode(self._process.stderr.read())
raise Exception('EOF encountered while reading stream. Stderr is {0}.'.format(stderr))
return result
except Exception:
self._process.terminate()
raise
|
Process a line of data.
Sends the data through the pipe to the process and flushes it. Reads the resulting line
and returns it.
Parameters
----------
line: str
The data sent to process. Make sure it does not contain any newline characters.
Returns
-------
str: The line returned by the Java process
Raises
------
Exception
In case EOF is encountered.
IOError
In case it was impossible to read or write from the subprocess standard input / output.
| null | null | null |
|
offsets = {}
current_seeked_offset_idx = 0
ordered_synset_idxes = sorted(synset_idxes)
with codecs.open(_SOI,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
# Looping on single line entries in case synset_indexes contains duplicates.
offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
current_seeked_offset_idx += 1
if current_seeked_offset_idx >= len(synset_idxes):
break
return [offsets[synset_idx] for synset_idx in synset_idxes]
|
def _get_synset_offsets(synset_idxes)
|
Returns the pointer offset in the WordNet file for every synset index.
Notes
-----
Internal function. Do not call directly.
Preserves order -- for [x,y,z] returns [offset(x),offset(y),offset(z)].
Parameters
----------
synset_idxes : list of ints
Synset IDs for which the offsets are needed.
Returns
-------
list of ints
Lists pointer offsets in Wordnet file.
| 3.134429
| 3.255229
| 0.962891
|
global parser
if parser is None:
parser = Parser(_WN_FILE)
synsets = []
for offset in synset_offsets:
raw_synset = parser.parse_synset(offset)
synset = Synset(raw_synset)
SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset
SYNSETS_DICT[synset.id] = synset
synsets.append(synset)
return synsets
|
def _get_synsets(synset_offsets)
|
Given synset offsets in the WordNet file, parses a synset object for every offset.
Notes
-----
Internal function. Do not call directly.
Stores every parsed synset into global synset dictionary under two keys:
synset's name lemma.pos.sense_no and synset's id (unique integer).
Parameters
----------
synset_offsets : list of ints
Lists pointer offsets from which synset objects will be parsed.
Returns
-------
list of Synsets
Lists synset objects which synset_offsets point to.
| 3.247719
| 3.383431
| 0.959889
|
pos = raw_synset.pos
literal = raw_synset.variants[0].literal
sense = "%02d"%raw_synset.variants[0].sense
return '.'.join([literal,pos,sense])
|
def _get_key_from_raw_synset(raw_synset)
|
Derives the synset key in the form of `lemma.pos.sense_no` from the provided eurown.py Synset instance.
Notes
-----
Internal function. Do not call directly.
Parameters
----------
raw_synset : eurown.Synset
Synset representation from which lemma, part-of-speech and sense is derived.
Returns
-------
string
Key of the synset in the form of `lemma.pos.sense_no`.
| 5.92749
| 5.348832
| 1.108184
|
if synset_key in SYNSETS_DICT:
return SYNSETS_DICT[synset_key]
def _get_synset_idx(synset_key):
with codecs.open(_SENSE_FILE,'rb', 'utf-8') as fin:
for line in fin:
split_line = line.split(':')
if split_line[0] == synset_key:
return int(split_line[1].strip())
return None
synset_idx = _get_synset_idx(synset_key)
if synset_idx == None:
return None
synset_offset = _get_synset_offsets([synset_idx])
synset = _get_synsets(synset_offset)
return synset[0]
|
def synset(synset_key)
|
Returns synset object with the provided key.
Notes
-----
Uses lazy initialization - synsets will be fetched from a dictionary after the first request.
Parameters
----------
synset_key : string
Unique synset identifier in the form of `lemma.pos.sense_no`.
Returns
-------
Synset
Synset with key `synset_key`.
None, if no match was found.
| 2.520475
| 2.599898
| 0.969452
|
def _get_synset_idxes(lemma,pos):
line_prefix_regexp = "%s:%s:(.*)"%(lemma,pos if pos else "\w+")
line_prefix = re.compile(line_prefix_regexp)
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
for line in fin:
result = line_prefix.match(line)
if result:
res_indices = [int(x) for x in result.group(1).split(' ')]
idxes.extend(res_indices)
LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)
return sorted(idxes)
synset_idxes = None
if lemma in LEM_POS_2_SS_IDX:
if pos in LEM_POS_2_SS_IDX[lemma]:
synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]
else:
synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]]
if not synset_idxes:
synset_idxes = _get_synset_idxes(lemma,pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
return stored_synsets + synsets
|
def synsets(lemma,pos=None)
|
Returns all synset objects which have lemma as one of the variant literals and fixed pos, if provided.
Notes
-----
Uses lazy initialization - parses only those synsets which are not yet initialized, others are fetched from a dictionary.
Parameters
----------
lemma : str
Lemma of the synset.
pos : str, optional
Part-of-speech specification of the searched synsets, defaults to None.
Returns
-------
list of Synsets
Synsets which contain `lemma` and of which part-of-speech is `pos`, if specified.
Empty list, if no match was found.
| 2.311126
| 2.33105
| 0.991453
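A minimal sketch of the synset()/synsets() lookups documented above; the import path and the example key 'koer.n.01' are assumptions for illustration, and wn.NOUN as the part-of-speech constant follows the all_synsets() description later in this table.

```python
from estnltk.wordnet import wn  # assumed import path

ss = wn.synset('koer.n.01')             # single synset by lemma.pos.sense_no key
print(ss.definition())
for s in wn.synsets('koer', pos=wn.NOUN):
    print(s, s.lemmas())
```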
|
def _get_unique_synset_idxes(pos):
idxes = []
with codecs.open(_LIT_POS_FILE,'rb', 'utf-8') as fin:
if pos == None:
for line in fin:
split_line = line.strip().split(':')
idxes.extend([int(x) for x in split_line[2].split()])
else:
for line in fin:
split_line = line.strip().split(':')
if split_line[1] == pos:
idxes.extend([int(x) for x in split_line[2].split()])
idxes = list(set(idxes))
idxes.sort()
return idxes
if pos in LOADED_POS:
return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]]
else:
synset_idxes = _get_unique_synset_idxes(pos)
if len(synset_idxes) == 0:
return []
stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
synset_offsets = _get_synset_offsets(unstored_synset_idxes)
synsets = _get_synsets(synset_offsets)
for synset in synsets:
for variant in synset.get_variants():
LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)
LOADED_POS.add(pos)
return stored_synsets + synsets
|
def all_synsets(pos=None)
|
Return all the synsets which have the provided pos.
Notes
-----
Returns thousands or tens of thousands of synsets; the first call will take a significant amount of time.
Useful for initializing synsets as each returned synset is also stored in a global dictionary for fast retrieval the next time.
Parameters
----------
pos : str
Part-of-speech of the sought synsets. Sensible alternatives are wn.ADJ, wn.ADV, wn.VERB, wn.NOUN and `*`.
If pos == `*`, all the synsets are retrieved and initialized for fast retrieval the next time.
Returns
-------
list of Synsets
Lists the Synsets which have `pos` as part-of-speech.
Empty list, if `pos` not in [wn.ADJ, wn.ADV, wn.VERB, wn.NOUN, `*`].
| 2.431928
| 2.458366
| 0.989246
|
if lemma_key in LEMMAS_DICT:
return LEMMAS_DICT[lemma_key]
split_lemma_key = lemma_key.split('.')
synset_key = '.'.join(split_lemma_key[:3])
lemma_literal = split_lemma_key[3]
lemma_obj = Lemma(synset_key,lemma_literal)
LEMMAS_DICT[lemma_key] = lemma_obj
return lemma_obj
|
def lemma(lemma_key)
|
Returns the Lemma object with the given key.
Parameters
----------
lemma_key : str
Key of the returned lemma.
Returns
-------
Lemma
Lemma matching the `lemma_key`.
| 2.574268
| 2.855635
| 0.901469
|
lemma = lemma.lower()
return [lemma_obj
for synset in synsets(lemma,pos)
for lemma_obj in synset.lemmas()
if lemma_obj.name.lower() == lemma]
|
def lemmas(lemma,pos=None)
|
Returns all the Lemma objects of which name is `lemma` and which have `pos` as part
of speech.
Parameters
----------
lemma : str
Literal of the sought Lemma objects.
pos : str, optional
Part of speech of the sought Lemma objects. If None, matches any part of speech.
Defaults to None
Returns
-------
list of Lemmas
Lists all the matched Lemmas.
| 3.431305
| 4.49106
| 0.76403
|
hypernyms |= set(self.hypernyms())
for synset in self.hypernyms():
    hypernyms |= synset._recursive_hypernyms(hypernyms)
return hypernyms
|
def _recursive_hypernyms(self, hypernyms)
|
Finds all the hypernyms of the synset transitively.
Notes
-----
Internal method. Do not call directly.
Parameters
----------
hypernyms : set of Synsets
A set of hypernyms met so far.
Returns
-------
set of Synsets
Returns the input set.
| 4.533209
| 9.65058
| 0.469734
|
if "min_depth" in self.__dict__:
return self.__dict__["min_depth"]
min_depth = 0
hypernyms = self.hypernyms()
if hypernyms:
min_depth = 1 + min(h._min_depth() for h in hypernyms)
self.__dict__["min_depth"] = min_depth
return min_depth
|
def _min_depth(self)
|
Finds minimum path length from the root.
Notes
-----
Internal method. Do not call directly.
Returns
-------
int
Minimum path length from the root.
| 2.493835
| 2.708141
| 0.920866
|
results = []
for relation_candidate in self._raw_synset.internalLinks:
if relation_candidate.name == relation:
linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept))
relation_candidate.target_concept = linked_synset._raw_synset
results.append(linked_synset)
return results
|
def get_related_synsets(self,relation)
|
Retrieves all the synsets which are related by given relation.
Parameters
----------
relation : str
Name of the relation via which the sought synsets are linked.
Returns
-------
list of Synsets
Synsets which are related via `relation`.
| 5.03409
| 5.507398
| 0.91406
|
ancestors = []
unvisited_ancestors = [(synset,1) for synset in self.get_related_synsets(relation)]
while len(unvisited_ancestors) > 0:
    ancestor_depth = unvisited_ancestors.pop()
    if ancestor_depth[1] > depth:
        continue
    unvisited_ancestors.extend([(synset,ancestor_depth[1]+1) for synset in ancestor_depth[0].get_related_synsets(relation)])
    ancestors.append(ancestor_depth[0])
return list(set(ancestors))
|
def closure(self, relation, depth=float('inf'))
|
Finds all the ancestors of the synset using provided relation.
Parameters
----------
relation : str
Name of the relation which is recursively used to fetch the ancestors.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations.
| 2.945737
| 3.339124
| 0.882188
|
visited = set()
hypernyms_next_level = set(self.hypernyms())
current_hypernyms = set(hypernyms_next_level)
while len(hypernyms_next_level) > 0:
current_hypernyms = set(hypernyms_next_level)
hypernyms_next_level = set()
for synset in current_hypernyms:
if synset in visited:
continue
visited.add(synset)
hypernyms_next_level |= set(synset.hypernyms())
return list(current_hypernyms)
|
def root_hypernyms(self)
|
Retrieves all the root hypernyms.
Returns
-------
list of Synsets
Roots via hypernymy relation.
| 2.131793
| 2.161979
| 0.986038
|
if self._raw_synset.pos != synset._raw_synset.pos:
return None
depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]
distance = self._shortest_path_distance(synset)
if distance >= 0:
return -math.log((distance + 1) / (2.0 * depth))
else:
return None
|
def lch_similarity(self, synset)
|
Calculates Leacock and Chodorow's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula -log( (dist(synset1,synset2)+1) / (2*maximum taxonomy depth) ).
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Leacock and Chodorow's similarity from `synset`.
None, if the synsets are not connected via hypernymy/hyponymy relations (which is obviously the case if their parts of speech don't match).
| 5.068255
| 3.819678
| 1.32688
|
lchs = self.lowest_common_hypernyms(target_synset)
lcs_depth = lchs[0]._min_depth() if lchs and len(lchs) else None
self_depth = self._min_depth()
other_depth = target_synset._min_depth()
if lcs_depth is None or self_depth is None or other_depth is None:
return None
return (2.0 * lcs_depth) / (self_depth + other_depth)
|
def wup_similarity(self, target_synset)
|
Calculates Wu and Palmer's similarity between the two synsets.
Notes
-----
Similarity is calculated using the formula ( 2*depth(least_common_subsumer(synset1,synset2)) ) / ( depth(synset1) + depth(synset2) )
Parameters
----------
synset : Synset
Synset from which the similarity is calculated.
Returns
-------
float
Wu and Palmer's similarity from `synset`.
| 3.443172
| 3.389496
| 1.015836
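A sketch of the two similarity measures documented above; the synset keys are assumptions for illustration, and both methods return None when no common hypernym path exists.

```python
from estnltk.wordnet import wn  # assumed import path

a = wn.synset('koer.n.01')
b = wn.synset('kass.n.01')
print(a.lch_similarity(b))  # -log((dist+1) / (2*max_taxonomy_depth)), or None
print(a.wup_similarity(b))  # 2*depth(lcs) / (depth(a) + depth(b)), or None
```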
|
return '\n'.join([variant.gloss for variant in self._raw_synset.variants if variant.gloss])
|
def definition(self)
|
Returns the definition of the synset.
Returns
-------
str
Definition of the synset as a new-line separated concatenated string from all its variants' definitions.
| 20.853884
| 8.643259
| 2.412734
|
examples = []
for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]:
examples.extend(example)
return examples
|
def examples(self)
|
Returns the examples of the synset.
Returns
-------
list of str
List of its variants' examples.
| 7.579453
| 4.6026
| 1.646776
|
return [lemma("%s.%s"%(self.name,variant.literal)) for variant in self._raw_synset.variants]
|
def lemmas(self)
|
Returns the synset's lemmas/variants' literal representations.
Returns
-------
list of Lemmas
List of its variations' literals as Lemma objects.
| 19.065947
| 11.639141
| 1.638089
|
self_hypernyms = self._recursive_hypernyms(set())
other_hypernyms = target_synset._recursive_hypernyms(set())
common_hypernyms = self_hypernyms.intersection(other_hypernyms)
annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
annot_common_hypernyms.sort(key = lambda annot_hypernym: annot_hypernym[1],reverse=True)
max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
if max_depth != None:
return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
else:
return None
|
def lowest_common_hypernyms(self,target_synset)
|
Returns the common hypernyms of the synset and the target synset, which are furthest from the closest roots.
Parameters
----------
target_synset : Synset
Synset with which the common hypernyms are sought.
Returns
-------
list of Synsets
Common synsets which are the furthest from the closest roots.
| 2.136561
| 2.230781
| 0.957763
|
return synset('%s.%s.%s.%s'%(self.synset_literal,self.synset_pos,self.synset_sense,self.literal))
|
def synset(self)
|
Returns the synset to which the given lemma belongs.
Returns
-------
Synset
The synset to which the given lemma belongs.
| 7.167572
| 7.332563
| 0.977499
|
''' Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s>
'''
if not isinstance( vabamorf_json, dict ):
raise Exception(' Expected dict as an input argument! ')
json_sentences = []
# 1) flatten paragraphs
if 'paragraphs' in vabamorf_json:
for pr in vabamorf_json['paragraphs']:
if 'sentences' in pr:
for sent in pr['sentences']:
json_sentences.append( sent )
# 2) flatten sentences
elif 'sentences' in vabamorf_json:
for sent in vabamorf_json['sentences']:
json_sentences.append( sent )
# 3) Iterate over sentences and perform conversion
results = []
for sentJson in json_sentences:
results.append('<s>')
for wordJson in sentJson['words']:
if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':
continue
wordStr = wordJson['text']
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson['analysis']:
root = analysisJson['root']
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson['partofspeech']
clitic = analysisJson['clitic']
form = analysisJson['form']
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if 'analysis' not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results
|
def convert_vm_json_to_mrf( vabamorf_json )
|
Converts from vabamorf's JSON output, given as dict, into pre-syntactic mrf
format, given as a list of lines, as in the output of etmrf.
The aimed format looks something like this:
<s>
Kolmandaks
kolmandaks+0 //_D_ //
kolmas+ks //_O_ sg tr //
kihutas
kihuta+s //_V_ s //
end
end+0 //_Y_ ? //
ise+0 //_P_ sg p //
soomlane
soomlane+0 //_S_ sg n //
</s>
| 7.198717
| 3.318348
| 2.169368
|
''' Converts from Text object into pre-syntactic mrf format, given as a list of
lines, as in the output of etmrf.
*) If the input Text has already been morphologically analysed, uses the existing
analysis;
*) If the input has not been analysed, performs the analysis with required settings:
   word guessing is turned on, proper-name analyses are turned off;
'''
from estnltk.text import Text
if not isinstance( text, Text ):
raise Exception(' Expected estnltk\'s Text as an input argument! ')
if not text.is_tagged( ANALYSIS ):
# If morphological analysis has not been performed yet, set the right arguments and
# perform the analysis
kwargs = text.get_kwargs()
kwargs['vabamorf'] = True
kwargs['guess'] = True
kwargs['propername'] = False
kwargs['disambiguate'] = False
text.__kwargs = kwargs
text = text.tag_analysis()
# Iterate over sentences and perform conversion
results = []
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
results.append('<s>')
for i in range(len(sentence)):
wordJson = sentence[i]
wordStr = wordJson[TEXT]
# Escape double quotation marks
wordStr = _esc_double_quotes( wordStr )
results.append( wordStr )
for analysisJson in wordJson[ANALYSIS]:
root = analysisJson[ROOT]
root = _esc_double_quotes( root )
# NB! ending="0" erineb ending=""-st:
# 1) eestlane (ending="0");
# 2) Rio (ending="") de (ending="") Jaineros;
ending = analysisJson[ENDING]
pos = analysisJson[POSTAG]
clitic = analysisJson[CLITIC]
form = analysisJson[FORM]
if pos == 'Z':
results.append( ''.join([' ',root,' //_Z_ //']) )
else:
results.append( ''.join([' ',root,'+',ending,clitic,' //', '_',pos,'_ ',form,' //']) )
if ANALYSIS not in wordJson:
results.append( ' '+'####' )
results.append('</s>')
return results
|
def convert_Text_to_mrf( text )
|
Converts from Text object into pre-syntactic mrf format, given as a list of
lines, as in the output of etmrf.
*) If the input Text has already been morphologically analysed, uses the existing
analysis;
*) If the input has not been analysed, performs the analysis with required settings:
word guessing is turned on, proper-name analyses are turned off;
| 9.209191
| 5.747602
| 1.602267
|
''' Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
different parts of the rule separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only 2nd element and 4th element are extracted from each line; 2nd element
will be the key of the dict entry, and 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped;
'''
rules = {}
in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if line.startswith('¤'):
continue
parts = line.split('@')
if len(parts) < 4:
raise Exception(' Unexpected format of the line: ', line)
if parts[1] not in rules:
rules[parts[1]] = []
rules[parts[1]].append( parts[3] )
in_f.close()
return rules
|
def load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile )
|
Loads rules that can be used to convert from Filosoft's mrf format to
syntactic analyzer's format. Returns a dict containing rules.
Expects that each line in the input file contains a single rule, and that
the different parts of the rule are separated by @ symbols, e.g.
1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-
32@_H_ ?@Substantiiv prooprium@_S_ prop @Noun proper@Np@NPCSX@Kesk-
313@_A_@Adjektiiv positiiv@_A_ pos@Adjective positive@A-p@ASX@salkus
Only the 2nd and the 4th element are extracted from each line; the 2nd element
will be the key of the dict entry, and the 4th element will be added to the
value of the dict entry (the value is a list of strings);
A list is used for storing values because one Filosoft's analysis could
be mapped to multiple syntactic analyzer's analyses;
Lines that have ¤ in the beginning of the line will be skipped;
| 10.488853
| 1.419205
| 7.390653
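A small sketch of what the returned rules dict looks like, using the first example line from the description above; 'tmorftrtabel.txt' is the file name mentioned later in this table for this rule set, and the function is assumed to be available in the session.

```python
# For the line '1@_S_ ?@Substantiiv apellatiiv@_S_ com @Noun common@Nc@NCSX@kesk-'
# the 2nd @-separated element becomes the key and the 4th element is appended
# to that key's value list:
rules = load_fs_mrf_to_syntax_mrf_translation_rules('tmorftrtabel.txt')
print(rules['_S_ ?'])  # contains '_S_ com '
```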
|
''' Converts given analysis line if it describes punctuation; Uses the set
of predefined punctuation conversion rules from _punctConversions;
_punctConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the converted line (same as input, if no conversion was
performed);
'''
for [pattern, replacement] in _punctConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
break
return line
|
def _convert_punctuation( line )
|
Converts given analysis line if it describes punctuation; Uses the set
of predefined punctuation conversion rules from _punctConversions;
_punctConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the converted line (same as input, if no conversion was
performed);
| 10.049456
| 1.754401
| 5.72814
|
''' Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '): # only consider lines of analysis
# 1) Convert punctuation
if _punctOrAbbrev.search(line):
mrf_lines[i] = _convert_punctuation( line )
if '_Y_' not in line:
i += 1
continue
# 2) Convert morphological analyses that have a form specified
withFormMatch = _morfWithForm.search(line)
if withFormMatch:
root = withFormMatch.group(1)
pos = withFormMatch.group(2)
formStr = withFormMatch.group(3)
forms = formStr.split(',')
all_new_lines = []
for form in forms:
morphKey = pos+' '+form.strip()
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
else:
withoutFormMatch = _morfWithoutForm.search(line)
if withoutFormMatch:
# 3) Convert morphological analyses that have only POS specified
root = withoutFormMatch.group(1)
pos = withoutFormMatch.group(2)
morphKey = pos
all_new_lines = []
if morphKey in conversion_rules:
newlines = [ ' '+root+' //'+_esc_que_mark(r)+' //' for r in conversion_rules[morphKey] ]
all_new_lines.extend( newlines )
if all_new_lines:
del mrf_lines[i]
for newline in all_new_lines:
mrf_lines.insert(i, newline)
i += len(newlines)
continue
i += 1
return mrf_lines
|
def convert_mrf_to_syntax_mrf( mrf_lines, conversion_rules )
|
Converts given lines from Filosoft's mrf format to syntactic analyzer's
format, using the morph-category conversion rules from conversion_rules,
and punctuation via method _convert_punctuation();
As a result of conversion, the input list mrf_lines will be modified,
and also returned after a successful conversion;
Morph-category conversion rules should be loaded via method
load_fs_mrf_to_syntax_mrf_translation_rules( rulesFile ),
usually from a file named 'tmorftrtabel.txt';
Note that the resulting list of lines likely has more lines than the
original list had, because the conversion often requires that the
original Filosoft's analysis is expanded into multiple analyses
suitable for the syntactic analyzer;
| 4.57232
| 2.186316
| 2.091335
|
''' Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if '_P_' in line: # only consider lines containing pronoun analyses
for [pattern, replacement] in _pronConversions:
lastline = line
line = re.sub(pattern, replacement, line)
if lastline != line:
mrf_lines[i] = line
break
i += 1
return mrf_lines
|
def convert_pronouns( mrf_lines )
|
Converts pronouns (analysis lines with '_P_') from Filosoft's mrf to
syntactic analyzer's mrf format;
Uses the set of predefined pronoun conversion rules from _pronConversions;
_pronConversions should be a list of lists, where each outer list stands
for a single conversion rule and inner list contains a pair of elements:
first is the regexp pattern and the second is the replacement, used in
re.sub( pattern, replacement, line )
Returns the input mrf list, with the lines converted from one format
to another;
| 7.421958
| 1.843983
| 4.02496
|
''' Removes duplicate analysis lines from mrf_lines.
Uses special logic for handling adposition analyses ('_K_ pre' && '_K_ post')
that do not have subcategorization information:
*) If a word has both adposition analyses, removes '_K_ pre';
*) If a word has '_K_ post', removes it;
Note that '_K_ pre' and '_K_ post' with subcategorization information will
be kept.
The parameter allow_to_delete_all specifies whether it is allowed to delete
all analysis or not. If allow_to_delete_all == False, then one last analysis
won't be deleted, regardless whether it should be deleted considering the
adposition-deletion rules;
The original implementation corresponds to the settings allow_to_delete_all=True
(and this is also the default value of the parameter);
Returns the input list where the removals have been applied;
'''
i = 0
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' '):
if Kpre_index != -1 and Kpost_index != -1:
# If there was both _K_pre and _K_post, add _K_pre to removables;
to_delete.append( Kpre_index )
elif Kpost_index != -1:
# If there was only _K_post, add _K_post to removables;
to_delete.append( Kpost_index )
# Delete found duplicates
if to_delete:
for k, j in enumerate(sorted(to_delete, reverse=True)):
# If we must preserve at least one analysis, and
# it has been found that all should be deleted, then
# keep the last one
if not allow_to_delete_all and \
analyses_count == len(to_delete) and \
k == len(to_delete) - 1:
continue
# Delete the analysis line
del mrf_lines[j]
i -= 1
# Reset the memory for each new word/token
seen_analyses = []
analyses_count = 0
to_delete = []
Kpre_index = -1
Kpost_index = -1
elif line.startswith(' '): # the line of analysis
analyses_count += 1
if line in seen_analyses:
# Remember line that has been already seen as a duplicate
to_delete.append( i )
else:
# Remember '_K pre' and '_K_ post' indices
if re.search('/_K_\s+pre\s+//', line):
Kpre_index = i
elif re.search('/_K_\s+post\s+//', line):
Kpost_index = i
# Remember that the line has already been seen
seen_analyses.append( line )
i += 1
return mrf_lines
|
def remove_duplicate_analyses( mrf_lines, allow_to_delete_all = True )
|
Removes duplicate analysis lines from mrf_lines.
Uses special logic for handling adposition analyses ('_K_ pre' && '_K_ post')
that do not have subcategorization information:
*) If a word has both adposition analyses, removes '_K_ pre';
*) If a word has '_K_ post', removes it;
Note that '_K_ pre' and '_K_ post' with subcategorization information will
be kept.
The parameter allow_to_delete_all specifies whether it is allowed to delete
all analyses or not. If allow_to_delete_all == False, then the last analysis
won't be deleted, regardless of whether it should be deleted according to the
adposition-deletion rules;
The original implementation corresponds to the settings allow_to_delete_all=True
(and this is also the default value of the parameter);
Returns the input list where the removals have been applied;
| 4.655549
| 2.298738
| 2.025263
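A tiny sketch of remove_duplicate_analyses() on hand-made mrf lines (the format follows the description above: token lines are flush left, analysis lines start with whitespace); the sample lines are hypothetical and the function is assumed to be available in the session.

```python
mrf_lines = [
    'kihutas',
    '    kihuta+s //_V_ s //',
    '    kihuta+s //_V_ s //',   # exact duplicate of the previous analysis
    'soomlane',                  # next token flushes the duplicate bookkeeping
    '    soomlane+0 //_S_ sg n //',
]
# The duplicate analysis of 'kihutas' is removed; the last token's analyses
# are only checked once another token line (or further processing) follows.
print(remove_duplicate_analyses(mrf_lines))
```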
|
''' Augments analysis lines with various hashtag information:
*) marks words with capital beginning with #cap;
*) marks finite verbs with #FinV;
*) marks nud/tud/mine/nu/tu/v/tav/mata/ja forms;
Hashtags are added at the end of the analysis content (just before the
last '//');
Returns the input list where the augmentation has been applied;
'''
i = 0
cap = False
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
cap = (line[0]).isupper()
elif line.startswith(' '):
if cap:
line = re.sub('(//.+\S)\s+//', '\\1 #cap //', line)
if _morfFinV.search( line ) and not _morfNotFinV.search( line ):
line = re.sub('(//.+\S)\s+//', '\\1 #FinV //', line)
for [pattern, replacement] in _mrfHashTagConversions:
line = re.sub(pattern, replacement, line)
mrf_lines[i] = line
i += 1
return mrf_lines
|
def add_hashtag_info( mrf_lines )
|
Augments analysis lines with various hashtag information:
*) marks words beginning with a capital letter with #cap;
*) marks finite verbs with #FinV;
*) marks nud/tud/mine/nu/tu/v/tav/mata/ja forms;
Hashtags are added at the end of the analysis content (just before the
last '//');
Returns the input list where the augmentation has been applied;
| 9.031023
| 2.773903
| 3.25571
|
''' Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
'''
rules = {}
nonSpacePattern = re.compile('^\S+$')
posTagPattern = re.compile('_._')
in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
lemma = ''
subcatRules = ''
for line in in_f:
line = line.rstrip()
if nonSpacePattern.match(line) and not posTagPattern.search(line):
lemma = line
elif posTagPattern.search(line):
subcatRules = line
if len(lemma) > 0 and len(subcatRules) > 0:
if lemma not in rules:
rules[lemma] = []
parts = subcatRules.split('&')
for part in parts:
part = part.strip()
rules[lemma].append( part )
lemma = ''
subcatRules = ''
in_f.close()
#print( len(rules.keys()) ) # 4484
return rules
|
def load_subcat_info( subcat_lex_file )
|
Loads subcategorization rules (for verbs and adpositions) from a text
file.
It is expected that the rules are given as pairs, where the first item is
the lemma (of verb/adposition), followed on the next line by the
subcategorization rule, in the following form:
on the left side of '>' is the condition (POS-tag requirement for the
lemma),
and
on the right side is the listing of subcategorization settings (hashtag
items, e.g. names of morphological cases of nominals);
If there are multiple subcategorization rules to be associated with a
single lemma, different rules are separated by '&'.
Example, an excerpt from the rules file:
läbi
_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen
läbista
_V_ >#NGP-P
läbistu
_V_ >#Intr
Returns a dict of lemma to a-list-of-subcatrules mappings.
| 8.013127
| 1.710586
| 4.684434
|
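For illustration, a standalone mirror of the '&'-splitting that load_subcat_info() applies to a single rule line (the line is the 'läbi' rule from the excerpt above):
rule_line = '_V_ >#Part &_K_ post >#gen |#nom |#el &_K_ pre >#gen'
rules_for_lemma = [part.strip() for part in rule_line.split('&')]
print(rules_for_lemma)
# -> ['_V_ >#Part', '_K_ post >#gen |#nom |#el', '_K_ pre >#gen']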
''' Adds subcategorization information (hashtags) to verbs and adpositions;
Argument subcat_rules must be a dict containing subcategorization information,
loaded via method load_subcat_info();
Performs word lemma lookups in subcat_rules, and in case of a match, checks
word part-of-speech conditions. If the POS conditions match, adds subcategorization
information either to a single analysis line, or to multiple analysis lines
(depending on the exact conditions in the rule);
Returns the input list where verb/adposition analyses have been augmented
with available subcategorization information;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if line.startswith(' '):
lemma_match = analysisLemmaPat.match(line)
if lemma_match:
lemma = lemma_match.group(1)
# Find whether there is subcategorization info associated
# with the lemma
if lemma in subcat_rules:
analysis_match = analysisPat.search(line)
if not analysis_match:
raise Exception(' Could not find analysis from the line:',line)
analysis = analysis_match.group(1)
for rule in subcat_rules[lemma]:
condition, addition = rule.split('>')
# Check the condition string; If there are multiple conditions,
# all must be satisfied for the rule to fire
condition = condition.strip()
conditions = condition.split()
satisfied1 = [ _check_condition(c, analysis) for c in conditions ]
if all( satisfied1 ):
#
# There can be multiple additions:
# 1) additions without '|' must be added to a single analysis line;
# 2) additions separated by '|' must be placed on separate analysis
# lines;
#
additions = addition.split('|')
j = i
# Add new line or lines
for a in additions:
line_copy = line if i == j else line[:]
items_to_add = a.split()
for item in items_to_add:
if not _check_condition(item, analysis):
line_copy = \
re.sub('(//.+\S)\s+//', '\\1 '+item+' //', line_copy)
if j == i:
# 1) replace the existing line
mrf_lines[i] = line_copy
else:
# 2) add a new line
mrf_lines.insert(i, line_copy)
j += 1
i = j - 1
# No need to search forward
break
i += 1
return mrf_lines
|
def tag_subcat_info( mrf_lines, subcat_rules )
|
Adds subcategorization information (hashtags) to verbs and adpositions;
Argument subcat_rules must be a dict containing subcategorization information,
loaded via method load_subcat_info();
Performs word lemma lookups in subcat_rules, and in case of a match, checks
word part-of-speech conditions. If the POS conditions match, adds subcategorization
information either to a single analysis line, or to multiple analysis lines
(depending on the exact conditions in the rule);
Returns the input list where verb/adposition analyses have been augmented
with available subcategorization information;
| 5.693232
| 3.221859
| 1.767064
|
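Each rule is split on '>' into a condition part and an addition part: all conditions must be satisfied, and additions separated by '|' are placed on separate analysis lines. A standalone mirror of that split, using a rule shaped like the excerpt above:
rule = '_K_ post >#gen |#nom |#el'
condition, addition = rule.split('>')
conditions = condition.strip().split()   # ['_K_', 'post'] -- all must be satisfied
additions = addition.split('|')          # one analysis line per addition
print(conditions, [a.strip() for a in additions])
# -> ['_K_', 'post'] ['#gen', '#nom', '#el']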
''' Converts given mrf lines from syntax preprocessing format to cg3 input
format:
*) surrounds words/tokens with "< and >"
*) surrounds word lemmas with " in analysis;
*) separates word endings from lemmas in analysis, and adds prefix 'L';
*) removes '//' and '//' from analysis;
*) converts hashtags to tags surrounded by < and >;
... and provides other various fix-ups;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
i = 0
while ( i < len(mrf_lines) ):
line = mrf_lines[i]
if not line.startswith(' ') and len(line) > 0:
#
# A line containing word/token
#
# a. surround the word with "< and >"
line = re.sub('^(\S.*)([\n\r]*)$','"<\\1>"\\2', line)
# b. fix the sentence begin/end tags
line = re.sub('<<(s|/s)>>', '<\\1>', line)
mrf_lines[i] = line
elif line.startswith(' '):
#
# A line containing analysis
#
# 1. perform various fixes:
line = re.sub('#cap #cap','cap', line)
line = re.sub('#cap','cap', line)
line = re.sub('\*\*CLB','CLB', line)
line = re.sub('#Correct!','<Correct!>', line)
line = re.sub('####','', line)
line = re.sub('#(\S+)','<\\1>', line)
line = re.sub('\$([,.;!?:<]+)','\\1', line)
line = re.sub('_Y_\s+\? _Z_','_Z_', line)
line = re.sub('_Y_\s+\?\s+_Z_','_Z_', line)
line = re.sub('_Y_\s+_Z_','_Z_', line)
line = re.sub('_Z_\s+\?','_Z_', line)
# 2. convert analysis line \w word ending
line = re.sub('^\s+(\S+)(.*)\+(\S+)\s*//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" L\\3 \\4 \\5 \\6', line)
# 3. convert analysis line \wo word ending
line = re.sub('^\s+(\S+)(.*)\s+//_(\S)_ (.*)//(.*)$', \
' "\\1\\2" \\3 \\4 \\5', line)
mrf_lines[i] = line
i += 1
return mrf_lines
|
def convert_to_cg3_input( mrf_lines )
|
Converts given mrf lines from syntax preprocessing format to cg3 input
format:
*) surrounds words/tokens with "< and >"
*) surrounds word lemmas with " in analysis;
*) separates word endings from lemmas in analysis, and adds prefix 'L';
*) removes '//' and '//' from analysis;
*) converts hashtags to tags surrounded by < and >;
... and provides other various fix-ups;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
| 5.64413
| 3.097444
| 1.82219
|
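A standalone illustration of the 'analysis line with word ending' conversion above; the input line is a made-up mrf-style example:
import re

line = '    suur+0 //_A_ pos sg n //'
cg3 = re.sub(r'^\s+(\S+)(.*)\+(\S+)\s*//_(\S)_ (.*)//(.*)$',
             r'    "\1\2" L\3 \4 \5 \6', line)
print(cg3.rstrip())   # ->     "suur" L0 A pos sg n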
''' Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_vm_json_to_mrf( json_dict )
return self.process_mrf_lines( mrf_lines, **kwargs )
|
def process_vm_json( self, json_dict, **kwargs )
|
Executes the preprocessing pipeline on vabamorf's JSON, given as a dict;
Returns a list: lines of analyses in the VISL CG3 input format;
| 21.729136
| 2.660294
| 8.167944
|
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs )
|
def process_Text( self, text, **kwargs )
|
Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
| 26.436394
| 3.25086
| 8.132123
|
''' Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
'''
converted1 = convert_mrf_to_syntax_mrf( mrf_lines, self.fs_to_synt_rules )
converted2 = convert_pronouns( converted1 )
converted3 = remove_duplicate_analyses( converted2, allow_to_delete_all=self.allow_to_remove_all )
converted4 = add_hashtag_info( converted3 )
converted5 = tag_subcat_info( converted4, self.subcat_rules )
converted6 = remove_duplicate_analyses( converted5, allow_to_delete_all=self.allow_to_remove_all )
converted7 = convert_to_cg3_input( converted6 )
return converted7
|
def process_mrf_lines( self, mrf_lines, **kwargs )
|
Executes the preprocessing pipeline on mrf_lines.
The input should be an analysis of the text in Filosoft's old mrf format;
Returns the input list, where elements (tokens/analyses) have been converted
into the new format;
| 6.812563
| 3.220235
| 2.115549
|
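For end-to-end use, the preprocessor object wraps these steps. A hedged usage sketch follows; the class name SyntaxPreprocessing and its import path are assumptions (only the method names process_Text()/process_mrf_lines() appear above), so those lines are left commented out:
from estnltk import Text

# from estnltk.syntax.syntax_preprocessing import SyntaxPreprocessing   # assumed name/path

text = Text('Ta läks kiiresti koju.')
text.tag_analysis()                        # morphological analysis is required as input
# preproc = SyntaxPreprocessing()          # loads the fs-to-synt and subcat rule files
# cg3_lines = preproc.process_Text(text)   # -> list of lines in VISL CG3 input format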
return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
|
def get_sources(src_dir='src', ending='.cpp')
|
Function to get a list of files ending with `ending` in `src_dir`.
| 2.475784
| 2.366194
| 1.046315
|
def _get_ANSI_colored_font( color ):
''' Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
'''
color = (color.replace('-','')).lower()
#
# Bright colors:
#
if color == 'white':
return '\033[97m'
elif color in ['cyan', 'aqua']:
return '\033[96m'
elif color in ['purple', 'magenta', 'fuchsia']:
return '\033[95m'
elif color == 'blue':
return '\033[94m'
elif color in ['yellow', 'gold']:
return '\033[93m'
elif color in ['green', 'lime']:
return '\033[92m'
elif color == 'red':
return '\033[91m'
#
# Dark colors:
#
elif color in ['grey', 'gray', 'silver']:
return '\033[37m'
elif color in ['darkcyan', 'teal']:
return '\033[36m'
elif color in ['darkpurple', 'darkmagenta']:
return '\033[35m'
elif color in ['darkblue', 'navy']:
return '\033[34m'
elif color in ['darkyellow', 'olive']:
return '\033[33m'
elif color == 'darkgreen':
return '\033[32m'
elif color in ['darkred', 'maroon']:
return '\033[31m'
return None
|
Returns an ANSI escape code (a string) corresponding to switching the font
to given color, or None, if the given color could not be associated with
the available colors.
See also:
https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
| null | null | null |
|
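A tiny standalone demonstration of what the returned escape codes do in an ANSI-compatible terminal; '\033[0m' is the standard ANSI reset code (it is not produced by the function above):
RED = '\033[91m'     # same code that _get_ANSI_colored_font('red') returns
RESET = '\033[0m'    # standard ANSI reset code (not returned by the function)
print(RED + 'highlighted span' + RESET + ' plain text')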
def _construct_end_index( spansStartingFrom ):
''' Creates an index which stores all annotations (from spansStartingFrom)
by their end position in text (annotation[END]).
Each end position (in the index) is associated with a list of
annotation objects (annotations ending at that position).
An annotation object is also a list containing the following information:
*) endTags -- graphic or textual formatting of the end tag,
*) START position (of the annotation);
*) layer name;
Multiple annotation objects ending at the same position are sorted by
their length: shorter annotations preceding the longer ones;
'''
endIndex = {}
for i in spansStartingFrom:
for span1 in spansStartingFrom[i]:
# keep the record of endTags, start positions (for determining the length)
# and layer names
endSpan1 = [ span1[4], span1[0], span1[2] ]
endLoc1 = span1[1]
if endLoc1 not in endIndex:
endIndex[endLoc1] = []
endIndex[endLoc1].append( endSpan1 )
else:
# Make sure that spans are inserted in the order of increasing length:
# shorter spans preceding the longer ones;
inserted = False
for i in range( len(endIndex[endLoc1]) ):
endSpan2 = endIndex[endLoc1][i]
# If an existing span is longer than the current span, insert the
# current span before the existing span ...
if endSpan2[1] < endSpan1[1]:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
elif endSpan2[1] == endSpan1[1] and endSpan2[2] < endSpan1[2]:
# If both spans have equal length, order the spans in the
# alphabetical order of layer names:
endIndex[endLoc1].insert( i, endSpan1 )
inserted = True
break
if not inserted:
endIndex[endLoc1].append( endSpan1 )
return endIndex
|
Creates an index which stores all annotations (from spansStartingFrom)
by their end position in text (annotation[END]).
Each end position (in the index) is associated with a list of
annotation objects (annotations ending at that position).
An annotation object is also a list containing the following information:
*) endTags -- graphic or textual formatting of the end tag,
*) START position (of the annotation);
*) layer name;
Multiple annotation objects ending at the same position are sorted by
their length: shorter annotations preceding the longer ones;
| null | null | null |
|
def _fix_overlapping_graphics( spansStartingFrom ):
''' Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
also causes the graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
'''
for startIndex in sorted( spansStartingFrom.keys() ):
for span1 in spansStartingFrom[startIndex]:
# If the span is not graphic, we have no worries - we can just skip it
if not span1[5]:
continue
# Otherwise: check for other graphic spans that overlap with the given span
span1Start = span1[0]
span1End = span1[1]
for i in range( span1Start, span1End ):
if i in spansStartingFrom:
for span2 in spansStartingFrom[i]:
span2Start = span2[0]
span2End = span2[1]
# If the spans are not the same, and the span2 is graphic
if span2 != span1 and span2[5]:
# if the overlapping graphic span ends before the current span,
# we have to restart the graphic formatting of given span after
# the end of the overlapping span
if span2End <= span1End:
if not span1[6]:
# If span1 is not bracketed, just add it at the end of
# the overlapping span
span2[4] += span1[3]
else:
# If span1 is bracketed, add it at the end of the
# overlapping span without brackets
wb = span1[3].rstrip('[')
span2[4] += wb
|
Provides a fix for overlapping annotations that are formatted graphically
(underlined or printed in non-default color).
If two graphically formatted annotations overlap, and if one annotation,
say A, ends within another annotation, say B, then ending of graphics of A
also causes the graphics of B to end, and so, the end of A should restart
the graphics of B for a continuous visualisation;
This method modifies ending tags in a way that annotations ending within
other annotations will also contain restarts of the corresponding (super)-
annotations, so that a continuous formatting is ensured.
| null | null | null |
|
def tprint( text, layers, markup_settings = None ):
''' Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and prints the formatted text to the
screen.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
'''
if markup_settings and len(layers) != len(markup_settings):
raise Exception(' Input arguments layers and markup_settings should be equal size lists.')
elif not markup_settings and len(layers) <= len(default_markup_settings):
# Use a subset from default markup settings
markup_settings = default_markup_settings[0:len(layers)]
elif not markup_settings:
raise Exception(' Input argument markup_settings not defined.')
print( _preformat(text, layers, markup_settings=markup_settings) )
|
Formats given text, adding a special ( ANSI-terminal compatible ) markup
to the annotations of given layers, and prints the formatted text to the
screen.
*) layers is a list containing names of the layers to be preformatted in
the text (these layers must be present in Text);
*) markup_settings should be a list containing annotation options for each
layer: one dict with options per layer;
One dict can contain the following visualization options:
* 'bracket' : True -- annotations will be surrounded with brackets; This
works in any terminal;
* 'underline' : True -- annotations will be underlined; This works in ANSI
compatible terminal;
* 'color' : ('red', 'green', 'blue' etc. ) -- annotated text will be
displayed in given color; this works in ANSI compatible
terminal;
*) Alternatively, if markup_settings is undefined, up to 12 layers can be
visualized following the default settings;
Parameters
----------
text: Text
a text object. Must contain given layers;
layers: list of str
list of layer names to be visualised;
markup_settings: list of dict
list of dictionaries containing user-defined visualization options;
(one dict per layer)
| null | null | null |
|
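A hedged usage sketch for tprint(); EstNLTK's Text is real public API, while the tprint import path is an assumption and the chosen layer/options are merely illustrative, so the call is left commented out:
from estnltk import Text

# from estnltk.prettyprinter.terminalprettyprinter import tprint   # assumed import path

text = Text('Arvo Pärt elab Eestis.')
text.tag_analysis()   # ensures the 'words' layer is present
# Bracket every word token and underline it in red:
# tprint(text, ['words'], [{'bracket': True, 'underline': True, 'color': 'red'}])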
css_prop = AES_CSS_MAP[aes_name]
if isinstance(css_value, list):
return get_mark_css_for_rules(aes_name, css_prop, css_value)
else:
return get_mark_simple_css(aes_name, css_prop, css_value)
|
def get_mark_css(aes_name, css_value)
|
Generate CSS class for <mark> tag.
Parameters
----------
aes_name: str
The name of the class.
css_value: str
The value for the CSS property defined by aes_name.
Returns
-------
list of str
The CSS codeblocks
| 2.994716
| 4.027111
| 0.743639
|
def _loadSubcatRelations( self, inputFile ):
''' Loads verb-nom/adv-vinf government relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(verb_description)\TAB(nom/adv_description)\TAB(vinf_description)
e.g.
leid NEG aeg;S;((sg|pl) (p)|adt) da
leid POS võimalus;S;(sg|pl) (n|p|g) da
Stores the loaded results in the class variables nomAdvWordTemplates, verbRules
and verbToVinf;
'''
self.nomAdvWordTemplates = dict()
self.verbRules = dict()
self.verbToVinf = dict()
in_f = codecs.open(inputFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
items = line.split('\t')
if len(items) == 3:
verb = items[0]
nounAdv = items[1]
vinf = items[2]
if nounAdv not in self.nomAdvWordTemplates:
(root,pos,form) = nounAdv.split(';')
if not root.startswith('^') and not root.endswith('$'):
root = '^'+root+'$'
constraints = {ROOT:root, POSTAG:pos}
if form:
constraints[FORM] = form
self.nomAdvWordTemplates[nounAdv] = WordTemplate(constraints)
if verb not in self.verbRules:
self.verbRules[verb] = []
if verb not in self.verbToVinf:
self.verbToVinf[verb] = []
self.verbRules[verb].append( (nounAdv, 'V_'+vinf) )
if 'V_'+vinf not in self.verbToVinf[verb]:
self.verbToVinf[verb].append( 'V_'+vinf )
else:
raise Exception(' Unexpected number of items in the input lexicon line: '+line)
in_f.close()
|
Loads verb-nom/adv-vinf government relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(verb_description)\TAB(nom/adv_description)\TAB(vinf_description)
e.g.
leid NEG aeg;S;((sg|pl) (p)|adt) da
leid POS võimalus;S;(sg|pl) (n|p|g) da
Stores the loaded results in the class variables nomAdvWordTemplates, verbRules
and verbToVinf;
| null | null | null |
|
def tokenMatchesNomAdvVinf( self, token, verb, vinf):
''' Determines whether the given token could be a dependent of the verb and the head
of the vinf (i.e. be located between them). If so, returns a list of the matching
morphological analyses of the word (via the method _getMatchingAnalysisIDs);
otherwise returns an empty list;
'''
if verb in self.verbRules:
for (nounAdv, vinf1) in self.verbRules[verb]:
if vinf == vinf1 and (self.nomAdvWordTemplates[nounAdv]).matches(token):
return _getMatchingAnalysisIDs( token, self.nomAdvWordTemplates[nounAdv] )
return []
|
Determines whether the given token could be a dependent of the verb and the head
of the vinf (i.e. be located between them). If so, returns a list of the matching
morphological analyses of the word (via the method _getMatchingAnalysisIDs);
otherwise returns an empty list;
| null | null | null |
|
def extendChainsInSentence( self, sentence, foundChains ):
''' Applies the method self.extendChainsInClause() to each clause of the given sentence.
'''
# 1) Preprocessing
clauses = getClausesByClauseIDs( sentence )
# 2) Extend verb chains in each clause
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
self.extendChainsInClause(clause, clauseID, foundChains)
|
Applies the method self.extendChainsInClause() to each clause of the given sentence.
| null | null | null |
|
def _canBeExpanded( self, headVerbRoot, headVerbWID, suitableNomAdvExpansions, expansionVerbs, widToToken ):
''' Determines whether the context is clear/unambiguous enough for expanding the verb chain:
1) there is exactly one 'nom/adv' candidate and exactly one Vinf candidate;
2) the nom/adv does not belong to a larger phrase (checked via the method _isLikelyNotPhrase());
If the conditions are met, returns the verb to be added from the list expansionVerbs,
otherwise returns None;
'''
if len(suitableNomAdvExpansions)==1 and expansionVerbs:
# Check whether there is exactly one verb suitable as the expansion (if there
# are more, the context is dubious and it is hard to decide whether expanding
# is worthwhile or not)
suitableExpansionVerbs = \
[expVerb for expVerb in expansionVerbs if expVerb[2] == suitableNomAdvExpansions[0][2]]
if len( suitableExpansionVerbs ) == 1:
# Check that the nom/adv does not belong to a larger phrase (i.e. is not the
# head word of a phrase);
nomAdvWID = suitableNomAdvExpansions[0][0]
if self._isLikelyNotPhrase( headVerbRoot, headVerbWID, nomAdvWID, widToToken ):
return suitableExpansionVerbs[0]
return None
|
Determines whether the context is clear/unambiguous enough for expanding the verb chain:
1) there is exactly one 'nom/adv' candidate and exactly one Vinf candidate;
2) the nom/adv does not belong to a larger phrase (checked via the method _isLikelyNotPhrase());
If the conditions are met, returns the verb to be added from the list expansionVerbs,
otherwise returns None;
| null | null | null |
|
textStart = 0
#Split the text in sections. Hackish part, but seems to work fine.
entries = re.split("\n=", text[textStart:])
stack = [[]]
intro = {}
sectionTitleRegEx = re.compile(r'={1,}.+={2,}')
section = {}
section['text'] = entries[0]
counts = []
#Presumes the first section is always marked with 2 =
#First count is always 3. (\n=)= First Section of an Article ==
#Parens is omitted. Leaves three = marks.
counts.append(3)
sections = []
sections.append(section)
for i in entries[1:]:
section = {}
title = re.match(sectionTitleRegEx, i)
if title:
titleEnd = title.end()
title = title.group()
text = i[titleEnd:]
level = title.count('=')
section['title']=title.strip('= ')
section['text']=text
sections.append(section.copy())
counts.append(level)
#add images, links, references, tables
for section in sections:
text = section['text']
if 'wikitable' in text or '</table>' in text.lower():
section['text'], section['tables'] = tableCollector(text)
section = relatedArticles(section)
if '<ref' in text:
section = reffinder(section)
if imageRegEx.search(text):
section = imageParser(section)
section['text'] = section['text'].strip()
if ExtLinkBracketedRegex.search(text):
section = addExternalLinks(section)
if '[[' in text:
section = addIntLinks(section)
#clean uneven brackets and whatnot
#take extlink start:end w regex.
el = 'external_links'
if el in section.keys():
#section['text'] = section['text'].replace('[', '').replace(']', '')
text = section['text']
for link in section[el]:
label = link['label']
label = re.compile(re.escape(label))
m = label.search(text)
#if there are unbalanced brackets in the external
#links label inside text then it fails to mark the start and end
try:
link['start'] = m.start()
link['end'] = m.end()
except AttributeError:
print('Problem with external links start:end position!')
print(label)
print(text)
#datastructure nesting thanks to Timo!
if counts:
assert len(counts) == len(sections)
n = len(sections)
pos = 0
levels = [counts[0]]
while pos < n:
count = counts[pos]
elem = sections[pos]
level = levels[-1]
if count == level:
stack[-1].append(elem)
elif count >= level:
stack.append([elem])
levels.append(count)
else:
group = stack.pop()
stack[-1][-1]['sections'] = group
levels.pop()
continue
pos += 1
while len(stack) > 1:
group = stack.pop()
stack[-1][-1]['sections'] = group
stack = stack[0]
return stack
|
def sectionsParser(text)
|
:param text: the whole text of an wikipedia article
:return: a list of nested section objects
[{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."},
sections: [{title: "Rahvaarv",
text: "Eestis elab..."},
{title: "Ajalugu",
text: "..."}],],
| 5.729589
| 5.583734
| 1.026121
|
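A small standalone helper for walking the nested structure returned by sectionsParser(); the invoking call is commented out because article_text (raw wiki markup of an article) is assumed to be available:
def walk_sections(sections, depth=0):
    # Print an indented outline of section titles; the intro section has no title.
    for sec in sections:
        print('  ' * depth + sec.get('title', '<intro>'))
        walk_sections(sec.get('sections', []), depth + 1)

# walk_sections(sectionsParser(article_text))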
return ''.join([c for c in text if c in self.alphabet])
|
def clean(self, text)
|
Remove all unwanted characters from text.
| 5.254635
| 4.380015
| 1.199684
|
return ''.join(sorted(set([c for c in text if c not in self.alphabet])))
|
def invalid_characters(self, text)
|
Give simple list of invalid characters present in text.
| 5.494483
| 4.417948
| 1.243673
|
result = defaultdict(list)
for idx, char in enumerate(text):
if char not in self.alphabet:
start = max(0, idx-context_size)
end = min(len(text), idx+context_size)
result[char].append(text[start:end])
return result
|
def find_invalid_chars(self, text, context_size=20)
|
Find invalid characters in text and store information about
the findings.
Parameters
----------
context_size: int
How many characters to return as the context.
| 2.361109
| 2.75283
| 0.857702
|
result = defaultdict(list)
for text in texts:
for char, examples in self.find_invalid_chars(text, context_size).items():
result[char].extend(examples)
return result
|
def compute_report(self, texts, context_size=10)
|
Compute statistics of invalid characters on given texts.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
context_size: int
How many characters to return as the context.
Returns
-------
dict of (char -> list of str)
Returns a dictionary, where keys are invalid characters and
values are lists of context strings surrounding the
occurrences of those characters.
| 4.39242
| 3.654628
| 1.201879
|
result = list(self.compute_report(texts, context_size).items())
result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)
s = 'Analyzed {0} texts.\n'.format(len(texts))
if (len(texts)) == 0:
f.write(s)
return
if len(result) > 0:
s += 'Invalid characters and their counts:\n'
for c, examples in result:
s += '"{0}"\t{1}\n'.format(c, len(examples))
s += '\n'
for c, examples in result:
s += 'For character "{0}", found {1} occurrences.\nExamples:\n'.format(c, len(examples))
examples = sample(examples, min(len(examples), n_examples))
for idx, example in enumerate(examples):
s += 'example {0}: {1}\n'.format(idx+1, example)
s += '\n'
f.write(s)
else:
f.write('All OK\n')
|
def report(self, texts, n_examples=10, context_size=10, f=sys.stdout)
|
Compute statistics of invalid characters and print them.
Parameters
----------
texts: list of str
The texts to search for invalid characters.
n_examples: int
How many examples to display per invalid character.
context_size: int
How many characters to return as the context.
f: file
The file to print the report (default is sys.stdout)
| 2.707346
| 2.603886
| 1.039733
|
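A standalone mini-version of the invalid-character scan implemented by find_invalid_chars(); the real methods live on a checker class holding self.alphabet (its name is not shown here), so this sketch only mirrors the logic with a hand-picked alphabet:
from collections import defaultdict

alphabet = set('abcdefghijklmnopqrstuvwxyzõäöüšž .,!?-')
text = 'tere\u00ad maailm'            # contains a soft hyphen, which is not in the alphabet
context_size = 5

invalid = defaultdict(list)
for idx, char in enumerate(text):
    if char not in alphabet:
        start = max(0, idx - context_size)
        end = min(len(text), idx + context_size)
        invalid[char].append(text[start:end])
print(dict(invalid))                   # -> {'\xad': ['tere\xad maa']}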
''' Filters the dict of *kwargs*, keeping only arguments
whose keys are in *keep_list* and discarding all other
arguments.
Based on the filtering, constructs and returns a new
dict.
'''
new_kwargs = {}
for argName, argVal in kwargs.items():
if argName.lower() in keep_list:
new_kwargs[argName.lower()] = argVal
return new_kwargs
|
def _filter_kwargs(self, keep_list, **kwargs)
|
Filters the dict of *kwargs*, keeping only arguments
whose keys are in *keep_list* and discarding all other
arguments.
Based on the filtering, constructs and returns a new
dict.
| 6.08593
| 2.007453
| 3.031668
|
''' Augments given Text object with the syntactic information
from the *text_layer*. More specifically, adds information
about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token
in the Text object;
(!) Note: this method is added to provide some initial
consistency with MaltParser based syntactic parsing;
If a better syntactic parsing interface is achieved in
the future, this method will be deprecated ...
'''
j = 0
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
for i in range(len(sentence)):
estnltkToken = sentence[i]
vislcg3Token = text_layer[j]
parse_found = False
if PARSER_OUT in vislcg3Token:
if len( vislcg3Token[PARSER_OUT] ) > 0:
firstParse = vislcg3Token[PARSER_OUT][0]
# Fetch information about the syntactic relation:
estnltkToken['s_label'] = str(i)
estnltkToken['s_head'] = str(firstParse[1])
# Fetch the name of the surface syntactic relation
deprels = '|'.join( [p[0] for p in vislcg3Token[PARSER_OUT]] )
estnltkToken['s_rel'] = deprels
parse_found = True
if not parse_found:
raise Exception("(!) Unable to retrieve syntactic analysis for the ",\
estnltkToken, ' from ', vislcg3Token )
j += 1
|
def _augment_text_w_syntactic_info( self, text, text_layer )
|
Augments given Text object with the syntactic information
from the *text_layer*. More specifically, adds information
about SYNTAX_LABEL, SYNTAX_HEAD and DEPREL to each token
in the Text object;
(!) Note: this method is added to provide some initial
consistency with MaltParser based syntactic parsing;
If a better syntactic parsing interface is achieved in
the future, this method will be deprecated ...
| 7.84227
| 3.832631
| 2.046185
|
''' Rewrites dependency links in the text from sentence-based linking to clause-
based linking:
*) words which have their parent outside-the-clause will become root
nodes (will obtain link value -1), and
*) words which have their parent inside-the-clause will have parent index
according to word indices inside the clause;
'''
sent_start_index = 0
for sent_text in orig_text.split_by( SENTENCES ):
# 1) Create a mapping: from sentence-based dependency links to clause-based dependency links
mapping = dict()
cl_ind = sent_text.clause_indices
for wid, word in enumerate(sent_text[WORDS]):
firstSyntaxRel = sent_text[layer][wid][PARSER_OUT][0]
parentIndex = firstSyntaxRel[1]
if parentIndex != -1:
if cl_ind[parentIndex] != cl_ind[wid]:
# Parent of the word is outside the current clause: make root
# node from the current node
mapping[wid] = -1
else:
# Find the beginning of the clause
clause_start = cl_ind.index( cl_ind[wid] )
# Find the index of parent label in the clause
j = 0
k = 0
while clause_start + j < len(cl_ind):
if clause_start + j == parentIndex:
break
if cl_ind[clause_start + j] == cl_ind[wid]:
k += 1
j += 1
assert clause_start + j < len(cl_ind), '(!) Parent index not found for: '+str(parentIndex)
mapping[wid] = k
else:
mapping[wid] = -1
# 2) Overwrite old links with new ones
for local_wid in mapping.keys():
global_wid = sent_start_index + local_wid
for syntax_rel in orig_text[layer][global_wid][PARSER_OUT]:
syntax_rel[1] = mapping[local_wid]
# 3) Advance the index for processing the next sentence
sent_start_index += len(cl_ind)
return orig_text
|
def _create_clause_based_dep_links( orig_text, layer=LAYER_CONLL )
|
Rewrites dependency links in the text from sentence-based linking to clause-
based linking:
*) words which have their parent outside-the-clause will become root
nodes (will obtain link value -1), and
*) words which have their parent inside-the-clause will have parent index
according to word indices inside the clause;
| 5.572063
| 3.552166
| 1.568638
|
''' Sorts the analyses of all the words in the sentence.
This is required for consistency, because by default, analyses are
listed in arbitrary order; '''
for word in sentence:
if ANALYSIS not in word:
raise Exception( '(!) Error: no analysis found from word: '+str(word) )
else:
word[ANALYSIS] = sorted(word[ANALYSIS], \
key=lambda x : "_".join( [x[ROOT],x[POSTAG],x[FORM],x[CLITIC]] ))
return sentence
|
def __sort_analyses(sentence)
|
Sorts the analyses of all the words in the sentence.
This is required for consistency, because by default, analyses are
listed in arbitrary order;
| 7.892372
| 4.693582
| 1.681524
|
''' Converts given estnltk Text object into CONLL format and returns as a
string.
Uses given *feature_generator* to produce fields ID, FORM, LEMMA, CPOSTAG,
POSTAG, FEATS for each token.
Fields to predict (HEAD, DEPREL) will be left empty.
This method is used in preparing parsing & testing data for MaltParser.
Parameters
-----------
text : estnltk.text.Text
Morphologically analysed text from which the CONLL file is generated;
feature_generator : CONLLFeatGenerator
An instance of CONLLFeatGenerator, which has method *generate_features()*
for generating morphological features for a single token;
The aimed format looks something like this:
1 Öö öö S S sg|nom _ xxx _ _
2 oli ole V V indic|impf|ps3|sg _ xxx _ _
3 täiesti täiesti D D _ _ xxx _ _
4 tuuletu tuuletu A A sg|nom _ xxx _ _
5 . . Z Z Fst _ xxx _ _
'''
from estnltk.text import Text
if not isinstance( text, Text ):
raise Exception('(!) Unexpected type of input argument! Expected EstNLTK\'s Text. ')
try:
granularity = feature_generator.parseScope
except AttributeError:
granularity = SENTENCES
assert granularity in [SENTENCES, CLAUSES], '(!) Unsupported granularity: "'+str(granularity)+'"!'
sentenceStrs = []
for sentence_text in text.split_by( granularity ):
sentence_text[WORDS] = __sort_analyses( sentence_text[WORDS] )
for i in range(len( sentence_text[WORDS] )):
# Generate features ID, FORM, LEMMA, CPOSTAG, POSTAG, FEATS
strForm = feature_generator.generate_features( sentence_text, i )
# *** HEAD (syntactic parent)
strForm.append( '_' )
strForm.append( '\t' )
# *** DEPREL (label of the syntactic relation)
strForm.append( 'xxx' )
strForm.append( '\t' )
# *** PHEAD
strForm.append( '_' )
strForm.append( '\t' )
# *** PDEPREL
strForm.append( '_' )
sentenceStrs.append( ''.join( strForm ) )
sentenceStrs.append( '' )
return '\n'.join( sentenceStrs )
|
def convert_text_to_CONLL( text, feature_generator )
|
Converts given estnltk Text object into CONLL format and returns as a
string.
Uses given *feature_generator* to produce fields ID, FORM, LEMMA, CPOSTAG,
POSTAG, FEATS for each token.
Fields to predict (HEAD, DEPREL) will be left empty.
This method is used in preparing parsing & testing data for MaltParser.
Parameters
-----------
text : estnltk.text.Text
Morphologically analysed text from which the CONLL file is generated;
feature_generator : CONLLFeatGenerator
An instance of CONLLFeatGenerator, which has method *generate_features()*
for generating morphological features for a single token;
The aimed format looks something like this:
1 Öö öö S S sg|nom _ xxx _ _
2 oli ole V V indic|impf|ps3|sg _ xxx _ _
3 täiesti täiesti D D _ _ xxx _ _
4 tuuletu tuuletu A A sg|nom _ xxx _ _
5 . . Z Z Fst _ xxx _ _
| 6.073492
| 2.173155
| 2.79478
|
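A hedged usage sketch; the CONLLFeatGenerator import path is an assumption, so the actual conversion call is left commented out:
from estnltk import Text

# from estnltk.syntax.maltparser_support import CONLLFeatGenerator   # assumed import path

text = Text('Öö oli täiesti tuuletu.')
text.tag_analysis()
# conll_str = convert_text_to_CONLL(text, CONLLFeatGenerator())
# print(conll_str)   # one tab-separated CONLL line per token, as in the sample above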
''' Executes Maltparser on given (CONLL-style) input string, and
returns the result. The result is an array of lines from Maltparser's
output.
Parameters
----------
input_string: string
input text in CONLL format;
maltparser_jar: string
name of the Maltparser's jar file that should be executed;
model_name: string
name of the model that should be used;
maltparser_dir: string
the directory containing Maltparser's jar and the model file;
Few of the ideas were also borrowed from NLTK's MaltParser class,
see http://www.nltk.org/_modules/nltk/parse/malt.html for the reference;
'''
temp_input_file = \
tempfile.NamedTemporaryFile(prefix='malt_in.', mode='w', delete=False)
temp_input_file.close()
# We have to open separately here for writing, because Py 2.7 does not support
# passing parameter encoding='utf-8' to the NamedTemporaryFile;
out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
out_f.write( input_string )
out_f.close()
temp_output_file = tempfile.NamedTemporaryFile(prefix='malt_out.', mode='w', delete=False)
temp_output_file.close()
current_dir = os.getcwd()
os.chdir(maltparser_dir)
cmd = ['java', '-jar', os.path.join(maltparser_dir, maltparser_jar), \
'-c', model_name, \
'-i', temp_input_file.name, \
'-o', temp_output_file.name, \
'-m', 'parse' ]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.wait() != 0:
raise Exception(' Error on running Maltparser: ', p.stderr.read() )
os.chdir(current_dir)
results = []
in_f = codecs.open(temp_output_file.name, mode='r', encoding='utf-8')
for line in in_f:
results.append( line.rstrip() )
in_f.close()
if not temp_input_file.closed:
raise Exception('Temp input file unclosed!')
if not temp_output_file.closed:
raise Exception('Temp output file unclosed!')
if not out_f.closed:
raise Exception('Output file unclosed!')
if not in_f.closed:
raise Exception('Input file unclosed!')
# TODO: For some reason, the method gives "ResourceWarning: unclosed file"
# in Python 3.4, although, apparently, all file handles seem to be closed;
# Nothing seems to be wrong in Python 2.7;
os.remove(temp_input_file.name)
os.remove(temp_output_file.name)
return results
|
def _executeMaltparser( input_string, maltparser_dir, maltparser_jar, model_name )
|
Executes Maltparser on given (CONLL-style) input string, and
returns the result. The result is an array of lines from Maltparser's
output.
Parameters
----------
input_string: string
input text in CONLL format;
maltparser_jar: string
name of the Maltparser's jar file that should be executed;
model_name: string
name of the model that should be used;
maltparser_dir: string
the directory containing Maltparser's jar and the model file;
Few of the ideas were also borrowed from NLTK's MaltParser class,
see http://www.nltk.org/_modules/nltk/parse/malt.html for the reference;
| 3.082531
| 2.123948
| 1.451321
|
''' Loads syntactically annotated text from CONLL format input file and
returns as an array of tokens, where each token is represented as
an array in the format:
[sentenceID, wordID, tokenString, morphInfo, selfID, parentID]
If addDepRels == True, then the dependency relation label is also extracted
and added to the end of the array:
[sentenceID, wordID, tokenString, morphInfo, selfID, parentID, depRel]
If splitIntoSentences == True, the array of tokens is further divided
into subarrays representing sentences.
Example input:
2 Monstrumteleskoobid Monstrum_tele_skoop S S prop|pl|nom 0 ROOT _ _
3 ( ( Z Z Opr 4 xxx _ _
4 mosaiik- mosaiik A A pos|sg|nom 2 @<AN _ _
5 ja ja J J crd 6 @J _ _
6 mitmepeeglilised mitme_peegli=line A A pos|pl|nom 4 @<NN _ _
7 ) ) Z Z Cpr 6 xxx _ _
8 . . Z Z Fst 7 xxx _ _
'''
sentenceCount = 0
wordCountInSent = 0
tokens = []
in_f = codecs.open(in_file, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) == 0 or re.match('^\s+$', line):
sentenceCount += 1
wordCountInSent = 0
continue
features = line.split('\t')
if len(features) != 10:
raise Exception(' In file '+in_file+', line with unexpected format: "'+line+'" ')
selfLabel = features[0]
token = features[1]
lemma = features[2]
cpos = features[3]
pos = features[4]
form = features[5]
parentLabel = features[6]
tokens.append( [ str(sentenceCount), str(wordCountInSent), \
token, lemma+" "+pos+" "+form, selfLabel, parentLabel ] )
if addDepRels:
tokens[-1].append( features[7] )
wordCountInSent += 1
in_f.close()
if not splitIntoSentences:
return tokens
else:
sentences = []
lastSentID = ''
for tok in tokens:
if tok[0] != lastSentID:
sentences.append([])
sentences[-1].append(tok)
lastSentID = tok[0]
return sentences
|
def loadCONLLannotations( in_file, addDepRels = False, splitIntoSentences = True )
|
Loads syntactically annotated text from CONLL format input file and
returns as an array of tokens, where each token is represented as
an array in the format:
[sentenceID, wordID, tokenString, morphInfo, selfID, parentID]
If addDepRels == True, then the dependency relation label is also extracted
and added to the end of the array:
[sentenceID, wordID, tokenString, morphInfo, selfID, parentID, depRel]
If splitIntoSentences == True, the array of tokens is further divided
into subarrays representing sentences.
Example input:
2 Monstrumteleskoobid Monstrum_tele_skoop S S prop|pl|nom 0 ROOT _ _
3 ( ( Z Z Opr 4 xxx _ _
4 mosaiik- mosaiik A A pos|sg|nom 2 @<AN _ _
5 ja ja J J crd 6 @J _ _
6 mitmepeeglilised mitme_peegli=line A A pos|pl|nom 4 @<NN _ _
7 ) ) Z Z Cpr 6 xxx _ _
8 . . Z Z Fst 7 xxx _ _
| 4.643341
| 1.634114
| 2.841504
|
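A hedged sketch of consuming the returned structure; 'treebank.conll' is a hypothetical file name, and each token list follows the format described above:
sentences = loadCONLLannotations('treebank.conll', addDepRels=True)
for sentence in sentences:
    for tok in sentence:
        # tok == [sentenceID, wordID, tokenString, morphInfo, selfID, parentID, depRel]
        print(tok[2], '-> head id:', tok[5], ' deprel:', tok[6])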
''' Loads CONLL format data from given input file, and creates
estnltk Text objects from the data, one Text per each
sentence. Returns a list of Text objects.
By default, applies estnltk's morphological analysis, clause
detection, and verb chain detection to each input sentence.
If addDepRels == True, in addition to SYNTAX_LABEL and SYNTAX_HEAD,
surface syntactic function (DEPREL) is also attributed to each
token;
'''
from estnltk.text import Text
sentences = loadCONLLannotations( in_file, addDepRels = addDepRels, \
splitIntoSentences = True )
if verbose:
print( str(len(sentences))+' sentences loaded. ')
estnltkSentTexts = []
for i in range(len(sentences)):
s = sentences[i]
sentenceString = " ".join( [ t[2] for t in s ] )
sentText = Text(sentenceString, **kwargs)
sentText.tag_analysis()
sentText.tag_clauses()
sentText.tag_verb_chains()
sentText = dict(sentText)
if len(sentText[WORDS]) == len(s):
# Add the dependency syntactic information
for j in range(len(sentText[WORDS])):
estnltkWord = sentText[WORDS][j]
depSyntaxWord = s[j]
estnltkWord[SYNTAX_LABEL] = depSyntaxWord[4]
estnltkWord[SYNTAX_HEAD] = depSyntaxWord[5]
if addDepRels:
estnltkWord[DEPREL] = depSyntaxWord[6]
estnltkSentTexts.append( sentText )
if verbose:
print ('*', end = '')
else:
if verbose:
print("The sentence segmentation of dependency syntax differs from the estnltk's sentence segmentation:", len(sentText[WORDS]), ' vs ',len(s))
return estnltkSentTexts
|
def convertCONLLtoText( in_file, addDepRels = False, verbose = False, **kwargs )
|
Loads CONLL format data from given input file, and creates
estnltk Text objects from the data, one Text per each
sentence. Returns a list of Text objects.
By default, applies estnltk's morphological analysis, clause
detection, and verb chain detection to each input sentence.
If addDepRels == True, in addition to SYNTAX_LABEL and SYNTAX_HEAD,
surface syntactic function (DEPREL) is also attributed to each
token;
| 5.711041
| 3.002967
| 1.9018
|
''' Augments given Text object with the information from Maltparser's output.
More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and
DEPREL to each token in the Text object;
'''
j = 0
for sentence in text.divide( layer=WORDS, by=SENTENCES ):
sentence = __sort_analyses(sentence)
for i in range(len(sentence)):
estnltkToken = sentence[i]
maltparserToken = conll_str_array[j]
if len( maltparserToken ) > 1:
maltParserAnalysis = maltparserToken.split('\t')
if estnltkToken[TEXT] == maltParserAnalysis[1]:
# Fetch information about the syntactic relation:
estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0]
estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6]
# Fetch the name of the surface syntactic relation
estnltkToken[DEPREL] = maltParserAnalysis[7]
else:
raise Exception("A misalignment between Text and Maltparser's output: ",\
estnltkToken, maltparserToken )
j += 1
j += 1
|
def augmentTextWithCONLLstr( conll_str_array, text )
|
Augments given Text object with the information from Maltparser's output.
More specifically, adds information about SYNTAX_LABEL, SYNTAX_HEAD and
DEPREL to each token in the Text object;
| 5.875077
| 4.029716
| 1.457938
|
''' Collects clause with index *clause_id* from given *sentence_text*.
Returns a pair (clause, isEmbedded), where:
*clause* is a list of word tokens in the clause;
*isEmbedded* is a bool indicating whether the clause is embedded;
'''
clause = []
isEmbedded = False
indices = sentence_text.clause_indices
clause_anno = sentence_text.clause_annotations
for wid, token in enumerate(sentence_text[WORDS]):
if indices[wid] == clause_id:
if not clause and clause_anno[wid] == EMBEDDED_CLAUSE_START:
isEmbedded = True
clause.append((wid, token))
return clause, isEmbedded
|
def _get_clause_words( sentence_text, clause_id )
|
Collects clause with index *clause_id* from given *sentence_text*.
Returns a pair (clause, isEmbedded), where:
*clause* is a list of word tokens in the clause;
*isEmbedded* is a bool indicating whether the clause is embedded;
| 5.012431
| 2.841401
| 1.76407
|
''' Searches for quotation marks (both opening and closing) closest to
given location in sentence (given as word index *wid*);
If *fromRight == True* (default), searches from the right (all the
words having index greater than *wid*), otherwise, searches from the
left (all the words having index smaller than *wid*);
Returns index of the closest quotation mark found, or -1, if none was
found;
'''
i = wid
while (i > -1 and i < len(sentence_text[WORDS])):
token = sentence_text[WORDS][i]
if _pat_starting_quote.match(token[TEXT]) or \
_pat_ending_quote.match(token[TEXT]):
return i
i += 1 if fromRight else -1
return -1
|
def _detect_quotes( sentence_text, wid, fromRight = True )
|
Searches for quotation marks (both opening and closing) closest to
given location in sentence (given as word index *wid*);
If *fromRight == True* (default), searches from the right (all the
words having index greater than *wid*), otherwise, searches from the
left (all the words having index smaller than *wid*);
Returns index of the closest quotation mark found, or -1, if none was
found;
| 5.145335
| 2.06546
| 2.491133
|
''' Loads adposition government relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(word_lemma);(part_of_speech);(post|pre);(required_cases_regexp)
e.g.
ees;_K_;post;g
eest;_K_;post;g
enne;_K_;pre;p
Returns the loaded data as a dictionary;
'''
kSubCatRelations = dict()
in_f = codecs.open(inputFile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match("^#.+$", line):
items = line.split(';')
if len(items) == 4:
root = items[0]
partofspeech = items[1]
postPre = items[2]
morphPattern = items[3]
fpattern = '(sg|pl)\s'+morphPattern
if root not in kSubCatRelations:
kSubCatRelations[root] = []
kSubCatRelations[root].append( [postPre, fpattern] )
root_clean = root.replace('_', '')
if root != root_clean:
if root_clean not in kSubCatRelations:
kSubCatRelations[root_clean] = []
kSubCatRelations[root_clean].append( [postPre, fpattern] )
else:
raise Exception(' Unexpected number of items in the input lexicon line: '+line)
in_f.close()
return kSubCatRelations
|
def _loadKSubcatRelations( inputFile )
|
Loads adposition government relation patterns from the input file (inputFile).
Each pattern must be given on a separate line of the file, in the form:
(word_lemma);(part_of_speech);(post|pre);(required_cases_regexp)
e.g.
ees;_K_;post;g
eest;_K_;post;g
enne;_K_;pre;p
Returns the loaded data as a dictionary;
| 7.507693
| 2.229981
| 3.366707
|
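A hedged usage sketch; 'k_subcat_rules.txt' is a hypothetical file containing just the three example lines above, and the expected dictionary contents are shown in the comments (the '\\s' comes from prefixing the case pattern with '(sg|pl)\s'):
rels = _loadKSubcatRelations('k_subcat_rules.txt')
print(rels['ees'])    # -> [['post', '(sg|pl)\\sg']]
print(rels['enne'])   # -> [['pre', '(sg|pl)\\sp']]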
''' Given the adposition appearing in the sentence at the location i,
checks whether the adposition appears in the kSubCatRelsLexicon,
and if so, attempts to further detect whether the adposition is a
preposition or a postposition;
Returns a tuple (string, int), where the first item indicates the
type of adposition ('pre', 'post', '_'), and the second item points
to its possible child (index of the word in sentence, or -1, if
possible child was not detected from close range);
'''
curToken = sentence[i]
root = curToken[ANALYSIS][0][ROOT]
if root in kSubCatRelsLexicon:
for [postPre, fpattern] in kSubCatRelsLexicon[root]:
if postPre == 'post' and i-1 > -1:
lastTokenAnalysis = sentence[i-1][ANALYSIS][0]
if re.match(fpattern, lastTokenAnalysis[FORM]):
return ('post', i-1)
elif postPre == 'pre' and i+1 < len(sentence):
nextTokenAnalysis = sentence[i+1][ANALYSIS][0]
if re.match(fpattern, nextTokenAnalysis[FORM]):
return ('pre', i+1)
# If the word is not ambiguous between pre and post, but
# the possible child was not detected, return only the
# post/pre label:
if len(kSubCatRelsLexicon[root]) == 1:
return (kSubCatRelsLexicon[root][0][0], -1)
return ('_', -1)
|
def _detectKsubcatRelType( sentence, i, kSubCatRelsLexicon )
|
Given the adposition appearing in the sentence at the location i,
checks whether the adposition appears in the kSubCatRelsLexicon,
and if so, attempts to further detect whether the adposition is a
preposition or a postposition;
Returns a tuple (string, int), where the first item indicates the
type of adposition ('pre', 'post', '_'), and the second item points
to its possible child (index of the word in sentence, or -1, if
possible child was not detected from close range);
| 4.579482
| 2.216672
| 2.065927
|
''' Attempts to detect all possible K subcategorization relations from
given sentence, using the heuristic method _detectKsubcatRelType();
Returns a dictionary of relations where the key corresponds to the
index of its parent node (the K node) and the value corresponds to
index of its child.
If reverseMapping = True, the mapping is reversed: keys correspond
to children and values correspond to parent nodes (K-s);
'''
relationIndex = dict()
relationType = dict()
for i in range(len(sentence)):
estnltkWord = sentence[i]
# Pick the first analysis
firstAnalysis = estnltkWord[ANALYSIS][0]
if firstAnalysis[POSTAG] == 'K':
(grammCats, kChild) = _detectKsubcatRelType( sentence, i, kSubCatRelsLexicon )
if kChild != -1:
if reverseMapping:
relationIndex[ kChild ] = i
relationType[ kChild ] = grammCats
else:
relationIndex[ i ] = kChild
relationType[ i ] = grammCats
return relationIndex, relationType
|
def _detectPossibleKsubcatRelsFromSent( sentence, kSubCatRelsLexicon, reverseMapping = False )
|
Attempts to detect all possible K subcategorization relations from
given sentence, using the heuristic method _detectKsubcatRelType();
Returns a dictionary of relations where the key corresponds to the
index of its parent node (the K node) and the value corresponds to
index of its child.
If reverseMapping = True, the mapping is reversed: keys correspond
to children and values correspond to parent nodes (K-s);
| 5.674182
| 2.581877
| 2.197697
|
''' Attempts to detect all possible K subcategorization relations from
given sentence, using the heuristic methods _detectKsubcatRelType()
and _detectPossibleKsubcatRelsFromSent();
Returns a dictionary where the keys correspond to token indices,
and values are grammatical features related to K subcat relations.
Not all tokens in the sentence are indexed, but only tokens relevant
to K subcat relations;
If addFeaturesToK == True, grammatical features are added to K-s,
otherwise, grammatical features are added to K's child tokens.
'''
features = dict()
# Add features to the K (adposition)
if addFeaturesToK:
for i in range(len(sentence)):
estnltkWord = sentence[i]
# Pick the first analysis
firstAnalysis = estnltkWord[ANALYSIS][0]
if firstAnalysis[POSTAG] == 'K':
(grammCats, kChild) = _detectKsubcatRelType( sentence, i, kSubCatRelsLexicon )
features[i] = grammCats
# Add features to the noun governed by K
else:
relationIndex, relationType = \
_detectPossibleKsubcatRelsFromSent( sentence, kSubCatRelsLexicon, reverseMapping = True )
for i in relationIndex:
features[i] = relationType[i]
return features
|
def _findKsubcatFeatures( sentence, kSubCatRelsLexicon, addFeaturesToK = True )
|
Attempts to detect all possible K subcategorization relations from
given sentence, using the heuristic methods _detectKsubcatRelType()
and _detectPossibleKsubcatRelsFromSent();
Returns a dictionary where the keys correspond to token indices,
and values are grammatical features related to K subcat relations.
Not all tokens in the sentence are indexed, but only tokens relevant
to K subcat relations;
If addFeaturesToK == True, grammatical features are added to K-s,
otherwise, grammatical features are added to K's child tokens.
| 6.967932
| 2.96851
| 2.347282
|