| source_code (string · 45–7.24k chars) | function_name (string · 1–111 chars) | messages (list) |
|---|---|---|
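Each row pairs a Python function whose name is masked as `x` (`source_code`) with the original name (`function_name`) and the same pair rendered as a chat conversation (`messages`). A minimal sketch of how one such row could be assembled, assuming plain Python dicts; the `build_row` helper is hypothetical, but the system prompt is quoted verbatim from the rows below:

```python
# Hypothetical builder for one dataset row; the three-field schema
# (source_code: str, function_name: str, messages: list) mirrors the
# column header above.
SYSTEM_PROMPT = ("Given the source code of a python function, "
                 "suggest a fitting name for the function.")

def build_row(source_code: str, function_name: str) -> dict:
    return {
        "source_code": source_code,
        "function_name": function_name,
        "messages": [
            {"content": SYSTEM_PROMPT, "role": "system"},
            {"content": source_code, "role": "user"},
            {"content": function_name, "role": "assistant"},
        ],
    }
```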
def x(self):
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.spans(PARAGRAPHS)
|
paragraph_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(PARAGRAPHS):\n self.tokenize_paragraphs()\n return self.spans(PARAGRAPHS)\n",
"role": "user"
},
{
"content": "paragraph_spans",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.starts(PARAGRAPHS)
|
paragraph_starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(PARAGRAPHS):\n self.tokenize_paragraphs()\n return self.starts(PARAGRAPHS)\n",
"role": "user"
},
{
"content": "paragraph_starts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    return self.ends(PARAGRAPHS)
|
paragraph_ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(PARAGRAPHS):\n self.tokenize_paragraphs()\n return self.ends(PARAGRAPHS)\n",
"role": "user"
},
{
"content": "paragraph_ends",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(PARAGRAPHS):
        self.tokenize_paragraphs()
    tok = self.__sentence_tokenizer
    text = self.text
    dicts = []
    for paragraph in self[PARAGRAPHS]:
        para_start, para_end = paragraph[START], paragraph[END]
        para_text = text[para_start:para_end]
        if not self.is_tagged(WORDS):
            spans = tok.span_tokenize(para_text)
            for start, end in spans:
                dicts.append({'start': start + para_start,
                              'end': end + para_start})
        else:
            para_words = [w for w in self[WORDS]
                          if w[START] >= para_start and w[END] <= para_end]
            para_word_texts = [w[TEXT] for w in para_words]
            try:
                sents = tok.sentences_from_tokens(para_word_texts)
            except AttributeError as e:
                raise
            i = 0
            for sentence in sents:
                j = 0
                firstToken = None
                lastToken = None
                while i < len(para_words):
                    if para_words[i][TEXT] != sentence[j]:
                        raise Exception('Error on aligning: ',
                                        para_word_texts, ' and ', sentence,
                                        ' at positions ', i, j)
                    if j == 0:
                        firstToken = para_words[i]
                    if j == len(sentence) - 1:
                        lastToken = para_words[i]
                        i += 1
                        break
                    j += 1
                    i += 1
                sentenceDict = {'start': firstToken[START],
                                'end': lastToken[END]}
                dicts.append(sentenceDict)
    for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans',
                            'sentence_starts', 'sentence_ends']:
        try:
            delattr(self, sentence_attrib)
        except AttributeError:
            pass
    self[SENTENCES] = dicts
    return self
|
tokenize_sentences
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(PARAGRAPHS):\n self.tokenize_paragraphs()\n tok = self.__sentence_tokenizer\n text = self.text\n dicts = []\n for paragraph in self[PARAGRAPHS]:\n para_start, para_end = paragraph[START], paragraph[END]\n para_text = text[para_start:para_end]\n if not self.is_tagged(WORDS):\n spans = tok.span_tokenize(para_text)\n for start, end in spans:\n dicts.append({'start': start + para_start, 'end': end +\n para_start})\n else:\n para_words = [w for w in self[WORDS] if w[START] >= para_start and\n w[END] <= para_end]\n para_word_texts = [w[TEXT] for w in para_words]\n try:\n sents = tok.sentences_from_tokens(para_word_texts)\n except AttributeError as e:\n raise\n i = 0\n for sentence in sents:\n j = 0\n firstToken = None\n lastToken = None\n while i < len(para_words):\n if para_words[i][TEXT] != sentence[j]:\n raise Exception('Error on aligning: ',\n para_word_texts, ' and ', sentence,\n ' at positions ', i, j)\n if j == 0:\n firstToken = para_words[i]\n if j == len(sentence) - 1:\n lastToken = para_words[i]\n i += 1\n break\n j += 1\n i += 1\n sentenceDict = {'start': firstToken[START], 'end':\n lastToken[END]}\n dicts.append(sentenceDict)\n for sentence_attrib in ['sentences', 'sentence_texts',\n 'sentence_spans', 'sentence_starts', 'sentence_ends']:\n try:\n delattr(self, sentence_attrib)\n except AttributeError:\n pass\n self[SENTENCES] = dicts\n return self\n",
"role": "user"
},
{
"content": "tokenize_sentences",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.texts(SENTENCES)
|
sentence_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n return self.texts(SENTENCES)\n",
"role": "user"
},
{
"content": "sentence_texts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.spans(SENTENCES)
|
sentence_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n return self.spans(SENTENCES)\n",
"role": "user"
},
{
"content": "sentence_spans",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.starts(SENTENCES)
|
sentence_starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n return self.starts(SENTENCES)\n",
"role": "user"
},
{
"content": "sentence_starts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.ends(SENTENCES)
|
sentence_ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n return self.ends(SENTENCES)\n",
"role": "user"
},
{
"content": "sentence_ends",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    tok = self.__word_tokenizer
    text = self.text
    dicts = []
    for sentence in self[SENTENCES]:
        sent_start, sent_end = sentence[START], sentence[END]
        sent_text = text[sent_start:sent_end]
        spans = tok.span_tokenize(sent_text)
        for start, end in spans:
            dicts.append({START: start + sent_start,
                          END: end + sent_start,
                          TEXT: sent_text[start:end]})
    self[WORDS] = dicts
    return self
|
tokenize_words
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(SENTENCES):\n self.tokenize_sentences()\n tok = self.__word_tokenizer\n text = self.text\n dicts = []\n for sentence in self[SENTENCES]:\n sent_start, sent_end = sentence[START], sentence[END]\n sent_text = text[sent_start:sent_end]\n spans = tok.span_tokenize(sent_text)\n for start, end in spans:\n dicts.append({START: start + sent_start, END: end + sent_start,\n TEXT: sent_text[start:end]})\n self[WORDS] = dicts\n return self\n",
"role": "user"
},
{
"content": "tokenize_words",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    sentences = self.divide(WORDS, SENTENCES)
    for sentence in sentences:
        texts = [word[TEXT] for word in sentence]
        all_analysis = vabamorf.analyze(texts, **self.__kwargs)
        for word, analysis in zip(sentence, all_analysis):
            word[ANALYSIS] = analysis[ANALYSIS]
            word[TEXT] = analysis[TEXT]
    return self
|
tag_analysis
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n sentences = self.divide(WORDS, SENTENCES)\n for sentence in sentences:\n texts = [word[TEXT] for word in sentence]\n all_analysis = vabamorf.analyze(texts, **self.__kwargs)\n for word, analysis in zip(sentence, all_analysis):\n word[ANALYSIS] = analysis[ANALYSIS]\n word[TEXT] = analysis[TEXT]\n return self\n",
"role": "user"
},
{
"content": "tag_analysis",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [word[TEXT] for word in self[WORDS]]
|
word_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return [word[TEXT] for word in self[WORDS]]\n",
"role": "user"
},
{
"content": "word_texts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.spans(WORDS)
|
word_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return self.spans(WORDS)\n",
"role": "user"
},
{
"content": "word_spans",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.starts(WORDS)
|
word_starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return self.starts(WORDS)\n",
"role": "user"
},
{
"content": "word_starts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.ends(WORDS)
|
word_ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return self.ends(WORDS)\n",
"role": "user"
},
{
"content": "word_ends",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [word[ANALYSIS] for word in self.words]
|
analysis
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return [word[ANALYSIS] for word in self.words]\n",
"role": "user"
},
{
"content": "analysis",
"role": "assistant"
}
] |
def x(self, element, sep='|'):
    return [self.__get_key(word[ANALYSIS], element, sep)
            for word in self.words]
|
get_analysis_element
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, element, sep='|'):\n return [self.__get_key(word[ANALYSIS], element, sep) for word in self.words\n ]\n",
"role": "user"
},
{
"content": "get_analysis_element",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ROOT)
|
roots
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(ROOT)\n",
"role": "user"
},
{
"content": "roots",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(LEMMA)
|
lemmas
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(LEMMA)\n",
"role": "user"
},
{
"content": "lemmas",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]]
|
lemma_lists
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return [[an[LEMMA] for an in word[ANALYSIS]] for word in self[WORDS]]\n",
"role": "user"
},
{
"content": "lemma_lists",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ENDING)
|
endings
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(ENDING)\n",
"role": "user"
},
{
"content": "endings",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(FORM)
|
forms
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(FORM)\n",
"role": "user"
},
{
"content": "forms",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(POSTAG)
|
postags
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(POSTAG)\n",
"role": "user"
},
{
"content": "postags",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return [POSTAG_DESCRIPTIONS.get(tag, '')
            for tag in self.get_analysis_element(POSTAG)]
|
postag_descriptions
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return [POSTAG_DESCRIPTIONS.get(tag, '') for tag in self.\n get_analysis_element(POSTAG)]\n",
"role": "user"
},
{
"content": "postag_descriptions",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    return self.get_analysis_element(ROOT_TOKENS)
|
root_tokens
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n return self.get_analysis_element(ROOT_TOKENS)\n",
"role": "user"
},
{
"content": "root_tokens",
"role": "assistant"
}
] |
def x(self):
    descs = []
    for postag, form in zip(self.postags, self.forms):
        desc = VERB_TYPES.get(form, '')
        if len(desc) == 0:
            toks = form.split(' ')
            if len(toks) == 2:
                plur_desc = PLURALITY.get(toks[0], None)
                case_desc = CASES.get(toks[1], None)
                toks = []
                if plur_desc is not None:
                    toks.append(plur_desc)
                if case_desc is not None:
                    toks.append(case_desc)
                desc = ' '.join(toks)
        descs.append(desc)
    return descs
|
descriptions
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n descs = []\n for postag, form in zip(self.postags, self.forms):\n desc = VERB_TYPES.get(form, '')\n if len(desc) == 0:\n toks = form.split(' ')\n if len(toks) == 2:\n plur_desc = PLURALITY.get(toks[0], None)\n case_desc = CASES.get(toks[1], None)\n toks = []\n if plur_desc is not None:\n toks.append(plur_desc)\n if case_desc is not None:\n toks.append(case_desc)\n desc = ' '.join(toks)\n descs.append(desc)\n return descs\n",
"role": "user"
},
{
"content": "descriptions",
"role": "assistant"
}
] |
def x(self):
    if (not self.__syntactic_parser or
            not isinstance(self.__syntactic_parser, VISLCG3Parser)):
        self.__syntactic_parser = VISLCG3Parser()
    return self.tag_syntax()
|
tag_syntax_vislcg3
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.__syntactic_parser or not isinstance(self.\n __syntactic_parser, VISLCG3Parser):\n self.__syntactic_parser = VISLCG3Parser()\n return self.tag_syntax()\n",
"role": "user"
},
{
"content": "tag_syntax_vislcg3",
"role": "assistant"
}
] |
def x(self):
    if (not self.__syntactic_parser or
            not isinstance(self.__syntactic_parser, MaltParser)):
        self.__syntactic_parser = MaltParser()
    return self.tag_syntax()
|
tag_syntax_maltparser
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.__syntactic_parser or not isinstance(self.\n __syntactic_parser, MaltParser):\n self.__syntactic_parser = MaltParser()\n return self.tag_syntax()\n",
"role": "user"
},
{
"content": "tag_syntax_maltparser",
"role": "assistant"
}
] |
def x(self):
    if self.__syntactic_parser is None:
        self.__syntactic_parser = load_default_syntactic_parser()
    if not self.is_tagged(ANALYSIS):
        if isinstance(self.__syntactic_parser, MaltParser):
            if 'disambiguate' not in self.__kwargs:
                self.__kwargs['disambiguate'] = True
            self.tag_analysis()
        elif isinstance(self.__syntactic_parser, VISLCG3Parser):
            if 'disambiguate' not in self.__kwargs:
                self.__kwargs['disambiguate'] = False
            self.tag_analysis()
    return self.__syntactic_parser.parse_text(self, **self.__kwargs)
|
tag_syntax
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.__syntactic_parser is None:\n self.__syntactic_parser = load_default_syntactic_parser()\n if not self.is_tagged(ANALYSIS):\n if isinstance(self.__syntactic_parser, MaltParser):\n if 'disambiguate' not in self.__kwargs:\n self.__kwargs['disambiguate'] = True\n self.tag_analysis()\n elif isinstance(self.__syntactic_parser, VISLCG3Parser):\n if 'disambiguate' not in self.__kwargs:\n self.__kwargs['disambiguate'] = False\n self.tag_analysis()\n return self.__syntactic_parser.parse_text(self, **self.__kwargs)\n",
"role": "user"
},
{
"content": "tag_syntax",
"role": "assistant"
}
] |
def x(self, layer=None):
    if not layer and self.__syntactic_parser:
        if isinstance(self.__syntactic_parser, MaltParser):
            layer = LAYER_CONLL
        elif isinstance(self.__syntactic_parser, VISLCG3Parser):
            layer = LAYER_VISLCG3
    if not layer and self.is_tagged(LAYER_CONLL):
        layer = LAYER_CONLL
    elif not layer and self.is_tagged(LAYER_VISLCG3):
        layer = LAYER_VISLCG3
    if layer:
        if layer == LAYER_CONLL:
            if not self.is_tagged(layer):
                self.tag_syntax_maltparser()
            return self.syntax_trees_conll
        elif layer == LAYER_VISLCG3:
            if not self.is_tagged(layer):
                self.tag_syntax_vislcg3()
            return self.syntax_trees_vislcg3
        else:
            raise ValueError('(!) Unexpected layer name: ' + str(layer))
    else:
        raise ValueError('(!) Missing layer name! ')
|
syntax_trees
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer=None):\n if not layer and self.__syntactic_parser:\n if isinstance(self.__syntactic_parser, MaltParser):\n layer = LAYER_CONLL\n elif isinstance(self.__syntactic_parser, VISLCG3Parser):\n layer = LAYER_VISLCG3\n if not layer and self.is_tagged(LAYER_CONLL):\n layer = LAYER_CONLL\n elif not layer and self.is_tagged(LAYER_VISLCG3):\n layer = LAYER_VISLCG3\n if layer:\n if layer == LAYER_CONLL:\n if not self.is_tagged(layer):\n self.tag_syntax_maltparser()\n return self.syntax_trees_conll\n elif layer == LAYER_VISLCG3:\n if not self.is_tagged(layer):\n self.tag_syntax_vislcg3()\n return self.syntax_trees_vislcg3\n else:\n raise ValueError('(!) Unexpected layer name: ' + str(layer))\n else:\n raise ValueError('(!) Missing layer name! ')\n",
"role": "user"
},
{
"content": "syntax_trees",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__ner_tagger is None:
        self.__ner_tagger = load_default_ner_tagger()
    self.__ner_tagger.tag_document(self)
    return self
|
tag_labels
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n if self.__ner_tagger is None:\n self.__ner_tagger = load_default_ner_tagger()\n self.__ner_tagger.tag_document(self)\n return self\n",
"role": "user"
},
{
"content": "tag_labels",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(LABEL):
        self.tag_labels()
    return [word[LABEL] for word in self.words]
|
labels
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(LABEL):\n self.tag_labels()\n return [word[LABEL] for word in self.words]\n",
"role": "user"
},
{
"content": "labels",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(LABEL):
        self.tag_labels()
    nes = []
    word_start = -1
    labels = self.labels + ['O']
    words = self.words
    label = 'O'
    for i, l in enumerate(labels):
        if l.startswith('B-') or l == 'O':
            if word_start != -1:
                nes.append({START: words[word_start][START],
                            END: words[i - 1][END],
                            LABEL: label})
            if l.startswith('B-'):
                word_start = i
                label = l[2:]
            else:
                word_start = -1
    self[NAMED_ENTITIES] = nes
    return self
|
tag_named_entities
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(LABEL):\n self.tag_labels()\n nes = []\n word_start = -1\n labels = self.labels + ['O']\n words = self.words\n label = 'O'\n for i, l in enumerate(labels):\n if l.startswith('B-') or l == 'O':\n if word_start != -1:\n nes.append({START: words[word_start][START], END: words[i -\n 1][END], LABEL: label})\n if l.startswith('B-'):\n word_start = i\n label = l[2:]\n else:\n word_start = -1\n self[NAMED_ENTITIES] = nes\n return self\n",
"role": "user"
},
{
"content": "tag_named_entities",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    phrases = self.split_by(NAMED_ENTITIES)
    return [' '.join(phrase.lemmas) for phrase in phrases]
|
named_entities
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(NAMED_ENTITIES):\n self.tag_named_entities()\n phrases = self.split_by(NAMED_ENTITIES)\n return [' '.join(phrase.lemmas) for phrase in phrases]\n",
"role": "user"
},
{
"content": "named_entities",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return self.texts(NAMED_ENTITIES)
|
named_entity_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(NAMED_ENTITIES):\n self.tag_named_entities()\n return self.texts(NAMED_ENTITIES)\n",
"role": "user"
},
{
"content": "named_entity_texts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return self.spans(NAMED_ENTITIES)
|
named_entity_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(NAMED_ENTITIES):\n self.tag_named_entities()\n return self.spans(NAMED_ENTITIES)\n",
"role": "user"
},
{
"content": "named_entity_spans",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(NAMED_ENTITIES):
        self.tag_named_entities()
    return [ne[LABEL] for ne in self[NAMED_ENTITIES]]
|
named_entity_labels
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(NAMED_ENTITIES):\n self.tag_named_entities()\n return [ne[LABEL] for ne in self[NAMED_ENTITIES]]\n",
"role": "user"
},
{
"content": "named_entity_labels",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if not self.is_tagged(TIMEXES):
        if self.__timex_tagger is None:
            self.__timex_tagger = load_default_timex_tagger()
        self.__timex_tagger.tag_document(self, **self.__kwargs)
    return self
|
tag_timexes
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n if not self.is_tagged(TIMEXES):\n if self.__timex_tagger is None:\n self.__timex_tagger = load_default_timex_tagger()\n self.__timex_tagger.tag_document(self, **self.__kwargs)\n return self\n",
"role": "user"
},
{
"content": "tag_timexes",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.starts(TIMEXES)
|
timex_starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(TIMEXES):\n self.tag_timexes()\n return self.starts(TIMEXES)\n",
"role": "user"
},
{
"content": "timex_starts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.ends(TIMEXES)
|
timex_ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(TIMEXES):\n self.tag_timexes()\n return self.ends(TIMEXES)\n",
"role": "user"
},
{
"content": "timex_ends",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(TIMEXES):
        self.tag_timexes()
    return self.spans(TIMEXES)
|
timex_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(TIMEXES):\n self.tag_timexes()\n return self.spans(TIMEXES)\n",
"role": "user"
},
{
"content": "timex_spans",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(ANALYSIS):
        self.tag_analysis()
    if self.__clause_segmenter is None:
        self.__clause_segmenter = load_default_clausesegmenter()
    return self.__clause_segmenter.tag(self)
|
tag_clause_annotations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(ANALYSIS):\n self.tag_analysis()\n if self.__clause_segmenter is None:\n self.__clause_segmenter = load_default_clausesegmenter()\n return self.__clause_segmenter.tag(self)\n",
"role": "user"
},
{
"content": "tag_clause_annotations",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]]
|
clause_annotations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(CLAUSE_ANNOTATION):\n self.tag_clause_annotations()\n return [word.get(CLAUSE_ANNOTATION, None) for word in self[WORDS]]\n",
"role": "user"
},
{
"content": "clause_annotations",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()
    return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]
|
clause_indices
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(CLAUSE_ANNOTATION):\n self.tag_clause_annotations()\n return [word.get(CLAUSE_IDX, None) for word in self[WORDS]]\n",
"role": "user"
},
{
"content": "clause_indices",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(CLAUSE_ANNOTATION):
        self.tag_clause_annotations()

    def x(words):
        clauses = defaultdict(list)
        start = words[0][START]
        end = words[0][END]
        clause = words[0][CLAUSE_IDX]
        for word in words:
            if word[CLAUSE_IDX] != clause:
                clauses[clause].append((start, end))
                start, clause = word[START], word[CLAUSE_IDX]
            end = word[END]
        clauses[clause].append((start, words[-1][END]))
        clauses = [(key, {START: [s for s, e in clause],
                          END: [e for s, e in clause]})
                   for key, clause in clauses.items()]
        return [v for k, v in sorted(clauses)]
    clauses = []
    sentences = self.divide()
    for sentence in sentences:
        clauses.extend(from_sentence(sentence))
    self[CLAUSES] = clauses
    return self
|
tag_clauses
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(CLAUSE_ANNOTATION):\n self.tag_clause_annotations()\n\n def x(words):\n clauses = defaultdict(list)\n start = words[0][START]\n end = words[0][END]\n clause = words[0][CLAUSE_IDX]\n for word in words:\n if word[CLAUSE_IDX] != clause:\n clauses[clause].append((start, end))\n start, clause = word[START], word[CLAUSE_IDX]\n end = word[END]\n clauses[clause].append((start, words[-1][END]))\n clauses = [(key, {START: [s for s, e in clause], END: [e for s, e in\n clause]}) for key, clause in clauses.items()]\n return [v for k, v in sorted(clauses)]\n clauses = []\n sentences = self.divide()\n for sentence in sentences:\n clauses.extend(from_sentence(sentence))\n self[CLAUSES] = clauses\n return self\n",
"role": "user"
},
{
"content": "tag_clauses",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(CLAUSES):
        self.tag_clauses()
    return self.texts(CLAUSES)
|
clause_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(CLAUSES):\n self.tag_clauses()\n return self.texts(CLAUSES)\n",
"role": "user"
},
{
"content": "clause_texts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(CLAUSES):
        self.tag_clauses()
    if self.__verbchain_detector is None:
        self.__verbchain_detector = load_default_verbchain_detector()
    sentences = self.divide()
    verbchains = []
    for sentence in sentences:
        chains = self.__verbchain_detector.detectVerbChainsFromSent(sentence)
        for chain in chains:
            word_spans = [(sentence[idx][START], sentence[idx][END])
                          for idx in sorted(chain[PHRASE])]
            chain[START] = [span[0] for span in word_spans]
            chain[END] = [span[1] for span in word_spans]
        verbchains.extend(chains)
    self[VERB_CHAINS] = verbchains
    return self
|
tag_verb_chains
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(CLAUSES):\n self.tag_clauses()\n if self.__verbchain_detector is None:\n self.__verbchain_detector = load_default_verbchain_detector()\n sentences = self.divide()\n verbchains = []\n for sentence in sentences:\n chains = self.__verbchain_detector.detectVerbChainsFromSent(sentence)\n for chain in chains:\n word_spans = [(sentence[idx][START], sentence[idx][END]) for\n idx in sorted(chain[PHRASE])]\n chain[START] = [span[0] for span in word_spans]\n chain[END] = [span[1] for span in word_spans]\n verbchains.extend(chains)\n self[VERB_CHAINS] = verbchains\n return self\n",
"role": "user"
},
{
"content": "tag_verb_chains",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.texts(VERB_CHAINS)
|
verb_chain_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(VERB_CHAINS):\n self.tag_verb_chains()\n return self.texts(VERB_CHAINS)\n",
"role": "user"
},
{
"content": "verb_chain_texts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.starts(VERB_CHAINS)
|
verb_chain_starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(VERB_CHAINS):\n self.tag_verb_chains()\n return self.starts(VERB_CHAINS)\n",
"role": "user"
},
{
"content": "verb_chain_starts",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(VERB_CHAINS):
        self.tag_verb_chains()
    return self.ends(VERB_CHAINS)
|
verb_chain_ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(VERB_CHAINS):\n self.tag_verb_chains()\n return self.ends(VERB_CHAINS)\n",
"role": "user"
},
{
"content": "verb_chain_ends",
"role": "assistant"
}
] |
def x(self, **kwargs):
    global wordnet_tagger
    if wordnet_tagger is None:
        wordnet_tagger = WordnetTagger()
    self.__wordnet_tagger = wordnet_tagger
    if len(kwargs) > 0:
        return self.__wordnet_tagger.tag_text(self, **kwargs)
    return self.__wordnet_tagger.tag_text(self, **self.__kwargs)
|
tag_wordnet
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **kwargs):\n global wordnet_tagger\n if wordnet_tagger is None:\n wordnet_tagger = WordnetTagger()\n self.__wordnet_tagger = wordnet_tagger\n if len(kwargs) > 0:\n return self.__wordnet_tagger.tag_text(self, **kwargs)\n return self.__wordnet_tagger.tag_text(self, **self.__kwargs)\n",
"role": "user"
},
{
"content": "tag_wordnet",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDNET):
        self.tag_wordnet()
    return [[a[WORDNET] for a in analysis] for analysis in self.analysis]
|
wordnet_annotations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDNET):\n self.tag_wordnet()\n return [[a[WORDNET] for a in analysis] for analysis in self.analysis]\n",
"role": "user"
},
{
"content": "wordnet_annotations",
"role": "assistant"
}
] |
def x(self):
    synsets = []
    for wn_annots in self.wordnet_annotations:
        word_synsets = []
        for wn_annot in wn_annots:
            for synset in wn_annot.get(SYNSETS, []):
                word_synsets.append(deepcopy(synset))
        synsets.append(word_synsets)
    return synsets
|
synsets
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n synsets = []\n for wn_annots in self.wordnet_annotations:\n word_synsets = []\n for wn_annot in wn_annots:\n for synset in wn_annot.get(SYNSETS, []):\n word_synsets.append(deepcopy(synset))\n synsets.append(word_synsets)\n return synsets\n",
"role": "user"
},
{
"content": "synsets",
"role": "assistant"
}
] |
def x(self):
    literals = []
    for word_synsets in self.synsets:
        word_literals = set()
        for synset in word_synsets:
            for variant in synset.get(SYN_VARIANTS):
                if LITERAL in variant:
                    word_literals.add(variant[LITERAL])
        literals.append(list(sorted(word_literals)))
    return literals
|
word_literals
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n literals = []\n for word_synsets in self.synsets:\n word_literals = set()\n for synset in word_synsets:\n for variant in synset.get(SYN_VARIANTS):\n if LITERAL in variant:\n word_literals.add(variant[LITERAL])\n literals.append(list(sorted(word_literals)))\n return literals\n",
"role": "user"
},
{
"content": "word_literals",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [data[SPELLING] for data in
            vabamorf.spellcheck(self.word_texts, suggestions=False)]
|
spelling
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return [data[SPELLING] for data in vabamorf.spellcheck(self.word_texts,\n suggestions=False)]\n",
"role": "user"
},
{
"content": "spelling",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return [data[SUGGESTIONS] for data in
            vabamorf.spellcheck(self.word_texts, suggestions=True)]
|
spelling_suggestions
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return [data[SUGGESTIONS] for data in vabamorf.spellcheck(self.\n word_texts, suggestions=True)]\n",
"role": "user"
},
{
"content": "spelling_suggestions",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return vabamorf.spellcheck(self.word_texts, suggestions=True)
|
spellcheck_results
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n return vabamorf.spellcheck(self.word_texts, suggestions=True)\n",
"role": "user"
},
{
"content": "spellcheck_results",
"role": "assistant"
}
] |
def x(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    text = self.text
    fixed = vabamorf.fix_spelling(self.word_texts, join=False)
    spans = self.word_spans
    assert len(fixed) == len(spans)
    if len(spans) > 0:
        newtoks = []
        lastend = 0
        for fix, (start, end) in zip(fixed, spans):
            newtoks.append(text[lastend:start])
            newtoks.append(fix)
            lastend = end
        newtoks.append(text[lastend:])
        return Text(''.join(newtoks), **self.__kwargs)
    return self
|
fix_spelling
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(WORDS):\n self.tokenize_words()\n text = self.text\n fixed = vabamorf.fix_spelling(self.word_texts, join=False)\n spans = self.word_spans\n assert len(fixed) == len(spans)\n if len(spans) > 0:\n newtoks = []\n lastend = 0\n for fix, (start, end) in zip(fixed, spans):\n newtoks.append(text[lastend:start])\n newtoks.append(fix)\n lastend = end\n newtoks.append(text[lastend:])\n return Text(''.join(newtoks), **self.__kwargs)\n return self\n",
"role": "user"
},
{
"content": "fix_spelling",
"role": "assistant"
}
] |
def x(self):
    return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs)
|
clean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return Text(self.__text_cleaner.clean(self[TEXT]), **self.__kwargs)\n",
"role": "user"
},
{
"content": "clean",
"role": "assistant"
}
] |
def x(self, spans, sep=' '):
    N = len(spans)
    results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]
    for elem in self:
        if isinstance(self[elem], list):
            splits = divide_by_spans(self[elem], spans, translate=True,
                                     sep=sep)
            for idx in range(N):
                results[idx][elem] = splits[idx]
    return [Text(res) for res in results]
|
split_given_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, spans, sep=' '):\n N = len(spans)\n results = [{TEXT: text} for text in self.texts_from_spans(spans, sep=sep)]\n for elem in self:\n if isinstance(self[elem], list):\n splits = divide_by_spans(self[elem], spans, translate=True, sep=sep\n )\n for idx in range(N):\n results[idx][elem] = splits[idx]\n return [Text(res) for res in results]\n",
"role": "user"
},
{
"content": "split_given_spans",
"role": "assistant"
}
] |
def x(self, layer, sep=' '):
    if not self.is_tagged(layer):
        self.tag(layer)
    return self.split_given_spans(self.spans(layer), sep=sep)
|
split_by
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer, sep=' '):\n if not self.is_tagged(layer):\n self.tag(layer)\n return self.split_given_spans(self.spans(layer), sep=sep)\n",
"role": "user"
},
{
"content": "split_by",
"role": "assistant"
}
] |
def x(self, regex_or_pattern, flags=re.U, gaps=True):
    text = self[TEXT]
    regex = regex_or_pattern
    if isinstance(regex, six.string_types):
        regex = re.compile(regex_or_pattern, flags=flags)
    last_end = 0
    spans = []
    if gaps:
        for mo in regex.finditer(text):
            start, end = mo.start(), mo.end()
            if start > last_end:
                spans.append((last_end, start))
            last_end = end
        if last_end < len(text):
            spans.append((last_end, len(text)))
    else:
        spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]
    return self.split_given_spans(spans)
|
split_by_regex
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, regex_or_pattern, flags=re.U, gaps=True):\n text = self[TEXT]\n regex = regex_or_pattern\n if isinstance(regex, six.string_types):\n regex = re.compile(regex_or_pattern, flags=flags)\n last_end = 0\n spans = []\n if gaps:\n for mo in regex.finditer(text):\n start, end = mo.start(), mo.end()\n if start > last_end:\n spans.append((last_end, start))\n last_end = end\n if last_end < len(text):\n spans.append((last_end, len(text)))\n else:\n spans = [(mo.start(), mo.end()) for mo in regex.finditer(text)]\n return self.split_given_spans(spans)\n",
"role": "user"
},
{
"content": "split_by_regex",
"role": "assistant"
}
] |
def x(self, layer=WORDS, by=SENTENCES):
    if not self.is_tagged(layer):
        self.tag(layer)
    if not self.is_tagged(by):
        self.tag(by)
    return divide(self[layer], self[by])
|
divide
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer=WORDS, by=SENTENCES):\n if not self.is_tagged(layer):\n self.tag(layer)\n if not self.is_tagged(by):\n self.tag(by)\n return divide(self[layer], self[by])\n",
"role": "user"
},
{
"content": "divide",
"role": "assistant"
}
] |
def x(tags):
    current_classes = set()
    result = []
    for pos, group in group_tags_at_same_position(tags):
        opening, closing = get_opening_closing_tags(group)
        closing_added = False
        if len(closing) > 0:
            closing_tag = Tag(pos, False, '')
            for tag in closing:
                current_classes.remove(tag.css_class)
            result.append(closing_tag)
            closing_added = True
        opening_added = False
        if len(opening) > 0:
            if not closing_added and len(current_classes) > 0:
                result.append(Tag(pos, False, ''))
            for tag in opening:
                current_classes.add(tag.css_class)
            opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
            result.append(opening_tag)
            opening_added = True
        if closing_added and not opening_added and len(current_classes) > 0:
            opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
            result.append(opening_tag)
    return result
|
create_tags_with_concatenated_css_classes
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tags):\n current_classes = set()\n result = []\n for pos, group in group_tags_at_same_position(tags):\n opening, closing = get_opening_closing_tags(group)\n closing_added = False\n if len(closing) > 0:\n closing_tag = Tag(pos, False, '')\n for tag in closing:\n current_classes.remove(tag.css_class)\n result.append(closing_tag)\n closing_added = True\n opening_added = False\n if len(opening) > 0:\n if not closing_added and len(current_classes) > 0:\n result.append(Tag(pos, False, ''))\n for tag in opening:\n current_classes.add(tag.css_class)\n opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))\n result.append(opening_tag)\n opening_added = True\n if closing_added and not opening_added and len(current_classes) > 0:\n opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))\n result.append(opening_tag)\n return result\n",
"role": "user"
},
{
"content": "create_tags_with_concatenated_css_classes",
"role": "assistant"
}
] |
def x(matches):
    if len(matches) == 0:
        return matches
    matches.sort()
    N = len(matches)
    scores = [len(match) for match in matches]
    prev = [-1] * N
    for i in range(1, N):
        bestscore = -1
        bestprev = -1
        j = i
        while j >= 0:
            if matches[j].is_before(matches[i]):
                l = scores[j] + len(matches[i])
                if l >= bestscore:
                    bestscore = l
                    bestprev = j
            else:
                l = scores[j] - len(matches[j]) + len(matches[i])
                if l >= bestscore:
                    bestscore = l
                    bestprev = prev[j]
            j = j - 1
        scores[i] = bestscore
        prev[i] = bestprev
    bestscore = max(scores)
    bestidx = len(scores) - scores[-1::-1].index(bestscore) - 1
    keepidxs = [bestidx]
    bestidx = prev[bestidx]
    while bestidx != -1:
        keepidxs.append(bestidx)
        bestidx = prev[bestidx]
    return [matches[idx] for idx in reversed(keepidxs)]
|
resolve_using_maximal_coverage
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(matches):\n if len(matches) == 0:\n return matches\n matches.sort()\n N = len(matches)\n scores = [len(match) for match in matches]\n prev = [-1] * N\n for i in range(1, N):\n bestscore = -1\n bestprev = -1\n j = i\n while j >= 0:\n if matches[j].is_before(matches[i]):\n l = scores[j] + len(matches[i])\n if l >= bestscore:\n bestscore = l\n bestprev = j\n else:\n l = scores[j] - len(matches[j]) + len(matches[i])\n if l >= bestscore:\n bestscore = l\n bestprev = prev[j]\n j = j - 1\n scores[i] = bestscore\n prev[i] = bestprev\n bestscore = max(scores)\n bestidx = len(scores) - scores[-1::-1].index(bestscore) - 1\n keepidxs = [bestidx]\n bestidx = prev[bestidx]\n while bestidx != -1:\n keepidxs.append(bestidx)\n bestidx = prev[bestidx]\n return [matches[idx] for idx in reversed(keepidxs)]\n",
"role": "user"
},
{
"content": "resolve_using_maximal_coverage",
"role": "assistant"
}
] |
def x(tokens, wordID1, wordID2, punctForbidden=True, commaForbidden=True,
      conjWordsForbidden=True):
    global _breakerJaNingEgaVoi, _breakerAgaKuidVaid, _breakerKomaLopus, _breakerPunktuats
    minWID = min(wordID1, wordID2)
    maxWID = max(wordID1, wordID2)
    insideCheckArea = False
    for i in range(len(tokens)):
        token = tokens[i]
        if token[WORD_ID] >= minWID:
            insideCheckArea = True
        if token[WORD_ID] >= maxWID:
            insideCheckArea = False
        if insideCheckArea:
            if punctForbidden and _breakerPunktuats.matches(token):
                return True
            if commaForbidden and _breakerKomaLopus.matches(token):
                return True
            if conjWordsForbidden and (_breakerAgaKuidVaid.matches(token) or
                                       _breakerJaNingEgaVoi.matches(token)):
                return True
    return False
|
isSeparatedByPossibleClauseBreakers
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tokens, wordID1, wordID2, punctForbidden=True, commaForbidden=True,\n conjWordsForbidden=True):\n global _breakerJaNingEgaVoi, _breakerAgaKuidVaid, _breakerKomaLopus, _breakerPunktuats\n minWID = min(wordID1, wordID2)\n maxWID = max(wordID1, wordID2)\n insideCheckArea = False\n for i in range(len(tokens)):\n token = tokens[i]\n if token[WORD_ID] >= minWID:\n insideCheckArea = True\n if token[WORD_ID] >= maxWID:\n insideCheckArea = False\n if insideCheckArea:\n if punctForbidden and _breakerPunktuats.matches(token):\n return True\n if commaForbidden and _breakerKomaLopus.matches(token):\n return True\n if conjWordsForbidden and (_breakerAgaKuidVaid.matches(token) or\n _breakerJaNingEgaVoi.matches(token)):\n return True\n return False\n",
"role": "user"
},
{
"content": "isSeparatedByPossibleClauseBreakers",
"role": "assistant"
}
] |
def x(wordID, clauseTokens):
    jaNingEgaVoi = WordTemplate({ROOT: '^(ja|ning|ega|v[ōõ]i)$',
                                 POSTAG: '[DJ]'})
    punktuatsioon = WordTemplate({POSTAG: 'Z'})
    for i in range(len(clauseTokens)):
        token = clauseTokens[i]
        if token[WORD_ID] == wordID:
            if i + 1 == len(clauseTokens):
                return True
            else:
                for j in range(i + 1, len(clauseTokens)):
                    token2 = clauseTokens[j]
                    if not (jaNingEgaVoi.matches(token2) or
                            punktuatsioon.matches(token2)):
                        return False
                return True
    return False
|
isClauseFinal
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(wordID, clauseTokens):\n jaNingEgaVoi = WordTemplate({ROOT: '^(ja|ning|ega|v[ōõ]i)$', POSTAG:\n '[DJ]'})\n punktuatsioon = WordTemplate({POSTAG: 'Z'})\n for i in range(len(clauseTokens)):\n token = clauseTokens[i]\n if token[WORD_ID] == wordID:\n if i + 1 == len(clauseTokens):\n return True\n else:\n for j in range(i + 1, len(clauseTokens)):\n token2 = clauseTokens[j]\n if not (jaNingEgaVoi.matches(token2) or punktuatsioon.\n matches(token2)):\n return False\n return True\n return False\n",
"role": "user"
},
{
"content": "isClauseFinal",
"role": "assistant"
}
] |
def x(wordID, clauseTokens):
    koma = WordTemplate({ROOT: '^,+$', POSTAG: 'Z'})
    for i in range(len(clauseTokens)):
        token = clauseTokens[i]
        if token[WORD_ID] == wordID:
            if re.match('^.*,$', token[TEXT]):
                return True
            elif (i + 1 < len(clauseTokens) and
                    koma.matches(clauseTokens[i + 1])):
                return True
            break
    return False
|
isFollowedByComma
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(wordID, clauseTokens):\n koma = WordTemplate({ROOT: '^,+$', POSTAG: 'Z'})\n for i in range(len(clauseTokens)):\n token = clauseTokens[i]\n if token[WORD_ID] == wordID:\n if re.match('^.*,$', token[TEXT]):\n return True\n elif i + 1 < len(clauseTokens) and koma.matches(clauseTokens[i + 1]\n ):\n return True\n break\n return False\n",
"role": "user"
},
{
"content": "isFollowedByComma",
"role": "assistant"
}
] |
def x(araVerb, otherVerb):
    global _verbAraAgreements
    for i in range(0, len(_verbAraAgreements), 2):
        araVerbTemplate = _verbAraAgreements[i]
        otherVerbTemplate = _verbAraAgreements[i + 1]
        matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)
        if matchingAraAnalyses:
            matchingVerbAnalyses = \
                otherVerbTemplate.matchingAnalyseIndexes(otherVerb)
            if matchingVerbAnalyses:
                return [matchingAraAnalyses, matchingVerbAnalyses]
    return []
|
canFormAraPhrase
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(araVerb, otherVerb):\n global _verbAraAgreements\n for i in range(0, len(_verbAraAgreements), 2):\n araVerbTemplate = _verbAraAgreements[i]\n otherVerbTemplate = _verbAraAgreements[i + 1]\n matchingAraAnalyses = araVerbTemplate.matchingAnalyseIndexes(araVerb)\n if matchingAraAnalyses:\n matchingVerbAnalyses = otherVerbTemplate.matchingAnalyseIndexes(\n otherVerb)\n if matchingVerbAnalyses:\n return [matchingAraAnalyses, matchingVerbAnalyses]\n return []\n",
"role": "user"
},
{
"content": "canFormAraPhrase",
"role": "assistant"
}
] |
def x(clauseTokens, clauseID, foundChains):
    verbOle = WordTemplate({ROOT: '^ole$', POSTAG: 'V'})
    verbOleJarel1 = WordTemplate({POSTAG: 'V', FORM: '(nud)$'})
    verbOleJarel2 = WordTemplate({POSTAG: 'V', FORM: '^(mas|tud)$'})
    verbMata = WordTemplate({POSTAG: 'V', FORM: '^(mata)$'})
    verbMaDa = WordTemplate({POSTAG: 'V', FORM: '^(da|ma)$'})
    annotatedWords = []
    for verbObj in foundChains:
        if verbObj[CLAUSE_IDX] != clauseID:
            continue
        if (len(verbObj[PATTERN]) == 1 and
                re.match('^(ei|ära|ega)$', verbObj[PATTERN][0])):
            continue
        annotatedWords.extend(verbObj[PHRASE])
    for verbObj in foundChains:
        if verbObj[CLAUSE_IDX] != clauseID:
            continue
        if verbObj[PATTERN][-1] == 'ole' and verbObj[OTHER_VERBS]:
            eiOlePattern = (len(verbObj[PATTERN]) == 2 and
                            verbObj[PATTERN][0] == 'ei')
            lastVerbWID = verbObj[PHRASE][-1]
            lastTokIndex = [i for i in range(len(clauseTokens))
                            if clauseTokens[i][WORD_ID] == lastVerbWID]
            lastTokIndex = lastTokIndex[0]
            expansion = None
            appliedRule = 0
            if not _isClauseFinal(lastVerbWID, clauseTokens):
                maDaVerbsBetween = 0
                oleInfFollowing = 0
                for i in range(lastTokIndex + 1, len(clauseTokens)):
                    token = clauseTokens[i]
                    tokenWID = token[WORD_ID]
                    if tokenWID in annotatedWords:
                        break
                    if verbMaDa.matches(token):
                        maDaVerbsBetween += 1
                    if (verbOleJarel1.matches(token) or
                            verbOleJarel2.matches(token)):
                        if (_isClauseFinal(tokenWID, clauseTokens) and
                                not _isSeparatedByPossibleClauseBreakers(
                                    clauseTokens, verbObj[PHRASE][-1],
                                    tokenWID, True, True, True)):
                            expansion = token
                        elif (verbOleJarel1.matches(token) and eiOlePattern
                                and i - lastTokIndex <= 2):
                            expansion = token
                        oleInfFollowing += 1
                        break
                    elif verbMata.matches(token) and maDaVerbsBetween == 0:
                        if (_isClauseFinal(tokenWID, clauseTokens) and
                                not _isSeparatedByPossibleClauseBreakers(
                                    clauseTokens, verbObj[PHRASE][-1],
                                    tokenWID, True, True, True)):
                            expansion = token
                        break
                    oleInfFollowing += 1
                if oleInfFollowing == 0 and not expansion:
                    minWID = min(verbObj[PHRASE])
                    lastTokIndex = [i for i in range(len(clauseTokens))
                                    if clauseTokens[i][WORD_ID] == minWID]
                    lastTokIndex = lastTokIndex[0]
                    token = clauseTokens[lastTokIndex - 1]
                    if (lastTokIndex - 1 > -1 and
                            token[WORD_ID] not in annotatedWords):
                        if (verbOleJarel1.matches(token) or
                                verbOleJarel2.matches(token)):
                            expansion = token
                            appliedRule = 1
            else:
                minWID = min(verbObj[PHRASE])
                lastTokIndex = [i for i in range(len(clauseTokens))
                                if clauseTokens[i][WORD_ID] == minWID]
                if lastTokIndex:
                    lastTokIndex = lastTokIndex[0]
                    if (lastTokIndex - 1 > -1 and
                            clauseTokens[lastTokIndex - 1][WORD_ID]
                            not in annotatedWords):
                        token = clauseTokens[lastTokIndex - 1]
                        if (verbOleJarel1.matches(token) or
                                verbOleJarel2.matches(token)):
                            expansion = token
            if expansion:
                tokenWID = expansion[WORD_ID]
                verbObj[PHRASE].append(tokenWID)
                verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(
                    expansion, [verbOleJarel1, verbOleJarel2, verbMata]))
                if verbOle.matches(expansion):
                    verbObj[PATTERN].append('ole')
                else:
                    verbObj[PATTERN].append('verb')
                annotatedWords.append(tokenWID)
|
expandOlemaVerbChains
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(clauseTokens, clauseID, foundChains):\n verbOle = WordTemplate({ROOT: '^ole$', POSTAG: 'V'})\n verbOleJarel1 = WordTemplate({POSTAG: 'V', FORM: '(nud)$'})\n verbOleJarel2 = WordTemplate({POSTAG: 'V', FORM: '^(mas|tud)$'})\n verbMata = WordTemplate({POSTAG: 'V', FORM: '^(mata)$'})\n verbMaDa = WordTemplate({POSTAG: 'V', FORM: '^(da|ma)$'})\n annotatedWords = []\n for verbObj in foundChains:\n if verbObj[CLAUSE_IDX] != clauseID:\n continue\n if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$',\n verbObj[PATTERN][0]):\n continue\n annotatedWords.extend(verbObj[PHRASE])\n for verbObj in foundChains:\n if verbObj[CLAUSE_IDX] != clauseID:\n continue\n if verbObj[PATTERN][-1] == 'ole' and verbObj[OTHER_VERBS]:\n eiOlePattern = len(verbObj[PATTERN]) == 2 and verbObj[PATTERN][0\n ] == 'ei'\n lastVerbWID = verbObj[PHRASE][-1]\n lastTokIndex = [i for i in range(len(clauseTokens)) if \n clauseTokens[i][WORD_ID] == lastVerbWID]\n lastTokIndex = lastTokIndex[0]\n expansion = None\n appliedRule = 0\n if not _isClauseFinal(lastVerbWID, clauseTokens):\n maDaVerbsBetween = 0\n oleInfFollowing = 0\n for i in range(lastTokIndex + 1, len(clauseTokens)):\n token = clauseTokens[i]\n tokenWID = token[WORD_ID]\n if tokenWID in annotatedWords:\n break\n if verbMaDa.matches(token):\n maDaVerbsBetween += 1\n if verbOleJarel1.matches(token) or verbOleJarel2.matches(\n token):\n if _isClauseFinal(tokenWID, clauseTokens\n ) and not _isSeparatedByPossibleClauseBreakers(\n clauseTokens, verbObj[PHRASE][-1], tokenWID, \n True, True, True):\n expansion = token\n elif verbOleJarel1.matches(token\n ) and eiOlePattern and i - lastTokIndex <= 2:\n expansion = token\n oleInfFollowing += 1\n break\n elif verbMata.matches(token) and maDaVerbsBetween == 0:\n if _isClauseFinal(tokenWID, clauseTokens\n ) and not _isSeparatedByPossibleClauseBreakers(\n clauseTokens, verbObj[PHRASE][-1], tokenWID, \n True, True, True):\n expansion = token\n break\n oleInfFollowing += 1\n if oleInfFollowing == 0 and not expansion:\n minWID = min(verbObj[PHRASE])\n lastTokIndex = [i for i in range(len(clauseTokens)) if \n clauseTokens[i][WORD_ID] == minWID]\n lastTokIndex = lastTokIndex[0]\n token = clauseTokens[lastTokIndex - 1]\n if lastTokIndex - 1 > -1 and token[WORD_ID\n ] not in annotatedWords:\n if verbOleJarel1.matches(token\n ) or verbOleJarel2.matches(token):\n expansion = token\n appliedRule = 1\n else:\n minWID = min(verbObj[PHRASE])\n lastTokIndex = [i for i in range(len(clauseTokens)) if \n clauseTokens[i][WORD_ID] == minWID]\n if lastTokIndex:\n lastTokIndex = lastTokIndex[0]\n if lastTokIndex - 1 > -1 and clauseTokens[lastTokIndex - 1\n ][WORD_ID] not in annotatedWords:\n token = clauseTokens[lastTokIndex - 1]\n if verbOleJarel1.matches(token\n ) or verbOleJarel2.matches(token):\n expansion = token\n if expansion:\n tokenWID = expansion[WORD_ID]\n verbObj[PHRASE].append(tokenWID)\n verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(\n expansion, [verbOleJarel1, verbOleJarel2, verbMata]))\n if verbOle.matches(expansion):\n verbObj[PATTERN].append('ole')\n else:\n verbObj[PATTERN].append('verb')\n annotatedWords.append(tokenWID)\n",
"role": "user"
},
{
"content": "expandOlemaVerbChains",
"role": "assistant"
}
] |
def x(infile):
relations = dict()
in_f = codecs.open(infile, mode='r', encoding='utf-8')
for line in in_f:
line = line.rstrip()
if len(line) > 0 and not re.match('^#.+$', line):
verb, forms = line.split('\t')
relations[verb] = forms.split()
in_f.close()
return relations
|
loadVerbSubcatRelations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(infile):\n relations = dict()\n in_f = codecs.open(infile, mode='r', encoding='utf-8')\n for line in in_f:\n line = line.rstrip()\n if len(line) > 0 and not re.match('^#.+$', line):\n verb, forms = line.split('\\t')\n relations[verb] = forms.split()\n in_f.close()\n return relations\n",
"role": "user"
},
{
"content": "loadVerbSubcatRelations",
"role": "assistant"
}
] |
def x(verbObj, clauseTokens, clauseID):
global _verbInfNonExpansible
if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and re.match(
'^(verb)$', verbObj[PATTERN][-1], re.I):
lastToken = [token for token in clauseTokens if token[WORD_ID] ==
verbObj[PHRASE][-1]]
if not lastToken:
raise Exception(' Last token not found for ' + str(verbObj) +
' in ' + str(getJsonAsTextString(clauseTokens)))
lastToken = lastToken[0]
if not _verbInfNonExpansible.matches(lastToken):
if len(verbObj[PATTERN]) >= 3 and verbObj[PATTERN][-2] == '&':
return False
return True
return False
|
isVerbExpansible
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(verbObj, clauseTokens, clauseID):\n global _verbInfNonExpansible\n if verbObj[OTHER_VERBS] and verbObj[CLAUSE_IDX] == clauseID and re.match(\n '^(verb)$', verbObj[PATTERN][-1], re.I):\n lastToken = [token for token in clauseTokens if token[WORD_ID] ==\n verbObj[PHRASE][-1]]\n if not lastToken:\n raise Exception(' Last token not found for ' + str(verbObj) +\n ' in ' + str(getJsonAsTextString(clauseTokens)))\n lastToken = lastToken[0]\n if not _verbInfNonExpansible.matches(lastToken):\n if len(verbObj[PATTERN]) >= 3 and verbObj[PATTERN][-2] == '&':\n return False\n return True\n return False\n",
"role": "user"
},
{
"content": "isVerbExpansible",
"role": "assistant"
}
] |
def x(foundSubcatChain):
markings = []
tokens = []
nonConjTokens = []
for marking, token in foundSubcatChain:
markings.append(marking)
tokens.append(token)
if marking != '&':
nonConjTokens.append(token)
if len(markings) == 3 and markings[0] == markings[2] and markings[0
] != '&' and markings[1] == '&':
return tokens
elif len(markings) > 3 and markings[0] == markings[2] and markings[0
] != '&' and markings[1] == '&' and all([(m == '&') for m in
markings[3:]]):
return tokens[:3]
elif len(nonConjTokens) > 0:
return nonConjTokens[:1]
return []
|
suitableVerbExpansion
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(foundSubcatChain):\n markings = []\n tokens = []\n nonConjTokens = []\n for marking, token in foundSubcatChain:\n markings.append(marking)\n tokens.append(token)\n if marking != '&':\n nonConjTokens.append(token)\n if len(markings) == 3 and markings[0] == markings[2] and markings[0\n ] != '&' and markings[1] == '&':\n return tokens\n elif len(markings) > 3 and markings[0] == markings[2] and markings[0\n ] != '&' and markings[1] == '&' and all([(m == '&') for m in\n markings[3:]]):\n return tokens[:3]\n elif len(nonConjTokens) > 0:\n return nonConjTokens[:1]\n return []\n",
"role": "user"
},
{
"content": "suitableVerbExpansion",
"role": "assistant"
}
] |
def x(clauseTokens, clauseID, foundChains):
verbTud = WordTemplate({POSTAG: 'V', FORM: '^(tud|dud)$'})
verb = WordTemplate({POSTAG: 'V'})
verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})
for verbObj in foundChains:
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
lastVerbWID = verbObj[PHRASE][-1]
lastToken = [token for token in clauseTokens if token[WORD_ID] ==
lastVerbWID]
lastIndex = [i for i in range(len(clauseTokens)) if
clauseTokens[i][WORD_ID] == lastVerbWID]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [analysis[ROOT] for analysis in verb.
matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0]
if mainVerbLemma == 'saa':
expansion = None
if not _isClauseFinal(lastVerbWID, clauseTokens):
for i in range(lastIndex + 1, len(clauseTokens)):
token = clauseTokens[i]
tokenWID = token[WORD_ID]
if verbTud.matches(token) and _isClauseFinal(tokenWID,
clauseTokens
) and not _isSeparatedByPossibleClauseBreakers(
clauseTokens, verbObj[PHRASE][-1], tokenWID,
True, True, False):
expansion = token
break
elif lastIndex - 1 > -1:
if verbTud.matches(clauseTokens[lastIndex - 1]):
expansion = clauseTokens[lastIndex - 1]
if expansion:
tokenWID = expansion[WORD_ID]
verbObj[PHRASE].append(tokenWID)
verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(
expansion, verbTud))
if verbOlema.matches(expansion):
verbObj[PATTERN].append('ole')
else:
verbObj[PATTERN].append('verb')
|
expandSaamaWithTud
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(clauseTokens, clauseID, foundChains):\n verbTud = WordTemplate({POSTAG: 'V', FORM: '^(tud|dud)$'})\n verb = WordTemplate({POSTAG: 'V'})\n verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})\n for verbObj in foundChains:\n if _isVerbExpansible(verbObj, clauseTokens, clauseID):\n lastVerbWID = verbObj[PHRASE][-1]\n lastToken = [token for token in clauseTokens if token[WORD_ID] ==\n lastVerbWID]\n lastIndex = [i for i in range(len(clauseTokens)) if \n clauseTokens[i][WORD_ID] == lastVerbWID]\n lastToken = lastToken[0]\n lastIndex = lastIndex[0]\n mainVerb = [analysis[ROOT] for analysis in verb.\n matchingAnalyses(lastToken)]\n mainVerbLemma = mainVerb[0]\n if mainVerbLemma == 'saa':\n expansion = None\n if not _isClauseFinal(lastVerbWID, clauseTokens):\n for i in range(lastIndex + 1, len(clauseTokens)):\n token = clauseTokens[i]\n tokenWID = token[WORD_ID]\n if verbTud.matches(token) and _isClauseFinal(tokenWID,\n clauseTokens\n ) and not _isSeparatedByPossibleClauseBreakers(\n clauseTokens, verbObj[PHRASE][-1], tokenWID, \n True, True, False):\n expansion = token\n break\n elif lastIndex - 1 > -1:\n if verbTud.matches(clauseTokens[lastIndex - 1]):\n expansion = clauseTokens[lastIndex - 1]\n if expansion:\n tokenWID = expansion[WORD_ID]\n verbObj[PHRASE].append(tokenWID)\n verbObj[ANALYSIS_IDS].append(_getMatchingAnalysisIDs(\n expansion, verbTud))\n if verbOlema.matches(expansion):\n verbObj[PATTERN].append('ole')\n else:\n verbObj[PATTERN].append('verb')\n",
"role": "user"
},
{
"content": "expandSaamaWithTud",
"role": "assistant"
}
] |
def x(clauseTokens, clauseID, foundChains, verbSubcat, skipQuestionable=
False, breakOnPunctuation=True):
global _breakerJaNingEgaVoi, _breakerKomaLopus, _breakerPunktuats
verb = WordTemplate({POSTAG: 'V'})
verbInf1 = WordTemplate({POSTAG: 'V', FORM: '^(da|ma|maks|mas|mast|mata)$'}
)
verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})
sonaMitte = WordTemplate({ROOT: '^mitte$', POSTAG: 'D'})
annotatedWords = []
for verbObj in foundChains:
if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$',
verbObj[PATTERN][0]):
continue
annotatedWords.extend(verbObj[PHRASE])
freeVerbsWIDs = [t[WORD_ID] for t in clauseTokens if verbInf1.matches(t
) and t[WORD_ID] not in annotatedWords]
for verbObj in foundChains:
if _isVerbExpansible(verbObj, clauseTokens, clauseID):
lastToken = [token for token in clauseTokens if token[WORD_ID] ==
verbObj[PHRASE][-1]]
lastIndex = [i for i in range(len(clauseTokens)) if
clauseTokens[i][WORD_ID] == verbObj[PHRASE][-1]]
lastToken = lastToken[0]
lastIndex = lastIndex[0]
mainVerb = [(analysis[ROOT], analysis[FORM]) for analysis in
verb.matchingAnalyses(lastToken)]
mainVerbLemma = mainVerb[0][0]
mainVerbForm = mainVerb[0][1]
positivePhrase = verbObj[POLARITY] == 'POS'
egaPhrase = verbObj[PATTERN][0] == 'ega'
if mainVerbLemma in verbSubcat:
subcatForms = verbSubcat[mainVerbLemma]
for subcatForm in subcatForms:
foundSubcatChain = []
addingCompleted = False
if re.match('^.+\\?$', subcatForm):
if skipQuestionable:
continue
else:
subcatForm = subcatForm.replace('?', '')
j = lastIndex + 1
while j < len(clauseTokens):
token = clauseTokens[j]
tokenWID = token[WORD_ID]
if tokenWID in annotatedWords:
break
if breakOnPunctuation and _breakerPunktuats.matches(
token):
break
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in
verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append((subcatForm, token))
if _breakerKomaLopus.matches(token):
break
j += 1
if not _suitableVerbExpansion(foundSubcatChain
) and not egaPhrase:
minWid = min(verbObj[PHRASE])
j = lastIndex - 1
while j > -1:
token = clauseTokens[j]
tokenWID = token[WORD_ID]
if (tokenWID in annotatedWords and tokenWID not in
verbObj[PHRASE]):
break
if (_breakerKomaLopus.matches(token) or
breakOnPunctuation and _breakerPunktuats.
matches(token)):
break
if token[WORD_ID] + 1 < minWid:
break
if _breakerJaNingEgaVoi.matches(token):
foundSubcatChain.append(('&', token))
if verb.matches(token):
tokenForms = [analysis[FORM] for analysis in
verb.matchingAnalyses(token)]
if subcatForm in tokenForms:
foundSubcatChain.append((subcatForm, token)
)
j -= 1
suitablePhrase = _suitableVerbExpansion(foundSubcatChain)
if suitablePhrase:
for token in suitablePhrase:
tokenWID = token[WORD_ID]
verbObj[PHRASE].append(tokenWID)
annotatedWords.append(tokenWID)
if _breakerJaNingEgaVoi.matches(token):
verbObj[PATTERN].append('&')
verbObj[ANALYSIS_IDS].append(
_getMatchingAnalysisIDs(token,
_breakerJaNingEgaVoi))
elif len(suitablePhrase
) == 1 and verbOlema.matches(token):
verbObj[PATTERN].append('ole')
verbObj[ANALYSIS_IDS].append(
_getMatchingAnalysisIDs(token, verbOlema))
freeVerbsWIDs.remove(tokenWID)
else:
verbObj[PATTERN].append('verb')
analysisIDs = [i for i in range(len(token[
ANALYSIS])) if subcatForm == token[
ANALYSIS][i][FORM]]
assert len(analysisIDs) > 0
verbObj[ANALYSIS_IDS].append(analysisIDs)
freeVerbsWIDs.remove(tokenWID)
if not freeVerbsWIDs:
verbObj[OTHER_VERBS] = False
addingCompleted = True
if addingCompleted:
break
|
expandVerbChainsBySubcat
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(clauseTokens, clauseID, foundChains, verbSubcat, skipQuestionable=\n False, breakOnPunctuation=True):\n global _breakerJaNingEgaVoi, _breakerKomaLopus, _breakerPunktuats\n verb = WordTemplate({POSTAG: 'V'})\n verbInf1 = WordTemplate({POSTAG: 'V', FORM: '^(da|ma|maks|mas|mast|mata)$'}\n )\n verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})\n sonaMitte = WordTemplate({ROOT: '^mitte$', POSTAG: 'D'})\n annotatedWords = []\n for verbObj in foundChains:\n if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$',\n verbObj[PATTERN][0]):\n continue\n annotatedWords.extend(verbObj[PHRASE])\n freeVerbsWIDs = [t[WORD_ID] for t in clauseTokens if verbInf1.matches(t\n ) and t[WORD_ID] not in annotatedWords]\n for verbObj in foundChains:\n if _isVerbExpansible(verbObj, clauseTokens, clauseID):\n lastToken = [token for token in clauseTokens if token[WORD_ID] ==\n verbObj[PHRASE][-1]]\n lastIndex = [i for i in range(len(clauseTokens)) if \n clauseTokens[i][WORD_ID] == verbObj[PHRASE][-1]]\n lastToken = lastToken[0]\n lastIndex = lastIndex[0]\n mainVerb = [(analysis[ROOT], analysis[FORM]) for analysis in\n verb.matchingAnalyses(lastToken)]\n mainVerbLemma = mainVerb[0][0]\n mainVerbForm = mainVerb[0][1]\n positivePhrase = verbObj[POLARITY] == 'POS'\n egaPhrase = verbObj[PATTERN][0] == 'ega'\n if mainVerbLemma in verbSubcat:\n subcatForms = verbSubcat[mainVerbLemma]\n for subcatForm in subcatForms:\n foundSubcatChain = []\n addingCompleted = False\n if re.match('^.+\\\\?$', subcatForm):\n if skipQuestionable:\n continue\n else:\n subcatForm = subcatForm.replace('?', '')\n j = lastIndex + 1\n while j < len(clauseTokens):\n token = clauseTokens[j]\n tokenWID = token[WORD_ID]\n if tokenWID in annotatedWords:\n break\n if breakOnPunctuation and _breakerPunktuats.matches(\n token):\n break\n if _breakerJaNingEgaVoi.matches(token):\n foundSubcatChain.append(('&', token))\n if verb.matches(token):\n tokenForms = [analysis[FORM] for analysis in\n verb.matchingAnalyses(token)]\n if subcatForm in tokenForms:\n foundSubcatChain.append((subcatForm, token))\n if _breakerKomaLopus.matches(token):\n break\n j += 1\n if not _suitableVerbExpansion(foundSubcatChain\n ) and not egaPhrase:\n minWid = min(verbObj[PHRASE])\n j = lastIndex - 1\n while j > -1:\n token = clauseTokens[j]\n tokenWID = token[WORD_ID]\n if (tokenWID in annotatedWords and tokenWID not in\n verbObj[PHRASE]):\n break\n if (_breakerKomaLopus.matches(token) or \n breakOnPunctuation and _breakerPunktuats.\n matches(token)):\n break\n if token[WORD_ID] + 1 < minWid:\n break\n if _breakerJaNingEgaVoi.matches(token):\n foundSubcatChain.append(('&', token))\n if verb.matches(token):\n tokenForms = [analysis[FORM] for analysis in\n verb.matchingAnalyses(token)]\n if subcatForm in tokenForms:\n foundSubcatChain.append((subcatForm, token)\n )\n j -= 1\n suitablePhrase = _suitableVerbExpansion(foundSubcatChain)\n if suitablePhrase:\n for token in suitablePhrase:\n tokenWID = token[WORD_ID]\n verbObj[PHRASE].append(tokenWID)\n annotatedWords.append(tokenWID)\n if _breakerJaNingEgaVoi.matches(token):\n verbObj[PATTERN].append('&')\n verbObj[ANALYSIS_IDS].append(\n _getMatchingAnalysisIDs(token,\n _breakerJaNingEgaVoi))\n elif len(suitablePhrase\n ) == 1 and verbOlema.matches(token):\n verbObj[PATTERN].append('ole')\n verbObj[ANALYSIS_IDS].append(\n _getMatchingAnalysisIDs(token, verbOlema))\n freeVerbsWIDs.remove(tokenWID)\n else:\n verbObj[PATTERN].append('verb')\n analysisIDs = [i for i in range(len(token[\n ANALYSIS])) if subcatForm == 
token[\n ANALYSIS][i][FORM]]\n assert len(analysisIDs) > 0\n verbObj[ANALYSIS_IDS].append(analysisIDs)\n freeVerbsWIDs.remove(tokenWID)\n if not freeVerbsWIDs:\n verbObj[OTHER_VERBS] = False\n addingCompleted = True\n if addingCompleted:\n break\n",
"role": "user"
},
{
"content": "expandVerbChainsBySubcat",
"role": "assistant"
}
] |
def x(clauseTokens, clauseID, foundChains):
verb = WordTemplate({POSTAG: 'V'})
verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})
verbSaama = WordTemplate({POSTAG: 'V', ROOT: '^(saa)$'})
verbEiAra = WordTemplate({ROOT: '^(ära|ei)$', FORM: 'neg.*', POSTAG: 'V'})
verbInf = WordTemplate({POSTAG: 'V', FORM:
'^(da|des|ma|tama|ta|mas|mast|nud|tud|v|mata)$'})
regularVerbInf = WordTemplate({POSTAG: 'V', FORM:
'^(da|ma|maks|mas|mast|mata)$'})
olemaVerbInf = WordTemplate({POSTAG: 'V', FORM:
'^(nud|tud|da|ma|mas|mata)$'})
saamaVerbInf = WordTemplate({POSTAG: 'V', FORM: '^(tud|da|ma)$'})
sonaMitte = WordTemplate({ROOT: '^mitte$', POSTAG: 'D'})
annotatedWords = []
for verbObj in foundChains:
if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$',
verbObj[PATTERN][0]):
continue
annotatedWords.extend(verbObj[PHRASE])
finVerbs = [t for t in clauseTokens if verb.matches(t) and not verbInf.
matches(t)]
negFinVerbs = [t for t in finVerbs if verbEiAra.matches(t)]
looseNegVerbs = [t for t in negFinVerbs if t[WORD_ID] not in annotatedWords
]
if len(negFinVerbs) == 0 and len(finVerbs) >= 2:
return
elif len(looseNegVerbs) > 0:
return
elif not looseNegVerbs and negFinVerbs and len(negFinVerbs) - len(finVerbs
) > 0:
return
rVerbFreeVerbs = None
olemaFreeVerbs = None
saamaFreeVerbs = None
for verbObj in foundChains:
if verbObj[CLAUSE_IDX] == clauseID and verbObj[OTHER_VERBS]:
contextClear = False
lastToken = [token for token in clauseTokens if token[WORD_ID] ==
verbObj[PHRASE][-1]]
lastToken = lastToken[0]
analyses = [lastToken[ANALYSIS][j] for j in range(len(lastToken
[ANALYSIS])) if j in verbObj[ANALYSIS_IDS][-1]]
mainVerb = [analysis[ROOT] for analysis in analyses]
mainVerbLemma = mainVerb[0]
if 'saa' == mainVerbLemma:
if saamaFreeVerbs == None:
saamaFreeVerbs = [t[WORD_ID] for t in clauseTokens if
saamaVerbInf.matches(t) and t[WORD_ID] not in
annotatedWords]
if not saamaFreeVerbs:
contextClear = True
elif 'ole' == mainVerbLemma:
if olemaFreeVerbs == None:
olemaFreeVerbs = [t[WORD_ID] for t in clauseTokens if
olemaVerbInf.matches(t) and t[WORD_ID] not in
annotatedWords]
if not olemaFreeVerbs:
contextClear = True
else:
if rVerbFreeVerbs == None:
rVerbFreeVerbs = [t[WORD_ID] for t in clauseTokens if
regularVerbInf.matches(t) and t[WORD_ID] not in
annotatedWords]
if not rVerbFreeVerbs:
contextClear = True
if contextClear:
verbObj[OTHER_VERBS] = False
|
determineVerbChainContextualAmbiguity
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(clauseTokens, clauseID, foundChains):\n verb = WordTemplate({POSTAG: 'V'})\n verbOlema = WordTemplate({POSTAG: 'V', ROOT: '^(ole)$'})\n verbSaama = WordTemplate({POSTAG: 'V', ROOT: '^(saa)$'})\n verbEiAra = WordTemplate({ROOT: '^(ära|ei)$', FORM: 'neg.*', POSTAG: 'V'})\n verbInf = WordTemplate({POSTAG: 'V', FORM:\n '^(da|des|ma|tama|ta|mas|mast|nud|tud|v|mata)$'})\n regularVerbInf = WordTemplate({POSTAG: 'V', FORM:\n '^(da|ma|maks|mas|mast|mata)$'})\n olemaVerbInf = WordTemplate({POSTAG: 'V', FORM:\n '^(nud|tud|da|ma|mas|mata)$'})\n saamaVerbInf = WordTemplate({POSTAG: 'V', FORM: '^(tud|da|ma)$'})\n sonaMitte = WordTemplate({ROOT: '^mitte$', POSTAG: 'D'})\n annotatedWords = []\n for verbObj in foundChains:\n if len(verbObj[PATTERN]) == 1 and re.match('^(ei|ära|ega)$',\n verbObj[PATTERN][0]):\n continue\n annotatedWords.extend(verbObj[PHRASE])\n finVerbs = [t for t in clauseTokens if verb.matches(t) and not verbInf.\n matches(t)]\n negFinVerbs = [t for t in finVerbs if verbEiAra.matches(t)]\n looseNegVerbs = [t for t in negFinVerbs if t[WORD_ID] not in annotatedWords\n ]\n if len(negFinVerbs) == 0 and len(finVerbs) >= 2:\n return\n elif len(looseNegVerbs) > 0:\n return\n elif not looseNegVerbs and negFinVerbs and len(negFinVerbs) - len(finVerbs\n ) > 0:\n return\n rVerbFreeVerbs = None\n olemaFreeVerbs = None\n saamaFreeVerbs = None\n for verbObj in foundChains:\n if verbObj[CLAUSE_IDX] == clauseID and verbObj[OTHER_VERBS]:\n contextClear = False\n lastToken = [token for token in clauseTokens if token[WORD_ID] ==\n verbObj[PHRASE][-1]]\n lastToken = lastToken[0]\n analyses = [lastToken[ANALYSIS][j] for j in range(len(lastToken\n [ANALYSIS])) if j in verbObj[ANALYSIS_IDS][-1]]\n mainVerb = [analysis[ROOT] for analysis in analyses]\n mainVerbLemma = mainVerb[0]\n if 'saa' == mainVerbLemma:\n if saamaFreeVerbs == None:\n saamaFreeVerbs = [t[WORD_ID] for t in clauseTokens if \n saamaVerbInf.matches(t) and t[WORD_ID] not in\n annotatedWords]\n if not saamaFreeVerbs:\n contextClear = True\n elif 'ole' == mainVerbLemma:\n if olemaFreeVerbs == None:\n olemaFreeVerbs = [t[WORD_ID] for t in clauseTokens if \n olemaVerbInf.matches(t) and t[WORD_ID] not in\n annotatedWords]\n if not olemaFreeVerbs:\n contextClear = True\n else:\n if rVerbFreeVerbs == None:\n rVerbFreeVerbs = [t[WORD_ID] for t in clauseTokens if \n regularVerbInf.matches(t) and t[WORD_ID] not in\n annotatedWords]\n if not rVerbFreeVerbs:\n contextClear = True\n if contextClear:\n verbObj[OTHER_VERBS] = False\n",
"role": "user"
},
{
"content": "determineVerbChainContextualAmbiguity",
"role": "assistant"
}
] |
def x(tokenJson, requiredWordTemplate, discardAnalyses=None):
final_ids = set()
if isinstance(requiredWordTemplate, list):
for wt in requiredWordTemplate:
ids = wt.matchingAnalyseIndexes(tokenJson)
if ids:
final_ids.update(ids)
elif isinstance(requiredWordTemplate, WordTemplate):
ids = requiredWordTemplate.matchingAnalyseIndexes(tokenJson)
final_ids = set(ids)
if discardAnalyses:
if isinstance(discardAnalyses, WordTemplate):
ids2 = discardAnalyses.matchingAnalyseIndexes(tokenJson)
if ids2:
final_ids = final_ids.difference(set(ids2))
else:
raise Exception(
' The parameter discardAnalyses should be WordTemplate.')
if len(final_ids) == 0:
raise Exception(' Unable to find matching analyse IDs for: ' + str(
tokenJson))
return list(final_ids)
|
getMatchingAnalysisIDs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(tokenJson, requiredWordTemplate, discardAnalyses=None):\n final_ids = set()\n if isinstance(requiredWordTemplate, list):\n for wt in requiredWordTemplate:\n ids = wt.matchingAnalyseIndexes(tokenJson)\n if ids:\n final_ids.update(ids)\n elif isinstance(requiredWordTemplate, WordTemplate):\n ids = requiredWordTemplate.matchingAnalyseIndexes(tokenJson)\n final_ids = set(ids)\n if discardAnalyses:\n if isinstance(discardAnalyses, WordTemplate):\n ids2 = discardAnalyses.matchingAnalyseIndexes(tokenJson)\n if ids2:\n final_ids = final_ids.difference(set(ids2))\n else:\n raise Exception(\n ' The parameter discardAnalyses should be WordTemplate.')\n if len(final_ids) == 0:\n raise Exception(' Unable to find matching analyse IDs for: ' + str(\n tokenJson))\n return list(final_ids)\n",
"role": "user"
},
{
"content": "getMatchingAnalysisIDs",
"role": "assistant"
}
] |
def x(regex):
if len(regex) == 0:
return False
try:
re.compile(regex)
return True
except sre_constants.error:
return False
|
is_valid_regex
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(regex):\n if len(regex) == 0:\n return False\n try:\n re.compile(regex)\n return True\n except sre_constants.error:\n return False\n",
"role": "user"
},
{
"content": "is_valid_regex",
"role": "assistant"
}
] |
def x(elem1, elem2):
if overlapping_right(elem1, elem2):
elem1['end'] = elem2['start']
return elem1, elem2
|
delete_left
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(elem1, elem2):\n if overlapping_right(elem1, elem2):\n elem1['end'] = elem2['start']\n return elem1, elem2\n",
"role": "user"
},
{
"content": "delete_left",
"role": "assistant"
}
] |
def x(elem1, elem2):
if overlapping_left(elem1, elem2):
elem2['start'] = elem1['end']
return elem1, elem2
|
delete_right
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(elem1, elem2):\n if overlapping_left(elem1, elem2):\n elem2['start'] = elem1['end']\n return elem1, elem2\n",
"role": "user"
},
{
"content": "delete_right",
"role": "assistant"
}
] |
def x(layer):
yielded = set()
ri = layer[:]
for i1, elem1 in enumerate(ri):
for i2, elem2 in enumerate(ri):
if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:
inds = (i1, i2) if i1 < i2 else (i2, i1)
if inds not in yielded and in_by_identity(layer, elem1
) and in_by_identity(layer, elem2):
yielded.add(inds)
yield elem1, elem2
|
iterate_intersecting_pairs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(layer):\n yielded = set()\n ri = layer[:]\n for i1, elem1 in enumerate(ri):\n for i2, elem2 in enumerate(ri):\n if i1 != i2 and elem1['start'] <= elem2['start'] < elem1['end']:\n inds = (i1, i2) if i1 < i2 else (i2, i1)\n if inds not in yielded and in_by_identity(layer, elem1\n ) and in_by_identity(layer, elem2):\n yielded.add(inds)\n yield elem1, elem2\n",
"role": "user"
},
{
"content": "iterate_intersecting_pairs",
"role": "assistant"
}
] |
def x(self, text, **kwargs):
all_return_types = ['text', 'labels', 'tokens', 'strings']
return_type = all_return_types[0]
cutPhrases = True
cutMaxThreshold = 3
annotate_text = True
syntax_layer_name = LAYER_CONLL
force_parsing = False
for argName, argVal in kwargs.items():
if argName == 'cutPhrases':
cutPhrases = bool(argVal)
elif argName == 'force_parsing':
force_parsing = bool(argVal)
elif argName == 'syntax_layer':
syntax_layer_name = argVal
elif argName == 'cutMaxThreshold':
cutMaxThreshold = int(argVal)
elif argName == 'return_type':
if argVal.lower() in all_return_types:
return_type = argVal.lower()
else:
raise Exception(' Unexpected return type: ', argVal)
else:
raise Exception(' Unsupported argument given: ' + argName)
if not syntax_layer_name in text or force_parsing:
self.parser.parse_text(text)
if isinstance(self.parser, MaltParser):
syntax_layer_name = LAYER_CONLL
elif isinstance(self.parser, VISLCG3Parser):
syntax_layer_name = LAYER_VISLCG3
else:
raise Exception(' (!) Unknown type of syntactic parser: ', self
.parser)
if not text.is_tagged(ANALYSIS):
text = text.tag_analysis()
all_np_labels = []
for sentence_text in text.split_by(SENTENCES):
tokens = sentence_text[WORDS]
syntax_layer = sentence_text[syntax_layer_name]
np_labels = self._find_phrases(tokens, syntax_layer, cutPhrases,
cutMaxThreshold)
np_labels = [('O' if not l in ['B', 'I'] else l) for l in np_labels]
all_np_labels.extend(np_labels)
if annotate_text:
self.annotateText(text, NOUN_CHUNKS, all_np_labels)
if return_type == 'text':
return text
elif return_type == 'labels':
return all_np_labels
elif return_type == 'tokens':
return self.get_phrases(text, all_np_labels)
else:
return self.get_phrase_texts(text, all_np_labels)
|
analyze_text
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, **kwargs):\n all_return_types = ['text', 'labels', 'tokens', 'strings']\n return_type = all_return_types[0]\n cutPhrases = True\n cutMaxThreshold = 3\n annotate_text = True\n syntax_layer_name = LAYER_CONLL\n force_parsing = False\n for argName, argVal in kwargs.items():\n if argName == 'cutPhrases':\n cutPhrases = bool(argVal)\n elif argName == 'force_parsing':\n force_parsing = bool(argVal)\n elif argName == 'syntax_layer':\n syntax_layer_name = argVal\n elif argName == 'cutMaxThreshold':\n cutMaxThreshold = int(argVal)\n elif argName == 'return_type':\n if argVal.lower() in all_return_types:\n return_type = argVal.lower()\n else:\n raise Exception(' Unexpected return type: ', argVal)\n else:\n raise Exception(' Unsupported argument given: ' + argName)\n if not syntax_layer_name in text or force_parsing:\n self.parser.parse_text(text)\n if isinstance(self.parser, MaltParser):\n syntax_layer_name = LAYER_CONLL\n elif isinstance(self.parser, VISLCG3Parser):\n syntax_layer_name = LAYER_VISLCG3\n else:\n raise Exception(' (!) Unknown type of syntactic parser: ', self\n .parser)\n if not text.is_tagged(ANALYSIS):\n text = text.tag_analysis()\n all_np_labels = []\n for sentence_text in text.split_by(SENTENCES):\n tokens = sentence_text[WORDS]\n syntax_layer = sentence_text[syntax_layer_name]\n np_labels = self._find_phrases(tokens, syntax_layer, cutPhrases,\n cutMaxThreshold)\n np_labels = [('O' if not l in ['B', 'I'] else l) for l in np_labels]\n all_np_labels.extend(np_labels)\n if annotate_text:\n self.annotateText(text, NOUN_CHUNKS, all_np_labels)\n if return_type == 'text':\n return text\n elif return_type == 'labels':\n return all_np_labels\n elif return_type == 'tokens':\n return self.get_phrases(text, all_np_labels)\n else:\n return self.get_phrase_texts(text, all_np_labels)\n",
"role": "user"
},
{
"content": "analyze_text",
"role": "assistant"
}
] |
def x(self, token, onlyFirst=True):
if onlyFirst:
return token[ANALYSIS][0][POSTAG]
else:
return [a[POSTAG] for a in token[ANALYSIS]]
|
getPOS
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, token, onlyFirst=True):\n if onlyFirst:\n return token[ANALYSIS][0][POSTAG]\n else:\n return [a[POSTAG] for a in token[ANALYSIS]]\n",
"role": "user"
},
{
"content": "getPOS",
"role": "assistant"
}
] |
def x(self, i, sentence, NPlabels):
phrase = []
indices = []
if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':
phrase = [sentence[i]]
indices = [i]
j = i + 1
while j < len(sentence):
if NPlabels[j] in ['B', '']:
break
else:
phrase.append(sentence[j])
indices.append(j)
j += 1
return phrase, indices
|
getPhrase
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, i, sentence, NPlabels):\n phrase = []\n indices = []\n if 0 <= i and i < len(sentence) and NPlabels[i] == 'B':\n phrase = [sentence[i]]\n indices = [i]\n j = i + 1\n while j < len(sentence):\n if NPlabels[j] in ['B', '']:\n break\n else:\n phrase.append(sentence[j])\n indices.append(j)\n j += 1\n return phrase, indices\n",
"role": "user"
},
{
"content": "getPhrase",
"role": "assistant"
}
] |
def x(self, token1, token2):
forms1 = set([a[FORM] for a in token1[ANALYSIS]])
forms2 = set([a[FORM] for a in token2[ANALYSIS]])
if len(forms1.intersection(forms2)) == 0:
if 'sg g' in forms1:
if 'sg ter' in forms2:
return 'sg ter'
elif 'sg es' in forms2:
return 'sg es'
elif 'sg ab' in forms2:
return 'sg ab'
elif 'sg kom' in forms2:
return 'sg kom'
elif 'pl g' in forms1:
if 'pl ter' in forms2:
return 'pl ter'
elif 'pl es' in forms2:
return 'pl es'
elif 'pl ab' in forms2:
return 'pl ab'
elif 'pl kom' in forms2:
return 'pl kom'
return None
else:
return list(forms1.intersection(forms2))[0]
|
getCaseAgreement
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, token1, token2):\n forms1 = set([a[FORM] for a in token1[ANALYSIS]])\n forms2 = set([a[FORM] for a in token2[ANALYSIS]])\n if len(forms1.intersection(forms2)) == 0:\n if 'sg g' in forms1:\n if 'sg ter' in forms2:\n return 'sg ter'\n elif 'sg es' in forms2:\n return 'sg es'\n elif 'sg ab' in forms2:\n return 'sg ab'\n elif 'sg kom' in forms2:\n return 'sg kom'\n elif 'pl g' in forms1:\n if 'pl ter' in forms2:\n return 'pl ter'\n elif 'pl es' in forms2:\n return 'pl es'\n elif 'pl ab' in forms2:\n return 'pl ab'\n elif 'pl kom' in forms2:\n return 'pl kom'\n return None\n else:\n return list(forms1.intersection(forms2))[0]\n",
"role": "user"
},
{
"content": "getCaseAgreement",
"role": "assistant"
}
] |
def x(self, sentence, NPlabels, cutPhrases, cutMaxThreshold):
for i in range(len(sentence)):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase(i, sentence, NPlabels)
posTags = [self._getPOS(tok) for tok in phrase]
if len(posTags) > 1 and len(set(posTags).difference(set(['Y']))
) == 0:
for k in indices:
NPlabels[k] = ''
if len(posTags) > 1 and posTags[0] == 'A':
forms = [a[FORM] for a in phrase[0][ANALYSIS]]
if 'nud' in forms or 'tud' in forms or 'dud' in forms:
if i - 1 > -1 and (self._verbEi.matches(sentence[i - 1]
) or self._verbOle.matches(sentence[i - 1])):
NPlabels[i] = ''
if len(phrase) > 1 and set(posTags).issubset(set(['O', 'N'])):
for k in indices:
NPlabels[k] = ''
if posTags.count('N') > 7:
for k in range(len(indices)):
ind = indices[k]
pos = posTags[k]
if pos == 'N' and (k == 0 or k > 0 and NPlabels[ind - 1
] == ''):
NPlabels[ind] = ''
elif k > 0 and NPlabels[ind - 1] == '':
NPlabels[ind] = 'B'
elif k > 0 and NPlabels[ind - 1] != '':
NPlabels[ind] = 'I'
verbPartFollowedByTitle = -1
for j in range(len(phrase)):
if self._verbPastParticle.matches(phrase[j]) and not phrase[j][
TEXT].istitle() and j + 1 < len(phrase) and phrase[j + 1][
TEXT].istitle():
verbPartFollowedByTitle = j
if verbPartFollowedByTitle == 0:
NPlabels[indices[verbPartFollowedByTitle]] = ''
NPlabels[indices[verbPartFollowedByTitle] + 1] = 'B'
if posTags[0] == 'C' and i + 1 > -1 and NPlabels[i - 1] == '':
if self._k6ige.matches(sentence[i - 1]):
NPlabels[i - 1] = 'B'
NPlabels[i] = 'I'
if posTags[0] == 'C' and len(posTags) == 2 and posTags[1
] == 'H' and NPlabels[i] == 'B':
NPlabels[i] = ''
NPlabels[i + 1] = 'B'
for i in range(len(sentence)):
if NPlabels[i] == 'I':
if i == 0 or i - 1 > -1 and NPlabels[i - 1] not in ['I', 'B']:
NPlabels[i] = 'B'
if cutPhrases and cutMaxThreshold > 0:
NPheadPos = ['S', 'Y', 'H']
for i in range(len(sentence)):
if NPlabels[i] == 'B':
phrase, indices = self._getPhrase(i, sentence, NPlabels)
posTags = [self._getPOS(tok) for tok in phrase]
if len(phrase) > cutMaxThreshold:
for j in range(len(phrase)):
posTag = posTags[j]
if posTag in NPheadPos:
NPlabels[indices[j]] = 'B'
else:
NPlabels[indices[j]] = ''
|
apply_post_fixes
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, sentence, NPlabels, cutPhrases, cutMaxThreshold):\n for i in range(len(sentence)):\n if NPlabels[i] == 'B':\n phrase, indices = self._getPhrase(i, sentence, NPlabels)\n posTags = [self._getPOS(tok) for tok in phrase]\n if len(posTags) > 1 and len(set(posTags).difference(set(['Y']))\n ) == 0:\n for k in indices:\n NPlabels[k] = ''\n if len(posTags) > 1 and posTags[0] == 'A':\n forms = [a[FORM] for a in phrase[0][ANALYSIS]]\n if 'nud' in forms or 'tud' in forms or 'dud' in forms:\n if i - 1 > -1 and (self._verbEi.matches(sentence[i - 1]\n ) or self._verbOle.matches(sentence[i - 1])):\n NPlabels[i] = ''\n if len(phrase) > 1 and set(posTags).issubset(set(['O', 'N'])):\n for k in indices:\n NPlabels[k] = ''\n if posTags.count('N') > 7:\n for k in range(len(indices)):\n ind = indices[k]\n pos = posTags[k]\n if pos == 'N' and (k == 0 or k > 0 and NPlabels[ind - 1\n ] == ''):\n NPlabels[ind] = ''\n elif k > 0 and NPlabels[ind - 1] == '':\n NPlabels[ind] = 'B'\n elif k > 0 and NPlabels[ind - 1] != '':\n NPlabels[ind] = 'I'\n verbPartFollowedByTitle = -1\n for j in range(len(phrase)):\n if self._verbPastParticle.matches(phrase[j]) and not phrase[j][\n TEXT].istitle() and j + 1 < len(phrase) and phrase[j + 1][\n TEXT].istitle():\n verbPartFollowedByTitle = j\n if verbPartFollowedByTitle == 0:\n NPlabels[indices[verbPartFollowedByTitle]] = ''\n NPlabels[indices[verbPartFollowedByTitle] + 1] = 'B'\n if posTags[0] == 'C' and i + 1 > -1 and NPlabels[i - 1] == '':\n if self._k6ige.matches(sentence[i - 1]):\n NPlabels[i - 1] = 'B'\n NPlabels[i] = 'I'\n if posTags[0] == 'C' and len(posTags) == 2 and posTags[1\n ] == 'H' and NPlabels[i] == 'B':\n NPlabels[i] = ''\n NPlabels[i + 1] = 'B'\n for i in range(len(sentence)):\n if NPlabels[i] == 'I':\n if i == 0 or i - 1 > -1 and NPlabels[i - 1] not in ['I', 'B']:\n NPlabels[i] = 'B'\n if cutPhrases and cutMaxThreshold > 0:\n NPheadPos = ['S', 'Y', 'H']\n for i in range(len(sentence)):\n if NPlabels[i] == 'B':\n phrase, indices = self._getPhrase(i, sentence, NPlabels)\n posTags = [self._getPOS(tok) for tok in phrase]\n if len(phrase) > cutMaxThreshold:\n for j in range(len(phrase)):\n posTag = posTags[j]\n if posTag in NPheadPos:\n NPlabels[indices[j]] = 'B'\n else:\n NPlabels[indices[j]] = ''\n",
"role": "user"
},
{
"content": "apply_post_fixes",
"role": "assistant"
}
] |
def x(self, text, np_labels):
input_words = []
if isinstance(text, Text):
input_words = text.words
elif isinstance(text, list) and len(text) > 0 and isinstance(text[0], dict
) and TEXT in text[0]:
input_words = text
elif text:
raise Exception('Unexpected input text:', text)
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words (' + str(len(input_words)) +
') does not match number of labels ' + str(len(np_labels)))
phrases = []
for i, word in enumerate(input_words):
label = np_labels[i]
if label == 'B':
phrases.append([])
if label in ['B', 'I']:
phrases[-1].append(word)
return phrases
|
get_phrases
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, np_labels):\n input_words = []\n if isinstance(text, Text):\n input_words = text.words\n elif isinstance(text, list) and len(text) > 0 and isinstance(text[0], dict\n ) and TEXT in text[0]:\n input_words = text\n elif text:\n raise Exception('Unexpected input text:', text)\n if len(input_words) != len(np_labels):\n raise Exception(' (!) Number of words (' + str(len(input_words)) +\n ') does not match number of labels ' + str(len(np_labels)))\n phrases = []\n for i, word in enumerate(input_words):\n label = np_labels[i]\n if label == 'B':\n phrases.append([])\n if label in ['B', 'I']:\n phrases[-1].append(word)\n return phrases\n",
"role": "user"
},
{
"content": "get_phrases",
"role": "assistant"
}
] |
def x(self, text, np_labels):
phrases = self.get_phrases(text, np_labels)
texts = []
for phrase in phrases:
phrase_str = ' '.join([word[TEXT] for word in phrase])
texts.append(phrase_str)
return texts
|
get_phrase_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, np_labels):\n phrases = self.get_phrases(text, np_labels)\n texts = []\n for phrase in phrases:\n phrase_str = ' '.join([word[TEXT] for word in phrase])\n texts.append(phrase_str)\n return texts\n",
"role": "user"
},
{
"content": "get_phrase_texts",
"role": "assistant"
}
] |
def x(self, text, layer, np_labels=None):
input_words = None
if isinstance(text, Text):
input_words = text.words
else:
raise Exception(' Input text should be of type Text, but it is ', text)
phrases = []
if not np_labels:
np_labels = self.analyze_text(text, return_type='labels')
if len(input_words) != len(np_labels):
raise Exception(' (!) Number of words (' + str(len(input_words)) +
') does not match number of labels ' + str(len(np_labels)))
phrases = self.get_phrases(text, np_labels)
annotations = []
if phrases:
for phrase in phrases:
phrase_annotation = {}
phrase_annotation[START] = phrase[0][START]
phrase_annotation[END] = phrase[-1][END]
phrase_annotation[TEXT] = ' '.join([word[TEXT] for word in phrase])
annotations.append(phrase_annotation)
text[layer] = annotations
return text
|
annotateText
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, layer, np_labels=None):\n input_words = None\n if isinstance(text, Text):\n input_words = text.words\n else:\n raise Exception(' Input text should be of type Text, but it is ', text)\n phrases = []\n if not np_labels:\n np_labels = self.analyze_text(text, return_type='labels')\n if len(input_words) != len(np_labels):\n raise Exception(' (!) Number of words (' + str(len(input_words)) +\n ') does not match number of labels ' + str(len(np_labels)))\n phrases = self.get_phrases(text, np_labels)\n annotations = []\n if phrases:\n for phrase in phrases:\n phrase_annotation = {}\n phrase_annotation[START] = phrase[0][START]\n phrase_annotation[END] = phrase[-1][END]\n phrase_annotation[TEXT] = ' '.join([word[TEXT] for word in phrase])\n annotations.append(phrase_annotation)\n text[layer] = annotations\n return text\n",
"role": "user"
},
{
"content": "annotateText",
"role": "assistant"
}
] |
def x(elements, by, translate=False, sep=' '):
outer_spans = [spans(elem) for elem in by]
return divide_by_spans(elements, outer_spans, translate=translate, sep=sep)
|
divide
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(elements, by, translate=False, sep=' '):\n outer_spans = [spans(elem) for elem in by]\n return divide_by_spans(elements, outer_spans, translate=translate, sep=sep)\n",
"role": "user"
},
{
"content": "divide",
"role": "assistant"
}
] |
def x(text, openDelim, closeDelim):
openRE = re.compile(openDelim)
closeRE = re.compile(closeDelim)
spans = []
nest = 0
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next:
while nest:
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
if nest:
nest -= 1
last = end.end()
end = closeRE.search(text, end.end())
if not end:
if spans:
span = spans[0][0], last
else:
span = start.start(), last
spans = [span]
break
else:
spans.append((start.start(), end.end()))
start = next
end = closeRE.search(text, next.end())
break
if next != start:
nest += 1
return dropSpans(spans, text)
|
dropNested
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text, openDelim, closeDelim):\n openRE = re.compile(openDelim)\n closeRE = re.compile(closeDelim)\n spans = []\n nest = 0\n start = openRE.search(text, 0)\n if not start:\n return text\n end = closeRE.search(text, start.end())\n next = start\n while end:\n next = openRE.search(text, next.end())\n if not next:\n while nest:\n nest -= 1\n end0 = closeRE.search(text, end.end())\n if end0:\n end = end0\n else:\n break\n spans.append((start.start(), end.end()))\n break\n while end.end() < next.start():\n if nest:\n nest -= 1\n last = end.end()\n end = closeRE.search(text, end.end())\n if not end:\n if spans:\n span = spans[0][0], last\n else:\n span = start.start(), last\n spans = [span]\n break\n else:\n spans.append((start.start(), end.end()))\n start = next\n end = closeRE.search(text, next.end())\n break\n if next != start:\n nest += 1\n return dropSpans(spans, text)\n",
"role": "user"
},
{
"content": "dropNested",
"role": "assistant"
}
] |
def x(spans, text):
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s:
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
|
dropSpans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(spans, text):\n spans.sort()\n res = ''\n offset = 0\n for s, e in spans:\n if offset <= s:\n if offset < s:\n res += text[offset:s]\n offset = e\n res += text[offset:]\n return res\n",
"role": "user"
},
{
"content": "dropSpans",
"role": "assistant"
}
] |
def x(text):
text = bold_italic.sub('\\1', text)
text = bold.sub('\\1', text)
text = italic_quote.sub('"\\1"', text)
text = italic.sub('"\\1"', text)
text = quote_quote.sub('"\\1"', text)
text = text.replace("'''", '').replace("''", '"')
text = newlines.sub('\\n', text)
text = bulletlist.sub('', text)
spans = []
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
text = dropSpans(spans, text)
for tag in discardElements:
text = dropNested(text, '<\\s*%s\\b[^>/]*>' % tag, '<\\s*/\\s*%s>' %
tag)
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(u' (,:\\.\\)\\]»)', '\\1', text)
text = re.sub(u'(\\[\\(«) ', '\\1', text)
text = re.sub('\\n\\W+?\\n', '\n', text, flags=re.U)
text = text.replace(',,', ',').replace(',.', '.')
return text
|
clean
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n text = bold_italic.sub('\\\\1', text)\n text = bold.sub('\\\\1', text)\n text = italic_quote.sub('\"\\\\1\"', text)\n text = italic.sub('\"\\\\1\"', text)\n text = quote_quote.sub('\"\\\\1\"', text)\n text = text.replace(\"'''\", '').replace(\"''\", '\"')\n text = newlines.sub('\\\\n', text)\n text = bulletlist.sub('', text)\n spans = []\n for m in comment.finditer(text):\n spans.append((m.start(), m.end()))\n for pattern in selfClosing_tag_patterns:\n for m in pattern.finditer(text):\n spans.append((m.start(), m.end()))\n for left, right in ignored_tag_patterns:\n for m in left.finditer(text):\n spans.append((m.start(), m.end()))\n for m in right.finditer(text):\n spans.append((m.start(), m.end()))\n text = dropSpans(spans, text)\n for tag in discardElements:\n text = dropNested(text, '<\\\\s*%s\\\\b[^>/]*>' % tag, '<\\\\s*/\\\\s*%s>' %\n tag)\n for pattern, placeholder in placeholder_tag_patterns:\n index = 1\n for match in pattern.finditer(text):\n text = text.replace(match.group(), '%s_%d' % (placeholder, index))\n index += 1\n text = text.replace('<<', u'«').replace('>>', u'»')\n text = text.replace('\\t', ' ')\n text = spaces.sub(' ', text)\n text = dots.sub('...', text)\n text = re.sub(u' (,:\\\\.\\\\)\\\\]»)', '\\\\1', text)\n text = re.sub(u'(\\\\[\\\\(«) ', '\\\\1', text)\n text = re.sub('\\\\n\\\\W+?\\\\n', '\\n', text, flags=re.U)\n text = text.replace(',,', ',').replace(',.', '.')\n return text\n",
"role": "user"
},
{
"content": "clean",
"role": "assistant"
}
] |
def x(self, docs, **kwargs):
use_vabamorf = kwargs.get('vabamorf', True)
use_vabamorf_disambiguate = kwargs.get('disambiguate', True)
use_pre_disambiguation = kwargs.get('pre_disambiguate', True)
use_post_disambiguation = kwargs.get('post_disambiguate', True)
kwargs = kwargs
kwargs['disambiguate'] = False
kwargs['guess'] = True
kwargs['propername'] = True
if not self.__isListOfLists(docs):
if not self.__isListOfTexts(docs):
raise Exception(
"Unexpected input argument 'docs': should be a list of strings or Text-s;"
)
collections = [docs]
else:
collections = docs
for i in range(len(collections)):
docs = [Text(doc, **kwargs) for doc in collections[i]]
if use_vabamorf:
docs = [doc.tag_analysis() for doc in docs]
if use_pre_disambiguation:
docs = self.pre_disambiguate(docs)
if use_vabamorf_disambiguate:
docs = self.__vabamorf_disambiguate(docs)
collections[i] = docs
if use_post_disambiguation:
collections = self.post_disambiguate(collections)
return collections if len(collections) > 1 else collections[0]
|
disambiguate
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs, **kwargs):\n use_vabamorf = kwargs.get('vabamorf', True)\n use_vabamorf_disambiguate = kwargs.get('disambiguate', True)\n use_pre_disambiguation = kwargs.get('pre_disambiguate', True)\n use_post_disambiguation = kwargs.get('post_disambiguate', True)\n kwargs = kwargs\n kwargs['disambiguate'] = False\n kwargs['guess'] = True\n kwargs['propername'] = True\n if not self.__isListOfLists(docs):\n if not self.__isListOfTexts(docs):\n raise Exception(\n \"Unexpected input argument 'docs': should be a list of strings or Text-s;\"\n )\n collections = [docs]\n else:\n collections = docs\n for i in range(len(collections)):\n docs = [Text(doc, **kwargs) for doc in collections[i]]\n if use_vabamorf:\n docs = [doc.tag_analysis() for doc in docs]\n if use_pre_disambiguation:\n docs = self.pre_disambiguate(docs)\n if use_vabamorf_disambiguate:\n docs = self.__vabamorf_disambiguate(docs)\n collections[i] = docs\n if use_post_disambiguation:\n collections = self.post_disambiguate(collections)\n return collections if len(collections) > 1 else collections[0]\n",
"role": "user"
},
{
"content": "disambiguate",
"role": "assistant"
}
] |
def x(self, docs):
return isinstance(docs, list) and all(isinstance(d, (basestring, Text)) for
d in docs)
|
isListOfTexts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs):\n return isinstance(docs, list) and all(isinstance(d, (basestring, Text)) for\n d in docs)\n",
"role": "user"
},
{
"content": "isListOfTexts",
"role": "assistant"
}
] |
def x(self, docs):
return isinstance(docs, list) and all(self.__isListOfTexts(ds) for ds in
docs)
|
isListOfLists
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs):\n return isinstance(docs, list) and all(self.__isListOfTexts(ds) for ds in\n docs)\n",
"role": "user"
},
{
"content": "isListOfLists",
"role": "assistant"
}
] |
def x(self, docs):
lemmaFreq = dict()
for doc in docs:
for word in doc[WORDS]:
uniqLemmas = set()
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
uniqLemmas.add(analysis[ROOT])
for lemma in uniqLemmas:
if lemma not in lemmaFreq:
lemmaFreq[lemma] = 1
else:
lemmaFreq[lemma] += 1
return lemmaFreq
|
create_proper_names_lexicon
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs):\n lemmaFreq = dict()\n for doc in docs:\n for word in doc[WORDS]:\n uniqLemmas = set()\n for analysis in word[ANALYSIS]:\n if analysis[POSTAG] == 'H':\n uniqLemmas.add(analysis[ROOT])\n for lemma in uniqLemmas:\n if lemma not in lemmaFreq:\n lemmaFreq[lemma] = 1\n else:\n lemmaFreq[lemma] += 1\n return lemmaFreq\n",
"role": "user"
},
{
"content": "create_proper_names_lexicon",
"role": "assistant"
}
] |
def x(self, docs, lexicon):
for doc in docs:
for word in doc[WORDS]:
if len(word[ANALYSIS]) > 1:
highestFreq = 0
properNameAnalyses = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
if analysis[ROOT] in lexicon:
properNameAnalyses.append(analysis)
if lexicon[analysis[ROOT]] > highestFreq:
highestFreq = lexicon[analysis[ROOT]]
else:
raise Exception(' Unable to find lemma ',
analysis[ROOT], ' from the lexicon. ')
if highestFreq > 0:
toDelete = []
for analysis in properNameAnalyses:
if lexicon[analysis[ROOT]] < highestFreq:
toDelete.append(analysis)
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
|
disambiguate_proper_names_1
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs, lexicon):\n for doc in docs:\n for word in doc[WORDS]:\n if len(word[ANALYSIS]) > 1:\n highestFreq = 0\n properNameAnalyses = []\n for analysis in word[ANALYSIS]:\n if analysis[POSTAG] == 'H':\n if analysis[ROOT] in lexicon:\n properNameAnalyses.append(analysis)\n if lexicon[analysis[ROOT]] > highestFreq:\n highestFreq = lexicon[analysis[ROOT]]\n else:\n raise Exception(' Unable to find lemma ',\n analysis[ROOT], ' from the lexicon. ')\n if highestFreq > 0:\n toDelete = []\n for analysis in properNameAnalyses:\n if lexicon[analysis[ROOT]] < highestFreq:\n toDelete.append(analysis)\n for analysis in toDelete:\n word[ANALYSIS].remove(analysis)\n",
"role": "user"
},
{
"content": "disambiguate_proper_names_1",
"role": "assistant"
}
] |
def x(self, docs):
certainNames = set()
for doc in docs:
for word in doc[WORDS]:
if all([(a[POSTAG] == 'H') for a in word[ANALYSIS]]):
for analysis in word[ANALYSIS]:
certainNames.add(analysis[ROOT])
return certainNames
|
find_certain_proper_names
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs):\n certainNames = set()\n for doc in docs:\n for word in doc[WORDS]:\n if all([(a[POSTAG] == 'H') for a in word[ANALYSIS]]):\n for analysis in word[ANALYSIS]:\n certainNames.add(analysis[ROOT])\n return certainNames\n",
"role": "user"
},
{
"content": "find_certain_proper_names",
"role": "assistant"
}
] |
def x(self, docs):
sentInitialNames = set()
for doc in docs:
for sentence in doc.divide(layer=WORDS, by=SENTENCES):
sentencePos = 0
for i in range(len(sentence)):
word = sentence[i]
if all([(a[POSTAG] == 'Z') for a in word[ANALYSIS]]
) and not re.match('^[,;]+$', word[TEXT]):
sentencePos = 0
continue
if not re.match('^[1234567890]*$', word[TEXT]
) and not re.match(
'^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$',
word[TEXT]) and re.match('^[1234567890.()]*$', word[TEXT]):
sentencePos = 0
continue
if sentencePos == 0:
h_postags = [(a[POSTAG] == 'H') for a in word[ANALYSIS]]
if any(h_postags) and not all(h_postags):
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H':
sentInitialNames.add(analysis[ROOT])
sentencePos += 1
return sentInitialNames
|
find_sentence_initial_proper_names
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs):\n sentInitialNames = set()\n for doc in docs:\n for sentence in doc.divide(layer=WORDS, by=SENTENCES):\n sentencePos = 0\n for i in range(len(sentence)):\n word = sentence[i]\n if all([(a[POSTAG] == 'Z') for a in word[ANALYSIS]]\n ) and not re.match('^[,;]+$', word[TEXT]):\n sentencePos = 0\n continue\n if not re.match('^[1234567890]*$', word[TEXT]\n ) and not re.match(\n '^[1234567890]{1,2}.[1234567890]{1,2}.[1234567890]{4}$',\n word[TEXT]) and re.match('^[1234567890.()]*$', word[TEXT]):\n sentencePos = 0\n continue\n if sentencePos == 0:\n h_postags = [(a[POSTAG] == 'H') for a in word[ANALYSIS]]\n if any(h_postags) and not all(h_postags):\n for analysis in word[ANALYSIS]:\n if analysis[POSTAG] == 'H':\n sentInitialNames.add(analysis[ROOT])\n sentencePos += 1\n return sentInitialNames\n",
"role": "user"
},
{
"content": "find_sentence_initial_proper_names",
"role": "assistant"
}
] |
def x(self, docs, lemma_set):
for doc in docs:
for word in doc[WORDS]:
if len(word[ANALYSIS]) > 1:
toDelete = []
for analysis in word[ANALYSIS]:
if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:
toDelete.append(analysis)
if toDelete:
for analysis in toDelete:
word[ANALYSIS].remove(analysis)
|
remove_redundant_proper_names
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, docs, lemma_set):\n for doc in docs:\n for word in doc[WORDS]:\n if len(word[ANALYSIS]) > 1:\n toDelete = []\n for analysis in word[ANALYSIS]:\n if analysis[POSTAG] == 'H' and analysis[ROOT] in lemma_set:\n toDelete.append(analysis)\n if toDelete:\n for analysis in toDelete:\n word[ANALYSIS].remove(analysis)\n",
"role": "user"
},
{
"content": "remove_redundant_proper_names",
"role": "assistant"
}
] |