| source_code (string, lengths 45–7.24k) | function_name (string, lengths 1–111) | messages (list) |
|---|---|---|
def x(document):
def x(list_of_lists):
for i in itertools.product(*[set(j) for j in list_of_lists]):
yield ' '.join(i)
sents = document.split_by_sentences()
for order, sent in enumerate(sents):
postags = list(unroll_lists(sent.postag_lists))
lemmas = list(unroll_lists(sent.lemma_lists))
text = sent.text
words = copy.deepcopy(sent.words)
for i in words:
del i['start']
del i['end']
sentence = {'estnltk_text_object': json.dumps(sent), 'meta': {
'order_in_parent': order}, 'text': text, 'words': words,
'postags': postags, 'lemmas': lemmas}
yield json.dumps(sentence)
|
get_indexable_sentences
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(document):\n\n def x(list_of_lists):\n for i in itertools.product(*[set(j) for j in list_of_lists]):\n yield ' '.join(i)\n sents = document.split_by_sentences()\n for order, sent in enumerate(sents):\n postags = list(unroll_lists(sent.postag_lists))\n lemmas = list(unroll_lists(sent.lemma_lists))\n text = sent.text\n words = copy.deepcopy(sent.words)\n for i in words:\n del i['start']\n del i['end']\n sentence = {'estnltk_text_object': json.dumps(sent), 'meta': {\n 'order_in_parent': order}, 'text': text, 'words': words,\n 'postags': postags, 'lemmas': lemmas}\n yield json.dumps(sentence)\n",
"role": "user"
},
{
"content": "get_indexable_sentences",
"role": "assistant"
}
] |
def x(toks, templates):
for template in templates:
name = '|'.join([('%s[%d]' % (f, o)) for f, o in template])
for t in range(len(toks)):
values_list = []
for field, offset in template:
p = t + offset
if p < 0 or p >= len(toks):
values_list = []
break
if field in toks[p]:
value = toks[p][field]
values_list.append(value if isinstance(value, (set,
list)) else [value])
if len(template) == len(values_list):
for values in product(*values_list):
toks[t]['F'].append('%s=%s' % (name, '|'.join(values)))
|
apply_templates
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(toks, templates):\n for template in templates:\n name = '|'.join([('%s[%d]' % (f, o)) for f, o in template])\n for t in range(len(toks)):\n values_list = []\n for field, offset in template:\n p = t + offset\n if p < 0 or p >= len(toks):\n values_list = []\n break\n if field in toks[p]:\n value = toks[p][field]\n values_list.append(value if isinstance(value, (set,\n list)) else [value])\n if len(template) == len(values_list):\n for values in product(*values_list):\n toks[t]['F'].append('%s=%s' % (name, '|'.join(values)))\n",
"role": "user"
},
{
"content": "apply_templates",
"role": "assistant"
}
] |
def x(jsondoc):
sentences = []
for json_sent in jsondoc.split_by_sentences():
snt = Sentence()
zipped = list(zip(json_sent.word_texts, json_sent.lemmas, json_sent
.root_tokens, json_sent.forms, json_sent.endings, json_sent.
postags))
json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens,
FORM: form, ENDING: ending, POSTAG: postag} for text, lemma,
root_tokens, form, ending, postag in zipped]
for tok, word in zip(json_toks, json_sent.words):
if LABEL in word:
tok[LABEL] = word[LABEL]
for json_tok in json_toks:
token = json_token_to_estner_token(json_tok)
snt.append(token)
if snt:
for i in range(1, len(snt)):
snt[i - 1].next = snt[i]
snt[i].prew = snt[i - 1]
sentences.append(snt)
return Document(sentences=sentences)
|
json_document_to_estner_document
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(jsondoc):\n sentences = []\n for json_sent in jsondoc.split_by_sentences():\n snt = Sentence()\n zipped = list(zip(json_sent.word_texts, json_sent.lemmas, json_sent\n .root_tokens, json_sent.forms, json_sent.endings, json_sent.\n postags))\n json_toks = [{TEXT: text, LEMMA: lemma, ROOT_TOKENS: root_tokens,\n FORM: form, ENDING: ending, POSTAG: postag} for text, lemma,\n root_tokens, form, ending, postag in zipped]\n for tok, word in zip(json_toks, json_sent.words):\n if LABEL in word:\n tok[LABEL] = word[LABEL]\n for json_tok in json_toks:\n token = json_token_to_estner_token(json_tok)\n snt.append(token)\n if snt:\n for i in range(1, len(snt)):\n snt[i - 1].next = snt[i]\n snt[i].prew = snt[i - 1]\n sentences.append(snt)\n return Document(sentences=sentences)\n",
"role": "user"
},
{
"content": "json_document_to_estner_document",
"role": "assistant"
}
] |
def x(json_token):
token = Token()
word = json_token[TEXT]
lemma = word
morph = ''
label = 'O'
ending = json_token[ENDING]
root_toks = json_token[ROOT_TOKENS]
if isinstance(root_toks[0], list):
root_toks = root_toks[0]
lemma = '_'.join(root_toks) + ('+' + ending if ending else '')
if not lemma:
lemma = word
morph = '_%s_' % json_token[POSTAG]
morph += ' ' + json_token[FORM]
if LABEL in json_token:
label = json_token[LABEL]
return Token(word, lemma, morph, label)
|
json_token_to_estner_token
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(json_token):\n token = Token()\n word = json_token[TEXT]\n lemma = word\n morph = ''\n label = 'O'\n ending = json_token[ENDING]\n root_toks = json_token[ROOT_TOKENS]\n if isinstance(root_toks[0], list):\n root_toks = root_toks[0]\n lemma = '_'.join(root_toks) + ('+' + ending if ending else '')\n if not lemma:\n lemma = word\n morph = '_%s_' % json_token[POSTAG]\n morph += ' ' + json_token[FORM]\n if LABEL in json_token:\n label = json_token[LABEL]\n return Token(word, lemma, morph, label)\n",
"role": "user"
},
{
"content": "json_token_to_estner_token",
"role": "assistant"
}
] |
def x(self):
try:
os.makedirs(self.model_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
|
makedir
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n try:\n os.makedirs(self.model_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n",
"role": "user"
},
{
"content": "makedir",
"role": "assistant"
}
] |
def x(self, settings_module):
source = inspect.getsourcefile(settings_module)
dest = os.path.join(self.model_dir, 'settings.py')
shutil.copyfile(source, dest)
|
copy_settings
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, settings_module):\n source = inspect.getsourcefile(settings_module)\n dest = os.path.join(self.model_dir, 'settings.py')\n shutil.copyfile(source, dest)\n",
"role": "user"
},
{
"content": "copy_settings",
"role": "assistant"
}
] |
def x(self):
mname = 'loaded_module'
if six.PY2:
import imp
return imp.load_source(mname, self.settings_filename)
else:
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(mname, self.
settings_filename)
return loader.load_module(mname)
|
load_settings
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n mname = 'loaded_module'\n if six.PY2:\n import imp\n return imp.load_source(mname, self.settings_filename)\n else:\n import importlib.machinery\n loader = importlib.machinery.SourceFileLoader(mname, self.\n settings_filename)\n return loader.load_module(mname)\n",
"role": "user"
},
{
"content": "load_settings",
"role": "assistant"
}
] |
def x(self, jsondocs, model_dir):
modelUtil = ModelStorageUtil(model_dir)
modelUtil.makedir()
modelUtil.copy_settings(self.settings)
nerdocs = [json_document_to_estner_document(jsondoc) for jsondoc in
jsondocs]
self.fex.prepare(nerdocs)
self.fex.process(nerdocs)
self.trainer.train(nerdocs, modelUtil.model_filename)
|
train
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, jsondocs, model_dir):\n modelUtil = ModelStorageUtil(model_dir)\n modelUtil.makedir()\n modelUtil.copy_settings(self.settings)\n nerdocs = [json_document_to_estner_document(jsondoc) for jsondoc in\n jsondocs]\n self.fex.prepare(nerdocs)\n self.fex.process(nerdocs)\n self.trainer.train(nerdocs, modelUtil.model_filename)\n",
"role": "user"
},
{
"content": "train",
"role": "assistant"
}
] |
def x(lines, **kwargs):
if not isinstance(lines, list):
raise Exception(
'(!) Unexpected type of input argument! Expected a list of strings.'
)
remove_caps = False
remove_clo = False
double_quotes = None
fix_sent_tags = False
for argName, argVal in kwargs.items():
if argName in ['remove_caps', 'remove_cap']:
remove_caps = bool(argVal)
if argName == 'remove_clo':
remove_clo = bool(argVal)
if argName == 'fix_sent_tags':
fix_sent_tags = bool(argVal)
if argName in ['double_quotes', 'quotes'] and argVal and argVal.lower(
) in ['esc', 'escape', 'unesc', 'unescape']:
double_quotes = argVal.lower()
pat_token_line = re.compile('^"<(.+)>"\\s*$')
pat_analysis_start = re.compile('^(\\s+)"(.+)"(\\s[LZT].*)$')
i = 0
to_delete = []
while i < len(lines):
line = lines[i]
isAnalysisLine = line.startswith(' ') or line.startswith('\t')
if not isAnalysisLine:
removeCurrentTokenAndAnalysis = False
if line.startswith('"<{>"'):
if i + 1 == len(lines) or i + 1 < len(lines
) and not '"{"' in lines[i + 1]:
removeCurrentTokenAndAnalysis = True
if line.startswith('"<}>"'):
if i + 1 == len(lines) or i + 1 < len(lines
) and not '"}"' in lines[i + 1]:
removeCurrentTokenAndAnalysis = True
if removeCurrentTokenAndAnalysis:
del lines[i]
j = i
while j < len(lines):
line2 = lines[j]
if line2.startswith(' ') or line2.startswith('\t'):
del lines[j]
else:
break
continue
if double_quotes:
if pat_token_line.match(lines[i]):
token_cleaned = pat_token_line.match(lines[i]).group(1)
if double_quotes in ['esc', 'escape']:
token_cleaned = token_cleaned.replace('"', '\\"')
lines[i] = '"<' + token_cleaned + '>"'
elif double_quotes in ['unesc', 'unescape']:
token_cleaned = token_cleaned.replace('\\"', '"')
lines[i] = '"<' + token_cleaned + '>"'
else:
lines[i] = re.sub('^\\s{4,}', '\t', lines[i])
lines[i] = re.sub('(.*)" ([LZT].*) CLBC (.*)', '\\1" \\2 \\3',
lines[i])
lines[i] = re.sub('(.*)" L([^"<]*) ["<]([^@]*) (@.*)',
'\\1" L\\2 \\4', lines[i])
if remove_caps:
lines[i] = lines[i].replace(' cap ', ' ')
if double_quotes and double_quotes in ['unesc', 'unescape']:
lines[i] = lines[i].replace('\\"', '"')
elif double_quotes and double_quotes in ['esc', 'escape']:
m = pat_analysis_start.match(lines[i])
if m:
start = m.group(1)
content = m.group(2)
end = m.group(3)
content = content.replace('"', '\\"')
lines[i] = ''.join([start, '"', content, '"', end])
if remove_clo and 'CL' in lines[i]:
lines[i] = re.sub('\\sCL[OCB]', ' ', lines[i])
lines[i] = re.sub('\\s{2,}', ' ', lines[i])
if fix_sent_tags:
if i - 1 > -1 and ('"</s>"' in lines[i - 1] or '"<s>"' in
lines[i - 1]):
lines[i] = ''
i += 1
return lines
|
cleanup_lines
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(lines, **kwargs):\n if not isinstance(lines, list):\n raise Exception(\n '(!) Unexpected type of input argument! Expected a list of strings.'\n )\n remove_caps = False\n remove_clo = False\n double_quotes = None\n fix_sent_tags = False\n for argName, argVal in kwargs.items():\n if argName in ['remove_caps', 'remove_cap']:\n remove_caps = bool(argVal)\n if argName == 'remove_clo':\n remove_clo = bool(argVal)\n if argName == 'fix_sent_tags':\n fix_sent_tags = bool(argVal)\n if argName in ['double_quotes', 'quotes'] and argVal and argVal.lower(\n ) in ['esc', 'escape', 'unesc', 'unescape']:\n double_quotes = argVal.lower()\n pat_token_line = re.compile('^\"<(.+)>\"\\\\s*$')\n pat_analysis_start = re.compile('^(\\\\s+)\"(.+)\"(\\\\s[LZT].*)$')\n i = 0\n to_delete = []\n while i < len(lines):\n line = lines[i]\n isAnalysisLine = line.startswith(' ') or line.startswith('\\t')\n if not isAnalysisLine:\n removeCurrentTokenAndAnalysis = False\n if line.startswith('\"<{>\"'):\n if i + 1 == len(lines) or i + 1 < len(lines\n ) and not '\"{\"' in lines[i + 1]:\n removeCurrentTokenAndAnalysis = True\n if line.startswith('\"<}>\"'):\n if i + 1 == len(lines) or i + 1 < len(lines\n ) and not '\"}\"' in lines[i + 1]:\n removeCurrentTokenAndAnalysis = True\n if removeCurrentTokenAndAnalysis:\n del lines[i]\n j = i\n while j < len(lines):\n line2 = lines[j]\n if line2.startswith(' ') or line2.startswith('\\t'):\n del lines[j]\n else:\n break\n continue\n if double_quotes:\n if pat_token_line.match(lines[i]):\n token_cleaned = pat_token_line.match(lines[i]).group(1)\n if double_quotes in ['esc', 'escape']:\n token_cleaned = token_cleaned.replace('\"', '\\\\\"')\n lines[i] = '\"<' + token_cleaned + '>\"'\n elif double_quotes in ['unesc', 'unescape']:\n token_cleaned = token_cleaned.replace('\\\\\"', '\"')\n lines[i] = '\"<' + token_cleaned + '>\"'\n else:\n lines[i] = re.sub('^\\\\s{4,}', '\\t', lines[i])\n lines[i] = re.sub('(.*)\" ([LZT].*) CLBC (.*)', '\\\\1\" \\\\2 \\\\3',\n lines[i])\n lines[i] = re.sub('(.*)\" L([^\"<]*) [\"<]([^@]*) (@.*)',\n '\\\\1\" L\\\\2 \\\\4', lines[i])\n if remove_caps:\n lines[i] = lines[i].replace(' cap ', ' ')\n if double_quotes and double_quotes in ['unesc', 'unescape']:\n lines[i] = lines[i].replace('\\\\\"', '\"')\n elif double_quotes and double_quotes in ['esc', 'escape']:\n m = pat_analysis_start.match(lines[i])\n if m:\n start = m.group(1)\n content = m.group(2)\n end = m.group(3)\n content = content.replace('\"', '\\\\\"')\n lines[i] = ''.join([start, '\"', content, '\"', end])\n if remove_clo and 'CL' in lines[i]:\n lines[i] = re.sub('\\\\sCL[OCB]', ' ', lines[i])\n lines[i] = re.sub('\\\\s{2,}', ' ', lines[i])\n if fix_sent_tags:\n if i - 1 > -1 and ('\"</s>\"' in lines[i - 1] or '\"<s>\"' in\n lines[i - 1]):\n lines[i] = ''\n i += 1\n return lines\n",
"role": "user"
},
{
"content": "cleanup_lines",
"role": "assistant"
}
] |
def x(lines, text, **kwargs):
from estnltk.text import Text
if not isinstance(text, Text):
raise Exception(
"(!) Unexpected type of input argument! Expected EstNLTK's Text. ")
if not isinstance(lines, list):
raise Exception(
'(!) Unexpected type of input argument! Expected a list of strings.'
)
check_tokens = False
add_word_ids = False
for argName, argVal in kwargs.items():
if argName in ['check_tokens', 'check'] and argVal in [True, False]:
check_tokens = argVal
if argName in ['add_word_ids', 'word_ids'] and argVal in [True, False]:
add_word_ids = argVal
pat_empty_line = re.compile('^\\s+$')
pat_token_line = re.compile('^"<(.+)>"$')
pat_analysis_start = re.compile('^(\\s+)"(.+)"(\\s[LZTS].*)$')
pat_sent_bound = re.compile('^("<s>"|"</s>"|<s>|</s>)\\s*$')
generalWID = 0
sentWID = 0
sentenceID = 0
j = 0
results = []
for sentence in text.divide(layer=WORDS, by=SENTENCES):
sentWID = 0
for i in range(len(sentence)):
wordJson = sentence[i]
wordStr = wordJson[TEXT]
cg3word = None
cg3analyses = []
while j < len(lines):
if pat_sent_bound.match(lines[j]) and j + 1 < len(lines) and (
len(lines[j + 1]) == 0 or pat_empty_line.match(lines[j +
1])):
j += 2
continue
token_match = pat_token_line.match(lines[j].rstrip())
if token_match:
cg3word = token_match.group(1)
j += 1
while j < len(lines):
if pat_analysis_start.match(lines[j]):
cg3analyses.append(lines[j])
else:
break
j += 1
break
j += 1
if cg3word:
if check_tokens and wordStr != cg3word:
raise Exception("(!) Unable to align EstNLTK's token nr ",
generalWID, ':', wordStr, ' vs ', cg3word)
result_dict = {START: wordJson[START], END: wordJson[END],
SENT_ID: sentenceID, PARSER_OUT: cg3analyses}
if add_word_ids:
result_dict['text_word_id'] = generalWID
result_dict['sent_word_id'] = sentWID
results.append(result_dict)
else:
if j >= len(lines):
print('(!) End of VISLCG3 analysis reached: ' + str(j) +
' ' + str(len(lines)), file=sys.stderr)
raise Exception(
'(!) Unable to find matching syntactic analysis ',
"for EstNLTK's token nr ", generalWID, ':', wordStr)
sentWID += 1
generalWID += 1
sentenceID += 1
return results
|
align_cg3_with_Text
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(lines, text, **kwargs):\n from estnltk.text import Text\n if not isinstance(text, Text):\n raise Exception(\n \"(!) Unexpected type of input argument! Expected EstNLTK's Text. \")\n if not isinstance(lines, list):\n raise Exception(\n '(!) Unexpected type of input argument! Expected a list of strings.'\n )\n check_tokens = False\n add_word_ids = False\n for argName, argVal in kwargs.items():\n if argName in ['check_tokens', 'check'] and argVal in [True, False]:\n check_tokens = argVal\n if argName in ['add_word_ids', 'word_ids'] and argVal in [True, False]:\n add_word_ids = argVal\n pat_empty_line = re.compile('^\\\\s+$')\n pat_token_line = re.compile('^\"<(.+)>\"$')\n pat_analysis_start = re.compile('^(\\\\s+)\"(.+)\"(\\\\s[LZTS].*)$')\n pat_sent_bound = re.compile('^(\"<s>\"|\"</s>\"|<s>|</s>)\\\\s*$')\n generalWID = 0\n sentWID = 0\n sentenceID = 0\n j = 0\n results = []\n for sentence in text.divide(layer=WORDS, by=SENTENCES):\n sentWID = 0\n for i in range(len(sentence)):\n wordJson = sentence[i]\n wordStr = wordJson[TEXT]\n cg3word = None\n cg3analyses = []\n while j < len(lines):\n if pat_sent_bound.match(lines[j]) and j + 1 < len(lines) and (\n len(lines[j + 1]) == 0 or pat_empty_line.match(lines[j +\n 1])):\n j += 2\n continue\n token_match = pat_token_line.match(lines[j].rstrip())\n if token_match:\n cg3word = token_match.group(1)\n j += 1\n while j < len(lines):\n if pat_analysis_start.match(lines[j]):\n cg3analyses.append(lines[j])\n else:\n break\n j += 1\n break\n j += 1\n if cg3word:\n if check_tokens and wordStr != cg3word:\n raise Exception(\"(!) Unable to align EstNLTK's token nr \",\n generalWID, ':', wordStr, ' vs ', cg3word)\n result_dict = {START: wordJson[START], END: wordJson[END],\n SENT_ID: sentenceID, PARSER_OUT: cg3analyses}\n if add_word_ids:\n result_dict['text_word_id'] = generalWID\n result_dict['sent_word_id'] = sentWID\n results.append(result_dict)\n else:\n if j >= len(lines):\n print('(!) End of VISLCG3 analysis reached: ' + str(j) +\n ' ' + str(len(lines)), file=sys.stderr)\n raise Exception(\n '(!) Unable to find matching syntactic analysis ',\n \"for EstNLTK's token nr \", generalWID, ':', wordStr)\n sentWID += 1\n generalWID += 1\n sentenceID += 1\n return results\n",
"role": "user"
},
{
"content": "align_cg3_with_Text",
"role": "assistant"
}
] |
def x(lines, **kwargs):
if not isinstance(lines, list):
raise Exception(
'(!) Unexpected type of input argument! Expected a list of strings.'
)
fix_selfrefs = True
fix_open_punct = True
unesc_quotes = True
rep_spaces = False
error_on_unexp = False
for argName, argVal in kwargs.items():
if argName in ['selfrefs', 'fix_selfrefs'] and argVal in [True, False]:
fix_selfrefs = argVal
if argName in ['fix_open_punct'] and argVal in [True, False]:
fix_open_punct = argVal
if argName in ['error_on_unexp'] and argVal in [True, False]:
error_on_unexp = argVal
if argName in ['unesc_quotes'] and argVal in [True, False]:
unesc_quotes = argVal
if argName in ['rep_spaces'] and argVal in [True, False]:
rep_spaces = argVal
pat_empty_line = re.compile('^\\s+$')
pat_token_line = re.compile('^"<(.+)>"$')
pat_analysis_line = re.compile('^\\s+"(.+)"\\s([^"]+)$')
pat_ending_pos_form = re.compile('^L\\S+\\s+\\S\\s+([^#@]+).+$')
pat_pos_form = re.compile('^\\S\\s+([^#@]+).+$')
pat_ending_pos = re.compile('^(L\\S+\\s+)?\\S\\s+[#@].+$')
pat_opening_punct = re.compile('.+\\s(Opr|Oqu|Quo)\\s')
analyses_added = 0
conll_lines = []
word_id = 1
i = 0
while i < len(lines):
line = lines[i]
if not (line.startswith(' ') or line.startswith('\t')):
if len(line) > 0 and not (line.startswith('"<s>"') or line.
startswith('"</s>"')) and not pat_empty_line.match(line):
if unesc_quotes:
line = line.replace('\\"', '"')
if analyses_added == 0 and word_id > 1:
if error_on_unexp:
raise Exception('(!) Analysis missing at line ' +
str(i) + ': ' + '\n' + lines[i - 1])
else:
print('(!) Analysis missing at line ' + str(i) +
': ' + '\n' + lines[i - 1], file=sys.stderr)
conll_lines[-1] += '\t_'
conll_lines[-1] += '\tX'
conll_lines[-1] += '\tX'
conll_lines[-1] += '\t_'
conll_lines[-1] += '\t' + str(word_id - 2)
conll_lines[-1] += '\txxx'
conll_lines[-1] += '\t_'
conll_lines[-1] += '\t_'
token_match = pat_token_line.match(line.rstrip())
if token_match:
word = token_match.group(1)
else:
raise Exception('(!) Unexpected token format: ', line)
if rep_spaces and re.search('\\s', word):
word = re.sub('\\s+', '_', word)
conll_lines.append(str(word_id) + '\t' + word)
analyses_added = 0
word_id += 1
if line.startswith('"</s>"'):
conll_lines.append('')
word_id = 1
else:
if line.count('"') > 2:
new_line = []
q_count = 0
for j in range(len(line)):
if line[j] == '"' and (j == 0 or line[j - 1] != '\\'):
q_count += 1
if q_count < 3:
new_line.append(line[j])
else:
new_line.append(line[j])
line = ''.join(new_line)
if unesc_quotes:
line = line.replace('\\"', '"')
analysis_match = pat_analysis_line.match(line)
if analysis_match and analyses_added == 0:
lemma = analysis_match.group(1)
cats = analysis_match.group(2)
if cats.startswith('Z '):
postag = 'Z'
else:
postag = cats.split()[1] if len(cats.split()) > 1 else 'X'
deprels = re.findall('(@\\S+)', cats)
deprel = deprels[0] if deprels else 'xxx'
heads = re.findall('#\\d+\\s*->\\s*(\\d+)', cats)
head = heads[0] if heads else str(word_id - 2)
m1 = pat_ending_pos_form.match(cats)
m2 = pat_pos_form.match(cats)
m3 = pat_ending_pos.match(cats)
if m1:
forms = m1.group(1).split()
elif m2:
forms = m2.group(1).split()
elif m3:
forms = ['_']
elif error_on_unexp:
raise Exception(
'(!) Unexpected format of analysis line: ' + line)
else:
postag = 'X'
forms = ['_']
print('(!) Unexpected format of analysis line: ' + line,
file=sys.stderr)
if fix_selfrefs and int(head
) == word_id - 1 and word_id - 2 > 0:
head = str(word_id - 2)
if fix_open_punct and pat_opening_punct.match(line):
head = str(word_id)
conll_lines[-1] += '\t' + lemma
conll_lines[-1] += '\t' + postag
conll_lines[-1] += '\t' + postag
conll_lines[-1] += '\t' + '|'.join(forms)
conll_lines[-1] += '\t' + head
conll_lines[-1] += '\t' + deprel
conll_lines[-1] += '\t_'
conll_lines[-1] += '\t_'
analyses_added += 1
i += 1
return conll_lines
|
convert_cg3_to_conll
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(lines, **kwargs):\n if not isinstance(lines, list):\n raise Exception(\n '(!) Unexpected type of input argument! Expected a list of strings.'\n )\n fix_selfrefs = True\n fix_open_punct = True\n unesc_quotes = True\n rep_spaces = False\n error_on_unexp = False\n for argName, argVal in kwargs.items():\n if argName in ['selfrefs', 'fix_selfrefs'] and argVal in [True, False]:\n fix_selfrefs = argVal\n if argName in ['fix_open_punct'] and argVal in [True, False]:\n fix_open_punct = argVal\n if argName in ['error_on_unexp'] and argVal in [True, False]:\n error_on_unexp = argVal\n if argName in ['unesc_quotes'] and argVal in [True, False]:\n unesc_quotes = argVal\n if argName in ['rep_spaces'] and argVal in [True, False]:\n rep_spaces = argVal\n pat_empty_line = re.compile('^\\\\s+$')\n pat_token_line = re.compile('^\"<(.+)>\"$')\n pat_analysis_line = re.compile('^\\\\s+\"(.+)\"\\\\s([^\"]+)$')\n pat_ending_pos_form = re.compile('^L\\\\S+\\\\s+\\\\S\\\\s+([^#@]+).+$')\n pat_pos_form = re.compile('^\\\\S\\\\s+([^#@]+).+$')\n pat_ending_pos = re.compile('^(L\\\\S+\\\\s+)?\\\\S\\\\s+[#@].+$')\n pat_opening_punct = re.compile('.+\\\\s(Opr|Oqu|Quo)\\\\s')\n analyses_added = 0\n conll_lines = []\n word_id = 1\n i = 0\n while i < len(lines):\n line = lines[i]\n if not (line.startswith(' ') or line.startswith('\\t')):\n if len(line) > 0 and not (line.startswith('\"<s>\"') or line.\n startswith('\"</s>\"')) and not pat_empty_line.match(line):\n if unesc_quotes:\n line = line.replace('\\\\\"', '\"')\n if analyses_added == 0 and word_id > 1:\n if error_on_unexp:\n raise Exception('(!) Analysis missing at line ' +\n str(i) + ': ' + '\\n' + lines[i - 1])\n else:\n print('(!) Analysis missing at line ' + str(i) +\n ': ' + '\\n' + lines[i - 1], file=sys.stderr)\n conll_lines[-1] += '\\t_'\n conll_lines[-1] += '\\tX'\n conll_lines[-1] += '\\tX'\n conll_lines[-1] += '\\t_'\n conll_lines[-1] += '\\t' + str(word_id - 2)\n conll_lines[-1] += '\\txxx'\n conll_lines[-1] += '\\t_'\n conll_lines[-1] += '\\t_'\n token_match = pat_token_line.match(line.rstrip())\n if token_match:\n word = token_match.group(1)\n else:\n raise Exception('(!) Unexpected token format: ', line)\n if rep_spaces and re.search('\\\\s', word):\n word = re.sub('\\\\s+', '_', word)\n conll_lines.append(str(word_id) + '\\t' + word)\n analyses_added = 0\n word_id += 1\n if line.startswith('\"</s>\"'):\n conll_lines.append('')\n word_id = 1\n else:\n if line.count('\"') > 2:\n new_line = []\n q_count = 0\n for j in range(len(line)):\n if line[j] == '\"' and (j == 0 or line[j - 1] != '\\\\'):\n q_count += 1\n if q_count < 3:\n new_line.append(line[j])\n else:\n new_line.append(line[j])\n line = ''.join(new_line)\n if unesc_quotes:\n line = line.replace('\\\\\"', '\"')\n analysis_match = pat_analysis_line.match(line)\n if analysis_match and analyses_added == 0:\n lemma = analysis_match.group(1)\n cats = analysis_match.group(2)\n if cats.startswith('Z '):\n postag = 'Z'\n else:\n postag = cats.split()[1] if len(cats.split()) > 1 else 'X'\n deprels = re.findall('(@\\\\S+)', cats)\n deprel = deprels[0] if deprels else 'xxx'\n heads = re.findall('#\\\\d+\\\\s*->\\\\s*(\\\\d+)', cats)\n head = heads[0] if heads else str(word_id - 2)\n m1 = pat_ending_pos_form.match(cats)\n m2 = pat_pos_form.match(cats)\n m3 = pat_ending_pos.match(cats)\n if m1:\n forms = m1.group(1).split()\n elif m2:\n forms = m2.group(1).split()\n elif m3:\n forms = ['_']\n elif error_on_unexp:\n raise Exception(\n '(!) 
Unexpected format of analysis line: ' + line)\n else:\n postag = 'X'\n forms = ['_']\n print('(!) Unexpected format of analysis line: ' + line,\n file=sys.stderr)\n if fix_selfrefs and int(head\n ) == word_id - 1 and word_id - 2 > 0:\n head = str(word_id - 2)\n if fix_open_punct and pat_opening_punct.match(line):\n head = str(word_id)\n conll_lines[-1] += '\\t' + lemma\n conll_lines[-1] += '\\t' + postag\n conll_lines[-1] += '\\t' + postag\n conll_lines[-1] += '\\t' + '|'.join(forms)\n conll_lines[-1] += '\\t' + head\n conll_lines[-1] += '\\t' + deprel\n conll_lines[-1] += '\\t_'\n conll_lines[-1] += '\\t_'\n analyses_added += 1\n i += 1\n return conll_lines\n",
"role": "user"
},
{
"content": "convert_cg3_to_conll",
"role": "assistant"
}
] |
def x(self, vislcg_cmd1):
for path in os.environ['PATH'].split(os.pathsep):
path1 = path.strip('"')
file1 = os.path.join(path1, vislcg_cmd1)
if os.path.isfile(file1) or os.path.isfile(file1 + '.exe'):
return True
return False
|
check_if_vislcg_is_in_path
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, vislcg_cmd1):\n for path in os.environ['PATH'].split(os.pathsep):\n path1 = path.strip('\"')\n file1 = os.path.join(path1, vislcg_cmd1)\n if os.path.isfile(file1) or os.path.isfile(file1 + '.exe'):\n return True\n return False\n",
"role": "user"
},
{
"content": "check_if_vislcg_is_in_path",
"role": "assistant"
}
] |
def x(self, input_lines, **kwargs):
split_result_lines = False
remove_info = True
for argName, argVal in kwargs.items():
if argName in ['split_result_lines', 'split_result'] and argVal in [
True, False]:
split_result_lines = argVal
if argName in ['remove_info', 'info_remover', 'clean_up'
] and argVal in [True, False]:
remove_info = argVal
temp_input_file = tempfile.NamedTemporaryFile(prefix='vislcg3_in.',
mode='w', delete=False)
temp_input_file.close()
out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')
for line in input_lines:
out_f.write(line.rstrip())
out_f.write('\n')
out_f.close()
pipeline = []
for i in range(len(self.rules_pipeline)):
rule_file = self.rules_pipeline[i]
process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.
rules_dir, rule_file)]
process = None
if i == 0:
process_cmd.extend(['-I', temp_input_file.name])
process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)
else:
process = Popen(process_cmd, stdin=pipeline[-1]['process'].
stdout, stdout=PIPE)
process_dict = {'process': process, 'cmd': process_cmd}
pipeline.append(process_dict)
for i in range(len(pipeline)):
if i != len(pipeline) - 1:
pipeline[i]['process'].stdout.close()
result = as_unicode(pipeline[-1]['process'].communicate()[0])
pipeline[-1]['process'].stdout.close()
os.remove(temp_input_file.name)
if remove_info:
result = '\n'.join(cleanup_lines(result.split('\n'), **kwargs))
return result if not split_result_lines else result.split('\n')
|
process_lines
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, input_lines, **kwargs):\n split_result_lines = False\n remove_info = True\n for argName, argVal in kwargs.items():\n if argName in ['split_result_lines', 'split_result'] and argVal in [\n True, False]:\n split_result_lines = argVal\n if argName in ['remove_info', 'info_remover', 'clean_up'\n ] and argVal in [True, False]:\n remove_info = argVal\n temp_input_file = tempfile.NamedTemporaryFile(prefix='vislcg3_in.',\n mode='w', delete=False)\n temp_input_file.close()\n out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')\n for line in input_lines:\n out_f.write(line.rstrip())\n out_f.write('\\n')\n out_f.close()\n pipeline = []\n for i in range(len(self.rules_pipeline)):\n rule_file = self.rules_pipeline[i]\n process_cmd = [self.vislcg_cmd, '-o', '-g', os.path.join(self.\n rules_dir, rule_file)]\n process = None\n if i == 0:\n process_cmd.extend(['-I', temp_input_file.name])\n process = Popen(process_cmd, stdin=PIPE, stdout=PIPE)\n else:\n process = Popen(process_cmd, stdin=pipeline[-1]['process'].\n stdout, stdout=PIPE)\n process_dict = {'process': process, 'cmd': process_cmd}\n pipeline.append(process_dict)\n for i in range(len(pipeline)):\n if i != len(pipeline) - 1:\n pipeline[i]['process'].stdout.close()\n result = as_unicode(pipeline[-1]['process'].communicate()[0])\n pipeline[-1]['process'].stdout.close()\n os.remove(temp_input_file.name)\n if remove_info:\n result = '\\n'.join(cleanup_lines(result.split('\\n'), **kwargs))\n return result if not split_result_lines else result.split('\\n')\n",
"role": "user"
},
{
"content": "process_lines",
"role": "assistant"
}
] |
def x(analysis):
assert isinstance(analysis, dict), "(!) Input 'analysis' should be a dict!"
new_dict = {POSTAG: analysis[POSTAG], ROOT: analysis[ROOT], FORM:
analysis[FORM], CLITIC: analysis[CLITIC], ENDING: analysis[ENDING]}
if LEMMA in analysis:
new_dict[LEMMA] = analysis[LEMMA]
if ROOT_TOKENS in analysis:
new_dict[ROOT_TOKENS] = analysis[ROOT_TOKENS]
return new_dict
|
copy_analysis_dict
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analysis):\n assert isinstance(analysis, dict), \"(!) Input 'analysis' should be a dict!\"\n new_dict = {POSTAG: analysis[POSTAG], ROOT: analysis[ROOT], FORM:\n analysis[FORM], CLITIC: analysis[CLITIC], ENDING: analysis[ENDING]}\n if LEMMA in analysis:\n new_dict[LEMMA] = analysis[LEMMA]\n if ROOT_TOKENS in analysis:\n new_dict[ROOT_TOKENS] = analysis[ROOT_TOKENS]\n return new_dict\n",
"role": "user"
},
{
"content": "copy_analysis_dict",
"role": "assistant"
}
] |
def x(text):
if not text.is_tagged(CLAUSES):
text.tag_clauses()
clause_indices = []
sent_id = 0
for sub_text in text.split_by(SENTENCES):
for word, cl_index in zip(sub_text.words, sub_text.clause_indices):
clause_indices.append(sent_id + cl_index)
nr_of_clauses = len(set(sub_text.clause_indices))
sent_id += nr_of_clauses
assert len(clause_indices) == len(text.words
), '(!) Number of clause indices should match nr of words!'
return clause_indices
|
get_unique_clause_indices
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n if not text.is_tagged(CLAUSES):\n text.tag_clauses()\n clause_indices = []\n sent_id = 0\n for sub_text in text.split_by(SENTENCES):\n for word, cl_index in zip(sub_text.words, sub_text.clause_indices):\n clause_indices.append(sent_id + cl_index)\n nr_of_clauses = len(set(sub_text.clause_indices))\n sent_id += nr_of_clauses\n assert len(clause_indices) == len(text.words\n ), '(!) Number of clause indices should match nr of words!'\n return clause_indices\n",
"role": "user"
},
{
"content": "get_unique_clause_indices",
"role": "assistant"
}
] |
def x(text):
if not text.is_tagged(SENTENCES):
text.tokenize_sentences()
sent_indices = []
sent_id = 0
for sub_text in text.split_by(SENTENCES):
for word in sub_text.words:
sent_indices.append(sent_id)
sent_id += 1
assert len(sent_indices) == len(text.words
), '(!) Number of sent indices should match nr of words!'
return sent_indices
|
get_unique_sentence_indices
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n if not text.is_tagged(SENTENCES):\n text.tokenize_sentences()\n sent_indices = []\n sent_id = 0\n for sub_text in text.split_by(SENTENCES):\n for word in sub_text.words:\n sent_indices.append(sent_id)\n sent_id += 1\n assert len(sent_indices) == len(text.words\n ), '(!) Number of sent indices should match nr of words!'\n return sent_indices\n",
"role": "user"
},
{
"content": "get_unique_sentence_indices",
"role": "assistant"
}
] |
def x(analysis):
assert FORM in analysis, '(!) The input analysis does not contain "' + FORM + '" key.'
for idx, pattern_items in enumerate(_noun_conversion_rules):
pattern_str, replacement = pattern_items
if pattern_str in analysis[FORM]:
analysis[FORM] = analysis[FORM].replace(pattern_str, replacement)
return analysis
|
convert_nominal_form
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analysis):\n assert FORM in analysis, '(!) The input analysis does not contain \"' + FORM + '\" key.'\n for idx, pattern_items in enumerate(_noun_conversion_rules):\n pattern_str, replacement = pattern_items\n if pattern_str in analysis[FORM]:\n analysis[FORM] = analysis[FORM].replace(pattern_str, replacement)\n return analysis\n",
"role": "user"
},
{
"content": "convert_nominal_form",
"role": "assistant"
}
] |
def x(analysis):
assert FORM in analysis, '(!) The input analysis does not contain "' + FORM + '" key.'
results = []
for root_pat, pos, form_pat, replacements in _amb_verb_conversion_rules:
if analysis[POSTAG] == pos and re.match(root_pat, analysis[ROOT]
) and re.match(form_pat, analysis[FORM]):
for replacement in replacements:
new_analysis = copy_analysis_dict(analysis)
new_form = re.sub(form_pat, replacement, analysis[FORM])
new_analysis[FORM] = new_form
results.append(new_analysis)
break
if not results:
results.append(analysis)
return results
|
convert_amb_verbal_form
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analysis):\n assert FORM in analysis, '(!) The input analysis does not contain \"' + FORM + '\" key.'\n results = []\n for root_pat, pos, form_pat, replacements in _amb_verb_conversion_rules:\n if analysis[POSTAG] == pos and re.match(root_pat, analysis[ROOT]\n ) and re.match(form_pat, analysis[FORM]):\n for replacement in replacements:\n new_analysis = copy_analysis_dict(analysis)\n new_form = re.sub(form_pat, replacement, analysis[FORM])\n new_analysis[FORM] = new_form\n results.append(new_analysis)\n break\n if not results:\n results.append(analysis)\n return results\n",
"role": "user"
},
{
"content": "convert_amb_verbal_form",
"role": "assistant"
}
] |
def x(analysis):
assert FORM in analysis, '(!) The input analysis does not contain "' + FORM + '" key.'
for form, replacement in _verb_conversion_rules:
if analysis[FORM] == form:
assert analysis[POSTAG
] == 'V', '(!) Expected analysis of verb, but got analysis of "' + str(
analysis[POSTAG]) + '" instead.'
analysis[FORM] = replacement
elif analysis[FORM].endswith(' ' + form):
parts = analysis[FORM].split()
prefix = ' '.join(parts[:len(parts) - 1])
analysis[FORM] = prefix + ' ' + replacement
return analysis
|
convert_verbal_form
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analysis):\n assert FORM in analysis, '(!) The input analysis does not contain \"' + FORM + '\" key.'\n for form, replacement in _verb_conversion_rules:\n if analysis[FORM] == form:\n assert analysis[POSTAG\n ] == 'V', '(!) Expected analysis of verb, but got analysis of \"' + str(\n analysis[POSTAG]) + '\" instead.'\n analysis[FORM] = replacement\n elif analysis[FORM].endswith(' ' + form):\n parts = analysis[FORM].split()\n prefix = ' '.join(parts[:len(parts) - 1])\n analysis[FORM] = prefix + ' ' + replacement\n return analysis\n",
"role": "user"
},
{
"content": "convert_verbal_form",
"role": "assistant"
}
] |
def x(analysis):
assert FORM in analysis, '(!) The input analysis does not contain "' + FORM + '" key.'
if 'neg' in analysis[FORM]:
analysis[FORM] = re.sub('^\\s*neg ([^,]*)$', '\\1 Neg', analysis[FORM])
analysis[FORM] = re.sub(' Neg Neg$', ' Neg', analysis[FORM])
analysis[FORM] = re.sub(' Aff Neg$', ' Neg', analysis[FORM])
analysis[FORM] = re.sub('neg', 'Neg', analysis[FORM])
analysis[FORM] = analysis[FORM].rstrip().lstrip()
assert 'neg' not in analysis[FORM
], '(!) The label "neg" should be removed by now.'
assert 'Neg' not in analysis[FORM] or 'Neg' in analysis[FORM] and analysis[
FORM].endswith('Neg'
), '(!) The label "Neg" should end the analysis line: ' + str(analysis
[FORM])
return analysis
|
make_postfixes_1
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analysis):\n assert FORM in analysis, '(!) The input analysis does not contain \"' + FORM + '\" key.'\n if 'neg' in analysis[FORM]:\n analysis[FORM] = re.sub('^\\\\s*neg ([^,]*)$', '\\\\1 Neg', analysis[FORM])\n analysis[FORM] = re.sub(' Neg Neg$', ' Neg', analysis[FORM])\n analysis[FORM] = re.sub(' Aff Neg$', ' Neg', analysis[FORM])\n analysis[FORM] = re.sub('neg', 'Neg', analysis[FORM])\n analysis[FORM] = analysis[FORM].rstrip().lstrip()\n assert 'neg' not in analysis[FORM\n ], '(!) The label \"neg\" should be removed by now.'\n assert 'Neg' not in analysis[FORM] or 'Neg' in analysis[FORM] and analysis[\n FORM].endswith('Neg'\n ), '(!) The label \"Neg\" should end the analysis line: ' + str(analysis\n [FORM])\n return analysis\n",
"role": "user"
},
{
"content": "make_postfixes_1",
"role": "assistant"
}
] |
def x(analyses, keep_forms, target_forms):
to_delete = []
for aid, analysis in enumerate(analyses):
delete = False
for target in target_forms:
if target == analysis[FORM] and not analysis[FORM] in keep_forms:
delete = True
if delete:
to_delete.append(aid)
if to_delete:
to_delete.reverse()
for aid in to_delete:
del analyses[aid]
|
keep_analyses
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analyses, keep_forms, target_forms):\n to_delete = []\n for aid, analysis in enumerate(analyses):\n delete = False\n for target in target_forms:\n if target == analysis[FORM] and not analysis[FORM] in keep_forms:\n delete = True\n if delete:\n to_delete.append(aid)\n if to_delete:\n to_delete.reverse()\n for aid in to_delete:\n del analyses[aid]\n",
"role": "user"
},
{
"content": "keep_analyses",
"role": "assistant"
}
] |
def x(words_layer):
prev_word_lemma = ''
for word_dict in words_layer:
forms = [a[FORM] for a in word_dict[ANALYSIS]]
if 'Pers Prs Imprt Sg2' in forms and 'Pers Prs Ind Neg' in forms:
if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':
_keep_analyses(word_dict[ANALYSIS], ['Pers Prs Ind Neg'], [
'Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'])
else:
_keep_analyses(word_dict[ANALYSIS], ['Pers Prs Imprt Sg2'],
['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'])
if ('Pers Prt Imprt' in forms and 'Pers Prt Ind Neg' in forms and
'Pers Prt Prc' in forms):
if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':
_keep_analyses(word_dict[ANALYSIS], ['Pers Prt Ind Neg'], [
'Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'])
else:
_keep_analyses(word_dict[ANALYSIS], ['Pers Prt Prc'], [
'Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'])
if 'Impers Prt Ind Neg' in forms and 'Impers Prt Prc' in forms:
if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':
_keep_analyses(word_dict[ANALYSIS], ['Impers Prt Ind Neg'],
['Impers Prt Ind Neg', 'Impers Prt Prc'])
else:
_keep_analyses(word_dict[ANALYSIS], ['Impers Prt Prc'], [
'Impers Prt Ind Neg', 'Impers Prt Prc'])
prev_word_lemma = word_dict[ANALYSIS][0][ROOT]
|
disambiguate_neg
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(words_layer):\n prev_word_lemma = ''\n for word_dict in words_layer:\n forms = [a[FORM] for a in word_dict[ANALYSIS]]\n if 'Pers Prs Imprt Sg2' in forms and 'Pers Prs Ind Neg' in forms:\n if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':\n _keep_analyses(word_dict[ANALYSIS], ['Pers Prs Ind Neg'], [\n 'Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'])\n else:\n _keep_analyses(word_dict[ANALYSIS], ['Pers Prs Imprt Sg2'],\n ['Pers Prs Imprt Sg2', 'Pers Prs Ind Neg'])\n if ('Pers Prt Imprt' in forms and 'Pers Prt Ind Neg' in forms and \n 'Pers Prt Prc' in forms):\n if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':\n _keep_analyses(word_dict[ANALYSIS], ['Pers Prt Ind Neg'], [\n 'Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'])\n else:\n _keep_analyses(word_dict[ANALYSIS], ['Pers Prt Prc'], [\n 'Pers Prt Imprt', 'Pers Prt Ind Neg', 'Pers Prt Prc'])\n if 'Impers Prt Ind Neg' in forms and 'Impers Prt Prc' in forms:\n if prev_word_lemma == 'ei' or prev_word_lemma == 'ega':\n _keep_analyses(word_dict[ANALYSIS], ['Impers Prt Ind Neg'],\n ['Impers Prt Ind Neg', 'Impers Prt Prc'])\n else:\n _keep_analyses(word_dict[ANALYSIS], ['Impers Prt Prc'], [\n 'Impers Prt Ind Neg', 'Impers Prt Prc'])\n prev_word_lemma = word_dict[ANALYSIS][0][ROOT]\n",
"role": "user"
},
{
"content": "disambiguate_neg",
"role": "assistant"
}
] |
def x(words_layer, text, scope=CLAUSES):
assert scope in [CLAUSES, SENTENCES
], '(!) The scope should be either "clauses" or "sentences".'
group_indices = get_unique_clause_indices(text
) if scope == CLAUSES else get_unique_sentence_indices(text)
i = 0
gr_2nd_person_pron = {}
while i < len(words_layer):
gr_index = group_indices[i]
if gr_index not in gr_2nd_person_pron:
j = i
gr_2nd_person_pron_found = False
while j < len(words_layer):
if group_indices[j] == gr_index:
forms = [a[FORM] for a in words_layer[j][ANALYSIS]]
lemmas = [a[ROOT] for a in words_layer[j][ANALYSIS]]
if 'sina' in lemmas and 'Sg Nom' in forms:
gr_2nd_person_pron_found = True
break
if group_indices[j] >= gr_index + 10:
break
j += 1
gr_2nd_person_pron[gr_index] = gr_2nd_person_pron_found
forms = [a[FORM] for a in words_layer[i][ANALYSIS]]
if 'Pers Prt Ind Pl3 Aff' in forms and 'Pers Prt Ind Sg2 Aff' in forms:
if not gr_2nd_person_pron[gr_index]:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prt Ind Pl3 Aff'], ['Pers Prt Ind Pl3 Aff',
'Pers Prt Ind Sg2 Aff'])
else:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prt Ind Sg2 Aff'], ['Pers Prt Ind Pl3 Aff',
'Pers Prt Ind Sg2 Aff'])
if ('Pers Prs Cond Pl3 Aff' in forms and 'Pers Prs Cond Sg2 Aff' in
forms):
if not gr_2nd_person_pron[gr_index]:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prs Cond Pl3 Aff'], ['Pers Prs Cond Pl3 Aff',
'Pers Prs Cond Sg2 Aff'])
else:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prs Cond Sg2 Aff'], ['Pers Prs Cond Pl3 Aff',
'Pers Prs Cond Sg2 Aff'])
if ('Pers Prt Cond Pl3 Aff' in forms and 'Pers Prt Cond Sg2 Aff' in
forms):
if not gr_2nd_person_pron[gr_index]:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prt Cond Pl3 Aff'], ['Pers Prt Cond Pl3 Aff',
'Pers Prt Cond Sg2 Aff'])
else:
_keep_analyses(words_layer[i][ANALYSIS], [
'Pers Prt Cond Sg2 Aff'], ['Pers Prt Cond Pl3 Aff',
'Pers Prt Cond Sg2 Aff'])
i += 1
|
disambiguate_sid_ksid
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(words_layer, text, scope=CLAUSES):\n assert scope in [CLAUSES, SENTENCES\n ], '(!) The scope should be either \"clauses\" or \"sentences\".'\n group_indices = get_unique_clause_indices(text\n ) if scope == CLAUSES else get_unique_sentence_indices(text)\n i = 0\n gr_2nd_person_pron = {}\n while i < len(words_layer):\n gr_index = group_indices[i]\n if gr_index not in gr_2nd_person_pron:\n j = i\n gr_2nd_person_pron_found = False\n while j < len(words_layer):\n if group_indices[j] == gr_index:\n forms = [a[FORM] for a in words_layer[j][ANALYSIS]]\n lemmas = [a[ROOT] for a in words_layer[j][ANALYSIS]]\n if 'sina' in lemmas and 'Sg Nom' in forms:\n gr_2nd_person_pron_found = True\n break\n if group_indices[j] >= gr_index + 10:\n break\n j += 1\n gr_2nd_person_pron[gr_index] = gr_2nd_person_pron_found\n forms = [a[FORM] for a in words_layer[i][ANALYSIS]]\n if 'Pers Prt Ind Pl3 Aff' in forms and 'Pers Prt Ind Sg2 Aff' in forms:\n if not gr_2nd_person_pron[gr_index]:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prt Ind Pl3 Aff'], ['Pers Prt Ind Pl3 Aff',\n 'Pers Prt Ind Sg2 Aff'])\n else:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prt Ind Sg2 Aff'], ['Pers Prt Ind Pl3 Aff',\n 'Pers Prt Ind Sg2 Aff'])\n if ('Pers Prs Cond Pl3 Aff' in forms and 'Pers Prs Cond Sg2 Aff' in\n forms):\n if not gr_2nd_person_pron[gr_index]:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prs Cond Pl3 Aff'], ['Pers Prs Cond Pl3 Aff',\n 'Pers Prs Cond Sg2 Aff'])\n else:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prs Cond Sg2 Aff'], ['Pers Prs Cond Pl3 Aff',\n 'Pers Prs Cond Sg2 Aff'])\n if ('Pers Prt Cond Pl3 Aff' in forms and 'Pers Prt Cond Sg2 Aff' in\n forms):\n if not gr_2nd_person_pron[gr_index]:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prt Cond Pl3 Aff'], ['Pers Prt Cond Pl3 Aff',\n 'Pers Prt Cond Sg2 Aff'])\n else:\n _keep_analyses(words_layer[i][ANALYSIS], [\n 'Pers Prt Cond Sg2 Aff'], ['Pers Prt Cond Pl3 Aff',\n 'Pers Prt Cond Sg2 Aff'])\n i += 1\n",
"role": "user"
},
{
"content": "disambiguate_sid_ksid",
"role": "assistant"
}
] |
def x(words_layer):
for word_dict in words_layer:
for analysis in word_dict[ANALYSIS]:
analysis[FORM] = re.sub('(Sg|Pl)([123])', '\\1 \\2', analysis[FORM]
)
return words_layer
|
make_postfixes_2
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(words_layer):\n for word_dict in words_layer:\n for analysis in word_dict[ANALYSIS]:\n analysis[FORM] = re.sub('(Sg|Pl)([123])', '\\\\1 \\\\2', analysis[FORM]\n )\n return words_layer\n",
"role": "user"
},
{
"content": "make_postfixes_2",
"role": "assistant"
}
] |
def x(analyses):
resulting_analyses = []
for analysis in analyses:
new_analyses = [copy_analysis_dict(analysis)]
new_analyses[0] = _convert_nominal_form(new_analyses[0])
new_analyses = _convert_amb_verbal_form(new_analyses[0])
new_analyses = [_convert_verbal_form(a) for a in new_analyses]
new_analyses = [_make_postfixes_1(a) for a in new_analyses]
resulting_analyses.extend(new_analyses)
return resulting_analyses
|
convert_analysis
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(analyses):\n resulting_analyses = []\n for analysis in analyses:\n new_analyses = [copy_analysis_dict(analysis)]\n new_analyses[0] = _convert_nominal_form(new_analyses[0])\n new_analyses = _convert_amb_verbal_form(new_analyses[0])\n new_analyses = [_convert_verbal_form(a) for a in new_analyses]\n new_analyses = [_make_postfixes_1(a) for a in new_analyses]\n resulting_analyses.extend(new_analyses)\n return resulting_analyses\n",
"role": "user"
},
{
"content": "convert_analysis",
"role": "assistant"
}
] |
def x(text, layer_name=GT_WORDS):
assert WORDS in text, '(!) The input text should contain "' + str(WORDS
) + '" layer.'
assert len(text[WORDS]) == 0 or len(text[WORDS]) > 0 and ANALYSIS in text[
WORDS][0], '(!) Words in the input text should contain "' + str(
ANALYSIS) + '" layer.'
new_words_layer = []
for word in text[WORDS]:
new_analysis = []
new_analysis.extend(convert_analysis(word[ANALYSIS]))
new_words_layer.append({TEXT: word[TEXT], ANALYSIS: new_analysis,
START: word[START], END: word[END]})
_disambiguate_neg(new_words_layer)
_disambiguate_sid_ksid(new_words_layer, text, scope=CLAUSES)
_disambiguate_sid_ksid(new_words_layer, text, scope=SENTENCES)
_make_postfixes_2(new_words_layer)
if layer_name != WORDS:
text[layer_name] = new_words_layer
else:
for wid, new_word in enumerate(new_words_layer):
text[WORDS][wid] = new_word
return text
|
convert_to_gt
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text, layer_name=GT_WORDS):\n assert WORDS in text, '(!) The input text should contain \"' + str(WORDS\n ) + '\" layer.'\n assert len(text[WORDS]) == 0 or len(text[WORDS]) > 0 and ANALYSIS in text[\n WORDS][0], '(!) Words in the input text should contain \"' + str(\n ANALYSIS) + '\" layer.'\n new_words_layer = []\n for word in text[WORDS]:\n new_analysis = []\n new_analysis.extend(convert_analysis(word[ANALYSIS]))\n new_words_layer.append({TEXT: word[TEXT], ANALYSIS: new_analysis,\n START: word[START], END: word[END]})\n _disambiguate_neg(new_words_layer)\n _disambiguate_sid_ksid(new_words_layer, text, scope=CLAUSES)\n _disambiguate_sid_ksid(new_words_layer, text, scope=SENTENCES)\n _make_postfixes_2(new_words_layer)\n if layer_name != WORDS:\n text[layer_name] = new_words_layer\n else:\n for wid, new_word in enumerate(new_words_layer):\n text[WORDS][wid] = new_word\n return text\n",
"role": "user"
},
{
"content": "convert_to_gt",
"role": "assistant"
}
] |
def x(root, pos, form):
import sys
result = {CLITIC: '', ENDING: '', FORM: form, POSTAG: pos, ROOT: ''}
breakpoint = -1
for i in range(len(root) - 1, -1, -1):
if root[i] == '+':
breakpoint = i
break
if breakpoint == -1:
result[ROOT] = root
result[ENDING] = '0'
if not re.match('^\\W+$', root):
try:
print(' No breakpoint found from: ', root, pos, form, file=
sys.stderr)
except UnicodeEncodeError:
print(' No breakpoint found from input *root*!', file=sys.
stderr)
else:
result[ROOT] = root[0:breakpoint]
result[ENDING] = root[breakpoint + 1:]
if result[ENDING].endswith('ki') and len(result[ENDING]) > 2:
result[CLITIC] = 'ki'
result[ENDING] = re.sub('ki$', '', result[ENDING])
if result[ENDING].endswith('gi') and len(result[ENDING]) > 2:
result[CLITIC] = 'gi'
result[ENDING] = re.sub('gi$', '', result[ENDING])
return result
|
get_analysis_dict
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(root, pos, form):\n import sys\n result = {CLITIC: '', ENDING: '', FORM: form, POSTAG: pos, ROOT: ''}\n breakpoint = -1\n for i in range(len(root) - 1, -1, -1):\n if root[i] == '+':\n breakpoint = i\n break\n if breakpoint == -1:\n result[ROOT] = root\n result[ENDING] = '0'\n if not re.match('^\\\\W+$', root):\n try:\n print(' No breakpoint found from: ', root, pos, form, file=\n sys.stderr)\n except UnicodeEncodeError:\n print(' No breakpoint found from input *root*!', file=sys.\n stderr)\n else:\n result[ROOT] = root[0:breakpoint]\n result[ENDING] = root[breakpoint + 1:]\n if result[ENDING].endswith('ki') and len(result[ENDING]) > 2:\n result[CLITIC] = 'ki'\n result[ENDING] = re.sub('ki$', '', result[ENDING])\n if result[ENDING].endswith('gi') and len(result[ENDING]) > 2:\n result[CLITIC] = 'gi'\n result[ENDING] = re.sub('gi$', '', result[ENDING])\n return result\n",
"role": "user"
},
{
"content": "get_analysis_dict",
"role": "assistant"
}
] |
def x(file_name, layer_name=WORDS, keep_init_lines=False):
from nltk.tokenize.simple import LineTokenizer
from nltk.tokenize.regexp import RegexpTokenizer
from estnltk import Text
init_lines = []
words = []
sentence = []
sentences = []
prev_sent_id = -1
prev_word_id = -1
in_f = codecs.open(file_name, mode='r', encoding='utf-8')
for line in in_f:
fields = line.split('\t')
assert len(fields
) == 8, '(!) Unexpected number of fields in the line: ' + str(len
(fields))
sent_id = fields[0]
word_id = fields[1]
clause_id = fields[2]
token = fields[3]
if prev_sent_id != sent_id:
if sentence:
sentences.append(' '.join(sentence))
sentence = []
if prev_word_id != word_id:
sentence.append(token)
word = {TEXT: token, ANALYSIS: []}
words.append(word)
lemma = fields[4]
root = fields[5]
pos = fields[6]
form = fields[7].rstrip()
ending = ''
clitic = ''
analysis = get_analysis_dict(root, pos, form)
analysis[LEMMA] = lemma
words[-1][ANALYSIS].append(analysis)
prev_sent_id = sent_id
prev_word_id = word_id
if keep_init_lines:
init_lines.append([sent_id + ' ' + word_id, line])
in_f.close()
if sentence:
sentences.append(' '.join(sentence))
kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),
'sentence_tokenizer': LineTokenizer()}
from estnltk.text import Text
text = Text('\n'.join(sentences), **kwargs4text)
text.tokenize_words()
assert len(text[WORDS]) == len(words
), '(!) Number of words from input does not match with the number of words in EstNLTK Text: ' + str(
len(text[WORDS])) + ' != ' + str(len(words))
if layer_name != WORDS:
text[layer_name] = []
for word in text[WORDS]:
text[layer_name].append({START: word[START], END: word[END],
TEXT: word[TEXT]})
for wid, word in enumerate(text[WORDS]):
text[layer_name][wid][ANALYSIS] = words[wid][ANALYSIS]
if layer_name == WORDS:
assert text.is_tagged(ANALYSIS
), '(!) The layer of analysis should exist by now!'
if keep_init_lines:
text['init_lines'] = []
i = 0
for wid, word in enumerate(text[layer_name]):
words_lines = []
while i < len(init_lines):
[lid, line] = init_lines[i]
if not words_lines or words_lines[-1][0] == lid:
words_lines.append([lid, line])
else:
break
i += 1
text['init_lines'].append({START: word[START], END: word[END],
'lines': [l[1] for l in words_lines]})
assert len(text['init_lines']) == len(text[layer_name]
), '(!) The number of initial lines should match the number of words in text!'
return text
|
read_text_from_idx_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name, layer_name=WORDS, keep_init_lines=False):\n from nltk.tokenize.simple import LineTokenizer\n from nltk.tokenize.regexp import RegexpTokenizer\n from estnltk import Text\n init_lines = []\n words = []\n sentence = []\n sentences = []\n prev_sent_id = -1\n prev_word_id = -1\n in_f = codecs.open(file_name, mode='r', encoding='utf-8')\n for line in in_f:\n fields = line.split('\\t')\n assert len(fields\n ) == 8, '(!) Unexpected number of fields in the line: ' + str(len\n (fields))\n sent_id = fields[0]\n word_id = fields[1]\n clause_id = fields[2]\n token = fields[3]\n if prev_sent_id != sent_id:\n if sentence:\n sentences.append(' '.join(sentence))\n sentence = []\n if prev_word_id != word_id:\n sentence.append(token)\n word = {TEXT: token, ANALYSIS: []}\n words.append(word)\n lemma = fields[4]\n root = fields[5]\n pos = fields[6]\n form = fields[7].rstrip()\n ending = ''\n clitic = ''\n analysis = get_analysis_dict(root, pos, form)\n analysis[LEMMA] = lemma\n words[-1][ANALYSIS].append(analysis)\n prev_sent_id = sent_id\n prev_word_id = word_id\n if keep_init_lines:\n init_lines.append([sent_id + ' ' + word_id, line])\n in_f.close()\n if sentence:\n sentences.append(' '.join(sentence))\n kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),\n 'sentence_tokenizer': LineTokenizer()}\n from estnltk.text import Text\n text = Text('\\n'.join(sentences), **kwargs4text)\n text.tokenize_words()\n assert len(text[WORDS]) == len(words\n ), '(!) Number of words from input does not match with the number of words in EstNLTK Text: ' + str(\n len(text[WORDS])) + ' != ' + str(len(words))\n if layer_name != WORDS:\n text[layer_name] = []\n for word in text[WORDS]:\n text[layer_name].append({START: word[START], END: word[END],\n TEXT: word[TEXT]})\n for wid, word in enumerate(text[WORDS]):\n text[layer_name][wid][ANALYSIS] = words[wid][ANALYSIS]\n if layer_name == WORDS:\n assert text.is_tagged(ANALYSIS\n ), '(!) The layer of analysis should exist by now!'\n if keep_init_lines:\n text['init_lines'] = []\n i = 0\n for wid, word in enumerate(text[layer_name]):\n words_lines = []\n while i < len(init_lines):\n [lid, line] = init_lines[i]\n if not words_lines or words_lines[-1][0] == lid:\n words_lines.append([lid, line])\n else:\n break\n i += 1\n text['init_lines'].append({START: word[START], END: word[END],\n 'lines': [l[1] for l in words_lines]})\n assert len(text['init_lines']) == len(text[layer_name]\n ), '(!) The number of initial lines should match the number of words in text!'\n return text\n",
"role": "user"
},
{
"content": "read_text_from_idx_file",
"role": "assistant"
}
] |
def x(original, converted):
from estnltk.syntax.syntax_preprocessing import convert_Text_to_mrf
old_layer_mrf = convert_Text_to_mrf(original)
new_layer_mrf = convert_Text_to_mrf(converted)
max_len_1 = max([len(l) for l in old_layer_mrf])
max_len_2 = max([len(l) for l in new_layer_mrf])
max_len = max(max_len_1, max_len_2)
format_str = '{:<' + str(max_len + 1) + '}'
i = 0
j = 0
comp_lines = []
diff_found = False
while i < len(old_layer_mrf) or j < len(new_layer_mrf):
l1 = old_layer_mrf[i]
l2 = new_layer_mrf[j]
if not l1.startswith(' ') and not l2.startswith(' '):
diff = '*** ' if format_str.format(l1) != format_str.format(l2
) else ' '
comp_lines.append(diff + format_str.format(l1) + format_str.
format(l2))
if diff == '*** ':
diff_found = True
i += 1
j += 1
else:
while i < len(old_layer_mrf) or j < len(new_layer_mrf):
l1 = old_layer_mrf[i]
l2 = new_layer_mrf[j]
if l1.startswith(' ') and l2.startswith(' '):
diff = '*** ' if format_str.format(l1
) != format_str.format(l2) else ' '
comp_lines.append(diff + format_str.format(l1) +
format_str.format(l2))
if diff == '*** ':
diff_found = True
i += 1
j += 1
elif l1.startswith(' ') and not l2.startswith(' '):
diff = '*** '
comp_lines.append(diff + format_str.format(l1) +
format_str.format(' '))
diff_found = True
i += 1
elif not l1.startswith(' ') and l2.startswith(' '):
diff = '*** '
comp_lines.append(diff + format_str.format(' ') +
format_str.format(l2))
diff_found = True
j += 1
else:
break
return '\n'.join(comp_lines), diff_found
|
get_original_vs_converted_diff
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(original, converted):\n from estnltk.syntax.syntax_preprocessing import convert_Text_to_mrf\n old_layer_mrf = convert_Text_to_mrf(original)\n new_layer_mrf = convert_Text_to_mrf(converted)\n max_len_1 = max([len(l) for l in old_layer_mrf])\n max_len_2 = max([len(l) for l in new_layer_mrf])\n max_len = max(max_len_1, max_len_2)\n format_str = '{:<' + str(max_len + 1) + '}'\n i = 0\n j = 0\n comp_lines = []\n diff_found = False\n while i < len(old_layer_mrf) or j < len(new_layer_mrf):\n l1 = old_layer_mrf[i]\n l2 = new_layer_mrf[j]\n if not l1.startswith(' ') and not l2.startswith(' '):\n diff = '*** ' if format_str.format(l1) != format_str.format(l2\n ) else ' '\n comp_lines.append(diff + format_str.format(l1) + format_str.\n format(l2))\n if diff == '*** ':\n diff_found = True\n i += 1\n j += 1\n else:\n while i < len(old_layer_mrf) or j < len(new_layer_mrf):\n l1 = old_layer_mrf[i]\n l2 = new_layer_mrf[j]\n if l1.startswith(' ') and l2.startswith(' '):\n diff = '*** ' if format_str.format(l1\n ) != format_str.format(l2) else ' '\n comp_lines.append(diff + format_str.format(l1) +\n format_str.format(l2))\n if diff == '*** ':\n diff_found = True\n i += 1\n j += 1\n elif l1.startswith(' ') and not l2.startswith(' '):\n diff = '*** '\n comp_lines.append(diff + format_str.format(l1) +\n format_str.format(' '))\n diff_found = True\n i += 1\n elif not l1.startswith(' ') and l2.startswith(' '):\n diff = '*** '\n comp_lines.append(diff + format_str.format(' ') +\n format_str.format(l2))\n diff_found = True\n j += 1\n else:\n break\n return '\\n'.join(comp_lines), diff_found\n",
"role": "user"
},
{
"content": "get_original_vs_converted_diff",
"role": "assistant"
}
] |
def x(foundChains, removeOverlapping=True, removeSingleAraAndEi=False):
toDelete = []
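    # First collect the indices of redundant chains, then remove them in one pass
    # (removing while iterating would skip elements).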
for i in range(len(foundChains)):
matchObj1 = foundChains[i]
if removeOverlapping:
for j in range(i + 1, len(foundChains)):
matchObj2 = foundChains[j]
if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX
] == matchObj2[CLAUSE_IDX]:
phrase1 = set(matchObj1[PHRASE])
phrase2 = set(matchObj2[PHRASE])
intersect = phrase1.intersection(phrase2)
if len(intersect) > 0:
minWid1 = min(matchObj1[PHRASE])
minWid2 = min(matchObj2[PHRASE])
if minWid1 < minWid2:
matchObj1[OTHER_VERBS] = True
toDelete.append(j)
else:
matchObj2[OTHER_VERBS] = True
toDelete.append(i)
if removeSingleAraAndEi:
if len(matchObj1[PATTERN]) == 1 and re.match('^(ei|ära)$',
matchObj1[PATTERN][0]):
toDelete.append(i)
if toDelete:
if len(set(toDelete)) != len(toDelete):
toDelete = list(set(toDelete))
toDelete = [foundChains[i] for i in toDelete]
for verbObj in toDelete:
foundChains.remove(verbObj)
|
removeRedundantVerbChains
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(foundChains, removeOverlapping=True, removeSingleAraAndEi=False):\n toDelete = []\n for i in range(len(foundChains)):\n matchObj1 = foundChains[i]\n if removeOverlapping:\n for j in range(i + 1, len(foundChains)):\n matchObj2 = foundChains[j]\n if matchObj1 != matchObj2 and matchObj1[CLAUSE_IDX\n ] == matchObj2[CLAUSE_IDX]:\n phrase1 = set(matchObj1[PHRASE])\n phrase2 = set(matchObj2[PHRASE])\n intersect = phrase1.intersection(phrase2)\n if len(intersect) > 0:\n minWid1 = min(matchObj1[PHRASE])\n minWid2 = min(matchObj2[PHRASE])\n if minWid1 < minWid2:\n matchObj1[OTHER_VERBS] = True\n toDelete.append(j)\n else:\n matchObj2[OTHER_VERBS] = True\n toDelete.append(i)\n if removeSingleAraAndEi:\n if len(matchObj1[PATTERN]) == 1 and re.match('^(ei|ära)$',\n matchObj1[PATTERN][0]):\n toDelete.append(i)\n if toDelete:\n if len(set(toDelete)) != len(toDelete):\n toDelete = list(set(toDelete))\n toDelete = [foundChains[i] for i in toDelete]\n for verbObj in toDelete:\n foundChains.remove(verbObj)\n",
"role": "user"
},
{
"content": "removeRedundantVerbChains",
"role": "assistant"
}
] |
def x(self, sentence, **kwargs):
expand2ndTime = False
removeOverlapping = True
removeSingleAraEi = True
breakOnPunctuation = False
for argName, argVal in kwargs.items():
if argName == 'expand2ndTime':
expand2ndTime = bool(argVal)
elif argName == 'removeOverlapping':
removeOverlapping = bool(argVal)
elif argName == 'removeSingleAraEi':
removeSingleAraEi = bool(argVal)
elif argName == 'breakOnPunctuation':
breakOnPunctuation = bool(argVal)
else:
raise Exception(' Unsupported argument given: ' + argName)
sentence = addWordIDs(sentence)
clauses = getClausesByClauseIDs(sentence)
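    # Detect verb chains clause by clause: first extract the basic predicates,
    # then expand them step by step (saama+tud, olema-chains, subcategorization).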
allDetectedVerbChains = []
for clauseID in clauses:
clause = clauses[clauseID]
detectedBasicChains = _extractBasicPredicateFromClause(clause, clauseID
)
allDetectedVerbChains.extend(detectedBasicChains)
_expandSaamaWithTud(clause, clauseID, allDetectedVerbChains)
_expandOlemaVerbChains(clause, clauseID, allDetectedVerbChains)
_expandVerbChainsBySubcat(clause, clauseID, allDetectedVerbChains,
self.verbInfSubcatLexicon, False, breakOnPunctuation)
_determineVerbChainContextualAmbiguity(clause, clauseID,
allDetectedVerbChains)
if expand2ndTime:
_expandVerbChainsBySubcat(clause, clauseID,
allDetectedVerbChains, self.verbInfSubcatLexicon, False,
breakOnPunctuation)
expandableEgaFound = _extractEgaNegFromSent(sentence, clauses,
allDetectedVerbChains)
if expandableEgaFound:
for clauseID in clauses:
clause = clauses[clauseID]
_expandVerbChainsBySubcat(clause, clauseID,
allDetectedVerbChains, self.verbInfSubcatLexicon, False,
breakOnPunctuation)
if self.verbNomAdvVinfExtender:
addGrammaticalFeatsAndRoots(sentence, allDetectedVerbChains)
for clauseID in clauses:
clause = clauses[clauseID]
expansionPerformed = (self.verbNomAdvVinfExtender.
extendChainsInClause(clause, clauseID, allDetectedVerbChains))
if expansionPerformed:
_determineVerbChainContextualAmbiguity(clause, clauseID,
allDetectedVerbChains)
removeRedundantVerbChains(allDetectedVerbChains, removeOverlapping=
removeOverlapping, removeSingleAraAndEi=removeSingleAraEi)
addGrammaticalFeatsAndRoots(sentence, allDetectedVerbChains)
return allDetectedVerbChains
|
detectVerbChainsFromSent
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, sentence, **kwargs):\n expand2ndTime = False\n removeOverlapping = True\n removeSingleAraEi = True\n breakOnPunctuation = False\n for argName, argVal in kwargs.items():\n if argName == 'expand2ndTime':\n expand2ndTime = bool(argVal)\n elif argName == 'removeOverlapping':\n removeOverlapping = bool(argVal)\n elif argName == 'removeSingleAraEi':\n removeSingleAraEi = bool(argVal)\n elif argName == 'breakOnPunctuation':\n breakOnPunctuation = bool(argVal)\n else:\n raise Exception(' Unsupported argument given: ' + argName)\n sentence = addWordIDs(sentence)\n clauses = getClausesByClauseIDs(sentence)\n allDetectedVerbChains = []\n for clauseID in clauses:\n clause = clauses[clauseID]\n detectedBasicChains = _extractBasicPredicateFromClause(clause, clauseID\n )\n allDetectedVerbChains.extend(detectedBasicChains)\n _expandSaamaWithTud(clause, clauseID, allDetectedVerbChains)\n _expandOlemaVerbChains(clause, clauseID, allDetectedVerbChains)\n _expandVerbChainsBySubcat(clause, clauseID, allDetectedVerbChains,\n self.verbInfSubcatLexicon, False, breakOnPunctuation)\n _determineVerbChainContextualAmbiguity(clause, clauseID,\n allDetectedVerbChains)\n if expand2ndTime:\n _expandVerbChainsBySubcat(clause, clauseID,\n allDetectedVerbChains, self.verbInfSubcatLexicon, False,\n breakOnPunctuation)\n expandableEgaFound = _extractEgaNegFromSent(sentence, clauses,\n allDetectedVerbChains)\n if expandableEgaFound:\n for clauseID in clauses:\n clause = clauses[clauseID]\n _expandVerbChainsBySubcat(clause, clauseID,\n allDetectedVerbChains, self.verbInfSubcatLexicon, False,\n breakOnPunctuation)\n if self.verbNomAdvVinfExtender:\n addGrammaticalFeatsAndRoots(sentence, allDetectedVerbChains)\n for clauseID in clauses:\n clause = clauses[clauseID]\n expansionPerformed = (self.verbNomAdvVinfExtender.\n extendChainsInClause(clause, clauseID, allDetectedVerbChains))\n if expansionPerformed:\n _determineVerbChainContextualAmbiguity(clause, clauseID,\n allDetectedVerbChains)\n removeRedundantVerbChains(allDetectedVerbChains, removeOverlapping=\n removeOverlapping, removeSingleAraAndEi=removeSingleAraEi)\n addGrammaticalFeatsAndRoots(sentence, allDetectedVerbChains)\n return allDetectedVerbChains\n",
"role": "user"
},
{
"content": "detectVerbChainsFromSent",
"role": "assistant"
}
] |
def x(text, openDelim, closeDelim):
openPat = '|'.join([re.escape(x) for x in openDelim])
afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in zip
(openDelim, closeDelim)}
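    # For every opening delimiter, precompile a pattern that matches either any
    # opening delimiter or that opener's own closing delimiter.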
stack = []
start = 0
cur = 0
end = len(text)
startSet = False
startPat = re.compile(openPat)
nextPat = startPat
while True:
next = nextPat.search(text, cur)
if not next:
return
if not startSet:
start = next.start()
startSet = True
delim = next.group(0)
if delim in openDelim:
stack.append(delim)
nextPat = afterPat[delim]
else:
opening = stack.pop()
if stack:
nextPat = afterPat[stack[-1]]
else:
yield start, next.end()
nextPat = startPat
start = next.end()
startSet = False
cur = next.end()
|
findBalanced
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text, openDelim, closeDelim):\n openPat = '|'.join([re.escape(x) for x in openDelim])\n afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in zip\n (openDelim, closeDelim)}\n stack = []\n start = 0\n cur = 0\n end = len(text)\n startSet = False\n startPat = re.compile(openPat)\n nextPat = startPat\n while True:\n next = nextPat.search(text, cur)\n if not next:\n return\n if not startSet:\n start = next.start()\n startSet = True\n delim = next.group(0)\n if delim in openDelim:\n stack.append(delim)\n nextPat = afterPat[delim]\n else:\n opening = stack.pop()\n if stack:\n nextPat = afterPat[stack[-1]]\n else:\n yield start, next.end()\n nextPat = startPat\n start = next.end()\n startSet = False\n cur = next.end()\n",
"role": "user"
},
{
"content": "findBalanced",
"role": "assistant"
}
] |
def x(s, encoding='utf-8'):
if isinstance(s, six.text_type):
return s
elif isinstance(s, six.binary_type):
return s.decode(encoding)
else:
raise ValueError('Can only convert types {0} and {1}'.format(six.
text_type, six.binary_type))
|
as_unicode
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(s, encoding='utf-8'):\n if isinstance(s, six.text_type):\n return s\n elif isinstance(s, six.binary_type):\n return s.decode(encoding)\n else:\n raise ValueError('Can only convert types {0} and {1}'.format(six.\n text_type, six.binary_type))\n",
"role": "user"
},
{
"content": "as_unicode",
"role": "assistant"
}
] |
def x(s, encoding='utf-8'):
if isinstance(s, six.text_type):
return s.encode(encoding)
elif isinstance(s, six.binary_type):
return s.decode(encoding).encode(encoding)
else:
raise ValueError('Can only convert types {0} and {1}'.format(six.
text_type, six.binary_type))
|
as_binary
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(s, encoding='utf-8'):\n if isinstance(s, six.text_type):\n return s.encode(encoding)\n elif isinstance(s, six.binary_type):\n return s.decode(encoding).encode(encoding)\n else:\n raise ValueError('Can only convert types {0} and {1}'.format(six.\n text_type, six.binary_type))\n",
"role": "user"
},
{
"content": "as_binary",
"role": "assistant"
}
] |
def x(root, prefix=u'', suffix=u''):
return [fnm for fnm in os.listdir(root) if fnm.startswith(prefix) and
fnm.endswith(suffix)]
|
get_filenames
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(root, prefix=u'', suffix=u''):\n return [fnm for fnm in os.listdir(root) if fnm.startswith(prefix) and\n fnm.endswith(suffix)]\n",
"role": "user"
},
{
"content": "get_filenames",
"role": "assistant"
}
] |
def x(self, text):
if self.search_method == 'ahocorasick':
events = self._find_keywords_ahocorasick(text.text)
elif self.search_method == 'naive':
events = self._find_keywords_naive(text.text)
events = self._resolve_conflicts(events)
if self.mapping:
for item in events:
item['type'] = self.map[text.text[item['start']:item['end']]]
if self.return_layer:
return events
else:
text[self.layer_name] = events
|
tag
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text):\n if self.search_method == 'ahocorasick':\n events = self._find_keywords_ahocorasick(text.text)\n elif self.search_method == 'naive':\n events = self._find_keywords_naive(text.text)\n events = self._resolve_conflicts(events)\n if self.mapping:\n for item in events:\n item['type'] = self.map[text.text[item['start']:item['end']]]\n if self.return_layer:\n return events\n else:\n text[self.layer_name] = events\n",
"role": "user"
},
{
"content": "tag",
"role": "assistant"
}
] |
def x(self, text):
matches = self._match(text.text)
matches = self._resolve_conflicts(matches)
if self.return_layer:
return matches
else:
text[self.layer_name] = matches
|
tag
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text):\n matches = self._match(text.text)\n matches = self._resolve_conflicts(matches)\n if self.return_layer:\n return matches\n else:\n text[self.layer_name] = matches\n",
"role": "user"
},
{
"content": "tag",
"role": "assistant"
}
] |
def x(self, text):
if self.search_method == 'ahocorasick':
events = self._find_events_ahocorasick(text.text)
elif self.search_method == 'naive':
events = self._find_events_naive(text.text)
events = self._resolve_conflicts(events)
self._event_intervals(events, text)
if self.return_layer:
return events
else:
text[self.layer_name] = events
|
tag
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text):\n if self.search_method == 'ahocorasick':\n events = self._find_events_ahocorasick(text.text)\n elif self.search_method == 'naive':\n events = self._find_events_naive(text.text)\n events = self._resolve_conflicts(events)\n self._event_intervals(events, text)\n if self.return_layer:\n return events\n else:\n text[self.layer_name] = events\n",
"role": "user"
},
{
"content": "tag",
"role": "assistant"
}
] |
def x(self, doc, m, phrase):
ph_start = m['start']
ph_end = m['end']
start_index = None
for ind, word in enumerate(doc['words']):
if word['start'] == ph_start:
start_index = ind
break
end_index = None
for ind, word in enumerate(doc['words']):
if word['end'] == ph_end:
end_index = ind
break
if start_index is not None and end_index is not None:
lem = []
for i in doc['words'][start_index:end_index + 1]:
word_lem = []
for idx, j in enumerate(i['analysis']):
if i['analysis'][idx]['partofspeech'] in ['A', 'D', 'C', 'J']:
if i['analysis'][idx]['lemma'] not in word_lem:
word_lem.append(i['analysis'][idx]['lemma'])
word_lem_str = '|'.join(word_lem)
lem.append(word_lem_str)
else:
raise Exception('Something went really wrong')
return lem
|
extract_lemmas
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, doc, m, phrase):\n ph_start = m['start']\n ph_end = m['end']\n start_index = None\n for ind, word in enumerate(doc['words']):\n if word['start'] == ph_start:\n start_index = ind\n break\n end_index = None\n for ind, word in enumerate(doc['words']):\n if word['end'] == ph_end:\n end_index = ind\n break\n if start_index is not None and end_index is not None:\n lem = []\n for i in doc['words'][start_index:end_index + 1]:\n word_lem = []\n for idx, j in enumerate(i['analysis']):\n if i['analysis'][idx]['partofspeech'] in ['A', 'D', 'C', 'J']:\n if i['analysis'][idx]['lemma'] not in word_lem:\n word_lem.append(i['analysis'][idx]['lemma'])\n word_lem_str = '|'.join(word_lem)\n lem.append(word_lem_str)\n else:\n raise Exception('Something went really wrong')\n return lem\n",
"role": "user"
},
{
"content": "extract_lemmas",
"role": "assistant"
}
] |
def x(fnm):
with codecs.open(fnm, 'rb', 'ascii') as f:
line = f.readline()
while line != '':
yield Text(json.loads(line))
line = f.readline()
|
yield_json_corpus
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fnm):\n with codecs.open(fnm, 'rb', 'ascii') as f:\n line = f.readline()\n while line != '':\n yield Text(json.loads(line))\n line = f.readline()\n",
"role": "user"
},
{
"content": "yield_json_corpus",
"role": "assistant"
}
] |
def x(documents, fnm):
with codecs.open(fnm, 'wb', 'ascii') as f:
for document in documents:
f.write(json.dumps(document) + '\n')
return documents
|
write_json_corpus
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(documents, fnm):\n with codecs.open(fnm, 'wb', 'ascii') as f:\n for document in documents:\n f.write(json.dumps(document) + '\\n')\n return documents\n",
"role": "user"
},
{
"content": "write_json_corpus",
"role": "assistant"
}
] |
def x(fnm):
with codecs.open(fnm, 'rb', 'ascii') as f:
return Text(json.loads(f.read()))
|
read_document
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(fnm):\n with codecs.open(fnm, 'rb', 'ascii') as f:\n return Text(json.loads(f.read()))\n",
"role": "user"
},
{
"content": "read_document",
"role": "assistant"
}
] |
def x(doc, fnm):
with codecs.open(fnm, 'wb', 'ascii') as f:
f.write(json.dumps(doc, indent=2))
|
write_document
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(doc, fnm):\n with codecs.open(fnm, 'wb', 'ascii') as f:\n f.write(json.dumps(doc, indent=2))\n",
"role": "user"
},
{
"content": "write_document",
"role": "assistant"
}
] |
def x(sourceSynset, relationName, targetSynset):
if not isinstance(sourceSynset, Synset):
raise TypeError('sourceSynset not Synset instance')
elif not isinstance(targetSynset, Synset):
raise TypeError('targetSynset not Synset instance')
elif relationName not in RELATION_NAMES:
raise TypeError('relationName not in RELATION_NAMES')
else:
sourceSynset.addRelation(Relation(relationName, targetSynset))
return sourceSynset
|
addRelation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sourceSynset, relationName, targetSynset):\n if not isinstance(sourceSynset, Synset):\n raise TypeError('sourceSynset not Synset instance')\n elif not isinstance(targetSynset, Synset):\n raise TypeError('targetSynset not Synset instance')\n elif relationName not in RELATION_NAMES:\n raise TypeError('relationName not in RELATION_NAMES')\n else:\n sourceSynset.addRelation(Relation(relationName, targetSynset))\n return sourceSynset\n",
"role": "user"
},
{
"content": "addRelation",
"role": "assistant"
}
] |
def x():
def x(self):
_out = ''
_n = '\n'
if len(self):
if self.parent:
_out = '%s%s%s' % (_out, PolarisText(*self.parent).out, _n)
_out = _out + _n.join(map(lambda x: x.polarisText, self))
else:
_out = ''
return _out
return locals()
|
polarisText
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n\n def x(self):\n _out = ''\n _n = '\\n'\n if len(self):\n if self.parent:\n _out = '%s%s%s' % (_out, PolarisText(*self.parent).out, _n)\n _out = _out + _n.join(map(lambda x: x.polarisText, self))\n else:\n _out = ''\n return _out\n return locals()\n",
"role": "user"
},
{
"content": "polarisText",
"role": "assistant"
}
] |
def x(self, feature):
if isinstance(feature, Feature):
self.features.append(feature)
else:
raise TypeError('feature Type should be Feature, not %s' % type(
feature))
|
addFeature
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, feature):\n if isinstance(feature, Feature):\n self.features.append(feature)\n else:\n raise TypeError('feature Type should be Feature, not %s' % type(\n feature))\n",
"role": "user"
},
{
"content": "addFeature",
"role": "assistant"
}
] |
def x(self, value):
if isinstance(value, Source_Id):
self.source_ids.append(value)
else:
        raise TypeError('value Type should be Source_Id, not %s' %
            type(value))
|
addSourceId
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, value):\n if isinstance(value, Source_Id):\n self.source_ids.append(value)\n else:\n raise (TypeError, 'source_id Type should be Source_Id, not %s' %\n type(source_id))\n",
"role": "user"
},
{
"content": "addSourceId",
"role": "assistant"
}
] |
def x(self, value):
if isinstance(value, Corpus_Id):
self.corpus_ids.append(value)
else:
        raise TypeError('value Type should be Corpus_Id, not %s' %
            type(value))
|
addCorpusId
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, value):\n if isinstance(value, Corpus_Id):\n self.corpus_ids.append(value)\n else:\n raise (TypeError, 'source_id Type should be Source_Id, not %s' %\n type(source_id))\n",
"role": "user"
},
{
"content": "addCorpusId",
"role": "assistant"
}
] |
def x(self, iStr):
self.levelNumber = None
self.DRN = None
self.fieldTag = None
self.fieldValue = None
self.noQuotes = None
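    # Lines starting with '#' are comments; other lines have the shape:
    # <level-number> [@DRN@] <field-tag> [field-value]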
if iStr and not iStr.strip().startswith('#'):
iList = iStr.strip().split(' ')
self.levelNumber = int(iList.pop(0))
if iList[0].startswith('@') and self.levelNumber != 3:
self.DRN = int(iList.pop(0).strip('@'))
else:
self.DRN = None
self.fieldTag = iList.pop(0)
if iList and (iList[0].startswith('"') or iList[0].startswith('@')):
fv = ' '.join(iList)
self.fieldValue = fv[1:-1]
elif iList:
if len(iList) == 1:
self.fieldValue = iList.pop(0)
else:
self.fieldValue = ' '.join(iList)
try:
self.fieldValue = int(self.fieldValue)
except ValueError:
self.noQuotes = True
|
parse_line
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, iStr):\n self.levelNumber = None\n self.DRN = None\n self.fieldTag = None\n self.fieldValue = None\n self.noQuotes = None\n if iStr and not iStr.strip().startswith('#'):\n iList = iStr.strip().split(' ')\n self.levelNumber = int(iList.pop(0))\n if iList[0].startswith('@') and self.levelNumber != 3:\n self.DRN = int(iList.pop(0).strip('@'))\n else:\n self.DRN = None\n self.fieldTag = iList.pop(0)\n if iList and (iList[0].startswith('\"') or iList[0].startswith('@')):\n fv = ' '.join(iList)\n self.fieldValue = fv[1:-1]\n elif iList:\n if len(iList) == 1:\n self.fieldValue = iList.pop(0)\n else:\n self.fieldValue = ' '.join(iList)\n try:\n self.fieldValue = int(self.fieldValue)\n except ValueError:\n self.noQuotes = True\n",
"role": "user"
},
{
"content": "parse_line",
"role": "assistant"
}
] |
def x(self, debug=False):
synList = []
self.milestone = 0
while self.milestone < os.path.getsize(self.fileName) - 5:
if debug:
print('self.milestone', self.milestone)
a = self.parse_synset(offset=self.milestone)
synList.append(a)
self.milestone = self.file.tell()
return synList
|
parse_wordnet
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, debug=False):\n synList = []\n self.milestone = 0\n while self.milestone < os.path.getsize(self.fileName) - 5:\n if debug:\n print('self.milestone', self.milestone)\n a = self.parse_synset(offset=self.milestone)\n synList.append(a)\n self.milestone = self.file.tell()\n return synList\n",
"role": "user"
},
{
"content": "parse_wordnet",
"role": "assistant"
}
] |
def x(self, translation):
if isinstance(translation, Translation):
self.translations.append(translation)
else:
        raise TranslationError(
            'translation Type should be Translation, not %s' % type(
            translation))
|
addTranslation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, translation):\n if isinstance(translation, Translation):\n self.translations.append(translation)\n else:\n raise (TranslationError, \n 'translation Type should be Translation, not %s' % type(\n translation))\n",
"role": "user"
},
{
"content": "addTranslation",
"role": "assistant"
}
] |
def x(self, variantFeature):
if isinstance(variantFeature, Feature):
self.features.append(variantFeature)
else:
        raise TypeError('variantFeature Type should be Feature, not %s' %
            type(variantFeature))
|
addVariantFeature
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, variantFeature):\n if isinstance(variantFeature, Feature):\n self.features.append(variantFeature)\n else:\n raise (TypeError, 'variantFeature Type should be Feature, not %s' %\n type(variantFeature))\n",
"role": "user"
},
{
"content": "addVariantFeature",
"role": "assistant"
}
] |
def x(self, usage_label):
if isinstance(usage_label, Usage_Label):
self.usage_labels.append(usage_label)
else:
        raise Usage_LabelError(
            'usage_label Type should be Usage_Label, not %s' % type(
            usage_label))
|
addUsage_Label
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, usage_label):\n if isinstance(usage_label, Usage_Label):\n self.usage_labels.append(usage_label)\n else:\n raise (Usage_LabelError, \n 'usage_label Type should be Usage_Label, not %s' % type(\n usage_label))\n",
"role": "user"
},
{
"content": "addUsage_Label",
"role": "assistant"
}
] |
def x(self, example):
if isinstance(example, Example):
self.examples.append(example)
else:
        raise ExampleError('example Type should be Example, not %s' %
            type(example))
|
addExample
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, example):\n if isinstance(example, Example):\n self.examples.append(example)\n else:\n raise (ExampleError, 'example Type should be Example, not %s' %\n type(example))\n",
"role": "user"
},
{
"content": "addExample",
"role": "assistant"
}
] |
def x():
def x(self):
if self.variants:
return self.variants[0]
else:
variant = Variant()
return variant
return locals()
|
firstVariant
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n\n def x(self):\n if self.variants:\n return self.variants[0]\n else:\n variant = Variant()\n return variant\n return locals()\n",
"role": "user"
},
{
"content": "firstVariant",
"role": "assistant"
}
] |
def x():
def x(self):
if self.variants:
return map(lambda x: x.literal, self.variants)
else:
return None
return locals()
|
literals
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x():\n\n def x(self):\n if self.variants:\n return map(lambda x: x.literal, self.variants)\n else:\n return None\n return locals()\n",
"role": "user"
},
{
"content": "literals",
"role": "assistant"
}
] |
def x(self, literal='', sense=0, gloss='', examples=None):
    var = Variant(literal=literal, sense=sense, gloss=gloss, examples=
        examples or [])
self.variants.append(var)
|
addVariantOld
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, literal='', sense=0, gloss='', examples=[]):\n var = Variant(literal=literal, sense=sense, gloss=gloss, examples=examples)\n self.variants.append(var)\n",
"role": "user"
},
{
"content": "addVariantOld",
"role": "assistant"
}
] |
def x(self, variant):
if isinstance(variant, Variant):
self.variants.append(variant)
else:
        raise VariantError('variant Type should be Variant, not %s' %
            type(variant))
|
addVariant
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, variant):\n if isinstance(variant, Variant):\n self.variants.append(variant)\n else:\n raise (VariantError, 'variant Type should be Variant, not %s' %\n type(variant))\n",
"role": "user"
},
{
"content": "addVariant",
"role": "assistant"
}
] |
def x(self, link):
if isinstance(link, InternalLink):
self.internalLinks.append(link)
else:
raise InternalLinkError('link Type should be InternalLink, not %s' %
type(link))
|
addInternalLink
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link):\n if isinstance(link, InternalLink):\n self.internalLinks.append(link)\n else:\n raise InternalLinkError('link Type should be InternalLink, not %s' %\n type(link))\n",
"role": "user"
},
{
"content": "addInternalLink",
"role": "assistant"
}
] |
def x(self, link):
if isinstance(link, Relation):
self.internalLinks.append(link)
else:
        raise TypeError('link Type should be Relation, not %s' % type(link))
|
addRelation
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link):\n if isinstance(link, Relation):\n self.internalLinks.append(link)\n else:\n raise TypeError('link Type should be InternalLink, not %s' % type(link)\n )\n",
"role": "user"
},
{
"content": "addRelation",
"role": "assistant"
}
] |
def x(self, link):
if isinstance(link, EqLink):
self.eqLinks.append(link)
else:
        raise TypeError('link Type should be EqLink, not %s' % type(link))
|
addEqLink
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, link):\n if isinstance(link, EqLink):\n self.eqLinks.append(link)\n else:\n raise TypeError('link Type should be InternalLink, not %s' % type(link)\n )\n",
"role": "user"
},
{
"content": "addEqLink",
"role": "assistant"
}
] |
def x(self, name, neg=False):
if self.internalLinks and not neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.name == name, self.internalLinks)
elif isinstance(name, list):
return filter(lambda x: x.name in name, self.internalLinks)
else:
return None
elif self.internalLinks and neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.name != name, self.internalLinks)
elif isinstance(name, list):
return filter(lambda x: x.name not in name, self.internalLinks)
else:
return None
else:
return []
|
named_relations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name, neg=False):\n if self.internalLinks and not neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.name == name, self.internalLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.name in name, self.internalLinks)\n else:\n return None\n elif self.internalLinks and neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.name != name, self.internalLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.name not in name, self.internalLinks)\n else:\n return None\n else:\n return []\n",
"role": "user"
},
{
"content": "named_relations",
"role": "assistant"
}
] |
def x(self, name, neg=False):
if self.eqLinks and not neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name == name, self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name in name, self.eqLinks)
else:
return None
elif self.eqLinks and neg:
if isinstance(name, six.string_types):
return filter(lambda x: x.relation.name != name, self.eqLinks)
elif isinstance(name, list):
return filter(lambda x: x.relation.name not in name, self.eqLinks)
else:
return None
else:
return None
|
named_eq_relations
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, name, neg=False):\n if self.eqLinks and not neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.relation.name == name, self.eqLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.relation.name in name, self.eqLinks)\n else:\n return None\n elif self.eqLinks and neg:\n if isinstance(name, six.string_types):\n return filter(lambda x: x.relation.name != name, self.eqLinks)\n elif isinstance(name, list):\n return filter(lambda x: x.relation.name not in name, self.eqLinks)\n else:\n return None\n else:\n return None\n",
"role": "user"
},
{
"content": "named_eq_relations",
"role": "assistant"
}
] |
def x(self, fileName, offset):
p = Parser()
p.file = open(fileName, 'rb')
a = p.parse_synset(offset=offset)
p.file.close()
self.__dict__.update(a.__dict__)
|
parse
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fileName, offset):\n p = Parser()\n p.file = open(fileName, 'rb')\n a = p.parse_synset(offset=offset)\n p.file.close()\n self.__dict__.update(a.__dict__)\n",
"role": "user"
},
{
"content": "parse",
"role": "assistant"
}
] |
def x(self, fileName):
f = open(fileName, 'ab')
f.write('%s%s' % (self.polarisText, Synset.linebreak))
f.close()
|
write
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, fileName):\n f = open(fileName, 'ab')\n f.write('%s%s' % (self.polarisText, Synset.linebreak))\n f.close()\n",
"role": "user"
},
{
"content": "write",
"role": "assistant"
}
] |
def x(alignments, sent_start, sent_end):
sent_len = sent_end - sent_start
j = sent_start
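    # Redirect any link that points out of the sentence: use the left neighbour
    # if available, otherwise the right one; a one-word sentence points to root (-1).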
while j < sent_start + sent_len:
for rel_id, rel in enumerate(alignments[j][PARSER_OUT]):
if int(rel[1]) >= sent_len:
wid = j - sent_start
if sent_len == 1:
rel[1] = -1
elif wid - 1 > -1:
rel[1] = wid - 1
elif wid - 1 == -1:
rel[1] = wid + 1
alignments[j][PARSER_OUT][rel_id] = rel
j += 1
|
fix_out_of_sentence_links
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alignments, sent_start, sent_end):\n sent_len = sent_end - sent_start\n j = sent_start\n while j < sent_start + sent_len:\n for rel_id, rel in enumerate(alignments[j][PARSER_OUT]):\n if int(rel[1]) >= sent_len:\n wid = j - sent_start\n if sent_len == 1:\n rel[1] = -1\n elif wid - 1 > -1:\n rel[1] = wid - 1\n elif wid - 1 == -1:\n rel[1] = wid + 1\n alignments[j][PARSER_OUT][rel_id] = rel\n j += 1\n",
"role": "user"
},
{
"content": "fix_out_of_sentence_links",
"role": "assistant"
}
] |
def x(alignments, data_type=VISLCG3_DATA, **kwargs):
if not isinstance(alignments, list):
raise Exception(
'(!) Unexpected type of input argument! Expected a list of strings.'
)
if data_type.lower() == VISLCG3_DATA:
data_type = VISLCG3_DATA
elif data_type.lower() == CONLL_DATA:
data_type = CONLL_DATA
else:
raise Exception('(!) Unexpected type of data: ', data_type)
keep_old = False
rep_miss_w_dummy = True
mark_root = False
fix_selfrefs = True
fix_out_of_sent = False
for argName, argVal in kwargs.items():
if argName in ['selfrefs', 'fix_selfrefs'] and argVal in [True, False]:
fix_selfrefs = argVal
if argName in ['keep_old'] and argVal in [True, False]:
keep_old = argVal
if argName in ['rep_miss_w_dummy', 'rep_miss'] and argVal in [True,
False]:
rep_miss_w_dummy = argVal
if argName in ['mark_root', 'root'] and argVal in [True, False]:
mark_root = argVal
if argName in ['fix_out_of_sent']:
fix_out_of_sent = bool(argVal)
prev_sent_id = -1
wordID = 0
sentStart = -1
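    # Normalise the parser output word by word; a change in SENT_ID marks a
    # sentence boundary.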
for i in range(len(alignments)):
alignment = alignments[i]
if prev_sent_id != alignment[SENT_ID]:
if fix_out_of_sent and sentStart > -1:
_fix_out_of_sentence_links(alignments, sentStart, i)
wordID = 0
sentStart = i
foundRelations = []
if data_type == VISLCG3_DATA:
for line in alignment[PARSER_OUT]:
sfuncs = pat_cg3_surface_rel.findall(line)
deprels = pat_cg3_dep_rel.findall(line)
sfuncs = ['xxx'] if not sfuncs else sfuncs
for func in sfuncs:
for relS, relT in deprels:
relS = int(relS) - 1
relT = int(relT) - 1
foundRelations.append([func, relT])
elif data_type == CONLL_DATA:
for line in alignment[PARSER_OUT]:
parts = line.split('\t')
if len(parts) != 10:
raise Exception(
'(!) Unexpected line format for CONLL data:', line)
relT = int(parts[6]) - 1
func = parts[7]
foundRelations.append([func, relT])
if not foundRelations:
if rep_miss_w_dummy:
foundRelations.append(['xxx', wordID])
else:
raise Exception('(!) Analysis missing for the word nr.',
alignment[0])
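        # A word should not be its own head: redirect self-references to a
        # neighbouring word, or to the root (-1) in a single-word sentence.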
if fix_selfrefs:
for r in range(len(foundRelations)):
if foundRelations[r][1] == wordID:
foundRelations[r][1
] = wordID - 1 if wordID - 1 > -1 else wordID + 1
if wordID - 1 == -1 and (i + 1 == len(alignments) or
alignments[i][SENT_ID] != alignments[i + 1][SENT_ID]):
foundRelations[r][1] = -1
if mark_root:
for r in range(len(foundRelations)):
if foundRelations[r][1] == -1:
foundRelations[r][0] = 'ROOT'
if not keep_old:
alignment[PARSER_OUT] = foundRelations
else:
alignment[INIT_PARSER_OUT] = alignment[PARSER_OUT]
alignment[PARSER_OUT] = foundRelations
alignments[i] = alignment
prev_sent_id = alignment[SENT_ID]
wordID += 1
if fix_out_of_sent and sentStart > -1:
_fix_out_of_sentence_links(alignments, sentStart, len(alignments))
return alignments
|
normalise_alignments
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(alignments, data_type=VISLCG3_DATA, **kwargs):\n if not isinstance(alignments, list):\n raise Exception(\n '(!) Unexpected type of input argument! Expected a list of strings.'\n )\n if data_type.lower() == VISLCG3_DATA:\n data_type = VISLCG3_DATA\n elif data_type.lower() == CONLL_DATA:\n data_type = CONLL_DATA\n else:\n raise Exception('(!) Unexpected type of data: ', data_type)\n keep_old = False\n rep_miss_w_dummy = True\n mark_root = False\n fix_selfrefs = True\n fix_out_of_sent = False\n for argName, argVal in kwargs.items():\n if argName in ['selfrefs', 'fix_selfrefs'] and argVal in [True, False]:\n fix_selfrefs = argVal\n if argName in ['keep_old'] and argVal in [True, False]:\n keep_old = argVal\n if argName in ['rep_miss_w_dummy', 'rep_miss'] and argVal in [True,\n False]:\n rep_miss_w_dummy = argVal\n if argName in ['mark_root', 'root'] and argVal in [True, False]:\n mark_root = argVal\n if argName in ['fix_out_of_sent']:\n fix_out_of_sent = bool(argVal)\n prev_sent_id = -1\n wordID = 0\n sentStart = -1\n for i in range(len(alignments)):\n alignment = alignments[i]\n if prev_sent_id != alignment[SENT_ID]:\n if fix_out_of_sent and sentStart > -1:\n _fix_out_of_sentence_links(alignments, sentStart, i)\n wordID = 0\n sentStart = i\n foundRelations = []\n if data_type == VISLCG3_DATA:\n for line in alignment[PARSER_OUT]:\n sfuncs = pat_cg3_surface_rel.findall(line)\n deprels = pat_cg3_dep_rel.findall(line)\n sfuncs = ['xxx'] if not sfuncs else sfuncs\n for func in sfuncs:\n for relS, relT in deprels:\n relS = int(relS) - 1\n relT = int(relT) - 1\n foundRelations.append([func, relT])\n elif data_type == CONLL_DATA:\n for line in alignment[PARSER_OUT]:\n parts = line.split('\\t')\n if len(parts) != 10:\n raise Exception(\n '(!) Unexpected line format for CONLL data:', line)\n relT = int(parts[6]) - 1\n func = parts[7]\n foundRelations.append([func, relT])\n if not foundRelations:\n if rep_miss_w_dummy:\n foundRelations.append(['xxx', wordID])\n else:\n raise Exception('(!) Analysis missing for the word nr.',\n alignment[0])\n if fix_selfrefs:\n for r in range(len(foundRelations)):\n if foundRelations[r][1] == wordID:\n foundRelations[r][1\n ] = wordID - 1 if wordID - 1 > -1 else wordID + 1\n if wordID - 1 == -1 and (i + 1 == len(alignments) or \n alignments[i][SENT_ID] != alignments[i + 1][SENT_ID]):\n foundRelations[r][1] = -1\n if mark_root:\n for r in range(len(foundRelations)):\n if foundRelations[r][1] == -1:\n foundRelations[r][0] = 'ROOT'\n if not keep_old:\n alignment[PARSER_OUT] = foundRelations\n else:\n alignment[INIT_PARSER_OUT] = alignment[PARSER_OUT]\n alignment[PARSER_OUT] = foundRelations\n alignments[i] = alignment\n prev_sent_id = alignment[SENT_ID]\n wordID += 1\n if fix_out_of_sent and sentStart > -1:\n _fix_out_of_sentence_links(alignments, sentStart, len(alignments))\n return alignments\n",
"role": "user"
},
{
"content": "normalise_alignments",
"role": "assistant"
}
] |
def x(file_name, layer_name=LAYER_VISLCG3, **kwargs):
clean_up = False
for argName, argVal in kwargs.items():
if argName in ['clean_up', 'cleanup'] and argVal in [True, False]:
clean_up = argVal
cg3_lines = []
in_f = codecs.open(file_name, mode='r', encoding='utf-8')
for line in in_f:
if line.startswith('#'):
continue
cg3_lines.append(line.rstrip())
in_f.close()
if clean_up:
cg3_lines = cleanup_lines(cg3_lines, **kwargs)
sentences = []
sentence = []
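    # Reconstruct plain sentences from the cg3 stream: the tokens between the
    # "<s>" and "</s>" markers form one sentence.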
for i, line in enumerate(cg3_lines):
if line == '"<s>"':
if sentence:
print('(!) Sentence begins before previous ends at line: ' +
str(i), file=sys.stderr)
sentence = []
elif pat_double_quoted.match(line
) and line != '"<s>"' and line != '"</s>"':
token_match = pat_cg3_word_token.match(line)
if token_match:
line = token_match.group(1)
else:
raise Exception('(!) Unexpected token format: ', line)
sentence.append(line)
elif line == '"</s>"':
if not sentence:
print('(!) Empty sentence at line: ' + str(i), file=sys.stderr)
sentences.append(' '.join(sentence))
sentence = []
kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),
'sentence_tokenizer': LineTokenizer()}
from estnltk.text import Text
text = Text('\n'.join(sentences), **kwargs4text)
text.tokenize_words()
alignments = align_cg3_with_Text(cg3_lines, text, **kwargs)
normalise_alignments(alignments, data_type=VISLCG3_DATA, **kwargs)
text[layer_name] = alignments
return text
|
read_text_from_cg3_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name, layer_name=LAYER_VISLCG3, **kwargs):\n clean_up = False\n for argName, argVal in kwargs.items():\n if argName in ['clean_up', 'cleanup'] and argVal in [True, False]:\n clean_up = argVal\n cg3_lines = []\n in_f = codecs.open(file_name, mode='r', encoding='utf-8')\n for line in in_f:\n if line.startswith('#'):\n continue\n cg3_lines.append(line.rstrip())\n in_f.close()\n if clean_up:\n cg3_lines = cleanup_lines(cg3_lines, **kwargs)\n sentences = []\n sentence = []\n for i, line in enumerate(cg3_lines):\n if line == '\"<s>\"':\n if sentence:\n print('(!) Sentence begins before previous ends at line: ' +\n str(i), file=sys.stderr)\n sentence = []\n elif pat_double_quoted.match(line\n ) and line != '\"<s>\"' and line != '\"</s>\"':\n token_match = pat_cg3_word_token.match(line)\n if token_match:\n line = token_match.group(1)\n else:\n raise Exception('(!) Unexpected token format: ', line)\n sentence.append(line)\n elif line == '\"</s>\"':\n if not sentence:\n print('(!) Empty sentence at line: ' + str(i), file=sys.stderr)\n sentences.append(' '.join(sentence))\n sentence = []\n kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),\n 'sentence_tokenizer': LineTokenizer()}\n from estnltk.text import Text\n text = Text('\\n'.join(sentences), **kwargs4text)\n text.tokenize_words()\n alignments = align_cg3_with_Text(cg3_lines, text, **kwargs)\n normalise_alignments(alignments, data_type=VISLCG3_DATA, **kwargs)\n text[layer_name] = alignments\n return text\n",
"role": "user"
},
{
"content": "read_text_from_cg3_file",
"role": "assistant"
}
] |
def x(file_name, layer_name=LAYER_CONLL, **kwargs):
conll_lines = []
in_f = codecs.open(file_name, mode='r', encoding='utf-8')
for line in in_f:
if line.startswith('#'):
continue
conll_lines.append(line.rstrip())
in_f.close()
sentences = []
sentence = []
for i, line in enumerate(conll_lines):
if len(line) > 0 and '\t' in line:
features = line.split('\t')
if len(features) != 10:
                raise Exception(' In file ' + file_name + ', line ' + str(i) +
                    ' with unexpected format: "' + line + '" ')
word_id = features[0]
token = features[1]
sentence.append(token)
elif len(line) == 0 or re.match('^\\s+$', line):
if sentence:
sentences.append(' '.join(sentence))
sentence = []
if sentence:
sentences.append(' '.join(sentence))
kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),
'sentence_tokenizer': LineTokenizer()}
from estnltk.text import Text
text = Text('\n'.join(sentences), **kwargs4text)
text.tokenize_words()
alignments = align_CONLL_with_Text(conll_lines, text, None, **kwargs)
normalise_alignments(alignments, data_type=CONLL_DATA, **kwargs)
text[layer_name] = alignments
return text
|
read_text_from_conll_file
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(file_name, layer_name=LAYER_CONLL, **kwargs):\n conll_lines = []\n in_f = codecs.open(file_name, mode='r', encoding='utf-8')\n for line in in_f:\n if line.startswith('#'):\n continue\n conll_lines.append(line.rstrip())\n in_f.close()\n sentences = []\n sentence = []\n for i, line in enumerate(conll_lines):\n if len(line) > 0 and '\\t' in line:\n features = line.split('\\t')\n if len(features) != 10:\n raise Exception(' In file ' + in_file + ', line ' + str(i) +\n ' with unexpected format: \"' + line + '\" ')\n word_id = features[0]\n token = features[1]\n sentence.append(token)\n elif len(line) == 0 or re.match('^\\\\s+$', line):\n if sentence:\n sentences.append(' '.join(sentence))\n sentence = []\n if sentence:\n sentences.append(' '.join(sentence))\n kwargs4text = {'word_tokenizer': RegexpTokenizer(' ', gaps=True),\n 'sentence_tokenizer': LineTokenizer()}\n from estnltk.text import Text\n text = Text('\\n'.join(sentences), **kwargs4text)\n text.tokenize_words()\n alignments = align_CONLL_with_Text(conll_lines, text, None, **kwargs)\n normalise_alignments(alignments, data_type=CONLL_DATA, **kwargs)\n text[layer_name] = alignments\n return text\n",
"role": "user"
},
{
"content": "read_text_from_conll_file",
"role": "assistant"
}
] |
def x(sentence, syntactic_relations, layer=LAYER_VISLCG3, sentence_id=0, **
kwargs):
trees_of_sentence = []
nodes = [-1]
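    # Breadth-first construction: start from the artificial root (-1) and attach
    # each token under the node that its first syntactic relation points to.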
while len(nodes) > 0:
node = nodes.pop(0)
for i, syntax_token in enumerate(syntactic_relations):
parents = [o[1] for o in syntax_token[PARSER_OUT]]
parent = parents[0]
if parent == node:
labels = [o[0] for o in syntax_token[PARSER_OUT]]
estnltk_token = sentence[i]
tree1 = Tree(estnltk_token, i, sentence_id, labels, parser=
layer)
if INIT_PARSER_OUT in syntax_token:
tree1.parser_output = syntax_token[INIT_PARSER_OUT]
tree1.syntax_token = syntax_token
if parent == -1:
trees_of_sentence.append(tree1)
elif parent == i:
trees_of_sentence.append(tree1)
else:
for root_node in trees_of_sentence:
root_node.add_child_to_subtree(parent, tree1)
if parent != i:
nodes.append(i)
return trees_of_sentence
|
build_trees_from_sentence
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sentence, syntactic_relations, layer=LAYER_VISLCG3, sentence_id=0, **\n kwargs):\n trees_of_sentence = []\n nodes = [-1]\n while len(nodes) > 0:\n node = nodes.pop(0)\n for i, syntax_token in enumerate(syntactic_relations):\n parents = [o[1] for o in syntax_token[PARSER_OUT]]\n parent = parents[0]\n if parent == node:\n labels = [o[0] for o in syntax_token[PARSER_OUT]]\n estnltk_token = sentence[i]\n tree1 = Tree(estnltk_token, i, sentence_id, labels, parser=\n layer)\n if INIT_PARSER_OUT in syntax_token:\n tree1.parser_output = syntax_token[INIT_PARSER_OUT]\n tree1.syntax_token = syntax_token\n if parent == -1:\n trees_of_sentence.append(tree1)\n elif parent == i:\n trees_of_sentence.append(tree1)\n else:\n for root_node in trees_of_sentence:\n root_node.add_child_to_subtree(parent, tree1)\n if parent != i:\n nodes.append(i)\n return trees_of_sentence\n",
"role": "user"
},
{
"content": "build_trees_from_sentence",
"role": "assistant"
}
] |
def x(text, layer, **kwargs):
from estnltk.text import Text
assert isinstance(text, Text
), "(!) Unexpected text argument! Should be Estnltk's Text object."
assert layer in text, '(!) The layer ' + str(layer
) + ' is missing from the input text.'
text_sentences = list(text.divide(layer=WORDS, by=SENTENCES))
all_sentence_trees = []
prev_sent_id = -1
norm_prev_sent_id = -1
current_sentence = []
k = 0
while k < len(text[layer]):
node_desc = text[layer][k]
if prev_sent_id != node_desc[SENT_ID] and current_sentence:
norm_prev_sent_id += 1
assert norm_prev_sent_id < len(text_sentences
), '(!) Sentence with the index ' + str(norm_prev_sent_id
) + ' not found from the input text.'
sentence = text_sentences[norm_prev_sent_id]
trees_of_sentence = build_trees_from_sentence(sentence,
current_sentence, layer, sentence_id=norm_prev_sent_id, **
kwargs)
all_sentence_trees.extend(trees_of_sentence)
current_sentence = []
current_sentence.append(node_desc)
prev_sent_id = node_desc[SENT_ID]
k += 1
if current_sentence:
norm_prev_sent_id += 1
assert norm_prev_sent_id < len(text_sentences
), '(!) Sentence with the index ' + str(norm_prev_sent_id
) + ' not found from the input text.'
sentence = text_sentences[norm_prev_sent_id]
trees_of_sentence = build_trees_from_sentence(sentence,
current_sentence, layer, sentence_id=norm_prev_sent_id, **kwargs)
all_sentence_trees.extend(trees_of_sentence)
return all_sentence_trees
|
build_trees_from_text
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text, layer, **kwargs):\n from estnltk.text import Text\n assert isinstance(text, Text\n ), \"(!) Unexpected text argument! Should be Estnltk's Text object.\"\n assert layer in text, '(!) The layer ' + str(layer\n ) + ' is missing from the input text.'\n text_sentences = list(text.divide(layer=WORDS, by=SENTENCES))\n all_sentence_trees = []\n prev_sent_id = -1\n norm_prev_sent_id = -1\n current_sentence = []\n k = 0\n while k < len(text[layer]):\n node_desc = text[layer][k]\n if prev_sent_id != node_desc[SENT_ID] and current_sentence:\n norm_prev_sent_id += 1\n assert norm_prev_sent_id < len(text_sentences\n ), '(!) Sentence with the index ' + str(norm_prev_sent_id\n ) + ' not found from the input text.'\n sentence = text_sentences[norm_prev_sent_id]\n trees_of_sentence = build_trees_from_sentence(sentence,\n current_sentence, layer, sentence_id=norm_prev_sent_id, **\n kwargs)\n all_sentence_trees.extend(trees_of_sentence)\n current_sentence = []\n current_sentence.append(node_desc)\n prev_sent_id = node_desc[SENT_ID]\n k += 1\n if current_sentence:\n norm_prev_sent_id += 1\n assert norm_prev_sent_id < len(text_sentences\n ), '(!) Sentence with the index ' + str(norm_prev_sent_id\n ) + ' not found from the input text.'\n sentence = text_sentences[norm_prev_sent_id]\n trees_of_sentence = build_trees_from_sentence(sentence,\n current_sentence, layer, sentence_id=norm_prev_sent_id, **kwargs)\n all_sentence_trees.extend(trees_of_sentence)\n return all_sentence_trees\n",
"role": "user"
},
{
"content": "build_trees_from_text",
"role": "assistant"
}
] |
def x(self, tree):
    assert isinstance(tree, Tree
        ), '(!) Unexpected type of argument for tree! Should be Tree.'
if not self.children:
self.children = []
tree.parent = self
self.children.append(tree)
|
add_child_to_self
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tree):\n assert isinstance(tree, Tree\n ), '(!) Unexpected type of argument for ' + argName + '! Should be Tree.'\n if not self.children:\n self.children = []\n tree.parent = self\n self.children.append(tree)\n",
"role": "user"
},
{
"content": "add_child_to_self",
"role": "assistant"
}
] |
def x(self, parent_word_id, tree):
if self.word_id == parent_word_id:
self.add_child_to_self(tree)
elif self.children:
for child in self.children:
child.add_child_to_subtree(parent_word_id, tree)
|
add_child_to_subtree
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, parent_word_id, tree):\n if self.word_id == parent_word_id:\n self.add_child_to_self(tree)\n elif self.children:\n for child in self.children:\n child.add_child_to_subtree(parent_word_id, tree)\n",
"role": "user"
},
{
"content": "add_child_to_subtree",
"role": "assistant"
}
] |
def x(self, **kwargs):
if self.parent == None:
return self
else:
return self.parent.get_root(**kwargs)
|
get_root
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **kwargs):\n if self.parent == None:\n return self\n else:\n return self.parent.get_root(**kwargs)\n",
"role": "user"
},
{
"content": "get_root",
"role": "assistant"
}
] |
def x(self, tree_node, **kwargs):
matches = []
syntactic_label = kwargs.get('label', None)
if syntactic_label:
matches.append(bool(tree_node.labels and syntactic_label in
tree_node.labels))
synt_label_regexp = kwargs.get('label_regexp', None)
if synt_label_regexp:
if isinstance(synt_label_regexp, basestring):
synt_label_regexp = re.compile(synt_label_regexp)
kwargs['label_regexp'] = synt_label_regexp
if isinstance(synt_label_regexp, RE_TYPE):
if tree_node.labels:
matches.append(any([(synt_label_regexp.match(label) != None
) for label in tree_node.labels]))
else:
matches.append(False)
word_template = kwargs.get('word_template', None)
if word_template:
if isinstance(word_template, WordTemplate):
matches.append(word_template.matches(tree_node.token))
else:
raise Exception(
'(!) Unexpected word_template. Should be from class WordTemplate.'
)
return len(matches) == 0 or all(matches)
|
satisfies_conditions
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tree_node, **kwargs):\n matches = []\n syntactic_label = kwargs.get('label', None)\n if syntactic_label:\n matches.append(bool(tree_node.labels and syntactic_label in\n tree_node.labels))\n synt_label_regexp = kwargs.get('label_regexp', None)\n if synt_label_regexp:\n if isinstance(synt_label_regexp, basestring):\n synt_label_regexp = re.compile(synt_label_regexp)\n kwargs['label_regexp'] = synt_label_regexp\n if isinstance(synt_label_regexp, RE_TYPE):\n if tree_node.labels:\n matches.append(any([(synt_label_regexp.match(label) != None\n ) for label in tree_node.labels]))\n else:\n matches.append(False)\n word_template = kwargs.get('word_template', None)\n if word_template:\n if isinstance(word_template, WordTemplate):\n matches.append(word_template.matches(tree_node.token))\n else:\n raise Exception(\n '(!) Unexpected word_template. Should be from class WordTemplate.'\n )\n return len(matches) == 0 or all(matches)\n",
"role": "user"
},
{
"content": "satisfies_conditions",
"role": "assistant"
}
] |
def x(self, **kwargs):
depth_limit = kwargs.get('depth_limit', 922337203685477580)
include_self = kwargs.get('include_self', False)
sorted_by_word_ids = kwargs.get('sorted', False)
subtrees = []
if include_self:
if self._satisfies_conditions(self, **kwargs):
subtrees.append(self)
if depth_limit >= 1 and self.children:
for child in self.children:
if self._satisfies_conditions(child, **kwargs):
subtrees.append(child)
kwargs['include_self'] = False
kwargs['depth_limit'] = depth_limit - 1
for child in self.children:
childs_results = child.get_children(**kwargs)
if childs_results:
subtrees.extend(childs_results)
if sorted_by_word_ids:
subtrees = sorted(subtrees, key=lambda x: x.word_id)
return subtrees
|
get_children
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, **kwargs):\n depth_limit = kwargs.get('depth_limit', 922337203685477580)\n include_self = kwargs.get('include_self', False)\n sorted_by_word_ids = kwargs.get('sorted', False)\n subtrees = []\n if include_self:\n if self._satisfies_conditions(self, **kwargs):\n subtrees.append(self)\n if depth_limit >= 1 and self.children:\n for child in self.children:\n if self._satisfies_conditions(child, **kwargs):\n subtrees.append(child)\n kwargs['include_self'] = False\n kwargs['depth_limit'] = depth_limit - 1\n for child in self.children:\n childs_results = child.get_children(**kwargs)\n if childs_results:\n subtrees.extend(childs_results)\n if sorted_by_word_ids:\n subtrees = sorted(subtrees, key=lambda x: x.word_id)\n return subtrees\n",
"role": "user"
},
{
"content": "get_children",
"role": "assistant"
}
] |
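The row above combines three concerns: filtering, a depth limit, and optional sorting by word id. This stripped-down sketch keeps just the depth-limited recursion and the sort; `Node` is a hypothetical stand-in for Tree, and the filtering predicate is omitted:

class Node:
    def __init__(self, word_id, children=()):
        self.word_id = word_id
        self.children = list(children)

    def descendants(self, depth_limit=10**18):
        # Collect children up to depth_limit levels below this node.
        found = []
        if depth_limit >= 1:
            for child in self.children:
                found.append(child)
                found.extend(child.descendants(depth_limit - 1))
        return sorted(found, key=lambda n: n.word_id)

tree = Node(1, [Node(3, [Node(2)]), Node(4)])
print([n.word_id for n in tree.descendants()])               # [2, 3, 4]
print([n.word_id for n in tree.descendants(depth_limit=1)])  # [3, 4]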
def x(self, keep_dummy_root=False, add_morph=True):
from nltk.parse.dependencygraph import DependencyGraph
graph = DependencyGraph(zero_based=True)
all_tree_nodes = [self] + self.get_children()
if keep_dummy_root:
graph.nodes[-1] = graph.nodes[0]
graph.nodes[-1].update({'address': -1})
graph.root = graph.nodes[-1]
del graph.nodes[0]
for child in all_tree_nodes:
rel = 'xxx' if not child.labels else '|'.join(child.labels)
address = child.word_id
word = child.text
graph.nodes[address].update({'address': address, 'word': child.text,
'rel': rel})
if not keep_dummy_root and child == self:
graph.root = graph.nodes[address]
if add_morph and child.morph:
lemmas = set([analysis[LEMMA] for analysis in child.morph])
postags = set([analysis[POSTAG] for analysis in child.morph])
feats = set([analysis[FORM] for analysis in child.morph])
lemma = '|'.join(list(lemmas)).replace(' ', '_')
postag = '|'.join(list(postags)).replace(' ', '_')
feats = '|'.join(list(feats)).replace(' ', '_')
graph.nodes[address].update({'tag ': postag, 'ctag': postag,
'feats': feats, 'lemma': lemma})
for child in all_tree_nodes:
deps = [] if not child.children else [c.word_id for c in child.children
]
head_address = child.word_id
for dep in deps:
graph.add_arc(head_address, dep)
if child.parent == None and keep_dummy_root:
graph.add_arc(-1, head_address)
head = -1 if not child.parent else child.parent.word_id
graph.nodes[head_address].update({'head': head})
return graph
|
as_dependencygraph
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, keep_dummy_root=False, add_morph=True):\n from nltk.parse.dependencygraph import DependencyGraph\n graph = DependencyGraph(zero_based=True)\n all_tree_nodes = [self] + self.get_children()\n if keep_dummy_root:\n graph.nodes[-1] = graph.nodes[0]\n graph.nodes[-1].update({'address': -1})\n graph.root = graph.nodes[-1]\n del graph.nodes[0]\n for child in all_tree_nodes:\n rel = 'xxx' if not child.labels else '|'.join(child.labels)\n address = child.word_id\n word = child.text\n graph.nodes[address].update({'address': address, 'word': child.text,\n 'rel': rel})\n if not keep_dummy_root and child == self:\n graph.root = graph.nodes[address]\n if add_morph and child.morph:\n lemmas = set([analysis[LEMMA] for analysis in child.morph])\n postags = set([analysis[POSTAG] for analysis in child.morph])\n feats = set([analysis[FORM] for analysis in child.morph])\n lemma = '|'.join(list(lemmas)).replace(' ', '_')\n postag = '|'.join(list(postags)).replace(' ', '_')\n feats = '|'.join(list(feats)).replace(' ', '_')\n graph.nodes[address].update({'tag ': postag, 'ctag': postag,\n 'feats': feats, 'lemma': lemma})\n for child in all_tree_nodes:\n deps = [] if not child.children else [c.word_id for c in child.children\n ]\n head_address = child.word_id\n for dep in deps:\n graph.add_arc(head_address, dep)\n if child.parent == None and keep_dummy_root:\n graph.add_arc(-1, head_address)\n head = -1 if not child.parent else child.parent.word_id\n graph.nodes[head_address].update({'head': head})\n return graph\n",
"role": "user"
},
{
"content": "as_dependencygraph",
"role": "assistant"
}
] |
def x(self):
if self.children:
depth = 1
childDepths = []
for child in self.children:
childDepths.append(child.get_tree_depth())
return depth + max(childDepths)
else:
return 0
|
get_tree_depth
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if self.children:\n depth = 1\n childDepths = []\n for child in self.children:\n childDepths.append(child.get_tree_depth())\n return depth + max(childDepths)\n else:\n return 0\n",
"role": "user"
},
{
"content": "get_tree_depth",
"role": "assistant"
}
] |
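The same recursion, reduced to a tiny function over an adjacency dict: a leaf has depth 0 and an inner node is one more than its deepest child. The dict-based tree shape is an illustration, not the class layout used above:

def tree_depth(children_of, node):
    kids = children_of.get(node, [])
    return 1 + max(tree_depth(children_of, k) for k in kids) if kids else 0

print(tree_depth({1: [2, 3], 3: [4]}, 1))  # 2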
def x(self, spacing=''):
print(spacing + ' ' + str(self.word_id) + ' ' + str(self.text))
if self.children:
spacing = spacing + ' '
for child in self.children:
child.debug_print_tree(spacing)
|
debug_print_tree
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, spacing=''):\n print(spacing + ' ' + str(self.word_id) + ' ' + str(self.text))\n if self.children:\n spacing = spacing + ' '\n for child in self.children:\n child.debug_print_tree(spacing)\n",
"role": "user"
},
{
"content": "debug_print_tree",
"role": "assistant"
}
] |
def x(self, field, regExpPattern):
compiled = re.compile(regExpPattern)
if field in self.analysisFields:
if self.analysisRules == None:
self.analysisRules = dict()
self.analysisRules[field] = compiled
else:
if self.otherRules == None:
self.otherRules = dict()
self.otherRules[field] = compiled
|
addRule
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, field, regExpPattern):\n compiled = re.compile(regExpPattern)\n if field in self.analysisFields:\n if self.analysisRules == None:\n self.analysisRules = dict()\n self.analysisRules[field] = compiled\n else:\n if self.otherRules == None:\n self.otherRules = dict()\n self.otherRules[field] = compiled\n",
"role": "user"
},
{
"content": "addRule",
"role": "assistant"
}
] |
def x(self, tokenJson):
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and self.otherRules[field].match(
tokenJson[field]) != None
otherMatches.append(match)
if not otherMatches or not all(otherMatches):
return False
elif self.analysisRules == None and all(otherMatches):
return True
if self.analysisRules != None:
assert ANALYSIS in tokenJson, 'No ANALYSIS found within token: ' + str(
tokenJson)
totalMatches = []
for analysis in tokenJson[ANALYSIS]:
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ''
match = self.analysisRules[field].match(value) != None
matches.append(match)
if not match:
break
totalMatches.append(all(matches))
return any(totalMatches)
return False
|
matches
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tokenJson):\n if self.otherRules != None:\n otherMatches = []\n for field in self.otherRules:\n match = field in tokenJson and self.otherRules[field].match(\n tokenJson[field]) != None\n otherMatches.append(match)\n if not otherMatches or not all(otherMatches):\n return False\n elif self.analysisRules == None and all(otherMatches):\n return True\n if self.analysisRules != None:\n assert ANALYSIS in tokenJson, 'No ANALYSIS found within token: ' + str(\n tokenJson)\n totalMatches = []\n for analysis in tokenJson[ANALYSIS]:\n matches = []\n for field in self.analysisRules:\n value = analysis[field] if field in analysis else ''\n match = self.analysisRules[field].match(value) != None\n matches.append(match)\n if not match:\n break\n totalMatches.append(all(matches))\n return any(totalMatches)\n return False\n",
"role": "user"
},
{
"content": "matches",
"role": "assistant"
}
] |
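The core of the matching logic above, as a self-contained toy: a token matches when at least one of its analyses satisfies every compiled rule. The field names 'partofspeech' and 'form' are placeholders for the ANALYSIS-field constants the class uses:

import re

rules = {'partofspeech': re.compile('V'), 'form': re.compile('(n|b)$')}
token = {'analysis': [
    {'partofspeech': 'V', 'form': 'b'},
    {'partofspeech': 'S', 'form': 'sg n'},
]}

def matches(token, rules):
    # Any single analysis must satisfy all rules at once.
    for analysis in token['analysis']:
        if all(rules[f].match(analysis.get(f, '')) for f in rules):
            return True
    return False

print(matches(token, rules))  # True: the first analysis satisfies both rules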
def x(self, tokenJson):
matchingResults = []
if self.otherRules != None:
otherMatches = []
for field in self.otherRules:
match = field in tokenJson and self.otherRules[field].match(
tokenJson[field]) != None
otherMatches.append(match)
if not otherMatches or not all(otherMatches):
return matchingResults
if self.analysisRules != None:
assert ANALYSIS in tokenJson, 'No ANALYSIS found within token: ' + str(
tokenJson)
for analysis in tokenJson[ANALYSIS]:
matches = []
for field in self.analysisRules:
value = analysis[field] if field in analysis else ''
match = self.analysisRules[field].match(value) != None
matches.append(match)
if matches and all(matches):
matchingResults.append(analysis)
return matchingResults
return matchingResults
|
matchingAnalyses
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tokenJson):\n matchingResults = []\n if self.otherRules != None:\n otherMatches = []\n for field in self.otherRules:\n match = field in tokenJson and self.otherRules[field].match(\n tokenJson[field]) != None\n otherMatches.append(match)\n if not otherMatches or not all(otherMatches):\n return matchingResults\n if self.analysisRules != None:\n assert ANALYSIS in tokenJson, 'No ANALYSIS found within token: ' + str(\n tokenJson)\n for analysis in tokenJson[ANALYSIS]:\n matches = []\n for field in self.analysisRules:\n value = analysis[field] if field in analysis else ''\n match = self.analysisRules[field].match(value) != None\n matches.append(match)\n if matches and all(matches):\n matchingResults.append(analysis)\n return matchingResults\n return matchingResults\n",
"role": "user"
},
{
"content": "matchingAnalyses",
"role": "assistant"
}
] |
def x(self, tokenJson):
matchingResults = self.matchingAnalyses(tokenJson)
if matchingResults:
indexes = [tokenJson[ANALYSIS].index(analysis) for analysis in
matchingResults]
return indexes
return matchingResults
|
matchingAnalyseIndexes
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tokenJson):\n matchingResults = self.matchingAnalyses(tokenJson)\n if matchingResults:\n indexes = [tokenJson[ANALYSIS].index(analysis) for analysis in\n matchingResults]\n return indexes\n return matchingResults\n",
"role": "user"
},
{
"content": "matchingAnalyseIndexes",
"role": "assistant"
}
] |
def x(self, tokenArray):
assert isinstance(tokenArray, list), 'tokenArray should be list ' + str(
tokenArray)
matchingPos = []
for i in range(len(tokenArray)):
token = tokenArray[i]
if self.matches(token):
matchingPos.append(i)
return matchingPos
|
matchingPositions
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tokenArray):\n assert isinstance(tokenArray, list), 'tokenArray should be list ' + str(\n tokenArray)\n matchingPos = []\n for i in range(len(tokenArray)):\n token = tokenArray[i]\n if self.matches(token):\n matchingPos.append(i)\n return matchingPos\n",
"role": "user"
},
{
"content": "matchingPositions",
"role": "assistant"
}
] |
def x(self, tokenArray):
assert isinstance(tokenArray, list), 'tokenArray should be list ' + str(
tokenArray)
matchingTok = []
for i in range(len(tokenArray)):
token = tokenArray[i]
if self.matches(token):
matchingTok.append(token)
return matchingTok
|
matchingTokens
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, tokenArray):\n assert isinstance(tokenArray, list), 'tokenArray should be list ' + str(\n tokenArray)\n matchingTok = []\n for i in range(len(tokenArray)):\n token = tokenArray[i]\n if self.matches(token):\n matchingTok.append(token)\n return matchingTok\n",
"role": "user"
},
{
"content": "matchingTokens",
"role": "assistant"
}
] |
def x(self, text, layer, addEmptyAnnotations=True):
from estnltk.text import Text
assert isinstance(text, Text
), 'the input should be Text, but it is: ' + str(text)
tokens = self.matchingTokens(text[WORDS])
if not addEmptyAnnotations and not tokens:
return text
if tokens:
text[layer] = [{START: t[START], END: t[END], TEXT: t[TEXT]} for t in
tokens]
else:
text[layer] = []
return text
|
annotateText
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, layer, addEmptyAnnotations=True):\n from estnltk.text import Text\n assert isinstance(text, Text\n ), 'the input should be Text, but it is: ' + str(text)\n tokens = self.matchingTokens(text[WORDS])\n if not addEmptyAnnotations and not tokens:\n return text\n if tokens:\n text[layer] = [{START: t[START], END: t[END], TEXT: t[TEXT]} for t in\n tokens]\n else:\n text[layer] = []\n return text\n",
"role": "user"
},
{
"content": "annotateText",
"role": "assistant"
}
] |
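A hedged sketch of the annotation step: matched tokens are copied into a named layer as {start, end, text} records, and an empty layer is written only when requested. Plain dicts stand in for the estnltk Text object and its constants:

def annotate(text_obj, layer, matching_tokens, add_empty=True):
    if not matching_tokens and not add_empty:
        return text_obj
    text_obj[layer] = [{'start': t['start'], 'end': t['end'],
                        'text': t['text']} for t in matching_tokens]
    return text_obj

doc = {'words': [{'start': 0, 'end': 4, 'text': 'Tere'}]}
print(annotate(doc, 'greetings', doc['words']))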
def x(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage
):
is_root_node = False
if cache is None:
cache = {}
is_root_node = True
if id(self) in cache:
return cache[id(self)]
matches = self.get_matches_without_cache(text, cache=cache)
cache[id(self)] = matches
if is_root_node and conflict_resolver is not None:
return conflict_resolver(matches)
return matches
|
get_matches
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, text, cache=None, conflict_resolver=resolve_using_maximal_coverage\n ):\n is_root_node = False\n if cache is None:\n cache = {}\n is_root_node = True\n if id(self) in cache:\n return cache[id(self)]\n matches = self.get_matches_without_cache(text, cache=cache)\n cache[id(self)] = matches\n if is_root_node and conflict_resolver is not None:\n return conflict_resolver(matches)\n return matches\n",
"role": "user"
},
{
"content": "get_matches",
"role": "assistant"
}
] |
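The caching pattern in the row above is worth isolating: results are memoised per node in a dict keyed by id(node), so shared sub-nodes are evaluated once, and extra root-only work (the conflict resolver) runs a single time. In this sketch `compute` is a hypothetical stand-in for get_matches_without_cache:

class Expr:
    def __init__(self, children=()):
        self.children = list(children)

    def compute(self, text, cache):
        # Hypothetical per-node work; leaves count as 1.
        return sum(c.get_matches(text, cache) for c in self.children) or 1

    def get_matches(self, text, cache=None):
        if cache is None:
            cache = {}
        if id(self) in cache:
            return cache[id(self)]  # shared sub-nodes are computed once
        result = self.compute(text, cache)
        cache[id(self)] = result
        return result

shared = Expr()
root = Expr([shared, shared, Expr([shared])])
print(root.get_matches('text'))  # 3: 'shared' is evaluated once, reused twice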
def x(sectionObj):
text = sectionObj['text']
reftags = [x for x in refTagRegEx.finditer(text)]
if reftags:
references = []
for tag in reftags:
references.append(int(tag.group(1)))
sectionObj['references'] = references
text = refTagRegEx.sub('', text)
sectionObj['text'] = text
return sectionObj
|
reffinder
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sectionObj):\n text = sectionObj['text']\n reftags = [x for x in refTagRegEx.finditer(text)]\n if reftags:\n references = []\n for tag in reftags:\n references.append(int(tag.group(1)))\n sectionObj['references'] = references\n text = refTagRegEx.sub('', text)\n sectionObj['text'] = text\n return sectionObj\n",
"role": "user"
},
{
"content": "reffinder",
"role": "assistant"
}
] |
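A runnable sketch of the extraction step, assuming reference markers of the form "<ref N/>", which is what referencesFinder below writes back into the text:

import re

refTagRegEx = re.compile(r'<ref (\d+)/>')
section = {'text': 'Estonia is a country.<ref 0/> It borders Latvia.<ref 1/>'}

refs = [int(m.group(1)) for m in refTagRegEx.finditer(section['text'])]
if refs:
    section['references'] = refs
    section['text'] = refTagRegEx.sub('', section['text'])

print(section)
# {'text': 'Estonia is a country. It borders Latvia.', 'references': [0, 1]}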
def x(text):
references = referencesRegEx.finditer(text)
count = 0
refs = []
spans = []
for i in references:
refs.append(i.group())
spans.append(i.span())
count += 1
done = set()
nameRegEx = re.compile('(name=["\']*.*?["\']*)(\\s|/|>)')
for index, obj in enumerate(refs):
if obj.startswith('<ref name='):
nameTag = re.escape(nameRegEx.search(obj).group(1))
if nameTag not in done:
nameTag = re.escape(nameRegEx.search(obj).group(1))
indeces = [i for i, x in enumerate(refs) if re.search(
nameTag, x)]
matches = [refs[i] for i in indeces]
full = max(matches, key=len)
for i in indeces:
refs[i] = full
done.add(nameTag)
for i in range(len(refs)):
lastindex = refs[i].rindex('<')
firstindex = refs[i].index('>') + 1
refs[i] = refs[i][firstindex:lastindex]
for i in range(len(refs)):
if 'cite' in refs[i].lower():
newText = ''
values = refs[i].split('|')
for j in values:
if '=' in j:
first = j.index('=')
newText += j[first + 1:].strip() + ';'
refs[i] = newText
refspos = {}
c = 0
for i in refs:
if i not in refspos.keys():
refspos[i] = c
c += 1
else:
continue
newText = ''
assert len(spans) == len(refs)
next = 0
for i in range(len(spans)):
start = spans[i][0]
newText += text[next:start] + '<ref ' + str(refspos[refs[i]]) + '/>'
next = spans[i][1]
newText += text[next:]
newDict = {y: x for x, y in refspos.items()}
return newText, newDict
|
referencesFinder
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(text):\n references = referencesRegEx.finditer(text)\n count = 0\n refs = []\n spans = []\n for i in references:\n refs.append(i.group())\n spans.append(i.span())\n count += 1\n done = set()\n nameRegEx = re.compile('(name=[\"\\']*.*?[\"\\']*)(\\\\s|/|>)')\n for index, obj in enumerate(refs):\n if obj.startswith('<ref name='):\n nameTag = re.escape(nameRegEx.search(obj).group(1))\n if nameTag not in done:\n nameTag = re.escape(nameRegEx.search(obj).group(1))\n indeces = [i for i, x in enumerate(refs) if re.search(\n nameTag, x)]\n matches = [refs[i] for i in indeces]\n full = max(matches, key=len)\n for i in indeces:\n refs[i] = full\n done.add(nameTag)\n for i in range(len(refs)):\n lastindex = refs[i].rindex('<')\n firstindex = refs[i].index('>') + 1\n refs[i] = refs[i][firstindex:lastindex]\n for i in range(len(refs)):\n if 'cite' in refs[i].lower():\n newText = ''\n values = refs[i].split('|')\n for j in values:\n if '=' in j:\n first = j.index('=')\n newText += j[first + 1:].strip() + ';'\n refs[i] = newText\n refspos = {}\n c = 0\n for i in refs:\n if i not in refspos.keys():\n refspos[i] = c\n c += 1\n else:\n continue\n newText = ''\n assert len(spans) == len(refs)\n next = 0\n for i in range(len(spans)):\n start = spans[i][0]\n newText += text[next:start] + '<ref ' + str(refspos[refs[i]]) + '/>'\n next = spans[i][1]\n newText += text[next:]\n newDict = {y: x for x, y in refspos.items()}\n return newText, newDict\n",
"role": "user"
},
{
"content": "referencesFinder",
"role": "assistant"
}
] |
def x(sectionObj):
text = ''
lastEnd = 0
ends = []
text = sectionObj['text']
imageStarts = [x.start() for x in imageRegEx.finditer(text)]
if imageStarts:
images = []
for start in imageStarts:
imgText, end = balancedSlicer(text[start:])
end = start + end
ends.append(end)
img = {'text': imgText}
imgText = imgText.split('|')
t = imgText[-1][:-2]
url = urlBegin + imgText[0].replace(' ', '_').replace('[[', '')
img['text'] = t
img['url'] = url
if ExtLinkBracketedRegex.search(t):
img = addExternalLinks(img)
intlinks = [x for x in findBalanced(t, openDelim='[[',
closeDelim=']]')]
if intlinks:
img = addIntLinks(img)
images.append(img)
sectionObj['images'] = images
spans = []
for i, j in zip(imageStarts, ends):
spans.append((i, j))
sectionObj['text'] = dropSpans(spans, text)
return sectionObj
|
imageParser
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(sectionObj):\n text = ''\n lastEnd = 0\n ends = []\n text = sectionObj['text']\n imageStarts = [x.start() for x in imageRegEx.finditer(text)]\n if imageStarts:\n images = []\n for start in imageStarts:\n imgText, end = balancedSlicer(text[start:])\n end = start + end\n ends.append(end)\n img = {'text': imgText}\n imgText = imgText.split('|')\n t = imgText[-1][:-2]\n url = urlBegin + imgText[0].replace(' ', '_').replace('[[', '')\n img['text'] = t\n img['url'] = url\n if ExtLinkBracketedRegex.search(t):\n img = addExternalLinks(img)\n intlinks = [x for x in findBalanced(t, openDelim='[[',\n closeDelim=']]')]\n if intlinks:\n img = addIntLinks(img)\n images.append(img)\n sectionObj['images'] = images\n spans = []\n for i, j in zip(imageStarts, ends):\n spans.append((i, j))\n sectionObj['text'] = dropSpans(spans, text)\n return sectionObj\n",
"role": "user"
},
{
"content": "imageParser",
"role": "assistant"
}
] |
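imageParser leans on a balancedSlicer helper that is not shown in these rows. A plausible sketch of what it must do, assuming wiki-style '[[' and ']]' delimiters that may nest inside image markup:

def balanced_slice(text, open_d='[[', close_d=']]'):
    # Return the prefix of `text` up to the delimiter that balances the
    # opening one, plus the end offset (hypothetical reimplementation).
    depth, i = 0, 0
    while i < len(text):
        if text.startswith(open_d, i):
            depth += 1
            i += len(open_d)
        elif text.startswith(close_d, i):
            depth -= 1
            i += len(close_d)
            if depth == 0:
                return text[:i], i
        else:
            i += 1
    return text, len(text)

print(balanced_slice('[[Pilt:foo.png|pisi|[[link|kiri]]]] rest'))
# ('[[Pilt:foo.png|pisi|[[link|kiri]]]]', 35)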
def x(self, layer):
if layer == ANALYSIS:
if WORDS in self and len(self[WORDS]) > 0:
return ANALYSIS in self[WORDS][0]
elif layer == LAYER_CONLL:
if LAYER_CONLL in self and len(self[LAYER_CONLL]) > 0:
return PARSER_OUT in self[LAYER_CONLL][0]
elif layer == LAYER_VISLCG3:
if LAYER_VISLCG3 in self and len(self[LAYER_VISLCG3]) > 0:
return PARSER_OUT in self[LAYER_VISLCG3][0]
elif layer == LABEL:
if WORDS in self and len(self[WORDS]) > 0:
return LABEL in self[WORDS][0]
elif layer == CLAUSE_ANNOTATION:
if WORDS in self and len(self[WORDS]) > 0:
return CLAUSE_ANNOTATION in self[WORDS][0]
elif layer == WORDNET:
if WORDS in self and len(self[WORDS]) > 0:
if ANALYSIS in self[WORDS][0] and len(self[WORDS][0][ANALYSIS]
) > 0:
return WORDNET in self[WORDS][0][ANALYSIS][0]
else:
return layer in self
return False
|
is_tagged
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer):\n if layer == ANALYSIS:\n if WORDS in self and len(self[WORDS]) > 0:\n return ANALYSIS in self[WORDS][0]\n elif layer == LAYER_CONLL:\n if LAYER_CONLL in self and len(self[LAYER_CONLL]) > 0:\n return PARSER_OUT in self[LAYER_CONLL][0]\n elif layer == LAYER_VISLCG3:\n if LAYER_VISLCG3 in self and len(self[LAYER_VISLCG3]) > 0:\n return PARSER_OUT in self[LAYER_VISLCG3][0]\n elif layer == LABEL:\n if WORDS in self and len(self[WORDS]) > 0:\n return LABEL in self[WORDS][0]\n elif layer == CLAUSE_ANNOTATION:\n if WORDS in self and len(self[WORDS]) > 0:\n return CLAUSE_ANNOTATION in self[WORDS][0]\n elif layer == WORDNET:\n if WORDS in self and len(self[WORDS]) > 0:\n if ANALYSIS in self[WORDS][0] and len(self[WORDS][0][ANALYSIS]\n ) > 0:\n return WORDNET in self[WORDS][0][ANALYSIS][0]\n else:\n return layer in self\n return False\n",
"role": "user"
},
{
"content": "is_tagged",
"role": "assistant"
}
] |
def x(self, layer, sep=' '):
return self.texts_from_spans(self.spans(layer), sep)
|
texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer, sep=' '):\n return self.texts_from_spans(self.spans(layer), sep)\n",
"role": "user"
},
{
"content": "texts",
"role": "assistant"
}
] |
def x(self, spans, sep=' '):
text = self.text
texts = []
for start, end in spans:
if isinstance(start, list):
texts.append(sep.join(text[s:e] for s, e in zip(start, end)))
else:
texts.append(text[start:end])
return texts
|
texts_from_spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, spans, sep=' '):\n text = self.text\n texts = []\n for start, end in spans:\n if isinstance(start, list):\n texts.append(sep.join(text[s:e] for s, e in zip(start, end)))\n else:\n texts.append(text[start:end])\n return texts\n",
"role": "user"
},
{
"content": "texts_from_spans",
"role": "assistant"
}
] |
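The same function works standalone, which makes the two span shapes easy to see: a plain (start, end) pair yields one slice, while parallel lists of starts and ends yield a multi-part span joined with the separator:

text = 'Tere, maailm! Head aega!'
spans = [(0, 4), ([6, 14], [12, 18])]

def texts_from_spans(text, spans, sep=' '):
    out = []
    for start, end in spans:
        if isinstance(start, list):
            # Multi-part span: join the pieces with the separator.
            out.append(sep.join(text[s:e] for s, e in zip(start, end)))
        else:
            out.append(text[start:end])
    return out

print(texts_from_spans(text, spans))  # ['Tere', 'maailm Head']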
def x(self, layer):
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans
|
spans
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer):\n spans = []\n for data in self[layer]:\n spans.append((data[START], data[END]))\n return spans\n",
"role": "user"
},
{
"content": "spans",
"role": "assistant"
}
] |
def x(self, layer):
starts = []
for data in self[layer]:
starts.append(data[START])
return starts
|
starts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer):\n starts = []\n for data in self[layer]:\n starts.append(data[START])\n return starts\n",
"role": "user"
},
{
"content": "starts",
"role": "assistant"
}
] |
def x(self, layer):
ends = []
for data in self[layer]:
ends.append(data[END])
return ends
|
ends
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer):\n ends = []\n for data in self[layer]:\n ends.append(data[END])\n return ends\n",
"role": "user"
},
{
"content": "ends",
"role": "assistant"
}
] |
def x(self):
return {PARAGRAPHS: self.tokenize_paragraphs, SENTENCES: self.
tokenize_sentences, WORDS: self.tokenize_words, ANALYSIS: self.
tag_analysis, TIMEXES: self.tag_timexes, NAMED_ENTITIES: self.
tag_named_entities, CLAUSE_ANNOTATION: self.tag_clause_annotations,
CLAUSES: self.tag_clauses, LAYER_CONLL: self.tag_syntax_vislcg3,
LAYER_VISLCG3: self.tag_syntax_maltparser, WORDNET: self.tag_wordnet}
|
layer_tagger_mapping
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n return {PARAGRAPHS: self.tokenize_paragraphs, SENTENCES: self.\n tokenize_sentences, WORDS: self.tokenize_words, ANALYSIS: self.\n tag_analysis, TIMEXES: self.tag_timexes, NAMED_ENTITIES: self.\n tag_named_entities, CLAUSE_ANNOTATION: self.tag_clause_annotations,\n CLAUSES: self.tag_clauses, LAYER_CONLL: self.tag_syntax_vislcg3,\n LAYER_VISLCG3: self.tag_syntax_maltparser, WORDNET: self.tag_wordnet}\n",
"role": "user"
},
{
"content": "layer_tagger_mapping",
"role": "assistant"
}
] |
def x(self, layer):
mapping = self.layer_tagger_mapping
if layer in mapping:
mapping[layer]()
return self
|
tag
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self, layer):\n mapping = self.layer_tagger_mapping\n if layer in mapping:\n mapping[layer]()\n return self\n",
"role": "user"
},
{
"content": "tag",
"role": "assistant"
}
] |
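Together with layer_tagger_mapping above, tag() is a dispatch table: layer names map to bound methods, so tagging is a dictionary lookup plus a call, and unknown layers are silently a no-op. A minimal sketch with a hypothetical one-layer pipeline:

class Pipeline:
    def tokenize_words(self):
        print('tokenizing words')
        return self

    @property
    def layer_tagger_mapping(self):
        return {'words': self.tokenize_words}

    def tag(self, layer):
        mapping = self.layer_tagger_mapping
        if layer in mapping:
            mapping[layer]()
        return self

Pipeline().tag('words')    # prints 'tokenizing words'
Pipeline().tag('unknown')  # no-op, as in the original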
def x(self):
tok = self.__paragraph_tokenizer
spans = tok.span_tokenize(self.text)
dicts = []
for start, end in spans:
dicts.append({'start': start, 'end': end})
self[PARAGRAPHS] = dicts
return self
|
tokenize_paragraphs
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n tok = self.__paragraph_tokenizer\n spans = tok.span_tokenize(self.text)\n dicts = []\n for start, end in spans:\n dicts.append({'start': start, 'end': end})\n self[PARAGRAPHS] = dicts\n return self\n",
"role": "user"
},
{
"content": "tokenize_paragraphs",
"role": "assistant"
}
] |
def x(self):
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs()
return self.texts(PARAGRAPHS)
|
paragraph_texts
|
[
{
"content": "Given the source code of a python function, suggest a fitting name for the function.",
"role": "system"
},
{
"content": "def x(self):\n if not self.is_tagged(PARAGRAPHS):\n self.tokenize_paragraphs()\n return self.texts(PARAGRAPHS)\n",
"role": "user"
},
{
"content": "paragraph_texts",
"role": "assistant"
}
] |