Columns: source_code (string, length 45 to 7.24k) · function_name (string, length 1 to 111) · messages (list of chat turns)
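Each record pairs a source_code string and its function_name with a ready-made chat transcript in messages; the assistant turn repeats the function_name column. A minimal loading sketch follows, assuming the records are stored one JSON object per line in a file named train.jsonl (the file name and the JSON Lines layout are assumptions, not part of the header above):

import json

def iter_records(path):
    # One JSON object per line (JSON Lines); adjust if the dump uses another container.
    with open(path, encoding='utf-8') as f:
        for line in f:
            yield json.loads(line)

for record in iter_records('train.jsonl'):
    # 'messages' holds the system/user/assistant turns; the assistant turn
    # carries the target name, so it can be cross-checked against the flat column.
    prompt_turns = record['messages'][:-1]
    target_name = record['messages'][-1]['content']
    assert target_name == record['function_name']
    break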
def x(root, phonetic, compound):
    global compound_regex
    if not phonetic:
        root = trim_phonetics(root)
    if not compound:
        root = trim_compounds(root)
    return root
get_root
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(root, phonetic, compound):\n global compound_regex\n if not phonetic:\n root = trim_phonetics(root)\n if not compound:\n root = trim_compounds(root)\n return root\n", "role": "user" }, { "content": "get_root", "role": "assistant" } ]
def x(root):
    global all_markers
    if root in all_markers or root in ['-', '_']:
        return [[root]]
    groups = []
    for group in root.split('-'):
        toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')]
        groups.append(toks)
    return groups
get_group_tokens
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(root):\n global all_markers\n if root in all_markers or root in ['-', '_']:\n return [[root]]\n groups = []\n for group in root.split('-'):\n toks = [trim_phonetics(trim_compounds(tok)) for tok in group.split('_')\n ]\n groups.append(toks)\n return groups\n", "role": "user" }, { "content": "get_group_tokens", "role": "assistant" } ]
def x(words, join=True, joinstring=' '):
    return Vabamorf.instance().fix_spelling(words, join, joinstring)
fix_spelling
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(words, join=True, joinstring=' '):\n return Vabamorf.instance().fix_spelling(words, join, joinstring)\n", "role": "user" }, { "content": "fix_spelling", "role": "assistant" } ]
def x(lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
    return Vabamorf.instance().synthesize(lemma, form, partofspeech, hint, guess, phonetic)
synthesize
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(lemma, form, partofspeech='', hint='', guess=True, phonetic=False):\n return Vabamorf.instance().synthesize(lemma, form, partofspeech, hint,\n guess, phonetic)\n", "role": "user" }, { "content": "synthesize", "role": "assistant" } ]
def x():
    if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
        Vabamorf.pid = os.getpid()
        Vabamorf.morf = Vabamorf()
    return Vabamorf.morf
instance
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():\n Vabamorf.pid = os.getpid()\n Vabamorf.morf = Vabamorf()\n return Vabamorf.morf\n", "role": "user" }, { "content": "instance", "role": "assistant" } ]
def x(self, words, **kwargs):
    if isinstance(words, six.string_types):
        words = words.split()
    words = [convert(w) for w in words]
    morfresults = self._morf.analyze(vm.StringVector(words),
                                     kwargs.get('disambiguate', True),
                                     kwargs.get('guess', True),
                                     True,
                                     kwargs.get('propername', True))
    trim_phonetic = kwargs.get('phonetic', False)
    trim_compound = kwargs.get('compound', True)
    return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in morfresults]
analyze
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, words, **kwargs):\n if isinstance(words, six.string_types):\n words = words.split()\n words = [convert(w) for w in words]\n morfresults = self._morf.analyze(vm.StringVector(words), kwargs.get(\n 'disambiguate', True), kwargs.get('guess', True), True, kwargs.get(\n 'propername', True))\n trim_phonetic = kwargs.get('phonetic', False)\n trim_compound = kwargs.get('compound', True)\n return [postprocess_result(mr, trim_phonetic, trim_compound) for mr in\n morfresults]\n", "role": "user" }, { "content": "analyze", "role": "assistant" } ]
def x(self, words):
    words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])
    disambiguated = self._morf.disambiguate(words)
    return [postprocess_result(mr, False, True) for mr in disambiguated]
disambiguate
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, words):\n words = vm.SentenceAnalysis([as_wordanalysis(w) for w in words])\n disambiguated = self._morf.disambiguate(words)\n return [postprocess_result(mr, False, True) for mr in disambiguated]\n", "role": "user" }, { "content": "disambiguate", "role": "assistant" } ]
def x(self, words, suggestions=True):
    if isinstance(words, six.string_types):
        words = words.split()
    words = [convert(w) for w in words]
    spellresults = self._morf.spellcheck(words, suggestions)
    results = []
    for spellresult in spellresults:
        suggestions = [deconvert(s) for s in spellresult.suggestions]
        result = {'text': deconvert(spellresult.word),
                  'spelling': spellresult.spelling,
                  'suggestions': suggestions}
        results.append(result)
    return results
spellcheck
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, words, suggestions=True):\n if isinstance(words, six.string_types):\n words = words.split()\n words = [convert(w) for w in words]\n spellresults = self._morf.spellcheck(words, suggestions)\n results = []\n for spellresult in spellresults:\n suggestions = [deconvert(s) for s in spellresult.suggestions]\n result = {'text': deconvert(spellresult.word), 'spelling':\n spellresult.spelling, 'suggestions': suggestions}\n results.append(result)\n return results\n", "role": "user" }, { "content": "spellcheck", "role": "assistant" } ]
def x(self, words, join=True, joinstring=' '):
    fixed_words = []
    for word in self.spellcheck(words, suggestions=True):
        if word['spelling']:
            fixed_words.append(word['text'])
        else:
            suggestions = word['suggestions']
            if len(suggestions) > 0:
                fixed_words.append(suggestions[0])
            else:
                fixed_words.append(word['text'])
    if join:
        return joinstring.join(fixed_words)
    else:
        return fixed_words
fix_spelling
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, words, join=True, joinstring=' '):\n fixed_words = []\n for word in self.spellcheck(words, suggestions=True):\n if word['spelling']:\n fixed_words.append(word['text'])\n else:\n suggestions = word['suggestions']\n if len(suggestions) > 0:\n fixed_words.append(suggestions[0])\n else:\n fixed_words.append(word['text'])\n if join:\n return joinstring.join(fixed_words)\n else:\n return fixed_words\n", "role": "user" }, { "content": "fix_spelling", "role": "assistant" } ]
def x(self, lemma, form, partofspeech='', hint='', guess=True, phonetic=False):
    words = self._morf.synthesize(convert(lemma.strip()), convert(form.strip()),
                                  convert(partofspeech.strip()), convert(hint.strip()),
                                  guess, phonetic)
    return [deconvert(w) for w in words]
synthesize
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, lemma, form, partofspeech='', hint='', guess=True, phonetic=False):\n words = self._morf.synthesize(convert(lemma.strip()), convert(form.\n strip()), convert(partofspeech.strip()), convert(hint.strip()),\n guess, phonetic)\n return [deconvert(w) for w in words]\n", "role": "user" }, { "content": "synthesize", "role": "assistant" } ]
def x(self, sentence):
    for word in sentence:
        for analysis in word[ANALYSIS]:
            analysis[ROOT] = analysis[ROOT].replace('~', '')
            analysis[ROOT] = re.sub('[?<\\]]([aioueöäõü])', '\\1', analysis[ROOT])
    return json.dumps({WORDS: sentence})
prepare_sentence
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sentence):\n for word in sentence:\n for analysis in word[ANALYSIS]:\n analysis[ROOT] = analysis[ROOT].replace('~', '')\n analysis[ROOT] = re.sub('[?<\\\\]]([aioueöäõü])', '\\\\1', analysis\n [ROOT])\n return json.dumps({WORDS: sentence})\n", "role": "user" }, { "content": "prepare_sentence", "role": "assistant" } ]
def x(self, sentence):
    max_index = 0
    max_depth = 1
    stack_of_indexes = [max_index]
    for token in sentence:
        if CLAUSE_ANNOT not in token:
            token[CLAUSE_IDX] = stack_of_indexes[-1]
        else:
            for annotation in token[CLAUSE_ANNOT]:
                if annotation == 'KIILU_ALGUS':
                    max_index += 1
                    stack_of_indexes.append(max_index)
                    if len(stack_of_indexes) > max_depth:
                        max_depth = len(stack_of_indexes)
            token[CLAUSE_IDX] = stack_of_indexes[-1]
            for annotation in token[CLAUSE_ANNOT]:
                if annotation == 'KINDEL_PIIR':
                    max_index += 1
                    stack_of_indexes[-1] = max_index
                elif annotation == 'KIILU_LOPP':
                    stack_of_indexes.pop()
    return sentence
annotate_indices
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sentence):\n max_index = 0\n max_depth = 1\n stack_of_indexes = [max_index]\n for token in sentence:\n if CLAUSE_ANNOT not in token:\n token[CLAUSE_IDX] = stack_of_indexes[-1]\n else:\n for annotation in token[CLAUSE_ANNOT]:\n if annotation == 'KIILU_ALGUS':\n max_index += 1\n stack_of_indexes.append(max_index)\n if len(stack_of_indexes) > max_depth:\n max_depth = len(stack_of_indexes)\n token[CLAUSE_IDX] = stack_of_indexes[-1]\n for annotation in token[CLAUSE_ANNOT]:\n if annotation == 'KINDEL_PIIR':\n max_index += 1\n stack_of_indexes[-1] = max_index\n elif annotation == 'KIILU_LOPP':\n stack_of_indexes.pop()\n return sentence\n", "role": "user" }, { "content": "annotate_indices", "role": "assistant" } ]
def x(self, sentence):
    annotations = []
    for token in sentence:
        data = {CLAUSE_IDX: token[CLAUSE_IDX]}
        if CLAUSE_ANNOT in token:
            if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:
                data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY
            elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:
                data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START
            elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:
                data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END
        annotations.append(data)
    return annotations
rename_annotations
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sentence):\n annotations = []\n for token in sentence:\n data = {CLAUSE_IDX: token[CLAUSE_IDX]}\n if CLAUSE_ANNOT in token:\n if 'KINDEL_PIIR' in token[CLAUSE_ANNOT]:\n data[CLAUSE_ANNOTATION] = CLAUSE_BOUNDARY\n elif 'KIILU_ALGUS' in token[CLAUSE_ANNOT]:\n data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_START\n elif 'KIILU_LOPP' in token[CLAUSE_ANNOT]:\n data[CLAUSE_ANNOTATION] = EMBEDDED_CLAUSE_END\n annotations.append(data)\n return annotations\n", "role": "user" }, { "content": "rename_annotations", "role": "assistant" } ]
def x(sec):
    import time
    if sec < 86400:
        return time.strftime('%H:%M:%S', time.gmtime(sec))
    else:
        days = int(sec / 86400)
        secs = sec % 86400
        return str(days) + 'd, ' + time.strftime('%H:%M:%S', time.gmtime(secs))
format_time
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sec):\n import time\n if sec < 864000:\n return time.strftime('%H:%M:%S', time.gmtime(sec))\n else:\n days = int(sec / 864000)\n secs = sec % 864000\n return str(days) + 'd, ' + time.strftime('%H:%M:%S', time.gmtime(secs))\n", "role": "user" }, { "content": "format_time", "role": "assistant" } ]
def x(text, file_name, verbose=True):
    if verbose:
        print(' processing ' + file_name + ' ... ', end='')
    start = timer()
    text = text.tokenize_sentences()
    all_sentences = len(text[SENTENCES])
    end = timer()
    if verbose:
        print(' (tok time: ' + format_time(end - start) + ')', end='')
    if all_sentences > max_sentences:
        start = timer()
        i = 0
        spans = []
        len_total = 0
        while i < all_sentences:
            startSent = text[SENTENCES][i]
            endSent = text[SENTENCES][min(i + (max_sentences - 1), all_sentences - 1)]
            span = startSent[START], endSent[END]
            len_total += span[1] - span[0]
            spans.append(span)
            i += max_sentences
        text_spans = text.texts_from_spans(spans)
        assert len(text.text) >= len_total, '(!) Total spans_len must be =< than text_len: ' + str(len_total) + '/' + str(len(text.text))
        new_texts = []
        for i, small_text in enumerate(text_spans):
            newText = Text(small_text)
            for key in text.keys():
                if key != TEXT and key != SENTENCES and key != PARAGRAPHS:
                    newText[key] = text[key]
            newText['_text_split_id'] = i
            newText['_text_split_origin'] = str(spans[i])
            newText['_text_split_file'] = file_name
            new_texts.append(newText)
        end = timer()
        if verbose:
            print(' (split time: ' + format_time(end - start) + ')', end='')
            print(' (sents: ' + str(all_sentences) + ', new_texts:' + str(len(new_texts)) + ')', end='')
            print()
        return new_texts
    else:
        if verbose:
            print(' (sents: ' + str(all_sentences) + ', no_split)', end=' \n')
        return [text]
split_Text
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, file_name, verbose=True):\n if verbose:\n print(' processing ' + file_name + ' ... ', end='')\n start = timer()\n text = text.tokenize_sentences()\n all_sentences = len(text[SENTENCES])\n end = timer()\n if verbose:\n print(' (tok time: ' + format_time(end - start) + ')', end='')\n if all_sentences > max_sentences:\n start = timer()\n i = 0\n spans = []\n len_total = 0\n while i < all_sentences:\n startSent = text[SENTENCES][i]\n endSent = text[SENTENCES][min(i + (max_sentences - 1), \n all_sentences - 1)]\n span = startSent[START], endSent[END]\n len_total += span[1] - span[0]\n spans.append(span)\n i += max_sentences\n text_spans = text.texts_from_spans(spans)\n assert len(text.text\n ) >= len_total, '(!) Total spans_len must be =< than text_len: ' + str(\n len_total) + '/' + str(len(text.text))\n new_texts = []\n for i, small_text in enumerate(text_spans):\n newText = Text(small_text)\n for key in text.keys():\n if key != TEXT and key != SENTENCES and key != PARAGRAPHS:\n newText[key] = text[key]\n newText['_text_split_id'] = i\n newText['_text_split_origin'] = str(spans[i])\n newText['_text_split_file'] = file_name\n new_texts.append(newText)\n end = timer()\n if verbose:\n print(' (split time: ' + format_time(end - start) + ')', end='')\n print(' (sents: ' + str(all_sentences) + ', new_texts:' + str(\n len(new_texts)) + ')', end='')\n print()\n return new_texts\n else:\n if verbose:\n print(' (sents: ' + str(all_sentences) + ', no_split)', end=' \\n')\n return [text]\n", "role": "user" }, { "content": "split_Text", "role": "assistant" } ]
def x(text, old_file_name, out_dir, suffix='__split', verbose=True):
    name = os.path.basename(old_file_name)
    if '.' in name:
        new_name = re.sub('\\.([^.]+)$', suffix + '.\\1', name)
    else:
        new_name = name + suffix
    new_path = os.path.join(out_dir, new_name)
    start = timer()
    o_f = codecs.open(new_path, mode='wb', encoding='ascii')
    o_f.write(json.dumps(text))
    o_f.close()
    end = timer()
    timestamp = format_time(end - start)
    if verbose:
        print(' ==> ' + new_path + ' (file writing time: ' + timestamp + ')')
write_Text_into_file
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, old_file_name, out_dir, suffix='__split', verbose=True):\n name = os.path.basename(old_file_name)\n if '.' in name:\n new_name = re.sub('\\\\.([^.]+)$', suffix + '.\\\\1', name)\n else:\n new_name = name + suffix\n new_path = os.path.join(out_dir, new_name)\n start = timer()\n o_f = codecs.open(new_path, mode='wb', encoding='ascii')\n o_f.write(json.dumps(text))\n o_f.close()\n end = timer()\n timestamp = format_time(end - start)\n if verbose:\n print(' ==> ' + new_path + ' (file writing time: ' + timestamp + ')')\n", "role": "user" }, { "content": "write_Text_into_file", "role": "assistant" } ]
def x(root, prefix='', suffix='.xml', target=['artikkel'], encoding=None):
    documents = []
    for fnm in get_filenames(root, prefix, suffix):
        path = os.path.join(root, fnm)
        docs = parse_tei_corpus(path, target, encoding)
        for doc in docs:
            doc[FILE] = fnm
        documents.extend(docs)
    return documents
parse_tei_corpora
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(root, prefix='', suffix='.xml', target=['artikkel'], encoding=None):\n documents = []\n for fnm in get_filenames(root, prefix, suffix):\n path = os.path.join(root, fnm)\n docs = parse_tei_corpus(path, target, encoding)\n for doc in docs:\n doc[FILE] = fnm\n documents.extend(docs)\n return documents\n", "role": "user" }, { "content": "parse_tei_corpora", "role": "assistant" } ]
def x(path, target=['artikkel'], encoding=None):
    with open(path, 'rb') as f:
        html_doc = f.read()
    if encoding:
        html_doc = html_doc.decode(encoding)
    soup = BeautifulSoup(html_doc, 'html5lib')
    title = soup.find_all('title')[0].string
    documents = []
    for div1 in soup.find_all('div1'):
        documents.extend(parse_div(div1, dict(), target))
    return tokenize_documents(documents)
parse_tei_corpus
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(path, target=['artikkel'], encoding=None):\n with open(path, 'rb') as f:\n html_doc = f.read()\n if encoding:\n html_doc = html_doc.decode(encoding)\n soup = BeautifulSoup(html_doc, 'html5lib')\n title = soup.find_all('title')[0].string\n documents = []\n for div1 in soup.find_all('div1'):\n documents.extend(parse_div(div1, dict(), target))\n return tokenize_documents(documents)\n", "role": "user" }, { "content": "parse_tei_corpus", "role": "assistant" } ]
def x(soup, metadata, target):
    documents = []
    div_type = soup.get('type', None)
    div_title = list(soup.children)[0].string.strip()
    if div_type in target:
        div_authors = soup.find_all('author')
        document = {'type': div_type, 'title': div_title, 'paragraphs': parse_paragraphs(soup)}
        if len(div_authors) > 0:
            div_author = div_authors[0].text.strip()
            document['author'] = div_author
        for k, v in metadata.items():
            document[k] = v
        documents.append(document)
    else:
        metadata[div_type] = div_title
        subdiv_name = get_subdiv(soup.name)
        subdivs = []
        if subdiv_name is not None:
            subdivs = soup.find_all(subdiv_name)
        if len(subdivs) > 0:
            for subdiv in subdivs:
                documents.extend(parse_div(subdiv, deepcopy(metadata), target))
    return documents
parse_div
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(soup, metadata, target):\n documents = []\n div_type = soup.get('type', None)\n div_title = list(soup.children)[0].string.strip()\n if div_type in target:\n div_authors = soup.find_all('author')\n document = {'type': div_type, 'title': div_title, 'paragraphs':\n parse_paragraphs(soup)}\n if len(div_authors) > 0:\n div_author = div_authors[0].text.strip()\n document['author'] = div_author\n for k, v in metadata.items():\n document[k] = v\n documents.append(document)\n else:\n metadata[div_type] = div_title\n subdiv_name = get_subdiv(soup.name)\n subdivs = []\n if subdiv_name is not None:\n subdivs = soup.find_all(subdiv_name)\n if len(subdivs) > 0:\n for subdiv in subdivs:\n documents.extend(parse_div(subdiv, deepcopy(metadata), target))\n return documents\n", "role": "user" }, { "content": "parse_div", "role": "assistant" } ]
def x(soup):
    paragraphs = []
    for para in soup.find_all('p'):
        sentences = []
        for sent in para.find_all('s'):
            sentence = sent.text.strip()
            if len(sentence) > 0:
                sentences.append(sentence)
        if len(sentences) > 0:
            paragraphs.append({'sentences': sentences})
    return paragraphs
parse_paragraphs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(soup):\n paragraphs = []\n for para in soup.find_all('p'):\n sentences = []\n for sent in para.find_all('s'):\n sentence = sent.text.strip()\n if len(sentence) > 0:\n sentences.append(sentence)\n if len(sentences) > 0:\n paragraphs.append({'sentences': sentences})\n return paragraphs\n", "role": "user" }, { "content": "parse_paragraphs", "role": "assistant" } ]
def x(docs):
    sep = '\n\n'
    texts = []
    for doc in docs:
        text = '\n\n'.join(['\n'.join(para[SENTENCES]) for para in doc[PARAGRAPHS]])
        doc[TEXT] = text
        del doc[PARAGRAPHS]
        texts.append(Text(doc))
    return texts
tokenize_documents
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(docs):\n sep = '\\n\\n'\n texts = []\n for doc in docs:\n text = '\\n\\n'.join(['\\n'.join(para[SENTENCES]) for para in doc[\n PARAGRAPHS]])\n doc[TEXT] = text\n del doc[PARAGRAPHS]\n texts.append(Text(doc))\n return texts\n", "role": "user" }, { "content": "tokenize_documents", "role": "assistant" } ]
def x():
    docs = read_json_corpus(DEFAULT_NER_DATASET)
    trainer = NerTrainer(default_nersettings)
    trainer.train(docs, DEFAULT_NER_MODEL_DIR)
train_default_model
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x():\n docs = read_json_corpus(DEFAULT_NER_DATASET)\n trainer = NerTrainer(default_nersettings)\n trainer.train(docs, DEFAULT_NER_MODEL_DIR)\n", "role": "user" }, { "content": "train_default_model", "role": "assistant" } ]
def x(self, line):
    assert isinstance(line, str)
    try:
        self._process.stdin.write(as_binary(line))
        self._process.stdin.write(as_binary('\n'))
        self._process.stdin.flush()
        result = as_unicode(self._process.stdout.readline())
        if result == '':
            stderr = as_unicode(self._process.stderr.read())
            raise Exception('EOF encountered while reading stream. Stderr is {0}.'.format(stderr))
        return result
    except Exception:
        self._process.terminate()
        raise
process_line
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, line):\n assert isinstance(line, str)\n try:\n self._process.stdin.write(as_binary(line))\n self._process.stdin.write(as_binary('\\n'))\n self._process.stdin.flush()\n result = as_unicode(self._process.stdout.readline())\n if result == '':\n stderr = as_unicode(self._process.stderr.read())\n raise Exception(\n 'EOF encountered while reading stream. Stderr is {0}.'.\n format(stderr))\n return result\n except Exception:\n self._process.terminate()\n raise\n", "role": "user" }, { "content": "process_line", "role": "assistant" } ]
def x(synset_idxes):
    offsets = {}
    current_seeked_offset_idx = 0
    ordered_synset_idxes = sorted(synset_idxes)
    with codecs.open(_SOI, 'rb', 'utf-8') as fin:
        for line in fin:
            split_line = line.split(':')
            while current_seeked_offset_idx < len(ordered_synset_idxes) and split_line[0] == str(ordered_synset_idxes[current_seeked_offset_idx]):
                offsets[synset_idxes[current_seeked_offset_idx]] = int(split_line[1])
                current_seeked_offset_idx += 1
            if current_seeked_offset_idx >= len(synset_idxes):
                break
    return [offsets[synset_idx] for synset_idx in synset_idxes]
get_synset_offsets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(synset_idxes):\n offsets = {}\n current_seeked_offset_idx = 0\n ordered_synset_idxes = sorted(synset_idxes)\n with codecs.open(_SOI, 'rb', 'utf-8') as fin:\n for line in fin:\n split_line = line.split(':')\n while current_seeked_offset_idx < len(ordered_synset_idxes\n ) and split_line[0] == str(ordered_synset_idxes[\n current_seeked_offset_idx]):\n offsets[synset_idxes[current_seeked_offset_idx]] = int(\n split_line[1])\n current_seeked_offset_idx += 1\n if current_seeked_offset_idx >= len(synset_idxes):\n break\n return [offsets[synset_idx] for synset_idx in synset_idxes]\n", "role": "user" }, { "content": "get_synset_offsets", "role": "assistant" } ]
def x(synset_offsets):
    global parser
    if parser is None:
        parser = Parser(_WN_FILE)
    synsets = []
    for offset in synset_offsets:
        raw_synset = parser.parse_synset(offset)
        synset = Synset(raw_synset)
        SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset
        SYNSETS_DICT[synset.id] = synset
        synsets.append(synset)
    return synsets
get_synsets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(synset_offsets):\n global parser\n if parser is None:\n parser = Parser(_WN_FILE)\n synsets = []\n for offset in synset_offsets:\n raw_synset = parser.parse_synset(offset)\n synset = Synset(raw_synset)\n SYNSETS_DICT[_get_key_from_raw_synset(raw_synset)] = synset\n SYNSETS_DICT[synset.id] = synset\n synsets.append(synset)\n return synsets\n", "role": "user" }, { "content": "get_synsets", "role": "assistant" } ]
def x(raw_synset):
    pos = raw_synset.pos
    literal = raw_synset.variants[0].literal
    sense = '%02d' % raw_synset.variants[0].sense
    return '.'.join([literal, pos, sense])
get_key_from_raw_synset
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(raw_synset):\n pos = raw_synset.pos\n literal = raw_synset.variants[0].literal\n sense = '%02d' % raw_synset.variants[0].sense\n return '.'.join([literal, pos, sense])\n", "role": "user" }, { "content": "get_key_from_raw_synset", "role": "assistant" } ]
def x(synset_key):
    if synset_key in SYNSETS_DICT:
        return SYNSETS_DICT[synset_key]

    def x(synset_key):
        with codecs.open(_SENSE_FILE, 'rb', 'utf-8') as fin:
            for line in fin:
                split_line = line.split(':')
                if split_line[0] == synset_key:
                    return int(split_line[1].strip())
        return None
    synset_idx = _get_synset_idx(synset_key)
    if synset_idx == None:
        return None
    synset_offset = _get_synset_offsets([synset_idx])
    synset = _get_synsets(synset_offset)
    return synset[0]
synset
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(synset_key):\n if synset_key in SYNSETS_DICT:\n return SYNSETS_DICT[synset_key]\n\n def x(synset_key):\n with codecs.open(_SENSE_FILE, 'rb', 'utf-8') as fin:\n for line in fin:\n split_line = line.split(':')\n if split_line[0] == synset_key:\n return int(split_line[1].strip())\n return None\n synset_idx = _get_synset_idx(synset_key)\n if synset_idx == None:\n return None\n synset_offset = _get_synset_offsets([synset_idx])\n synset = _get_synsets(synset_offset)\n return synset[0]\n", "role": "user" }, { "content": "synset", "role": "assistant" } ]
def x(lemma, pos=None):

    def x(lemma, pos):
        line_prefix_regexp = '%s:%s:(.*)' % (lemma, pos if pos else '\\w+')
        line_prefix = re.compile(line_prefix_regexp)
        idxes = []
        with codecs.open(_LIT_POS_FILE, 'rb', 'utf-8') as fin:
            for line in fin:
                result = line_prefix.match(line)
                if result:
                    res_indices = [int(x) for x in result.group(1).split(' ')]
                    idxes.extend(res_indices)
        LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)
        return sorted(idxes)
    synset_idxes = None
    if lemma in LEM_POS_2_SS_IDX:
        if pos in LEM_POS_2_SS_IDX[lemma]:
            synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]
        else:
            synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in LEM_POS_2_SS_IDX[lemma][pos]]
    if not synset_idxes:
        synset_idxes = _get_synset_idxes(lemma, pos)
    if len(synset_idxes) == 0:
        return []
    stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
    unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
    synset_offsets = _get_synset_offsets(unstored_synset_idxes)
    synsets = _get_synsets(synset_offsets)
    return stored_synsets + synsets
synsets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(lemma, pos=None):\n\n def x(lemma, pos):\n line_prefix_regexp = '%s:%s:(.*)' % (lemma, pos if pos else '\\\\w+')\n line_prefix = re.compile(line_prefix_regexp)\n idxes = []\n with codecs.open(_LIT_POS_FILE, 'rb', 'utf-8') as fin:\n for line in fin:\n result = line_prefix.match(line)\n if result:\n res_indices = [int(x) for x in result.group(1).split(' ')]\n idxes.extend(res_indices)\n LEM_POS_2_SS_IDX[lemma][pos].extend(idxes)\n return sorted(idxes)\n synset_idxes = None\n if lemma in LEM_POS_2_SS_IDX:\n if pos in LEM_POS_2_SS_IDX[lemma]:\n synset_idxes = LEM_POS_2_SS_IDX[lemma][pos]\n else:\n synset_idxes = [idx for pos in LEM_POS_2_SS_IDX[lemma] for idx in\n LEM_POS_2_SS_IDX[lemma][pos]]\n if not synset_idxes:\n synset_idxes = _get_synset_idxes(lemma, pos)\n if len(synset_idxes) == 0:\n return []\n stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(\n synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]\n unstored_synset_idxes = [synset_idxes[i] for i in range(len(\n synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]\n synset_offsets = _get_synset_offsets(unstored_synset_idxes)\n synsets = _get_synsets(synset_offsets)\n return stored_synsets + synsets\n", "role": "user" }, { "content": "synsets", "role": "assistant" } ]
def x(pos=None):

    def x(pos):
        idxes = []
        with codecs.open(_LIT_POS_FILE, 'rb', 'utf-8') as fin:
            if pos == None:
                for line in fin:
                    split_line = line.strip().split(':')
                    idxes.extend([int(x) for x in split_line[2].split()])
            else:
                for line in fin:
                    split_line = line.strip().split(':')
                    if split_line[1] == pos:
                        idxes.extend([int(x) for x in split_line[2].split()])
        idxes = list(set(idxes))
        idxes.sort()
        return idxes
    if pos in LOADED_POS:
        return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in LEM_POS_2_SS_IDX[lemma][pos]]
    else:
        synset_idxes = _get_unique_synset_idxes(pos)
        if len(synset_idxes) == 0:
            return []
        stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]
        unstored_synset_idxes = [synset_idxes[i] for i in range(len(synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]
        synset_offsets = _get_synset_offsets(unstored_synset_idxes)
        synsets = _get_synsets(synset_offsets)
        for synset in synsets:
            for variant in synset.get_variants():
                LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)
        LOADED_POS.add(pos)
        return stored_synsets + synsets
all_synsets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(pos=None):\n\n def x(pos):\n idxes = []\n with codecs.open(_LIT_POS_FILE, 'rb', 'utf-8') as fin:\n if pos == None:\n for line in fin:\n split_line = line.strip().split(':')\n idxes.extend([int(x) for x in split_line[2].split()])\n else:\n for line in fin:\n split_line = line.strip().split(':')\n if split_line[1] == pos:\n idxes.extend([int(x) for x in split_line[2].split()])\n idxes = list(set(idxes))\n idxes.sort()\n return idxes\n if pos in LOADED_POS:\n return [SYNSETS_DICT[idx] for lemma in LEM_POS_2_SS_IDX for idx in\n LEM_POS_2_SS_IDX[lemma][pos]]\n else:\n synset_idxes = _get_unique_synset_idxes(pos)\n if len(synset_idxes) == 0:\n return []\n stored_synsets = [SYNSETS_DICT[synset_idxes[i]] for i in range(len(\n synset_idxes)) if synset_idxes[i] in SYNSETS_DICT]\n unstored_synset_idxes = [synset_idxes[i] for i in range(len(\n synset_idxes)) if synset_idxes[i] not in SYNSETS_DICT]\n synset_offsets = _get_synset_offsets(unstored_synset_idxes)\n synsets = _get_synsets(synset_offsets)\n for synset in synsets:\n for variant in synset.get_variants():\n LEM_POS_2_SS_IDX[variant.literal][synset.pos].append(synset.id)\n LOADED_POS.add(pos)\n return stored_synsets + synsets\n", "role": "user" }, { "content": "all_synsets", "role": "assistant" } ]
def x(lemma_key):
    if lemma_key in LEMMAS_DICT:
        return LEMMAS_DICT[lemma_key]
    split_lemma_key = lemma_key.split('.')
    synset_key = '.'.join(split_lemma_key[:3])
    lemma_literal = split_lemma_key[3]
    lemma_obj = Lemma(synset_key, lemma_literal)
    LEMMAS_DICT[lemma_key] = lemma_obj
    return lemma_obj
lemma
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(lemma_key):\n if lemma_key in LEMMAS_DICT:\n return LEMMAS_DICT[lemma_key]\n split_lemma_key = lemma_key.split('.')\n synset_key = '.'.join(split_lemma_key[:3])\n lemma_literal = split_lemma_key[3]\n lemma_obj = Lemma(synset_key, lemma_literal)\n LEMMAS_DICT[lemma_key] = lemma_obj\n return lemma_obj\n", "role": "user" }, { "content": "lemma", "role": "assistant" } ]
def x(lemma, pos=None):
    lemma = lemma.lower()
    return [lemma_obj for synset in synsets(lemma, pos) for lemma_obj in synset.lemmas() if lemma_obj.name.lower() == lemma]
lemmas
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(lemma, pos=None):\n lemma = lemma.lower()\n return [lemma_obj for synset in synsets(lemma, pos) for lemma_obj in\n synset.lemmas() if lemma_obj.name.lower() == lemma]\n", "role": "user" }, { "content": "lemmas", "role": "assistant" } ]
def x(self, hypernyms):
    hypernyms |= set(self.hypernyms())
    for synset in self.hypernyms():
        hypernyms |= synset._recursive_hypernyms(hypernyms)
    return hypernyms
recursive_hypernyms
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, hypernyms):\n hypernyms |= set(self.hypernyms())\n for synset in self.hypernyms():\n hypernyms |= synset._recursive_hypernyms(hypernyms)\n return hypernyms\n", "role": "user" }, { "content": "recursive_hypernyms", "role": "assistant" } ]
def x(self):
    if 'min_depth' in self.__dict__:
        return self.__dict__['min_depth']
    min_depth = 0
    hypernyms = self.hypernyms()
    if hypernyms:
        min_depth = 1 + min(h._min_depth() for h in hypernyms)
    self.__dict__['min_depth'] = min_depth
    return min_depth
min_depth
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n if 'min_depth' in self.__dict__:\n return self.__dict__['min_depth']\n min_depth = 0\n hypernyms = self.hypernyms()\n if hypernyms:\n min_depth = 1 + min(h._min_depth() for h in hypernyms)\n self.__dict__['min_depth'] = min_depth\n return min_depth\n", "role": "user" }, { "content": "min_depth", "role": "assistant" } ]
def x(self, relation):
    results = []
    for relation_candidate in self._raw_synset.internalLinks:
        if relation_candidate.name == relation:
            linked_synset = synset(_get_key_from_raw_synset(relation_candidate.target_concept))
            relation_candidate.target_concept = linked_synset._raw_synset
            results.append(linked_synset)
    return results
get_related_synsets
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, relation):\n results = []\n for relation_candidate in self._raw_synset.internalLinks:\n if relation_candidate.name == relation:\n linked_synset = synset(_get_key_from_raw_synset(\n relation_candidate.target_concept))\n relation_candidate.target_concept = linked_synset._raw_synset\n results.append(linked_synset)\n return results\n", "role": "user" }, { "content": "get_related_synsets", "role": "assistant" } ]
def x(self, relation, depth=float('inf')):
    ancestors = []
    unvisited_ancestors = [(synset, 1) for synset in self.get_related_synsets(relation)]
    while len(unvisited_ancestors) > 0:
        ancestor_depth = unvisited_ancestors.pop()
        if ancestor_depth[1] > depth:
            continue
        unvisited_ancestors.extend([(synset, ancestor_depth[1] + 1) for synset in ancestor_depth[0].get_related_synsets(relation)])
        ancestors.append(ancestor_depth[0])
    return list(set(ancestors))
closure
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, relation, depth=float('inf')):\n ancestors = []\n unvisited_ancestors = [(synset, 1) for synset in self.\n get_related_synsets(relation)]\n while len(unvisited_ancestors) > 0:\n ancestor_depth = unvisited_ancestors.pop()\n if ancestor_depth[1] > depth:\n continue\n unvisited_ancestors.extend([(synset, ancestor_depth[1] + 1) for\n synset in ancestor_depth[0].get_related_synsets(relation)])\n ancestors.append(ancestor_depth[0])\n return list(set(ancestors))\n", "role": "user" }, { "content": "closure", "role": "assistant" } ]
def x(self):
    visited = set()
    hypernyms_next_level = set(self.hypernyms())
    current_hypernyms = set(hypernyms_next_level)
    while len(hypernyms_next_level) > 0:
        current_hypernyms = set(hypernyms_next_level)
        hypernyms_next_level = set()
        for synset in current_hypernyms:
            if synset in visited:
                continue
            visited.add(synset)
            hypernyms_next_level |= set(synset.hypernyms())
    return list(current_hypernyms)
root_hypernyms
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n visited = set()\n hypernyms_next_level = set(self.hypernyms())\n current_hypernyms = set(hypernyms_next_level)\n while len(hypernyms_next_level) > 0:\n current_hypernyms = set(hypernyms_next_level)\n hypernyms_next_level = set()\n for synset in current_hypernyms:\n if synset in visited:\n continue\n visited.add(synset)\n hypernyms_next_level |= set(synset.hypernyms())\n return list(current_hypernyms)\n", "role": "user" }, { "content": "root_hypernyms", "role": "assistant" } ]
def x(self, synset):
    if self._raw_synset.pos != synset._raw_synset.pos:
        return None
    depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]
    distance = self._shortest_path_distance(synset)
    if distance >= 0:
        return -math.log((distance + 1) / (2.0 * depth))
    else:
        return None
lch_similarity
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, synset):\n if self._raw_synset.pos != synset._raw_synset.pos:\n return None\n depth = MAX_TAXONOMY_DEPTHS[self._raw_synset.pos]\n distance = self._shortest_path_distance(synset)\n if distance >= 0:\n return -math.log((distance + 1) / (2.0 * depth))\n else:\n return None\n", "role": "user" }, { "content": "lch_similarity", "role": "assistant" } ]
def x(self, target_synset):
    lchs = self.lowest_common_hypernyms(target_synset)
    lcs_depth = lchs[0]._min_depth() if lchs and len(lchs) else None
    self_depth = self._min_depth()
    other_depth = target_synset._min_depth()
    if lcs_depth is None or self_depth is None or other_depth is None:
        return None
    return 2.0 * lcs_depth / (self_depth + other_depth)
wup_similarity
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, target_synset):\n lchs = self.lowest_common_hypernyms(target_synset)\n lcs_depth = lchs[0]._min_depth() if lchs and len(lchs) else None\n self_depth = self._min_depth()\n other_depth = target_synset._min_depth()\n if lcs_depth is None or self_depth is None or other_depth is None:\n return None\n return 2.0 * lcs_depth / (self_depth + other_depth)\n", "role": "user" }, { "content": "wup_similarity", "role": "assistant" } ]
def x(self):
    return '\n'.join([variant.gloss for variant in self._raw_synset.variants if variant.gloss])
definition
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return '\\n'.join([variant.gloss for variant in self._raw_synset.\n variants if variant.gloss])\n", "role": "user" }, { "content": "definition", "role": "assistant" } ]
def x(self):
    examples = []
    for example in [variant.examples for variant in self._raw_synset.variants if len(variant.examples)]:
        examples.extend(example)
    return examples
examples
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n examples = []\n for example in [variant.examples for variant in self._raw_synset.\n variants if len(variant.examples)]:\n examples.extend(example)\n return examples\n", "role": "user" }, { "content": "examples", "role": "assistant" } ]
def x(self):
    return [lemma('%s.%s' % (self.name, variant.literal)) for variant in self._raw_synset.variants]
lemmas
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return [lemma('%s.%s' % (self.name, variant.literal)) for variant in\n self._raw_synset.variants]\n", "role": "user" }, { "content": "lemmas", "role": "assistant" } ]
def x(self, target_synset):
    self_hypernyms = self._recursive_hypernyms(set())
    other_hypernyms = target_synset._recursive_hypernyms(set())
    common_hypernyms = self_hypernyms.intersection(other_hypernyms)
    annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for hypernym in common_hypernyms]
    annot_common_hypernyms.sort(key=lambda annot_hypernym: annot_hypernym[1], reverse=True)
    max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms) > 0 else None
    if max_depth != None:
        return [annot_common_hypernym[0] for annot_common_hypernym in annot_common_hypernyms if annot_common_hypernym[1] == max_depth]
    else:
        return None
lowest_common_hypernyms
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, target_synset):\n self_hypernyms = self._recursive_hypernyms(set())\n other_hypernyms = target_synset._recursive_hypernyms(set())\n common_hypernyms = self_hypernyms.intersection(other_hypernyms)\n annot_common_hypernyms = [(hypernym, hypernym._min_depth()) for\n hypernym in common_hypernyms]\n annot_common_hypernyms.sort(key=lambda annot_hypernym: annot_hypernym[1\n ], reverse=True)\n max_depth = annot_common_hypernyms[0][1] if len(annot_common_hypernyms\n ) > 0 else None\n if max_depth != None:\n return [annot_common_hypernym[0] for annot_common_hypernym in\n annot_common_hypernyms if annot_common_hypernym[1] == max_depth]\n else:\n return None\n", "role": "user" }, { "content": "lowest_common_hypernyms", "role": "assistant" } ]
def x(self):
    return synset('%s.%s.%s.%s' % (self.synset_literal, self.synset_pos, self.synset_sense, self.literal))
synset
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self):\n return synset('%s.%s.%s.%s' % (self.synset_literal, self.synset_pos,\n self.synset_sense, self.literal))\n", "role": "user" }, { "content": "synset", "role": "assistant" } ]
def x(vabamorf_json):
    if not isinstance(vabamorf_json, dict):
        raise Exception(' Expected dict as an input argument! ')
    json_sentences = []
    if 'paragraphs' in vabamorf_json:
        for pr in vabamorf_json['paragraphs']:
            if 'sentences' in pr:
                for sent in pr['sentences']:
                    json_sentences.append(sent)
    elif 'sentences' in vabamorf_json:
        for sent in vabamorf_json['sentences']:
            json_sentences.append(sent)
    results = []
    for sentJson in json_sentences:
        results.append('<s>')
        for wordJson in sentJson['words']:
            if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':
                continue
            wordStr = wordJson['text']
            wordStr = _esc_double_quotes(wordStr)
            results.append(wordStr)
            for analysisJson in wordJson['analysis']:
                root = analysisJson['root']
                root = _esc_double_quotes(root)
                ending = analysisJson[ENDING]
                pos = analysisJson['partofspeech']
                clitic = analysisJson['clitic']
                form = analysisJson['form']
                if pos == 'Z':
                    results.append(''.join([' ', root, ' //_Z_ //']))
                else:
                    results.append(''.join([' ', root, '+', ending, clitic, ' //', '_', pos, '_ ', form, ' //']))
            if 'analysis' not in wordJson:
                results.append(' ' + '####')
        results.append('</s>')
    return results
convert_vm_json_to_mrf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(vabamorf_json):\n if not isinstance(vabamorf_json, dict):\n raise Exception(' Expected dict as an input argument! ')\n json_sentences = []\n if 'paragraphs' in vabamorf_json:\n for pr in vabamorf_json['paragraphs']:\n if 'sentences' in pr:\n for sent in pr['sentences']:\n json_sentences.append(sent)\n elif 'sentences' in vabamorf_json:\n for sent in vabamorf_json['sentences']:\n json_sentences.append(sent)\n results = []\n for sentJson in json_sentences:\n results.append('<s>')\n for wordJson in sentJson['words']:\n if wordJson['text'] == '<s>' or wordJson['text'] == '</s>':\n continue\n wordStr = wordJson['text']\n wordStr = _esc_double_quotes(wordStr)\n results.append(wordStr)\n for analysisJson in wordJson['analysis']:\n root = analysisJson['root']\n root = _esc_double_quotes(root)\n ending = analysisJson[ENDING]\n pos = analysisJson['partofspeech']\n clitic = analysisJson['clitic']\n form = analysisJson['form']\n if pos == 'Z':\n results.append(''.join([' ', root, ' //_Z_ //']))\n else:\n results.append(''.join([' ', root, '+', ending,\n clitic, ' //', '_', pos, '_ ', form, ' //']))\n if 'analysis' not in wordJson:\n results.append(' ' + '####')\n results.append('</s>')\n return results\n", "role": "user" }, { "content": "convert_vm_json_to_mrf", "role": "assistant" } ]
def x(text):
    from estnltk.text import Text
    if not isinstance(text, Text):
        raise Exception(" Expected estnltk's Text as an input argument! ")
    if not text.is_tagged(ANALYSIS):
        kwargs = text.get_kwargs()
        kwargs['vabamorf'] = True
        kwargs['guess'] = True
        kwargs['propername'] = False
        kwargs['disambiguate'] = False
        text.__kwargs = kwargs
        text = text.tag_analysis()
    results = []
    for sentence in text.divide(layer=WORDS, by=SENTENCES):
        results.append('<s>')
        for i in range(len(sentence)):
            wordJson = sentence[i]
            wordStr = wordJson[TEXT]
            wordStr = _esc_double_quotes(wordStr)
            results.append(wordStr)
            for analysisJson in wordJson[ANALYSIS]:
                root = analysisJson[ROOT]
                root = _esc_double_quotes(root)
                ending = analysisJson[ENDING]
                pos = analysisJson[POSTAG]
                clitic = analysisJson[CLITIC]
                form = analysisJson[FORM]
                if pos == 'Z':
                    results.append(''.join([' ', root, ' //_Z_ //']))
                else:
                    results.append(''.join([' ', root, '+', ending, clitic, ' //', '_', pos, '_ ', form, ' //']))
            if ANALYSIS not in wordJson:
                results.append(' ' + '####')
        results.append('</s>')
    return results
convert_Text_to_mrf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text):\n from estnltk.text import Text\n if not isinstance(text, Text):\n raise Exception(\" Expected estnltk's Text as an input argument! \")\n if not text.is_tagged(ANALYSIS):\n kwargs = text.get_kwargs()\n kwargs['vabamorf'] = True\n kwargs['guess'] = True\n kwargs['propername'] = False\n kwargs['disambiguate'] = False\n text.__kwargs = kwargs\n text = text.tag_analysis()\n results = []\n for sentence in text.divide(layer=WORDS, by=SENTENCES):\n results.append('<s>')\n for i in range(len(sentence)):\n wordJson = sentence[i]\n wordStr = wordJson[TEXT]\n wordStr = _esc_double_quotes(wordStr)\n results.append(wordStr)\n for analysisJson in wordJson[ANALYSIS]:\n root = analysisJson[ROOT]\n root = _esc_double_quotes(root)\n ending = analysisJson[ENDING]\n pos = analysisJson[POSTAG]\n clitic = analysisJson[CLITIC]\n form = analysisJson[FORM]\n if pos == 'Z':\n results.append(''.join([' ', root, ' //_Z_ //']))\n else:\n results.append(''.join([' ', root, '+', ending,\n clitic, ' //', '_', pos, '_ ', form, ' //']))\n if ANALYSIS not in wordJson:\n results.append(' ' + '####')\n results.append('</s>')\n return results\n", "role": "user" }, { "content": "convert_Text_to_mrf", "role": "assistant" } ]
def x(rulesFile):
    rules = {}
    in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')
    for line in in_f:
        line = line.rstrip()
        if line.startswith('¤'):
            continue
        parts = line.split('@')
        if len(parts) < 4:
            raise Exception(' Unexpected format of the line: ', line)
        if parts[1] not in rules:
            rules[parts[1]] = []
        rules[parts[1]].append(parts[3])
    in_f.close()
    return rules
load_fs_mrf_to_syntax_mrf_translation_rules
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(rulesFile):\n rules = {}\n in_f = codecs.open(rulesFile, mode='r', encoding='utf-8')\n for line in in_f:\n line = line.rstrip()\n if line.startswith('¤'):\n continue\n parts = line.split('@')\n if len(parts) < 4:\n raise Exception(' Unexpected format of the line: ', line)\n if parts[1] not in rules:\n rules[parts[1]] = []\n rules[parts[1]].append(parts[3])\n in_f.close()\n return rules\n", "role": "user" }, { "content": "load_fs_mrf_to_syntax_mrf_translation_rules", "role": "assistant" } ]
def x(line):
    for [pattern, replacement] in _punctConversions:
        lastline = line
        line = re.sub(pattern, replacement, line)
        if lastline != line:
            break
    return line
convert_punctuation
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(line):\n for [pattern, replacement] in _punctConversions:\n lastline = line\n line = re.sub(pattern, replacement, line)\n if lastline != line:\n break\n return line\n", "role": "user" }, { "content": "convert_punctuation", "role": "assistant" } ]
def x(mrf_lines, conversion_rules):
    i = 0
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if line.startswith(' '):
            if _punctOrAbbrev.search(line):
                mrf_lines[i] = _convert_punctuation(line)
                if '_Y_' not in line:
                    i += 1
                    continue
            withFormMatch = _morfWithForm.search(line)
            if withFormMatch:
                root = withFormMatch.group(1)
                pos = withFormMatch.group(2)
                formStr = withFormMatch.group(3)
                forms = formStr.split(',')
                all_new_lines = []
                for form in forms:
                    morphKey = pos + ' ' + form.strip()
                    if morphKey in conversion_rules:
                        newlines = [(' ' + root + ' //' + _esc_que_mark(r) + ' //') for r in conversion_rules[morphKey]]
                        all_new_lines.extend(newlines)
                if all_new_lines:
                    del mrf_lines[i]
                    for newline in all_new_lines:
                        mrf_lines.insert(i, newline)
                    i += len(newlines)
                    continue
            else:
                withoutFormMatch = _morfWithoutForm.search(line)
                if withoutFormMatch:
                    root = withoutFormMatch.group(1)
                    pos = withoutFormMatch.group(2)
                    morphKey = pos
                    all_new_lines = []
                    if morphKey in conversion_rules:
                        newlines = [(' ' + root + ' //' + _esc_que_mark(r) + ' //') for r in conversion_rules[morphKey]]
                        all_new_lines.extend(newlines)
                    if all_new_lines:
                        del mrf_lines[i]
                        for newline in all_new_lines:
                            mrf_lines.insert(i, newline)
                        i += len(newlines)
                        continue
        i += 1
    return mrf_lines
convert_mrf_to_syntax_mrf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines, conversion_rules):\n i = 0\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if line.startswith(' '):\n if _punctOrAbbrev.search(line):\n mrf_lines[i] = _convert_punctuation(line)\n if '_Y_' not in line:\n i += 1\n continue\n withFormMatch = _morfWithForm.search(line)\n if withFormMatch:\n root = withFormMatch.group(1)\n pos = withFormMatch.group(2)\n formStr = withFormMatch.group(3)\n forms = formStr.split(',')\n all_new_lines = []\n for form in forms:\n morphKey = pos + ' ' + form.strip()\n if morphKey in conversion_rules:\n newlines = [(' ' + root + ' //' + _esc_que_mark(\n r) + ' //') for r in conversion_rules[morphKey]]\n all_new_lines.extend(newlines)\n if all_new_lines:\n del mrf_lines[i]\n for newline in all_new_lines:\n mrf_lines.insert(i, newline)\n i += len(newlines)\n continue\n else:\n withoutFormMatch = _morfWithoutForm.search(line)\n if withoutFormMatch:\n root = withoutFormMatch.group(1)\n pos = withoutFormMatch.group(2)\n morphKey = pos\n all_new_lines = []\n if morphKey in conversion_rules:\n newlines = [(' ' + root + ' //' + _esc_que_mark(\n r) + ' //') for r in conversion_rules[morphKey]]\n all_new_lines.extend(newlines)\n if all_new_lines:\n del mrf_lines[i]\n for newline in all_new_lines:\n mrf_lines.insert(i, newline)\n i += len(newlines)\n continue\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "convert_mrf_to_syntax_mrf", "role": "assistant" } ]
def x(mrf_lines):
    i = 0
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if '_P_' in line:
            for [pattern, replacement] in _pronConversions:
                lastline = line
                line = re.sub(pattern, replacement, line)
                if lastline != line:
                    mrf_lines[i] = line
                    break
        i += 1
    return mrf_lines
convert_pronouns
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines):\n i = 0\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if '_P_' in line:\n for [pattern, replacement] in _pronConversions:\n lastline = line\n line = re.sub(pattern, replacement, line)\n if lastline != line:\n mrf_lines[i] = line\n break\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "convert_pronouns", "role": "assistant" } ]
def x(mrf_lines, allow_to_delete_all=True):
    i = 0
    seen_analyses = []
    analyses_count = 0
    to_delete = []
    Kpre_index = -1
    Kpost_index = -1
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if not line.startswith(' '):
            if Kpre_index != -1 and Kpost_index != -1:
                to_delete.append(Kpre_index)
            elif Kpost_index != -1:
                to_delete.append(Kpost_index)
            if to_delete:
                for k, j in enumerate(sorted(to_delete, reverse=True)):
                    if not allow_to_delete_all and analyses_count == len(to_delete) and k == len(to_delete) - 1:
                        continue
                    del mrf_lines[j]
                    i -= 1
            seen_analyses = []
            analyses_count = 0
            to_delete = []
            Kpre_index = -1
            Kpost_index = -1
        elif line.startswith(' '):
            analyses_count += 1
            if line in seen_analyses:
                to_delete.append(i)
            else:
                if re.search('/_K_\\s+pre\\s+//', line):
                    Kpre_index = i
                elif re.search('/_K_\\s+post\\s+//', line):
                    Kpost_index = i
                seen_analyses.append(line)
        i += 1
    return mrf_lines
remove_duplicate_analyses
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines, allow_to_delete_all=True):\n i = 0\n seen_analyses = []\n analyses_count = 0\n to_delete = []\n Kpre_index = -1\n Kpost_index = -1\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if not line.startswith(' '):\n if Kpre_index != -1 and Kpost_index != -1:\n to_delete.append(Kpre_index)\n elif Kpost_index != -1:\n to_delete.append(Kpost_index)\n if to_delete:\n for k, j in enumerate(sorted(to_delete, reverse=True)):\n if not allow_to_delete_all and analyses_count == len(\n to_delete) and k == len(to_delete) - 1:\n continue\n del mrf_lines[j]\n i -= 1\n seen_analyses = []\n analyses_count = 0\n to_delete = []\n Kpre_index = -1\n Kpost_index = -1\n elif line.startswith(' '):\n analyses_count += 1\n if line in seen_analyses:\n to_delete.append(i)\n else:\n if re.search('/_K_\\\\s+pre\\\\s+//', line):\n Kpre_index = i\n elif re.search('/_K_\\\\s+post\\\\s+//', line):\n Kpost_index = i\n seen_analyses.append(line)\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "remove_duplicate_analyses", "role": "assistant" } ]
def x(mrf_lines):
    i = 0
    cap = False
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if not line.startswith(' ') and len(line) > 0:
            cap = line[0].isupper()
        elif line.startswith(' '):
            if cap:
                line = re.sub('(//.+\\S)\\s+//', '\\1 #cap //', line)
            if _morfFinV.search(line) and not _morfNotFinV.search(line):
                line = re.sub('(//.+\\S)\\s+//', '\\1 #FinV //', line)
            for [pattern, replacement] in _mrfHashTagConversions:
                line = re.sub(pattern, replacement, line)
            mrf_lines[i] = line
        i += 1
    return mrf_lines
add_hashtag_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines):\n i = 0\n cap = False\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if not line.startswith(' ') and len(line) > 0:\n cap = line[0].isupper()\n elif line.startswith(' '):\n if cap:\n line = re.sub('(//.+\\\\S)\\\\s+//', '\\\\1 #cap //', line)\n if _morfFinV.search(line) and not _morfNotFinV.search(line):\n line = re.sub('(//.+\\\\S)\\\\s+//', '\\\\1 #FinV //', line)\n for [pattern, replacement] in _mrfHashTagConversions:\n line = re.sub(pattern, replacement, line)\n mrf_lines[i] = line\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "add_hashtag_info", "role": "assistant" } ]
def x(subcat_lex_file):
    rules = {}
    nonSpacePattern = re.compile('^\\S+$')
    posTagPattern = re.compile('_._')
    in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')
    lemma = ''
    subcatRules = ''
    for line in in_f:
        line = line.rstrip()
        if nonSpacePattern.match(line) and not posTagPattern.search(line):
            lemma = line
        elif posTagPattern.search(line):
            subcatRules = line
        if len(lemma) > 0 and len(subcatRules) > 0:
            if lemma not in rules:
                rules[lemma] = []
            parts = subcatRules.split('&')
            for part in parts:
                part = part.strip()
                rules[lemma].append(part)
            lemma = ''
            subcatRules = ''
    in_f.close()
    return rules
load_subcat_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(subcat_lex_file):\n rules = {}\n nonSpacePattern = re.compile('^\\\\S+$')\n posTagPattern = re.compile('_._')\n in_f = codecs.open(subcat_lex_file, mode='r', encoding='utf-8')\n lemma = ''\n subcatRules = ''\n for line in in_f:\n line = line.rstrip()\n if nonSpacePattern.match(line) and not posTagPattern.search(line):\n lemma = line\n elif posTagPattern.search(line):\n subcatRules = line\n if len(lemma) > 0 and len(subcatRules) > 0:\n if lemma not in rules:\n rules[lemma] = []\n parts = subcatRules.split('&')\n for part in parts:\n part = part.strip()\n rules[lemma].append(part)\n lemma = ''\n subcatRules = ''\n in_f.close()\n return rules\n", "role": "user" }, { "content": "load_subcat_info", "role": "assistant" } ]
def x(mrf_lines, subcat_rules): i = 0 while i < len(mrf_lines): line = mrf_lines[i] if line.startswith(' '): lemma_match = analysisLemmaPat.match(line) if lemma_match: lemma = lemma_match.group(1) if lemma in subcat_rules: analysis_match = analysisPat.search(line) if not analysis_match: raise Exception( ' Could not find analysis from the line:', line) analysis = analysis_match.group(1) for rule in subcat_rules[lemma]: condition, addition = rule.split('>') condition = condition.strip() conditions = condition.split() satisfied1 = [_check_condition(c, analysis) for c in conditions] if all(satisfied1): additions = addition.split('|') j = i for a in additions: line_copy = line if i == j else line[:] items_to_add = a.split() for item in items_to_add: if not _check_condition(item, analysis): line_copy = re.sub('(//.+\\S)\\s+//', '\\1 ' + item + ' //', line_copy) if j == i: mrf_lines[i] = line_copy else: mrf_lines.insert(i, line_copy) j += 1 i = j - 1 break i += 1 return mrf_lines
tag_subcat_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines, subcat_rules):\n i = 0\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if line.startswith(' '):\n lemma_match = analysisLemmaPat.match(line)\n if lemma_match:\n lemma = lemma_match.group(1)\n if lemma in subcat_rules:\n analysis_match = analysisPat.search(line)\n if not analysis_match:\n raise Exception(\n ' Could not find analysis from the line:', line)\n analysis = analysis_match.group(1)\n for rule in subcat_rules[lemma]:\n condition, addition = rule.split('>')\n condition = condition.strip()\n conditions = condition.split()\n satisfied1 = [_check_condition(c, analysis) for c in\n conditions]\n if all(satisfied1):\n additions = addition.split('|')\n j = i\n for a in additions:\n line_copy = line if i == j else line[:]\n items_to_add = a.split()\n for item in items_to_add:\n if not _check_condition(item, analysis):\n line_copy = re.sub('(//.+\\\\S)\\\\s+//', \n '\\\\1 ' + item + ' //', line_copy)\n if j == i:\n mrf_lines[i] = line_copy\n else:\n mrf_lines.insert(i, line_copy)\n j += 1\n i = j - 1\n break\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "tag_subcat_info", "role": "assistant" } ]
def x(mrf_lines):
    i = 0
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if not line.startswith(' ') and len(line) > 0:
            line = re.sub('^(\\S.*)([\n\r]*)$', '"<\\1>"\\2', line)
            line = re.sub('<<(s|/s)>>', '<\\1>', line)
            mrf_lines[i] = line
        elif line.startswith(' '):
            line = re.sub('#cap #cap', 'cap', line)
            line = re.sub('#cap', 'cap', line)
            line = re.sub('\\*\\*CLB', 'CLB', line)
            line = re.sub('#Correct!', '<Correct!>', line)
            line = re.sub('####', '', line)
            line = re.sub('#(\\S+)', '<\\1>', line)
            line = re.sub('\\$([,.;!?:<]+)', '\\1', line)
            line = re.sub('_Y_\\s+\\? _Z_', '_Z_', line)
            line = re.sub('_Y_\\s+\\?\\s+_Z_', '_Z_', line)
            line = re.sub('_Y_\\s+_Z_', '_Z_', line)
            line = re.sub('_Z_\\s+\\?', '_Z_', line)
            line = re.sub('^\\s+(\\S+)(.*)\\+(\\S+)\\s*//_(\\S)_ (.*)//(.*)$',
                ' "\\1\\2" L\\3 \\4 \\5 \\6', line)
            line = re.sub('^\\s+(\\S+)(.*)\\s+//_(\\S)_ (.*)//(.*)$',
                ' "\\1\\2" \\3 \\4 \\5', line)
            mrf_lines[i] = line
        i += 1
    return mrf_lines
convert_to_cg3_input
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(mrf_lines):\n i = 0\n while i < len(mrf_lines):\n line = mrf_lines[i]\n if not line.startswith(' ') and len(line) > 0:\n line = re.sub('^(\\\\S.*)([\\n\\r]*)$', '\"<\\\\1>\"\\\\2', line)\n line = re.sub('<<(s|/s)>>', '<\\\\1>', line)\n mrf_lines[i] = line\n elif line.startswith(' '):\n line = re.sub('#cap #cap', 'cap', line)\n line = re.sub('#cap', 'cap', line)\n line = re.sub('\\\\*\\\\*CLB', 'CLB', line)\n line = re.sub('#Correct!', '<Correct!>', line)\n line = re.sub('####', '', line)\n line = re.sub('#(\\\\S+)', '<\\\\1>', line)\n line = re.sub('\\\\$([,.;!?:<]+)', '\\\\1', line)\n line = re.sub('_Y_\\\\s+\\\\? _Z_', '_Z_', line)\n line = re.sub('_Y_\\\\s+\\\\?\\\\s+_Z_', '_Z_', line)\n line = re.sub('_Y_\\\\s+_Z_', '_Z_', line)\n line = re.sub('_Z_\\\\s+\\\\?', '_Z_', line)\n line = re.sub('^\\\\s+(\\\\S+)(.*)\\\\+(\\\\S+)\\\\s*//_(\\\\S)_ (.*)//(.*)$',\n ' \"\\\\1\\\\2\" L\\\\3 \\\\4 \\\\5 \\\\6', line)\n line = re.sub('^\\\\s+(\\\\S+)(.*)\\\\s+//_(\\\\S)_ (.*)//(.*)$',\n ' \"\\\\1\\\\2\" \\\\3 \\\\4 \\\\5', line)\n mrf_lines[i] = line\n i += 1\n return mrf_lines\n", "role": "user" }, { "content": "convert_to_cg3_input", "role": "assistant" } ]
def x(self, json_dict, **kwargs): mrf_lines = convert_vm_json_to_mrf(json_dict) return self.process_mrf_lines(mrf_lines, **kwargs)
process_vm_json
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, json_dict, **kwargs):\n mrf_lines = convert_vm_json_to_mrf(json_dict)\n return self.process_mrf_lines(mrf_lines, **kwargs)\n", "role": "user" }, { "content": "process_vm_json", "role": "assistant" } ]
def x(self, text, **kwargs): mrf_lines = convert_Text_to_mrf(text) return self.process_mrf_lines(mrf_lines, **kwargs)
process_Text
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text, **kwargs):\n mrf_lines = convert_Text_to_mrf(text)\n return self.process_mrf_lines(mrf_lines, **kwargs)\n", "role": "user" }, { "content": "process_Text", "role": "assistant" } ]
def x(self, mrf_lines, **kwargs): converted1 = convert_mrf_to_syntax_mrf(mrf_lines, self.fs_to_synt_rules) converted2 = convert_pronouns(converted1) converted3 = remove_duplicate_analyses(converted2, allow_to_delete_all= self.allow_to_remove_all) converted4 = add_hashtag_info(converted3) converted5 = tag_subcat_info(converted4, self.subcat_rules) converted6 = remove_duplicate_analyses(converted5, allow_to_delete_all= self.allow_to_remove_all) converted7 = convert_to_cg3_input(converted6) return converted7
process_mrf_lines
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, mrf_lines, **kwargs):\n converted1 = convert_mrf_to_syntax_mrf(mrf_lines, self.fs_to_synt_rules)\n converted2 = convert_pronouns(converted1)\n converted3 = remove_duplicate_analyses(converted2, allow_to_delete_all=\n self.allow_to_remove_all)\n converted4 = add_hashtag_info(converted3)\n converted5 = tag_subcat_info(converted4, self.subcat_rules)\n converted6 = remove_duplicate_analyses(converted5, allow_to_delete_all=\n self.allow_to_remove_all)\n converted7 = convert_to_cg3_input(converted6)\n return converted7\n", "role": "user" }, { "content": "process_mrf_lines", "role": "assistant" } ]
def x(src_dir='src', ending='.cpp'): return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
get_sources
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(src_dir='src', ending='.cpp'):\n return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if\n fnm.endswith(ending)]\n", "role": "user" }, { "content": "get_sources", "role": "assistant" } ]
def x(color): color = color.replace('-', '').lower() if color == 'white': return '\x1b[97m' elif color in ['cyan', 'aqua']: return '\x1b[96m' elif color in ['purple', 'magneta', 'fuchsia']: return '\x1b[95m' elif color == 'blue': return '\x1b[94m' elif color in ['yellow', 'gold']: return '\x1b[93m' elif color in ['green', 'lime']: return '\x1b[92m' elif color == 'red': return '\x1b[91m' elif color in ['grey', 'gray', 'silver']: return '\x1b[37m' elif color in ['darkcyan', 'teal']: return '\x1b[36m' elif color in ['darkpurple', 'darkmagneta']: return '\x1b[35m' elif color in ['darkblue', 'navy']: return '\x1b[34m' elif color in ['darkyellow', 'olive']: return '\x1b[33m' elif color == 'darkgreen': return '\x1b[32m' elif color in ['darkred', 'maroon']: return '\x1b[31m' return None
get_ANSI_colored_font
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(color):\n color = color.replace('-', '').lower()\n if color == 'white':\n return '\\x1b[97m'\n elif color in ['cyan', 'aqua']:\n return '\\x1b[96m'\n elif color in ['purple', 'magneta', 'fuchsia']:\n return '\\x1b[95m'\n elif color == 'blue':\n return '\\x1b[94m'\n elif color in ['yellow', 'gold']:\n return '\\x1b[93m'\n elif color in ['green', 'lime']:\n return '\\x1b[92m'\n elif color == 'red':\n return '\\x1b[91m'\n elif color in ['grey', 'gray', 'silver']:\n return '\\x1b[37m'\n elif color in ['darkcyan', 'teal']:\n return '\\x1b[36m'\n elif color in ['darkpurple', 'darkmagneta']:\n return '\\x1b[35m'\n elif color in ['darkblue', 'navy']:\n return '\\x1b[34m'\n elif color in ['darkyellow', 'olive']:\n return '\\x1b[33m'\n elif color == 'darkgreen':\n return '\\x1b[32m'\n elif color in ['darkred', 'maroon']:\n return '\\x1b[31m'\n return None\n", "role": "user" }, { "content": "get_ANSI_colored_font", "role": "assistant" } ]
def x(text, layer, markup_settings, spansStartingFrom=None): if not markup_settings or not isinstance(markup_settings, dict): raise Exception( 'Error: markup_settings should be a dict containing markup specification;' ) startTags = '' endTags = '' graphicFormatting = False bracketing = False if 'u' in markup_settings and markup_settings['u' ] or 'underline' in markup_settings and markup_settings['underline']: startTags += '\x1b[4m' endTags += '\x1b[0m' graphicFormatting = True colorName = markup_settings['c'] if 'c' in markup_settings else None colorName = markup_settings['color' ] if 'color' in markup_settings else colorName if colorName: color = _get_ANSI_colored_font(colorName) if color: startTags += color endTags += '\x1b[0m' graphicFormatting = True else: raise Exception('Unknown color:', colorName) if 'b' in markup_settings and markup_settings['b' ] or 'bracket' in markup_settings and markup_settings['bracket']: startTags += '[' endTags = ']' + endTags bracketing = True if graphicFormatting and bracketing: startTags2 = startTags.rstrip('[') endTags = startTags2 + endTags extractor = lambda t: texts_multi(t, layer) if hasattr(layer, '__call__'): extractor = layer elif text.is_simple(layer): extractor = lambda t: texts_simple(t, layer) if not spansStartingFrom: spansStartingFrom = {} for elem in extractor(text): if elem[START] not in spansStartingFrom: spansStartingFrom[elem[START]] = [] span1 = [elem[START], elem[END], layer, startTags, endTags, graphicFormatting, bracketing] if not spansStartingFrom[elem[START]]: spansStartingFrom[elem[START]].append(span1) else: inserted = False for i in range(len(spansStartingFrom[elem[START]])): span2 = spansStartingFrom[elem[START]][i] if span1[1] > span2[1]: spansStartingFrom[elem[START]].insert(i, span1) inserted = True break elif span1[1] == span2[1] and span1[2] < span2[2]: spansStartingFrom[elem[START]].insert(i, span1) inserted = True break if not inserted: spansStartingFrom[elem[START]].append(span1) return spansStartingFrom
construct_start_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, layer, markup_settings, spansStartingFrom=None):\n if not markup_settings or not isinstance(markup_settings, dict):\n raise Exception(\n 'Error: markup_settings should be a dict containing markup specification;'\n )\n startTags = ''\n endTags = ''\n graphicFormatting = False\n bracketing = False\n if 'u' in markup_settings and markup_settings['u'\n ] or 'underline' in markup_settings and markup_settings['underline']:\n startTags += '\\x1b[4m'\n endTags += '\\x1b[0m'\n graphicFormatting = True\n colorName = markup_settings['c'] if 'c' in markup_settings else None\n colorName = markup_settings['color'\n ] if 'color' in markup_settings else colorName\n if colorName:\n color = _get_ANSI_colored_font(colorName)\n if color:\n startTags += color\n endTags += '\\x1b[0m'\n graphicFormatting = True\n else:\n raise Exception('Unknown color:', colorName)\n if 'b' in markup_settings and markup_settings['b'\n ] or 'bracket' in markup_settings and markup_settings['bracket']:\n startTags += '['\n endTags = ']' + endTags\n bracketing = True\n if graphicFormatting and bracketing:\n startTags2 = startTags.rstrip('[')\n endTags = startTags2 + endTags\n extractor = lambda t: texts_multi(t, layer)\n if hasattr(layer, '__call__'):\n extractor = layer\n elif text.is_simple(layer):\n extractor = lambda t: texts_simple(t, layer)\n if not spansStartingFrom:\n spansStartingFrom = {}\n for elem in extractor(text):\n if elem[START] not in spansStartingFrom:\n spansStartingFrom[elem[START]] = []\n span1 = [elem[START], elem[END], layer, startTags, endTags,\n graphicFormatting, bracketing]\n if not spansStartingFrom[elem[START]]:\n spansStartingFrom[elem[START]].append(span1)\n else:\n inserted = False\n for i in range(len(spansStartingFrom[elem[START]])):\n span2 = spansStartingFrom[elem[START]][i]\n if span1[1] > span2[1]:\n spansStartingFrom[elem[START]].insert(i, span1)\n inserted = True\n break\n elif span1[1] == span2[1] and span1[2] < span2[2]:\n spansStartingFrom[elem[START]].insert(i, span1)\n inserted = True\n break\n if not inserted:\n spansStartingFrom[elem[START]].append(span1)\n return spansStartingFrom\n", "role": "user" }, { "content": "construct_start_index", "role": "assistant" } ]
def x(spansStartingFrom): endIndex = {} for i in spansStartingFrom: for span1 in spansStartingFrom[i]: endSpan1 = [span1[4], span1[0], span1[2]] endLoc1 = span1[1] if endLoc1 not in endIndex: endIndex[endLoc1] = [] endIndex[endLoc1].append(endSpan1) else: inserted = False for i in range(len(endIndex[endLoc1])): endSpan2 = endIndex[endLoc1][i] if endSpan2[1] < endSpan1[1]: endIndex[endLoc1].insert(i, endSpan1) inserted = True break elif endSpan2[1] == endSpan1[1] and endSpan2[2] < endSpan1[ 2]: endIndex[endLoc1].insert(i, endSpan1) inserted = True break if not inserted: endIndex[endLoc1].append(endSpan1) return endIndex
construct_end_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(spansStartingFrom):\n endIndex = {}\n for i in spansStartingFrom:\n for span1 in spansStartingFrom[i]:\n endSpan1 = [span1[4], span1[0], span1[2]]\n endLoc1 = span1[1]\n if endLoc1 not in endIndex:\n endIndex[endLoc1] = []\n endIndex[endLoc1].append(endSpan1)\n else:\n inserted = False\n for i in range(len(endIndex[endLoc1])):\n endSpan2 = endIndex[endLoc1][i]\n if endSpan2[1] < endSpan1[1]:\n endIndex[endLoc1].insert(i, endSpan1)\n inserted = True\n break\n elif endSpan2[1] == endSpan1[1] and endSpan2[2] < endSpan1[\n 2]:\n endIndex[endLoc1].insert(i, endSpan1)\n inserted = True\n break\n if not inserted:\n endIndex[endLoc1].append(endSpan1)\n return endIndex\n", "role": "user" }, { "content": "construct_end_index", "role": "assistant" } ]
def x(spansStartingFrom): for startIndex in sorted(spansStartingFrom.keys()): for span1 in spansStartingFrom[startIndex]: if not span1[5]: continue span1Start = span1[0] span1End = span1[1] for i in range(span1Start, span1End): if i in spansStartingFrom: for span2 in spansStartingFrom[i]: span2Start = span2[0] span2End = span2[1] if span2 != span1 and span2[5]: if span2End <= span1End: if not span1[6]: span2[4] += span1[3] else: wb = span1[3].rstrip('[') span2[4] += wb
fix_overlapping_graphics
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(spansStartingFrom):\n for startIndex in sorted(spansStartingFrom.keys()):\n for span1 in spansStartingFrom[startIndex]:\n if not span1[5]:\n continue\n span1Start = span1[0]\n span1End = span1[1]\n for i in range(span1Start, span1End):\n if i in spansStartingFrom:\n for span2 in spansStartingFrom[i]:\n span2Start = span2[0]\n span2End = span2[1]\n if span2 != span1 and span2[5]:\n if span2End <= span1End:\n if not span1[6]:\n span2[4] += span1[3]\n else:\n wb = span1[3].rstrip('[')\n span2[4] += wb\n", "role": "user" }, { "content": "fix_overlapping_graphics", "role": "assistant" } ]
def x(text, layers, markup_settings=None): if markup_settings and len(layers) != len(markup_settings): raise Exception( ' Input arguments layers and markup_settings should be equal size lists.' ) elif not markup_settings and len(layers) <= len(default_markup_settings): markup_settings = default_markup_settings[0:len(layers)] elif not markup_settings: raise Exception(' Input argument markup_settings not defined.') spansStartingFrom = {} for i in range(len(layers)): layer = layers[i] settings = markup_settings[i] spansStartingFrom = _construct_start_index(text, layer, settings, spansStartingFrom) _fix_overlapping_graphics(spansStartingFrom) endTags = _construct_end_index(spansStartingFrom) return_str = [] for i in range(len(text[TEXT])): c = text[TEXT][i] emptyTags = [] if i in endTags: for tag in endTags[i]: if tag[1] != i: return_str.append(tag[0]) else: emptyTags.append(tag) if i in spansStartingFrom: for span in spansStartingFrom[i]: return_str.append(span[3]) if span[0] == span[1]: for emptyEndTag in emptyTags: if span[2] == emptyEndTag[2]: return_str.append(emptyEndTag[0]) return_str.append(c) if len(text[TEXT]) in spansStartingFrom: for span in spansStartingFrom[len(text[TEXT])]: return_str.append(span[3]) if len(text[TEXT]) in endTags: for tag in endTags[len(text[TEXT])]: return_str.append(tag[0]) if return_str and '\x1b' in return_str[-1] and not return_str[-1].endswith( '\x1b[0m'): return_str.append('\x1b[0m') return ''.join(return_str)
preformat
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, layers, markup_settings=None):\n if markup_settings and len(layers) != len(markup_settings):\n raise Exception(\n ' Input arguments layers and markup_settings should be equal size lists.'\n )\n elif not markup_settings and len(layers) <= len(default_markup_settings):\n markup_settings = default_markup_settings[0:len(layers)]\n elif not markup_settings:\n raise Exception(' Input argument markup_settings not defined.')\n spansStartingFrom = {}\n for i in range(len(layers)):\n layer = layers[i]\n settings = markup_settings[i]\n spansStartingFrom = _construct_start_index(text, layer, settings,\n spansStartingFrom)\n _fix_overlapping_graphics(spansStartingFrom)\n endTags = _construct_end_index(spansStartingFrom)\n return_str = []\n for i in range(len(text[TEXT])):\n c = text[TEXT][i]\n emptyTags = []\n if i in endTags:\n for tag in endTags[i]:\n if tag[1] != i:\n return_str.append(tag[0])\n else:\n emptyTags.append(tag)\n if i in spansStartingFrom:\n for span in spansStartingFrom[i]:\n return_str.append(span[3])\n if span[0] == span[1]:\n for emptyEndTag in emptyTags:\n if span[2] == emptyEndTag[2]:\n return_str.append(emptyEndTag[0])\n return_str.append(c)\n if len(text[TEXT]) in spansStartingFrom:\n for span in spansStartingFrom[len(text[TEXT])]:\n return_str.append(span[3])\n if len(text[TEXT]) in endTags:\n for tag in endTags[len(text[TEXT])]:\n return_str.append(tag[0])\n if return_str and '\\x1b' in return_str[-1] and not return_str[-1].endswith(\n '\\x1b[0m'):\n return_str.append('\\x1b[0m')\n return ''.join(return_str)\n", "role": "user" }, { "content": "preformat", "role": "assistant" } ]
def x(text, layers, markup_settings=None): if markup_settings and len(layers) != len(markup_settings): raise Exception( ' Input arguments layers and markup_settings should be equal size lists.' ) elif not markup_settings and len(layers) <= len(default_markup_settings): markup_settings = default_markup_settings[0:len(layers)] elif not markup_settings: raise Exception(' Input argument markup_settings not defined.') print(_preformat(text, layers, markup_settings=markup_settings))
tprint
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, layers, markup_settings=None):\n if markup_settings and len(layers) != len(markup_settings):\n raise Exception(\n ' Input arguments layers and markup_settings should be equal size lists.'\n )\n elif not markup_settings and len(layers) <= len(default_markup_settings):\n markup_settings = default_markup_settings[0:len(layers)]\n elif not markup_settings:\n raise Exception(' Input argument markup_settings not defined.')\n print(_preformat(text, layers, markup_settings=markup_settings))\n", "role": "user" }, { "content": "tprint", "role": "assistant" } ]
def x(aes_name, css_value): css_prop = AES_CSS_MAP[aes_name] if isinstance(css_value, list): return get_mark_css_for_rules(aes_name, css_prop, css_value) else: return get_mark_simple_css(aes_name, css_prop, css_value)
get_mark_css
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(aes_name, css_value):\n css_prop = AES_CSS_MAP[aes_name]\n if isinstance(css_value, list):\n return get_mark_css_for_rules(aes_name, css_prop, css_value)\n else:\n return get_mark_simple_css(aes_name, css_prop, css_value)\n", "role": "user" }, { "content": "get_mark_css", "role": "assistant" } ]
def x(self, inputFile): self.nomAdvWordTemplates = dict() self.verbRules = dict() self.verbToVinf = dict() in_f = codecs.open(inputFile, mode='r', encoding='utf-8') for line in in_f: line = line.rstrip() if len(line) > 0 and not re.match('^#.+$', line): items = line.split('\t') if len(items) == 3: verb = items[0] nounAdv = items[1] vinf = items[2] if nounAdv not in self.nomAdvWordTemplates: root, pos, form = nounAdv.split(';') if not root.startswith('^') and not root.endswith('$'): root = '^' + root + '$' constraints = {ROOT: root, POSTAG: pos} if form: constraints[FORM] = form self.nomAdvWordTemplates[nounAdv] = WordTemplate( constraints) if verb not in self.verbRules: self.verbRules[verb] = [] if verb not in self.verbToVinf: self.verbToVinf[verb] = [] self.verbRules[verb].append((nounAdv, 'V_' + vinf)) if 'V_' + vinf not in self.verbToVinf[verb]: self.verbToVinf[verb].append('V_' + vinf) else: raise Exception( ' Unexpected number of items in the input lexicon line: ' + line) in_f.close()
loadSubcatRelations
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, inputFile):\n self.nomAdvWordTemplates = dict()\n self.verbRules = dict()\n self.verbToVinf = dict()\n in_f = codecs.open(inputFile, mode='r', encoding='utf-8')\n for line in in_f:\n line = line.rstrip()\n if len(line) > 0 and not re.match('^#.+$', line):\n items = line.split('\\t')\n if len(items) == 3:\n verb = items[0]\n nounAdv = items[1]\n vinf = items[2]\n if nounAdv not in self.nomAdvWordTemplates:\n root, pos, form = nounAdv.split(';')\n if not root.startswith('^') and not root.endswith('$'):\n root = '^' + root + '$'\n constraints = {ROOT: root, POSTAG: pos}\n if form:\n constraints[FORM] = form\n self.nomAdvWordTemplates[nounAdv] = WordTemplate(\n constraints)\n if verb not in self.verbRules:\n self.verbRules[verb] = []\n if verb not in self.verbToVinf:\n self.verbToVinf[verb] = []\n self.verbRules[verb].append((nounAdv, 'V_' + vinf))\n if 'V_' + vinf not in self.verbToVinf[verb]:\n self.verbToVinf[verb].append('V_' + vinf)\n else:\n raise Exception(\n ' Unexpected number of items in the input lexicon line: ' +\n line)\n in_f.close()\n", "role": "user" }, { "content": "loadSubcatRelations", "role": "assistant" } ]
def x(self, token, verb, vinf): if verb in self.verbRules: for nounAdv, vinf1 in self.verbRules[verb]: if vinf == vinf1 and self.nomAdvWordTemplates[nounAdv].matches( token): return _getMatchingAnalysisIDs(token, self. nomAdvWordTemplates[nounAdv]) return []
tokenMatchesNomAdvVinf
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, token, verb, vinf):\n if verb in self.verbRules:\n for nounAdv, vinf1 in self.verbRules[verb]:\n if vinf == vinf1 and self.nomAdvWordTemplates[nounAdv].matches(\n token):\n return _getMatchingAnalysisIDs(token, self.\n nomAdvWordTemplates[nounAdv])\n return []\n", "role": "user" }, { "content": "tokenMatchesNomAdvVinf", "role": "assistant" } ]
def x(self, sentence, foundChains): clauses = getClausesByClauseIDs(sentence) allDetectedVerbChains = [] for clauseID in clauses: clause = clauses[clauseID] self.extendChainsInClause(clause, clauseID, foundChains)
extendChainsInSentence
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sentence, foundChains):\n clauses = getClausesByClauseIDs(sentence)\n allDetectedVerbChains = []\n for clauseID in clauses:\n clause = clauses[clauseID]\n self.extendChainsInClause(clause, clauseID, foundChains)\n", "role": "user" }, { "content": "extendChainsInSentence", "role": "assistant" } ]
def x(self, headVerbRoot, headVerbWID, nomAdvWID, widToToken): minWID = min(widToToken.keys()) nomAdvToken = widToToken[nomAdvWID] isNom = self.wtNom.matches(nomAdvToken) if headVerbWID + 1 == nomAdvWID: return True elif minWID == nomAdvWID: return True elif isNom and nomAdvWID - 1 in widToToken: prevToken = widToToken[nomAdvWID - 1] if self.wtNotSyntAttrib.matches(prevToken): return True elif self.wtNom.matches(prevToken): if self.wtNomSemCase.matches(prevToken): if not self.wtNomSemCase.matches(nomAdvToken): return True else: analyses1 = self.wtNomSemCase.matchingAnalyses(prevToken) analyses2 = self.wtNomSemCase.matchingAnalyses(nomAdvToken) forms1 = set([a[FORM] for a in analyses1]) forms2 = set([a[FORM] for a in analyses2]) if len(forms1.intersection(forms2)) == 0: return True elif not isNom and headVerbRoot.startswith('ole '): return True return False
isLikelyNotPhrase
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, headVerbRoot, headVerbWID, nomAdvWID, widToToken):\n minWID = min(widToToken.keys())\n nomAdvToken = widToToken[nomAdvWID]\n isNom = self.wtNom.matches(nomAdvToken)\n if headVerbWID + 1 == nomAdvWID:\n return True\n elif minWID == nomAdvWID:\n return True\n elif isNom and nomAdvWID - 1 in widToToken:\n prevToken = widToToken[nomAdvWID - 1]\n if self.wtNotSyntAttrib.matches(prevToken):\n return True\n elif self.wtNom.matches(prevToken):\n if self.wtNomSemCase.matches(prevToken):\n if not self.wtNomSemCase.matches(nomAdvToken):\n return True\n else:\n analyses1 = self.wtNomSemCase.matchingAnalyses(prevToken)\n analyses2 = self.wtNomSemCase.matchingAnalyses(nomAdvToken)\n forms1 = set([a[FORM] for a in analyses1])\n forms2 = set([a[FORM] for a in analyses2])\n if len(forms1.intersection(forms2)) == 0:\n return True\n elif not isNom and headVerbRoot.startswith('ole '):\n return True\n return False\n", "role": "user" }, { "content": "isLikelyNotPhrase", "role": "assistant" } ]
def x(self, headVerbRoot, headVerbWID, suitableNomAdvExpansions, expansionVerbs, widToToken): if len(suitableNomAdvExpansions) == 1 and expansionVerbs: suitableExpansionVerbs = [expVerb for expVerb in expansionVerbs if expVerb[2] == suitableNomAdvExpansions[0][2]] if len(suitableExpansionVerbs) == 1: nomAdvWID = suitableNomAdvExpansions[0][0] if self._isLikelyNotPhrase(headVerbRoot, headVerbWID, nomAdvWID, widToToken): return suitableExpansionVerbs[0] return None
canBeExpanded
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, headVerbRoot, headVerbWID, suitableNomAdvExpansions,\n expansionVerbs, widToToken):\n if len(suitableNomAdvExpansions) == 1 and expansionVerbs:\n suitableExpansionVerbs = [expVerb for expVerb in expansionVerbs if \n expVerb[2] == suitableNomAdvExpansions[0][2]]\n if len(suitableExpansionVerbs) == 1:\n nomAdvWID = suitableNomAdvExpansions[0][0]\n if self._isLikelyNotPhrase(headVerbRoot, headVerbWID, nomAdvWID,\n widToToken):\n return suitableExpansionVerbs[0]\n return None\n", "role": "user" }, { "content": "canBeExpanded", "role": "assistant" } ]
def x(text): textStart = 0 entries = re.split('\n=', text[textStart:]) stack = [[]] intro = {} sectionTitleRegEx = re.compile('={1,}.+={2,}') section = {} section['text'] = entries[0] counts = [] counts.append(3) sections = [] sections.append(section) for i in entries[1:]: section = {} title = re.match(sectionTitleRegEx, i) if title: titleEnd = title.end() title = title.group() text = i[titleEnd:] level = title.count('=') section['title'] = title.strip('= ') section['text'] = text sections.append(section.copy()) counts.append(level) for section in sections: text = section['text'] if 'wikitable' in text or '</table>' in text.lower(): section['text'], section['tables'] = tableCollector(text) section = relatedArticles(section) if '<ref' in text: section = reffinder(section) if imageRegEx.search(text): section = imageParser(section) section['text'] = section['text'].strip() if ExtLinkBracketedRegex.search(text): section = addExternalLinks(section) if '[[' in text: section = addIntLinks(section) el = 'external_links' if el in section.keys(): text = section['text'] for link in section[el]: label = link['label'] label = re.compile(re.escape(label)) m = label.search(text) try: link['start'] = m.start() link['end'] = m.end() except AttributeError: print('Problem with external links start:end position!') print(label) print(text) if counts: assert len(counts) == len(sections) n = len(sections) pos = 0 levels = [counts[0]] while pos < n: count = counts[pos] elem = sections[pos] level = levels[-1] if count == level: stack[-1].append(elem) elif count >= level: stack.append([elem]) levels.append(count) else: group = stack.pop() stack[-1][-1]['sections'] = group levels.pop() continue pos += 1 while len(stack) > 1: group = stack.pop() stack[-1][-1]['sections'] = group stack = stack[0] return stack
sectionsParser
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text):\n textStart = 0\n entries = re.split('\\n=', text[textStart:])\n stack = [[]]\n intro = {}\n sectionTitleRegEx = re.compile('={1,}.+={2,}')\n section = {}\n section['text'] = entries[0]\n counts = []\n counts.append(3)\n sections = []\n sections.append(section)\n for i in entries[1:]:\n section = {}\n title = re.match(sectionTitleRegEx, i)\n if title:\n titleEnd = title.end()\n title = title.group()\n text = i[titleEnd:]\n level = title.count('=')\n section['title'] = title.strip('= ')\n section['text'] = text\n sections.append(section.copy())\n counts.append(level)\n for section in sections:\n text = section['text']\n if 'wikitable' in text or '</table>' in text.lower():\n section['text'], section['tables'] = tableCollector(text)\n section = relatedArticles(section)\n if '<ref' in text:\n section = reffinder(section)\n if imageRegEx.search(text):\n section = imageParser(section)\n section['text'] = section['text'].strip()\n if ExtLinkBracketedRegex.search(text):\n section = addExternalLinks(section)\n if '[[' in text:\n section = addIntLinks(section)\n el = 'external_links'\n if el in section.keys():\n text = section['text']\n for link in section[el]:\n label = link['label']\n label = re.compile(re.escape(label))\n m = label.search(text)\n try:\n link['start'] = m.start()\n link['end'] = m.end()\n except AttributeError:\n print('Problem with external links start:end position!')\n print(label)\n print(text)\n if counts:\n assert len(counts) == len(sections)\n n = len(sections)\n pos = 0\n levels = [counts[0]]\n while pos < n:\n count = counts[pos]\n elem = sections[pos]\n level = levels[-1]\n if count == level:\n stack[-1].append(elem)\n elif count >= level:\n stack.append([elem])\n levels.append(count)\n else:\n group = stack.pop()\n stack[-1][-1]['sections'] = group\n levels.pop()\n continue\n pos += 1\n while len(stack) > 1:\n group = stack.pop()\n stack[-1][-1]['sections'] = group\n stack = stack[0]\n return stack\n", "role": "user" }, { "content": "sectionsParser", "role": "assistant" } ]
def x(self, text): return ''.join([c for c in text if c in self.alphabet])
clean
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text):\n return ''.join([c for c in text if c in self.alphabet])\n", "role": "user" }, { "content": "clean", "role": "assistant" } ]
def x(self, text): return ''.join(sorted(set([c for c in text if c not in self.alphabet])))
invalid_characters
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text):\n return ''.join(sorted(set([c for c in text if c not in self.alphabet])))\n", "role": "user" }, { "content": "invalid_characters", "role": "assistant" } ]
def x(self, text, context_size=20): result = defaultdict(list) for idx, char in enumerate(text): if char not in self.alphabet: start = max(0, idx - context_size) end = min(len(text), idx + context_size) result[char].append(text[start:end]) return result
find_invalid_chars
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text, context_size=20):\n result = defaultdict(list)\n for idx, char in enumerate(text):\n if char not in self.alphabet:\n start = max(0, idx - context_size)\n end = min(len(text), idx + context_size)\n result[char].append(text[start:end])\n return result\n", "role": "user" }, { "content": "find_invalid_chars", "role": "assistant" } ]
def x(self, texts, context_size=10): result = defaultdict(list) for text in texts: for char, examples in self.find_invalid_chars(text, context_size ).items(): result[char].extend(examples) return result
compute_report
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, texts, context_size=10):\n result = defaultdict(list)\n for text in texts:\n for char, examples in self.find_invalid_chars(text, context_size\n ).items():\n result[char].extend(examples)\n return result\n", "role": "user" }, { "content": "compute_report", "role": "assistant" } ]
def x(self, texts, n_examples=10, context_size=10, f=sys.stdout): result = list(self.compute_report(texts, context_size).items()) result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True) s = 'Analyzed {0} texts.\n'.format(len(texts)) if len(texts) == 0: f.write(s) return if len(result) > 0: s += 'Invalid characters and their counts:\n' for c, examples in result: s += '"{0}"\t{1}\n'.format(c, len(examples)) s += '\n' for c, examples in result: s += ('For character "{0}", found {1} occurrences.\nExamples:\n' .format(c, len(examples))) examples = sample(examples, min(len(examples), n_examples)) for idx, example in enumerate(examples): s += 'example {0}: {1}\n'.format(idx + 1, example) s += '\n' f.write(s) else: f.write('All OK\n')
report
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, texts, n_examples=10, context_size=10, f=sys.stdout):\n result = list(self.compute_report(texts, context_size).items())\n result.sort(key=lambda x: (len(x[1]), x[0]), reverse=True)\n s = 'Analyzed {0} texts.\\n'.format(len(texts))\n if len(texts) == 0:\n f.write(s)\n return\n if len(result) > 0:\n s += 'Invalid characters and their counts:\\n'\n for c, examples in result:\n s += '\"{0}\"\\t{1}\\n'.format(c, len(examples))\n s += '\\n'\n for c, examples in result:\n s += ('For character \"{0}\", found {1} occurrences.\\nExamples:\\n'\n .format(c, len(examples)))\n examples = sample(examples, min(len(examples), n_examples))\n for idx, example in enumerate(examples):\n s += 'example {0}: {1}\\n'.format(idx + 1, example)\n s += '\\n'\n f.write(s)\n else:\n f.write('All OK\\n')\n", "role": "user" }, { "content": "report", "role": "assistant" } ]
def x(self, text, **kwargs): apply_tag_analysis = False augment_words = False all_return_types = ['text', 'vislcg3', 'trees', 'dep_graphs'] return_type = all_return_types[0] for argName, argVal in kwargs.items(): if argName.lower() == 'return_type': if argVal.lower() in all_return_types: return_type = argVal.lower() else: raise Exception(' Unexpected return type: ', argVal) elif argName.lower() == 'augment_words': augment_words = bool(argVal) elif argName.lower() == 'apply_tag_analysis': apply_tag_analysis = bool(argVal) kwargs['split_result'] = True kwargs['clean_up'] = True kwargs['remove_clo'] = kwargs.get('remove_clo', True) kwargs['remove_cap'] = kwargs.get('remove_cap', True) kwargs['keep_old'] = kwargs.get('keep_old', False) kwargs['double_quotes'] = 'unesc' if apply_tag_analysis: text = text.tag_analysis() result_lines1 = self.preprocessor.process_Text(text, **kwargs) result_lines2 = self.vislcg3_processor.process_lines(result_lines1, ** kwargs) alignments = align_cg3_with_Text(result_lines2, text, **kwargs) alignments = normalise_alignments(alignments, data_type=VISLCG3_DATA, **kwargs) text[LAYER_VISLCG3] = alignments if augment_words: self._augment_text_w_syntactic_info(text, text[LAYER_VISLCG3]) if return_type == 'vislcg3': return result_lines2 elif return_type == 'trees': return build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs) elif return_type == 'dep_graphs': trees = build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs) graphs = [tree.as_dependencygraph() for tree in trees] return graphs else: return text
parse_text
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text, **kwargs):\n apply_tag_analysis = False\n augment_words = False\n all_return_types = ['text', 'vislcg3', 'trees', 'dep_graphs']\n return_type = all_return_types[0]\n for argName, argVal in kwargs.items():\n if argName.lower() == 'return_type':\n if argVal.lower() in all_return_types:\n return_type = argVal.lower()\n else:\n raise Exception(' Unexpected return type: ', argVal)\n elif argName.lower() == 'augment_words':\n augment_words = bool(argVal)\n elif argName.lower() == 'apply_tag_analysis':\n apply_tag_analysis = bool(argVal)\n kwargs['split_result'] = True\n kwargs['clean_up'] = True\n kwargs['remove_clo'] = kwargs.get('remove_clo', True)\n kwargs['remove_cap'] = kwargs.get('remove_cap', True)\n kwargs['keep_old'] = kwargs.get('keep_old', False)\n kwargs['double_quotes'] = 'unesc'\n if apply_tag_analysis:\n text = text.tag_analysis()\n result_lines1 = self.preprocessor.process_Text(text, **kwargs)\n result_lines2 = self.vislcg3_processor.process_lines(result_lines1, **\n kwargs)\n alignments = align_cg3_with_Text(result_lines2, text, **kwargs)\n alignments = normalise_alignments(alignments, data_type=VISLCG3_DATA,\n **kwargs)\n text[LAYER_VISLCG3] = alignments\n if augment_words:\n self._augment_text_w_syntactic_info(text, text[LAYER_VISLCG3])\n if return_type == 'vislcg3':\n return result_lines2\n elif return_type == 'trees':\n return build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs)\n elif return_type == 'dep_graphs':\n trees = build_trees_from_text(text, layer=LAYER_VISLCG3, **kwargs)\n graphs = [tree.as_dependencygraph() for tree in trees]\n return graphs\n else:\n return text\n", "role": "user" }, { "content": "parse_text", "role": "assistant" } ]
def x(self, keep_list, **kwargs): new_kwargs = {} for argName, argVal in kwargs.items(): if argName.lower() in keep_list: new_kwargs[argName.lower()] = argVal return new_kwargs
filter_kwargs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, keep_list, **kwargs):\n new_kwargs = {}\n for argName, argVal in kwargs.items():\n if argName.lower() in keep_list:\n new_kwargs[argName.lower()] = argVal\n return new_kwargs\n", "role": "user" }, { "content": "filter_kwargs", "role": "assistant" } ]
def x(self, text, text_layer): j = 0 for sentence in text.divide(layer=WORDS, by=SENTENCES): for i in range(len(sentence)): estnltkToken = sentence[i] vislcg3Token = text_layer[j] parse_found = False if PARSER_OUT in vislcg3Token: if len(vislcg3Token[PARSER_OUT]) > 0: firstParse = vislcg3Token[PARSER_OUT][0] estnltkToken['s_label'] = str(i) estnltkToken['s_head'] = str(firstParse[1]) deprels = '|'.join([p[0] for p in vislcg3Token[PARSER_OUT]] ) estnltkToken['s_rel'] = deprels parse_found = True if not parse_found: raise Exception( '(!) Unable to retrieve syntactic analysis for the ', estnltkToken, ' from ', vislcg3Token) j += 1
augment_text_w_syntactic_info
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text, text_layer):\n j = 0\n for sentence in text.divide(layer=WORDS, by=SENTENCES):\n for i in range(len(sentence)):\n estnltkToken = sentence[i]\n vislcg3Token = text_layer[j]\n parse_found = False\n if PARSER_OUT in vislcg3Token:\n if len(vislcg3Token[PARSER_OUT]) > 0:\n firstParse = vislcg3Token[PARSER_OUT][0]\n estnltkToken['s_label'] = str(i)\n estnltkToken['s_head'] = str(firstParse[1])\n deprels = '|'.join([p[0] for p in vislcg3Token[PARSER_OUT]]\n )\n estnltkToken['s_rel'] = deprels\n parse_found = True\n if not parse_found:\n raise Exception(\n '(!) Unable to retrieve syntactic analysis for the ',\n estnltkToken, ' from ', vislcg3Token)\n j += 1\n", "role": "user" }, { "content": "augment_text_w_syntactic_info", "role": "assistant" } ]
def x(self, text, **kwargs): augment_words = False all_return_types = ['text', 'conll', 'trees', 'dep_graphs'] return_type = all_return_types[0] for argName, argVal in kwargs.items(): if argName == 'return_type': if argVal.lower() in all_return_types: return_type = argVal.lower() else: raise Exception(' Unexpected return type: ', argVal) elif argName.lower() == 'augment_words': augment_words = bool(argVal) if not text.is_tagged(ANALYSIS): text.tag_analysis() textConllStr = convert_text_to_CONLL(text, self.feature_generator) resultsConllStr = _executeMaltparser(textConllStr, self.maltparser_dir, self.maltparser_jar, self.model_name) alignments = align_CONLL_with_Text(resultsConllStr, text, self. feature_generator, **kwargs) alignments = normalise_alignments(alignments, data_type=CONLL_DATA, ** kwargs) text[LAYER_CONLL] = alignments if augment_words: augmentTextWithCONLLstr(resultsConllStr, text) if return_type == 'conll': return resultsConllStr elif return_type == 'trees': return build_trees_from_text(text, layer=LAYER_CONLL, **kwargs) elif return_type == 'dep_graphs': trees = build_trees_from_text(text, layer=LAYER_CONLL, **kwargs) graphs = [tree.as_dependencygraph() for tree in trees] return graphs else: return text
parse_text
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, text, **kwargs):\n augment_words = False\n all_return_types = ['text', 'conll', 'trees', 'dep_graphs']\n return_type = all_return_types[0]\n for argName, argVal in kwargs.items():\n if argName == 'return_type':\n if argVal.lower() in all_return_types:\n return_type = argVal.lower()\n else:\n raise Exception(' Unexpected return type: ', argVal)\n elif argName.lower() == 'augment_words':\n augment_words = bool(argVal)\n if not text.is_tagged(ANALYSIS):\n text.tag_analysis()\n textConllStr = convert_text_to_CONLL(text, self.feature_generator)\n resultsConllStr = _executeMaltparser(textConllStr, self.maltparser_dir,\n self.maltparser_jar, self.model_name)\n alignments = align_CONLL_with_Text(resultsConllStr, text, self.\n feature_generator, **kwargs)\n alignments = normalise_alignments(alignments, data_type=CONLL_DATA, **\n kwargs)\n text[LAYER_CONLL] = alignments\n if augment_words:\n augmentTextWithCONLLstr(resultsConllStr, text)\n if return_type == 'conll':\n return resultsConllStr\n elif return_type == 'trees':\n return build_trees_from_text(text, layer=LAYER_CONLL, **kwargs)\n elif return_type == 'dep_graphs':\n trees = build_trees_from_text(text, layer=LAYER_CONLL, **kwargs)\n graphs = [tree.as_dependencygraph() for tree in trees]\n return graphs\n else:\n return text\n", "role": "user" }, { "content": "parse_text", "role": "assistant" } ]
def x(orig_text, layer=LAYER_CONLL): sent_start_index = 0 for sent_text in orig_text.split_by(SENTENCES): mapping = dict() cl_ind = sent_text.clause_indices for wid, word in enumerate(sent_text[WORDS]): firstSyntaxRel = sent_text[layer][wid][PARSER_OUT][0] parentIndex = firstSyntaxRel[1] if parentIndex != -1: if cl_ind[parentIndex] != cl_ind[wid]: mapping[wid] = -1 else: clause_start = cl_ind.index(cl_ind[wid]) j = 0 k = 0 while clause_start + j < len(cl_ind): if clause_start + j == parentIndex: break if cl_ind[clause_start + j] == cl_ind[wid]: k += 1 j += 1 assert clause_start + j < len(cl_ind ), '(!) Parent index not found for: ' + str(parentIndex ) mapping[wid] = k else: mapping[wid] = -1 for local_wid in mapping.keys(): global_wid = sent_start_index + local_wid for syntax_rel in orig_text[layer][global_wid][PARSER_OUT]: syntax_rel[1] = mapping[local_wid] sent_start_index += len(cl_ind) return orig_text
create_clause_based_dep_links
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(orig_text, layer=LAYER_CONLL):\n sent_start_index = 0\n for sent_text in orig_text.split_by(SENTENCES):\n mapping = dict()\n cl_ind = sent_text.clause_indices\n for wid, word in enumerate(sent_text[WORDS]):\n firstSyntaxRel = sent_text[layer][wid][PARSER_OUT][0]\n parentIndex = firstSyntaxRel[1]\n if parentIndex != -1:\n if cl_ind[parentIndex] != cl_ind[wid]:\n mapping[wid] = -1\n else:\n clause_start = cl_ind.index(cl_ind[wid])\n j = 0\n k = 0\n while clause_start + j < len(cl_ind):\n if clause_start + j == parentIndex:\n break\n if cl_ind[clause_start + j] == cl_ind[wid]:\n k += 1\n j += 1\n assert clause_start + j < len(cl_ind\n ), '(!) Parent index not found for: ' + str(parentIndex\n )\n mapping[wid] = k\n else:\n mapping[wid] = -1\n for local_wid in mapping.keys():\n global_wid = sent_start_index + local_wid\n for syntax_rel in orig_text[layer][global_wid][PARSER_OUT]:\n syntax_rel[1] = mapping[local_wid]\n sent_start_index += len(cl_ind)\n return orig_text\n", "role": "user" }, { "content": "create_clause_based_dep_links", "role": "assistant" } ]
def x(sentence): for word in sentence: if ANALYSIS not in word: raise Exception('(!) Error: no analysis found from word: ' + str(word)) else: word[ANALYSIS] = sorted(word[ANALYSIS], key=lambda x: '_'.join( [x[ROOT], x[POSTAG], x[FORM], x[CLITIC]])) return sentence
sort_analyses
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence):\n for word in sentence:\n if ANALYSIS not in word:\n raise Exception('(!) Error: no analysis found from word: ' +\n str(word))\n else:\n word[ANALYSIS] = sorted(word[ANALYSIS], key=lambda x: '_'.join(\n [x[ROOT], x[POSTAG], x[FORM], x[CLITIC]]))\n return sentence\n", "role": "user" }, { "content": "sort_analyses", "role": "assistant" } ]
def x(text, feature_generator): from estnltk.text import Text if not isinstance(text, Text): raise Exception( "(!) Unexpected type of input argument! Expected EstNLTK's Text. ") try: granularity = feature_generator.parseScope except AttributeError: granularity = SENTENCES assert granularity in [SENTENCES, CLAUSES ], '(!) Unsupported granularity: "' + str(granularity) + '"!' sentenceStrs = [] for sentence_text in text.split_by(granularity): sentence_text[WORDS] = __sort_analyses(sentence_text[WORDS]) for i in range(len(sentence_text[WORDS])): strForm = feature_generator.generate_features(sentence_text, i) strForm.append('_') strForm.append('\t') strForm.append('xxx') strForm.append('\t') strForm.append('_') strForm.append('\t') strForm.append('_') sentenceStrs.append(''.join(strForm)) sentenceStrs.append('') return '\n'.join(sentenceStrs)
convert_text_to_CONLL
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, feature_generator):\n from estnltk.text import Text\n if not isinstance(text, Text):\n raise Exception(\n \"(!) Unexpected type of input argument! Expected EstNLTK's Text. \")\n try:\n granularity = feature_generator.parseScope\n except AttributeError:\n granularity = SENTENCES\n assert granularity in [SENTENCES, CLAUSES\n ], '(!) Unsupported granularity: \"' + str(granularity) + '\"!'\n sentenceStrs = []\n for sentence_text in text.split_by(granularity):\n sentence_text[WORDS] = __sort_analyses(sentence_text[WORDS])\n for i in range(len(sentence_text[WORDS])):\n strForm = feature_generator.generate_features(sentence_text, i)\n strForm.append('_')\n strForm.append('\\t')\n strForm.append('xxx')\n strForm.append('\\t')\n strForm.append('_')\n strForm.append('\\t')\n strForm.append('_')\n sentenceStrs.append(''.join(strForm))\n sentenceStrs.append('')\n return '\\n'.join(sentenceStrs)\n", "role": "user" }, { "content": "convert_text_to_CONLL", "role": "assistant" } ]
def x(text, feature_generator, layer=LAYER_CONLL): from estnltk.text import Text if not isinstance(text, Text): raise Exception( "(!) Unexpected type of input argument! Expected EstNLTK's Text. ") assert layer in text, ' (!) The layer "' + layer + '" is missing form the Text object.' try: granularity = feature_generator.parseScope except AttributeError: granularity = SENTENCES assert granularity in [SENTENCES, CLAUSES ], '(!) Unsupported granularity: "' + str(granularity) + '"!' sentenceStrs = [] if granularity == CLAUSES: _create_clause_based_dep_links(text, layer) for sentence_text in text.split_by(granularity): sentence_text[WORDS] = __sort_analyses(sentence_text[WORDS]) for i in range(len(sentence_text[WORDS])): strForm = feature_generator.generate_features(sentence_text, i) syntaxToken = sentence_text[layer][i] firstSyntaxRel = syntaxToken[PARSER_OUT][0] parentLabel = str(firstSyntaxRel[1] + 1) strForm.append(parentLabel) strForm.append('\t') if parentLabel == '0': strForm.append('ROOT') strForm.append('\t') else: strForm.append(firstSyntaxRel[0]) strForm.append('\t') strForm.append('_') strForm.append('\t') strForm.append('_') sentenceStrs.append(''.join(strForm)) sentenceStrs.append('') return '\n'.join(sentenceStrs)
convert_text_w_syntax_to_CONLL
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(text, feature_generator, layer=LAYER_CONLL):\n from estnltk.text import Text\n if not isinstance(text, Text):\n raise Exception(\n \"(!) Unexpected type of input argument! Expected EstNLTK's Text. \")\n assert layer in text, ' (!) The layer \"' + layer + '\" is missing form the Text object.'\n try:\n granularity = feature_generator.parseScope\n except AttributeError:\n granularity = SENTENCES\n assert granularity in [SENTENCES, CLAUSES\n ], '(!) Unsupported granularity: \"' + str(granularity) + '\"!'\n sentenceStrs = []\n if granularity == CLAUSES:\n _create_clause_based_dep_links(text, layer)\n for sentence_text in text.split_by(granularity):\n sentence_text[WORDS] = __sort_analyses(sentence_text[WORDS])\n for i in range(len(sentence_text[WORDS])):\n strForm = feature_generator.generate_features(sentence_text, i)\n syntaxToken = sentence_text[layer][i]\n firstSyntaxRel = syntaxToken[PARSER_OUT][0]\n parentLabel = str(firstSyntaxRel[1] + 1)\n strForm.append(parentLabel)\n strForm.append('\\t')\n if parentLabel == '0':\n strForm.append('ROOT')\n strForm.append('\\t')\n else:\n strForm.append(firstSyntaxRel[0])\n strForm.append('\\t')\n strForm.append('_')\n strForm.append('\\t')\n strForm.append('_')\n sentenceStrs.append(''.join(strForm))\n sentenceStrs.append('')\n return '\\n'.join(sentenceStrs)\n", "role": "user" }, { "content": "convert_text_w_syntax_to_CONLL", "role": "assistant" } ]
def x(input_string, maltparser_dir, maltparser_jar, model_name): temp_input_file = tempfile.NamedTemporaryFile(prefix='malt_in.', mode= 'w', delete=False) temp_input_file.close() out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8') out_f.write(input_string) out_f.close() temp_output_file = tempfile.NamedTemporaryFile(prefix='malt_out.', mode ='w', delete=False) temp_output_file.close() current_dir = os.getcwd() os.chdir(maltparser_dir) cmd = ['java', '-jar', os.path.join(maltparser_dir, maltparser_jar), '-c', model_name, '-i', temp_input_file.name, '-o', temp_output_file.name, '-m', 'parse'] p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if p.wait() != 0: raise Exception(' Error on running Maltparser: ', p.stderr.read()) os.chdir(current_dir) results = [] in_f = codecs.open(temp_output_file.name, mode='r', encoding='utf-8') for line in in_f: results.append(line.rstrip()) in_f.close() if not temp_input_file.closed: raise Exception('Temp input file unclosed!') if not temp_output_file.closed: raise Exception('Temp input file unclosed!') if not out_f.closed: raise Exception('Output file unclosed!') if not in_f.closed: raise Exception('Input file unclosed!') os.remove(temp_input_file.name) os.remove(temp_output_file.name) return results
executeMaltparser
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(input_string, maltparser_dir, maltparser_jar, model_name):\n temp_input_file = tempfile.NamedTemporaryFile(prefix='malt_in.', mode=\n 'w', delete=False)\n temp_input_file.close()\n out_f = codecs.open(temp_input_file.name, mode='w', encoding='utf-8')\n out_f.write(input_string)\n out_f.close()\n temp_output_file = tempfile.NamedTemporaryFile(prefix='malt_out.', mode\n ='w', delete=False)\n temp_output_file.close()\n current_dir = os.getcwd()\n os.chdir(maltparser_dir)\n cmd = ['java', '-jar', os.path.join(maltparser_dir, maltparser_jar),\n '-c', model_name, '-i', temp_input_file.name, '-o',\n temp_output_file.name, '-m', 'parse']\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if p.wait() != 0:\n raise Exception(' Error on running Maltparser: ', p.stderr.read())\n os.chdir(current_dir)\n results = []\n in_f = codecs.open(temp_output_file.name, mode='r', encoding='utf-8')\n for line in in_f:\n results.append(line.rstrip())\n in_f.close()\n if not temp_input_file.closed:\n raise Exception('Temp input file unclosed!')\n if not temp_output_file.closed:\n raise Exception('Temp input file unclosed!')\n if not out_f.closed:\n raise Exception('Output file unclosed!')\n if not in_f.closed:\n raise Exception('Input file unclosed!')\n os.remove(temp_input_file.name)\n os.remove(temp_output_file.name)\n return results\n", "role": "user" }, { "content": "executeMaltparser", "role": "assistant" } ]
def x(in_file, addDepRels=False, splitIntoSentences=True): sentenceCount = 0 wordCountInSent = 0 tokens = [] in_f = codecs.open(in_file, mode='r', encoding='utf-8') for line in in_f: line = line.rstrip() if len(line) == 0 or re.match('^\\s+$', line): sentenceCount += 1 wordCountInSent = 0 continue features = line.split('\t') if len(features) != 10: raise Exception(' In file ' + in_file + ', line with unexpected format: "' + line + '" ') selfLabel = features[0] token = features[1] lemma = features[2] cpos = features[3] pos = features[4] form = features[5] parentLabel = features[6] tokens.append([str(sentenceCount), str(wordCountInSent), token, lemma + ' ' + pos + ' ' + form, selfLabel, parentLabel]) if addDepRels: tokens[-1].append(features[7]) wordCountInSent += 1 in_f.close() if not splitIntoSentences: return tokens else: sentences = [] lastSentID = '' for tok in tokens: if tok[0] != lastSentID: sentences.append([]) sentences[-1].append(tok) lastSentID = tok[0] return sentences
loadCONLLannotations
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(in_file, addDepRels=False, splitIntoSentences=True):\n sentenceCount = 0\n wordCountInSent = 0\n tokens = []\n in_f = codecs.open(in_file, mode='r', encoding='utf-8')\n for line in in_f:\n line = line.rstrip()\n if len(line) == 0 or re.match('^\\\\s+$', line):\n sentenceCount += 1\n wordCountInSent = 0\n continue\n features = line.split('\\t')\n if len(features) != 10:\n raise Exception(' In file ' + in_file +\n ', line with unexpected format: \"' + line + '\" ')\n selfLabel = features[0]\n token = features[1]\n lemma = features[2]\n cpos = features[3]\n pos = features[4]\n form = features[5]\n parentLabel = features[6]\n tokens.append([str(sentenceCount), str(wordCountInSent), token, \n lemma + ' ' + pos + ' ' + form, selfLabel, parentLabel])\n if addDepRels:\n tokens[-1].append(features[7])\n wordCountInSent += 1\n in_f.close()\n if not splitIntoSentences:\n return tokens\n else:\n sentences = []\n lastSentID = ''\n for tok in tokens:\n if tok[0] != lastSentID:\n sentences.append([])\n sentences[-1].append(tok)\n lastSentID = tok[0]\n return sentences\n", "role": "user" }, { "content": "loadCONLLannotations", "role": "assistant" } ]
def x(in_file, addDepRels=False, verbose=False, **kwargs):
    from estnltk.text import Text
    sentences = loadCONLLannotations(in_file, addDepRels=addDepRels,
        splitIntoSentences=True)
    if verbose:
        print(str(len(sentences)) + ' sentences loaded. ')
    estnltkSentTexts = []
    for i in range(len(sentences)):
        s = sentences[i]
        sentenceString = ' '.join([t[2] for t in s])
        sentText = Text(sentenceString, **kwargs)
        sentText.tag_analysis()
        sentText.tag_clauses()
        sentText.tag_verb_chains()
        sentText = dict(sentText)
        if len(sentText[WORDS]) == len(s):
            for j in range(len(sentText[WORDS])):
                estnltkWord = sentText[WORDS][j]
                depSyntaxWord = s[j]
                estnltkWord[SYNTAX_LABEL] = depSyntaxWord[4]
                estnltkWord[SYNTAX_HEAD] = depSyntaxWord[5]
                if addDepRels:
                    estnltkWord[DEPREL] = depSyntaxWord[6]
            estnltkSentTexts.append(sentText)
            if verbose:
                print('*', end='')
        elif verbose:
            print("The sentence segmentation of dependency syntax differs "
                "from estnltk's sentence segmentation:",
                len(sentText[WORDS]), ' vs ', len(s))
    return estnltkSentTexts
convertCONLLtoText
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(in_file, addDepRels=False, verbose=False, **kwargs):\n from estnltk.text import Text\n sentences = loadCONLLannotations(in_file, addDepRels=addDepRels,\n splitIntoSentences=True)\n if verbose:\n print(str(len(sentences)) + ' sentences loaded. ')\n estnltkSentTexts = []\n for i in range(len(sentences)):\n s = sentences[i]\n sentenceString = ' '.join([t[2] for t in s])\n sentText = Text(sentenceString, **kwargs)\n sentText.tag_analysis()\n sentText.tag_clauses()\n sentText.tag_verb_chains()\n sentText = dict(sentText)\n if len(sentText[WORDS]) == len(s):\n for j in range(len(sentText[WORDS])):\n estnltkWord = sentText[WORDS][j]\n depSyntaxWord = s[j]\n estnltkWord[SYNTAX_LABEL] = depSyntaxWord[4]\n estnltkWord[SYNTAX_HEAD] = depSyntaxWord[5]\n if addDepRels:\n estnltkWord[DEPREL] = depSyntaxWord[6]\n estnltkSentTexts.append(sentText)\n if verbose:\n print('*', end='')\n elif verbose:\n print(\n \"The sentence segmentation of dependency syntax differs from the estnltk's sentence segmentation:\"\n , len(sentText[WORDS]), ' vs ', len(s))\n return estnltkSentTexts\n", "role": "user" }, { "content": "convertCONLLtoText", "role": "assistant" } ]
def x(conll_str_array, text): j = 0 for sentence in text.divide(layer=WORDS, by=SENTENCES): sentence = __sort_analyses(sentence) for i in range(len(sentence)): estnltkToken = sentence[i] maltparserToken = conll_str_array[j] if len(maltparserToken) > 1: maltParserAnalysis = maltparserToken.split('\t') if estnltkToken[TEXT] == maltParserAnalysis[1]: estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0] estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6] estnltkToken[DEPREL] = maltParserAnalysis[7] else: raise Exception( "A misalignment between Text and Maltparser's output: " , estnltkToken, maltparserToken) j += 1 j += 1
augmentTextWithCONLLstr
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(conll_str_array, text):\n j = 0\n for sentence in text.divide(layer=WORDS, by=SENTENCES):\n sentence = __sort_analyses(sentence)\n for i in range(len(sentence)):\n estnltkToken = sentence[i]\n maltparserToken = conll_str_array[j]\n if len(maltparserToken) > 1:\n maltParserAnalysis = maltparserToken.split('\\t')\n if estnltkToken[TEXT] == maltParserAnalysis[1]:\n estnltkToken[SYNTAX_LABEL] = maltParserAnalysis[0]\n estnltkToken[SYNTAX_HEAD] = maltParserAnalysis[6]\n estnltkToken[DEPREL] = maltParserAnalysis[7]\n else:\n raise Exception(\n \"A misalignment between Text and Maltparser's output: \"\n , estnltkToken, maltparserToken)\n j += 1\n j += 1\n", "role": "user" }, { "content": "augmentTextWithCONLLstr", "role": "assistant" } ]
def x(sentence_text, clause_id): clause = [] isEmbedded = False indices = sentence_text.clause_indices clause_anno = sentence_text.clause_annotations for wid, token in enumerate(sentence_text[WORDS]): if indices[wid] == clause_id: if not clause and clause_anno[wid] == EMBEDDED_CLAUSE_START: isEmbedded = True clause.append((wid, token)) return clause, isEmbedded
get_clause_words
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence_text, clause_id):\n clause = []\n isEmbedded = False\n indices = sentence_text.clause_indices\n clause_anno = sentence_text.clause_annotations\n for wid, token in enumerate(sentence_text[WORDS]):\n if indices[wid] == clause_id:\n if not clause and clause_anno[wid] == EMBEDDED_CLAUSE_START:\n isEmbedded = True\n clause.append((wid, token))\n return clause, isEmbedded\n", "role": "user" }, { "content": "get_clause_words", "role": "assistant" } ]
def x(sentence_text, wid, fromRight=True): i = wid while i > -1 and i < len(sentence_text[WORDS]): token = sentence_text[WORDS][i] if _pat_starting_quote.match(token[TEXT]) or _pat_ending_quote.match( token[TEXT]): return i i += 1 if fromRight else -1 return -1
detect_quotes
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence_text, wid, fromRight=True):\n i = wid\n while i > -1 and i < len(sentence_text[WORDS]):\n token = sentence_text[WORDS][i]\n if _pat_starting_quote.match(token[TEXT]) or _pat_ending_quote.match(\n token[TEXT]):\n return i\n i += 1 if fromRight else -1\n return -1\n", "role": "user" }, { "content": "detect_quotes", "role": "assistant" } ]
def x(edt_sent_text): from estnltk.mw_verbs.utils import WordTemplate if not edt_sent_text.is_tagged(VERB_CHAINS): edt_sent_text.tag_verb_chains() saying_verbs = {} if len(edt_sent_text[VERB_CHAINS]) < 2: return saying_verbs patColon = WordTemplate({'partofspeech': '^[Z]$', 'text': '^:$'}) for vid, vc in enumerate(edt_sent_text[VERB_CHAINS]): if len(vc['phrase']) == 1 and vid == len(edt_sent_text[VERB_CHAINS] ) - 1: wid = vc['phrase'][0] token = edt_sent_text[WORDS][wid] clause_id = vc[CLAUSE_IDX] clause, insideEmbeddedCl = _get_clause_words(edt_sent_text, clause_id) quoteLeft = _detect_quotes(edt_sent_text, wid, fromRight=False) quoteRight = _detect_quotes(edt_sent_text, wid, fromRight=True) quotes_in_clause = [] for wid2, token2 in clause: if _pat_starting_quote.match(token2[TEXT] ) or _pat_ending_quote.match(token2[TEXT]): quotes_in_clause.append(wid2) multipleQuotes = len(quotes_in_clause) > 1 and quotes_in_clause[-1 ] == quoteLeft if not multipleQuotes and not insideEmbeddedCl and (quoteLeft != -1 and quoteLeft + 1 == wid and quoteRight == -1): saying_verbs[wid] = 'se_saying_verb' return saying_verbs
detect_sentence_ending_saying_verbs
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(edt_sent_text):\n from estnltk.mw_verbs.utils import WordTemplate\n if not edt_sent_text.is_tagged(VERB_CHAINS):\n edt_sent_text.tag_verb_chains()\n saying_verbs = {}\n if len(edt_sent_text[VERB_CHAINS]) < 2:\n return saying_verbs\n patColon = WordTemplate({'partofspeech': '^[Z]$', 'text': '^:$'})\n for vid, vc in enumerate(edt_sent_text[VERB_CHAINS]):\n if len(vc['phrase']) == 1 and vid == len(edt_sent_text[VERB_CHAINS]\n ) - 1:\n wid = vc['phrase'][0]\n token = edt_sent_text[WORDS][wid]\n clause_id = vc[CLAUSE_IDX]\n clause, insideEmbeddedCl = _get_clause_words(edt_sent_text,\n clause_id)\n quoteLeft = _detect_quotes(edt_sent_text, wid, fromRight=False)\n quoteRight = _detect_quotes(edt_sent_text, wid, fromRight=True)\n quotes_in_clause = []\n for wid2, token2 in clause:\n if _pat_starting_quote.match(token2[TEXT]\n ) or _pat_ending_quote.match(token2[TEXT]):\n quotes_in_clause.append(wid2)\n multipleQuotes = len(quotes_in_clause) > 1 and quotes_in_clause[-1\n ] == quoteLeft\n if not multipleQuotes and not insideEmbeddedCl and (quoteLeft !=\n -1 and quoteLeft + 1 == wid and quoteRight == -1):\n saying_verbs[wid] = 'se_saying_verb'\n return saying_verbs\n", "role": "user" }, { "content": "detect_sentence_ending_saying_verbs", "role": "assistant" } ]
def x(inputFile): kSubCatRelations = dict() in_f = codecs.open(inputFile, mode='r', encoding='utf-8') for line in in_f: line = line.rstrip() if len(line) > 0 and not re.match('^#.+$', line): items = line.split(';') if len(items) == 4: root = items[0] partofspeech = items[1] postPre = items[2] morphPattern = items[3] fpattern = '(sg|pl)\\s' + morphPattern if root not in kSubCatRelations: kSubCatRelations[root] = [] kSubCatRelations[root].append([postPre, fpattern]) root_clean = root.replace('_', '') if root != root_clean: if root_clean not in kSubCatRelations: kSubCatRelations[root_clean] = [] kSubCatRelations[root_clean].append([postPre, fpattern]) else: raise Exception( ' Unexpected number of items in the input lexicon line: ' + line) in_f.close() return kSubCatRelations
loadKSubcatRelations
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(inputFile):\n kSubCatRelations = dict()\n in_f = codecs.open(inputFile, mode='r', encoding='utf-8')\n for line in in_f:\n line = line.rstrip()\n if len(line) > 0 and not re.match('^#.+$', line):\n items = line.split(';')\n if len(items) == 4:\n root = items[0]\n partofspeech = items[1]\n postPre = items[2]\n morphPattern = items[3]\n fpattern = '(sg|pl)\\\\s' + morphPattern\n if root not in kSubCatRelations:\n kSubCatRelations[root] = []\n kSubCatRelations[root].append([postPre, fpattern])\n root_clean = root.replace('_', '')\n if root != root_clean:\n if root_clean not in kSubCatRelations:\n kSubCatRelations[root_clean] = []\n kSubCatRelations[root_clean].append([postPre, fpattern])\n else:\n raise Exception(\n ' Unexpected number of items in the input lexicon line: ' +\n line)\n in_f.close()\n return kSubCatRelations\n", "role": "user" }, { "content": "loadKSubcatRelations", "role": "assistant" } ]
def x(sentence, i, kSubCatRelsLexicon): curToken = sentence[i] root = curToken[ANALYSIS][0][ROOT] if root in kSubCatRelsLexicon: for [postPre, fpattern] in kSubCatRelsLexicon[root]: if postPre == 'post' and i - 1 > -1: lastTokenAnalysis = sentence[i - 1][ANALYSIS][0] if re.match(fpattern, lastTokenAnalysis[FORM]): return 'post', i - 1 elif postPre == 'pre' and i + 1 < len(sentence): nextTokenAnalysis = sentence[i + 1][ANALYSIS][0] if re.match(fpattern, nextTokenAnalysis[FORM]): return 'pre', i + 1 if len(kSubCatRelsLexicon[root]) == 1: return kSubCatRelsLexicon[root][0][0], -1 return '_', -1
detectKsubcatRelType
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence, i, kSubCatRelsLexicon):\n curToken = sentence[i]\n root = curToken[ANALYSIS][0][ROOT]\n if root in kSubCatRelsLexicon:\n for [postPre, fpattern] in kSubCatRelsLexicon[root]:\n if postPre == 'post' and i - 1 > -1:\n lastTokenAnalysis = sentence[i - 1][ANALYSIS][0]\n if re.match(fpattern, lastTokenAnalysis[FORM]):\n return 'post', i - 1\n elif postPre == 'pre' and i + 1 < len(sentence):\n nextTokenAnalysis = sentence[i + 1][ANALYSIS][0]\n if re.match(fpattern, nextTokenAnalysis[FORM]):\n return 'pre', i + 1\n if len(kSubCatRelsLexicon[root]) == 1:\n return kSubCatRelsLexicon[root][0][0], -1\n return '_', -1\n", "role": "user" }, { "content": "detectKsubcatRelType", "role": "assistant" } ]
def x(sentence, kSubCatRelsLexicon, reverseMapping=False): relationIndex = dict() relationType = dict() for i in range(len(sentence)): estnltkWord = sentence[i] firstAnalysis = estnltkWord[ANALYSIS][0] if firstAnalysis[POSTAG] == 'K': grammCats, kChild = _detectKsubcatRelType(sentence, i, kSubCatRelsLexicon) if kChild != -1: if reverseMapping: relationIndex[kChild] = i relationType[kChild] = grammCats else: relationIndex[i] = kChild relationType[i] = grammCats return relationIndex, relationType
detectPossibleKsubcatRelsFromSent
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence, kSubCatRelsLexicon, reverseMapping=False):\n relationIndex = dict()\n relationType = dict()\n for i in range(len(sentence)):\n estnltkWord = sentence[i]\n firstAnalysis = estnltkWord[ANALYSIS][0]\n if firstAnalysis[POSTAG] == 'K':\n grammCats, kChild = _detectKsubcatRelType(sentence, i,\n kSubCatRelsLexicon)\n if kChild != -1:\n if reverseMapping:\n relationIndex[kChild] = i\n relationType[kChild] = grammCats\n else:\n relationIndex[i] = kChild\n relationType[i] = grammCats\n return relationIndex, relationType\n", "role": "user" }, { "content": "detectPossibleKsubcatRelsFromSent", "role": "assistant" } ]
def x(sentence, kSubCatRelsLexicon, addFeaturesToK=True): features = dict() if addFeaturesToK: for i in range(len(sentence)): estnltkWord = sentence[i] firstAnalysis = estnltkWord[ANALYSIS][0] if firstAnalysis[POSTAG] == 'K': grammCats, kChild = _detectKsubcatRelType(sentence, i, kSubCatRelsLexicon) features[i] = grammCats else: relationIndex, relationType = _detectPossibleKsubcatRelsFromSent( sentence, kSubCatRelsLexicon, reverseMapping=True) for i in relationIndex: features[i] = relationType[i] return features
findKsubcatFeatures
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(sentence, kSubCatRelsLexicon, addFeaturesToK=True):\n features = dict()\n if addFeaturesToK:\n for i in range(len(sentence)):\n estnltkWord = sentence[i]\n firstAnalysis = estnltkWord[ANALYSIS][0]\n if firstAnalysis[POSTAG] == 'K':\n grammCats, kChild = _detectKsubcatRelType(sentence, i,\n kSubCatRelsLexicon)\n features[i] = grammCats\n else:\n relationIndex, relationType = _detectPossibleKsubcatRelsFromSent(\n sentence, kSubCatRelsLexicon, reverseMapping=True)\n for i in relationIndex:\n features[i] = relationType[i]\n return features\n", "role": "user" }, { "content": "findKsubcatFeatures", "role": "assistant" } ]
def x(self, sentence_text, wid): assert WORDS in sentence_text and len(sentence_text[WORDS] ) > 0, " (!) 'words' layer missing or empty in given Text!" sentence = sentence_text[WORDS] assert -1 < wid and wid < len(sentence), ' (!) Invalid word id: ' + str(wid ) if wid == 0: if self.kSubCatRelsLex: self.kFeatures = _findKsubcatFeatures(sentence, self. kSubCatRelsLex, addFeaturesToK=True) if self.addVerbcGramm or self.addNomAdvVinf: self.vcFeatures = generate_verb_chain_features(sentence_text, addGrammPred=self.addVerbcGramm, addNomAdvVinf=self. addNomAdvVinf) if self.addSeSayingVerbs: self.sayingverbs = detect_sentence_ending_saying_verbs( sentence_text) if self.addClauseBound: self.clbFeatures = [] for tag in sentence_text.clause_annotations: if not tag: self.clbFeatures.append([]) elif tag == EMBEDDED_CLAUSE_START: self.clbFeatures.append(['emb_cl_start']) elif tag == EMBEDDED_CLAUSE_END: self.clbFeatures.append(['emb_cl_end']) elif tag == CLAUSE_BOUNDARY: self.clbFeatures.append(['clb']) estnltkWord = sentence[wid] firstAnalysis = estnltkWord[ANALYSIS][0] strForm = [] strForm.append(str(wid + 1)) strForm.append('\t') word_text = estnltkWord[TEXT] word_text = word_text.replace(' ', '_') strForm.append(word_text) strForm.append('\t') word_root = firstAnalysis[ROOT] word_root = word_root.replace(' ', '_') if len(word_root) == 0: word_root = '??' strForm.append(word_root) strForm.append('\t') strForm.append(firstAnalysis[POSTAG]) strForm.append('\t') finePos = firstAnalysis[POSTAG] if self.addAmbiguousPos and len(estnltkWord[ANALYSIS]) > 1: pos_tags = sorted(list(set([a[POSTAG] for a in estnltkWord[ANALYSIS]])) ) finePos = '_'.join(pos_tags) strForm.append(finePos) strForm.append('\t') grammCats = [] if len(firstAnalysis[FORM]) != 0: forms = firstAnalysis[FORM].split() grammCats.extend(forms) if self.vcFeatures and self.vcFeatures[wid]: grammCats.extend(self.vcFeatures[wid]) if self.addClauseBound and self.clbFeatures[wid]: grammCats.extend(self.clbFeatures[wid]) if self.kFeatures and wid in self.kFeatures: grammCats.extend([self.kFeatures[wid]]) if self.sayingverbs and wid in self.sayingverbs: grammCats.extend([self.sayingverbs[wid]]) if not grammCats: grammCats = '_' else: grammCats = '|'.join(grammCats) strForm.append(grammCats) strForm.append('\t') return strForm
generate_features
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, sentence_text, wid):\n assert WORDS in sentence_text and len(sentence_text[WORDS]\n ) > 0, \" (!) 'words' layer missing or empty in given Text!\"\n sentence = sentence_text[WORDS]\n assert -1 < wid and wid < len(sentence), ' (!) Invalid word id: ' + str(wid\n )\n if wid == 0:\n if self.kSubCatRelsLex:\n self.kFeatures = _findKsubcatFeatures(sentence, self.\n kSubCatRelsLex, addFeaturesToK=True)\n if self.addVerbcGramm or self.addNomAdvVinf:\n self.vcFeatures = generate_verb_chain_features(sentence_text,\n addGrammPred=self.addVerbcGramm, addNomAdvVinf=self.\n addNomAdvVinf)\n if self.addSeSayingVerbs:\n self.sayingverbs = detect_sentence_ending_saying_verbs(\n sentence_text)\n if self.addClauseBound:\n self.clbFeatures = []\n for tag in sentence_text.clause_annotations:\n if not tag:\n self.clbFeatures.append([])\n elif tag == EMBEDDED_CLAUSE_START:\n self.clbFeatures.append(['emb_cl_start'])\n elif tag == EMBEDDED_CLAUSE_END:\n self.clbFeatures.append(['emb_cl_end'])\n elif tag == CLAUSE_BOUNDARY:\n self.clbFeatures.append(['clb'])\n estnltkWord = sentence[wid]\n firstAnalysis = estnltkWord[ANALYSIS][0]\n strForm = []\n strForm.append(str(wid + 1))\n strForm.append('\\t')\n word_text = estnltkWord[TEXT]\n word_text = word_text.replace(' ', '_')\n strForm.append(word_text)\n strForm.append('\\t')\n word_root = firstAnalysis[ROOT]\n word_root = word_root.replace(' ', '_')\n if len(word_root) == 0:\n word_root = '??'\n strForm.append(word_root)\n strForm.append('\\t')\n strForm.append(firstAnalysis[POSTAG])\n strForm.append('\\t')\n finePos = firstAnalysis[POSTAG]\n if self.addAmbiguousPos and len(estnltkWord[ANALYSIS]) > 1:\n pos_tags = sorted(list(set([a[POSTAG] for a in estnltkWord[ANALYSIS]]))\n )\n finePos = '_'.join(pos_tags)\n strForm.append(finePos)\n strForm.append('\\t')\n grammCats = []\n if len(firstAnalysis[FORM]) != 0:\n forms = firstAnalysis[FORM].split()\n grammCats.extend(forms)\n if self.vcFeatures and self.vcFeatures[wid]:\n grammCats.extend(self.vcFeatures[wid])\n if self.addClauseBound and self.clbFeatures[wid]:\n grammCats.extend(self.clbFeatures[wid])\n if self.kFeatures and wid in self.kFeatures:\n grammCats.extend([self.kFeatures[wid]])\n if self.sayingverbs and wid in self.sayingverbs:\n grammCats.extend([self.sayingverbs[wid]])\n if not grammCats:\n grammCats = '_'\n else:\n grammCats = '|'.join(grammCats)\n strForm.append(grammCats)\n strForm.append('\\t')\n return strForm\n", "role": "user" }, { "content": "generate_features", "role": "assistant" } ]
def x(aes, value): if isinstance(value, six.string_types): return Rules(aes) else: rules = Rules() for idx, (pattern, css_value) in enumerate(value): rules.add_rule(pattern, '{0}_{1}'.format(aes, idx)) return rules
create_rules
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(aes, value):\n if isinstance(value, six.string_types):\n return Rules(aes)\n else:\n rules = Rules()\n for idx, (pattern, css_value) in enumerate(value):\n rules.add_rule(pattern, '{0}_{1}'.format(aes, idx))\n return rules\n", "role": "user" }, { "content": "create_rules", "role": "assistant" } ]
def x(self, pattern, css_class): self.__patterns.append(re.compile(pattern, flags=re.U | re.M)) self.__css_classes.append(css_class)
add_rule
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, pattern, css_class):\n self.__patterns.append(re.compile(pattern, flags=re.U | re.M))\n self.__css_classes.append(css_class)\n", "role": "user" }, { "content": "add_rule", "role": "assistant" } ]
def x(self, value): for idx, pattern in enumerate(self.__patterns): if pattern.match(value) is not None: return self.__css_classes[idx] return self.__default
get_css_class
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(self, value):\n for idx, pattern in enumerate(self.__patterns):\n if pattern.match(value) is not None:\n return self.__css_classes[idx]\n return self.__default\n", "role": "user" }, { "content": "get_css_class", "role": "assistant" } ]
def x(index_name, **kwargs): es = elasticsearch.Elasticsearch(**kwargs) es.indices.create(index=index_name, body=mapping) return connect(index_name, **kwargs)
create_index
[ { "content": "Given the source code of a python function, suggest a fitting name for the function.", "role": "system" }, { "content": "def x(index_name, **kwargs):\n es = elasticsearch.Elasticsearch(**kwargs)\n es.indices.create(index=index_name, body=mapping)\n return connect(index_name, **kwargs)\n", "role": "user" }, { "content": "create_index", "role": "assistant" } ]