Dataset columns:
  _id              : string (length 2 to 7)
  title            : string (length 1 to 88)
  partition        : string (3 classes)
  text             : string (length 75 to 19.8k)
  language         : string (1 class)
  meta_information : dict
q34200
LemmaReplacer._load_entries
train
def _load_entries(self):
    """Check for availability of lemmatizer for French."""
    rel_path = os.path.join('~', 'cltk_data', 'french', 'text', 'french_data_cltk', 'entries.py')
    path = os.path.expanduser(rel_path)
    # logger.info('Loading entries. This may take a minute.')
    loader = importlib.machinery.SourceFileLoader('entries', path)
    module = loader.load_module()
    entries = module.entries
    return entries
python
{ "resource": "" }
q34201
LemmaReplacer.lemmatize
train
def lemmatize(self, tokens):
    """Provide a lemma for each token."""
    entries = self.entries
    forms_and_lemmas = self.forms_and_lemmas
    lemma_list = [x[0] for x in entries]
    lemmatized = []
    for token in tokens:
        # Check for a match between the token and the list of lemmas.
        if token in lemma_list:
            lemmed = (token, token)
            lemmatized.append(lemmed)
        else:
            # If no match, check for a match between the token and the list of lemma forms.
            lemma = [k for k, v in forms_and_lemmas.items() if token in v]
            if lemma != []:
                lemmed = (token, lemma)
                lemmatized.append(lemmed)
            else:
                # If still no match, apply regular expressions and check
                # against the list of lemmas again.
                regexed = regex(token)
                if regexed in lemma_list:
                    lemmed = (token, regexed)
                    lemmatized.append(lemmed)
                else:
                    lemmed = (token, "None")
                    lemmatized.append(lemmed)
    return lemmatized
python
{ "resource": "" }
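A minimal, self-contained sketch of the lookup cascade above (exact-lemma match, then reverse lookup through attested forms, then a fallback). The `entries` and `forms_and_lemmas` values here are invented toy stand-ins for the French data files:

    # Toy stand-ins for the data normally loaded from cltk_data.
    entries = [('chanter',), ('amour',)]
    forms_and_lemmas = {'chanter': ['chantez', 'chantent'], 'amour': ['amours']}

    def lemmatize(tokens):
        lemma_list = [x[0] for x in entries]
        lemmatized = []
        for token in tokens:
            if token in lemma_list:                      # exact lemma match
                lemmatized.append((token, token))
            else:                                        # reverse lookup through attested forms
                lemma = [k for k, v in forms_and_lemmas.items() if token in v]
                lemmatized.append((token, lemma if lemma else "None"))
        return lemmatized

    print(lemmatize(['chanter', 'chantez', 'paris']))
    # [('chanter', 'chanter'), ('chantez', ['chanter']), ('paris', 'None')]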
q34202
BaseSentenceTokenizer.tokenize
train
def tokenize(self, text: str, model: object = None):
    """
    Method for tokenizing sentences with pretrained punkt models; can
    be overridden by language-specific tokenizers.

    :rtype: list
    :param text: text to be tokenized into sentences
    :type text: str
    :param model: tokenizer object to be used  # Should be in init?
    :type model: object
    """
    if not model:
        # fall back to the model set at init
        model = self.model
    tokenizer = model
    if self.lang_vars:
        tokenizer._lang_vars = self.lang_vars
    return tokenizer.tokenize(text)
python
{ "resource": "" }
q34203
BaseRegexSentenceTokenizer.tokenize
train
def tokenize(self, text: str, model: object = None):
    """
    Method for tokenizing sentences with regular expressions.

    :rtype: list
    :param text: text to be tokenized into sentences
    :type text: str
    """
    sentences = re.split(self.pattern, text)
    return sentences
python
{ "resource": "" }
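For illustration, the regex split at the heart of this tokenizer can be exercised standalone; the pattern below is a hypothetical stand-in for whatever `self.pattern` holds for a given language:

    import re

    # Hypothetical sentence-boundary pattern; real patterns are language-specific.
    pattern = r'(?<=[.;:])\s+'
    text = "Gallia est omnis divisa in partes tres; quarum unam incolunt Belgae."
    print(re.split(pattern, text))
    # ['Gallia est omnis divisa in partes tres;', 'quarum unam incolunt Belgae.']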
q34204
OldEnglishDictionaryLemmatizer._load_forms_and_lemmas
train
def _load_forms_and_lemmas(self):
    """Load the dictionary of lemmas and forms from the OE models repository."""
    rel_path = os.path.join(CLTK_DATA_DIR, 'old_english', 'model', 'old_english_models_cltk', 'data', 'oe.lemmas')
    path = os.path.expanduser(rel_path)
    self.lemma_dict = {}
    with open(path, 'r') as infile:
        lines = infile.read().splitlines()
        for line in lines:
            forms = line.split('\t')
            lemma = forms[0]
            for form_seq in forms:
                indiv_forms = form_seq.split(',')
                for form in indiv_forms:
                    form = form.lower()
                    lemma_list = self.lemma_dict.get(form, [])
                    lemma_list.append(lemma)
                    self.lemma_dict[form] = lemma_list
    # Deduplicate the lemma lists.
    for form in self.lemma_dict.keys():
        self.lemma_dict[form] = list(set(self.lemma_dict[form]))
python
{ "resource": "" }
q34205
OldEnglishDictionaryLemmatizer._load_type_counts
train
def _load_type_counts(self):
    """Load the table of frequency counts of word forms."""
    rel_path = os.path.join(CLTK_DATA_DIR, 'old_english', 'model', 'old_english_models_cltk', 'data', 'oe.counts')
    path = os.path.expanduser(rel_path)
    self.type_counts = {}
    with open(path, 'r') as infile:
        lines = infile.read().splitlines()
        for line in lines:
            count, word = line.split()
            self.type_counts[word] = int(count)
python
{ "resource": "" }
q34206
OldEnglishDictionaryLemmatizer._relative_frequency
train
def _relative_frequency(self, word):
    """Compute the log relative frequency for a word form."""
    count = self.type_counts.get(word, 0)
    return math.log(count / len(self.type_counts)) if count > 0 else 0
python
{ "resource": "" }
q34207
OldEnglishDictionaryLemmatizer._lemmatize_token
train
def _lemmatize_token(self, token, best_guess=True, return_frequencies=False):
    """Lemmatize a single token.

    If best_guess is true, then take the most frequent lemma when a form
    has multiple possible lemmatizations. If the form is not found, just
    return it. If best_guess is false, then always return the full set of
    possible lemmas, or the empty list if none are found.
    """
    lemmas = self.lemma_dict.get(token.lower(), None)
    if best_guess:
        if lemmas is None:
            lemma = token
        elif len(lemmas) > 1:
            counts = [self.type_counts[word] for word in lemmas]
            lemma = lemmas[argmax(counts)]
        else:
            lemma = lemmas[0]
        if return_frequencies:
            lemma = (lemma, self._relative_frequency(lemma))
    else:
        lemma = [] if lemmas is None else lemmas
        if return_frequencies:
            lemma = [(word, self._relative_frequency(word)) for word in lemma]
    return token, lemma
python
{ "resource": "" }
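The best-guess disambiguation above reduces to an argmax over form frequencies. A standalone sketch with made-up Old English counts (the real tables come from oe.lemmas and oe.counts):

    # Invented counts for illustration only.
    type_counts = {'wesan': 120, 'wes': 15}
    lemma_dict = {'waes': ['wesan', 'wes']}

    def best_guess(token):
        lemmas = lemma_dict.get(token.lower())
        if lemmas is None:
            return token                          # unknown form: echo it back
        counts = [type_counts.get(l, 0) for l in lemmas]
        return lemmas[counts.index(max(counts))]  # most frequent candidate wins

    print(best_guess('waes'))  # 'wesan'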
q34208
OldEnglishDictionaryLemmatizer.lemmatize
train
def lemmatize(self, text, best_guess=True, return_frequencies=False):
    """Lemmatize all tokens in a string or a list.

    A string is first tokenized using punkt. Raise a TypeError if the
    input is neither a string nor a list.
    """
    if isinstance(text, str):
        tokens = wordpunct_tokenize(text)
    elif isinstance(text, list):
        tokens = text
    else:
        raise TypeError("lemmatize only works with strings or lists of string tokens.")
    return [self._lemmatize_token(token, best_guess, return_frequencies) for token in tokens]
python
{ "resource": "" }
q34209
OldEnglishDictionaryLemmatizer.evaluate
train
def evaluate(self, filename):
    """Run the lemmatize function over the contents of the file, counting
    the proportion of tokens for which at least one lemma was found.
    """
    with open(filename, 'r') as infile:
        lines = infile.read().splitlines()
        lemma_count = 0
        token_count = 0
        for line in lines:
            line = re.sub(r'[.,!?:;0-9]', ' ', line)
            lemmas = [lemma for (_, lemma) in self.lemmatize(line, best_guess=False)]
            token_count += len(lemmas)
            lemma_count += len(lemmas) - lemmas.count([])
        return lemma_count / token_count
python
{ "resource": "" }
q34210
Stemmer.get_stem
train
def get_stem(self, noun, gender, mimation=True):
    """Return the stem of a noun, given its gender."""
    stem = ''
    if mimation and noun[-1:] == 'm':
        # noun = noun[:-1]
        pass
    # Take off ending
    if gender == 'm':
        if noun[-2:] in list(self.endings['m']['singular'].values()) + \
                list(self.endings['m']['dual'].values()):
            stem = noun[:-2]
        elif noun[-1] in list(self.endings['m']['plural'].values()):
            stem = noun[:-1]
        else:
            print("Unknown masculine noun: {}".format(noun))
    elif gender == 'f':
        if noun[-4:] in self.endings['f']['plural']['nominative'] + \
                self.endings['f']['plural']['oblique']:
            stem = noun[:-4] + 't'
        elif noun[-3:] in list(self.endings['f']['singular'].values()) + \
                list(self.endings['f']['dual'].values()):
            stem = noun[:-3] + 't'
        elif noun[-2:] in list(self.endings['m']['singular'].values()) + \
                list(self.endings['m']['dual'].values()):
            stem = noun[:-2]
        else:
            print("Unknown feminine noun: {}".format(noun))
    else:
        print("Unknown noun: {}".format(noun))
    return stem
python
{ "resource": "" }
q34211
Macronizer._retrieve_tag
train
def _retrieve_tag(self, text):
    """Tag text with chosen tagger and clean tags.

    Tag format: [('word', 'tag')]

    :param text: string
    :return: list of tuples, with each tuple containing the word and its pos tag
    :rtype : list
    """
    if self.tagger == 'tag_ngram_123_backoff':
        # Data format: Perseus Style (see https://github.com/cltk/latin_treebank_perseus)
        tags = POSTag('latin').tag_ngram_123_backoff(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
    elif self.tagger == 'tag_tnt':
        tags = POSTag('latin').tag_tnt(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
    elif self.tagger == 'tag_crf':
        tags = POSTag('latin').tag_crf(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
python
{ "resource": "" }
q34212
Macronizer._retrieve_morpheus_entry
train
def _retrieve_morpheus_entry(self, word):
    """Return Morpheus entry for word.

    Entry format: [(head word, tag, macronized form)]

    :param word: unmacronized, lowercased word
    :ptype word: string
    :return: Morpheus entry in tuples
    :rtype : list
    """
    entry = self.macron_data.get(word)
    if entry is None:
        logger.info('No Morpheus entry found for {}.'.format(word))
        return None
    elif len(entry) == 0:
        logger.info('No Morpheus entry found for {}.'.format(word))
    return entry
python
{ "resource": "" }
q34213
Macronizer._macronize_word
train
def _macronize_word(self, word):
    """Return macronized word.

    :param word: (word, tag)
    :ptype word: tuple
    :return: (word, tag, macronized_form)
    :rtype : tuple
    """
    head_word = word[0]
    tag = word[1]
    if tag is None:
        logger.info('Tagger {} could not tag {}.'.format(self.tagger, head_word))
        return head_word, tag, head_word
    elif tag == 'U--------':
        return head_word, tag.lower(), head_word
    else:
        entries = self._retrieve_morpheus_entry(head_word)
        if entries is None:
            return head_word, tag.lower(), head_word
        matched_entry = [entry for entry in entries if entry[0] == tag.lower()]
        if len(matched_entry) == 0:
            logger.info('No matching Morpheus entry found for {}.'.format(head_word))
            return head_word, tag.lower(), entries[0][2]
        elif len(matched_entry) == 1:
            return head_word, tag.lower(), matched_entry[0][2].lower()
        else:
            logger.info('Multiple matching entries found for {}.'.format(head_word))
            return head_word, tag.lower(), matched_entry[1][2].lower()
python
{ "resource": "" }
q34214
Macronizer.macronize_tags
train
def macronize_tags(self, text):
    """Return macronized form along with POS tags.

    E.g. "Gallia est omnis divisa in partes tres," ->
    [('gallia', 'n-s---fb-', 'galliā'), ('est', 'v3spia---', 'est'),
     ('omnis', 'a-s---mn-', 'omnis'), ('divisa', 't-prppnn-', 'dīvīsa'),
     ('in', 'r--------', 'in'), ('partes', 'n-p---fa-', 'partēs'),
     ('tres', 'm--------', 'trēs')]

    :param text: raw text
    :return: tuples of head word, tag, macronized form
    :rtype : list
    """
    return [self._macronize_word(word) for word in self._retrieve_tag(text)]
python
{ "resource": "" }
q34215
Macronizer.macronize_text
train
def macronize_text(self, text):
    """Return macronized form of text.

    E.g. "Gallia est omnis divisa in partes tres," ->
    "galliā est omnis dīvīsa in partēs trēs ,"

    :param text: raw text
    :return: macronized text
    :rtype : str
    """
    macronized_words = [entry[2] for entry in self.macronize_tags(text)]
    return " ".join(macronized_words)
python
{ "resource": "" }
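A hedged usage sketch: assuming the CLTK Latin models are installed under cltk_data and that Macronizer lives at its usual CLTK path, the tagger name mirrors the branches in _retrieve_tag above:

    # Sketch only; requires the CLTK Latin models to be downloaded first.
    from cltk.prosody.latin.macronizer import Macronizer

    macronizer = Macronizer('tag_ngram_123_backoff')
    print(macronizer.macronize_text("Gallia est omnis divisa in partes tres,"))
    # expected per the docstring above: "galliā est omnis dīvīsa in partēs trēs ,"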
q34216
Tokenizer.string_tokenizer
train
def string_tokenizer(self, untokenized_string: str, include_blanks=False):
    """
    This function is based off CLTK's line tokenizer. Use this for strings
    rather than .txt files.

    input: '20. u2-sza-bi-la-kum\n1. a-na ia-as2-ma-ah-{d}iszkur#\n2. qi2-bi2-ma\n3. um-ma {d}utu-szi-{d}iszkur\n'
    output: ['20. u2-sza-bi-la-kum', '1. a-na ia-as2-ma-ah-{d}iszkur#', '2. qi2-bi2-ma']

    :param untokenized_string: string
    :param include_blanks: instances of empty lines
    :return: lines as strings in list
    """
    assert isinstance(untokenized_string, str), \
        'Incoming argument must be a string.'
    line_output = []
    if include_blanks:
        tokenized_lines = untokenized_string.splitlines()
    else:
        # drop blank lines
        tokenized_lines = [line for line in untokenized_string.splitlines() if line != '']
    for line in tokenized_lines:
        # Strip out damage characters
        if not self.damage:  # Add 'xn' -- missing sign or number?
            line = ''.join(c for c in line if c not in "#[]?!*")
        re.match(r'^\d*\.|\d\'\.', line)  # match result is discarded
        line_output.append(line.rstrip())
    return line_output
python
{ "resource": "" }
q34217
Tokenizer.line_tokenizer
train
def line_tokenizer(self, text):
    """
    From a .txt file, outputs lines as string in list.

    input:
        21. u2-wa-a-ru at-ta e2-kal2-la-ka _e2_-ka wu-e-er
        22. ... u2-ul szi-...
        23. ... x ...
    output: ['21. u2-wa-a-ru at-ta e2-kal2-la-ka _e2_-ka wu-e-er',
             '22. ... u2-ul szi-...', '23. ... x ...']

    :param: .txt file containing untokenized string
    :return: lines as strings in list
    """
    assert isinstance(text, str), 'Incoming argument must be a string.'
    line_output = []
    with open(text, mode='r', encoding='utf8') as file:
        lines = file.readlines()
    for line in lines:
        # Strip out damage characters
        if not self.damage:  # Add 'xn' -- missing sign or number?
            line = ''.join(c for c in line if c not in "#[]?!*")
        re.match(r'^\d*\.|\d\'\.', line)  # match result is discarded
        line_output.append(line.rstrip())
    return line_output
python
{ "resource": "" }
q34218
Syllabifier.get_lang_data
train
def get_lang_data(self):
    """Define and call data for future use. Initializes and defines
    all variables which define the phonetic vectors.
    """
    root = os.path.expanduser('~')
    csv_dir_path = os.path.join(root, 'cltk_data/sanskrit/model/sanskrit_models_cltk/phonetics')
    all_phonetic_csv = os.path.join(csv_dir_path, 'all_script_phonetic_data.csv')
    tamil_csv = os.path.join(csv_dir_path, 'tamil_script_phonetic_data.csv')

    # Make helper function for this
    with open(all_phonetic_csv, 'r') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # Skip headers
        all_phonetic_data = [row for row in reader]

    with open(tamil_csv, 'r') as f:
        reader = csv.reader(f, delimiter=',', quotechar='"')
        next(reader, None)  # Skip headers
        # tamil_phonetic_data = [row[PHONETIC_VECTOR_START_OFFSET:] for row in reader]
        tamil_phonetic_data = [row for row in reader]

    # Handle better?
    all_phonetic_data = [[int(cell) if cell == '0' or cell == '1' else cell for cell in row]
                         for row in all_phonetic_data]
    tamil_phonetic_data = [[int(cell) if cell == '0' or cell == '1' else cell for cell in row]
                           for row in tamil_phonetic_data]
    all_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in all_phonetic_data])
    tamil_phonetic_vectors = np.array([row[PHONETIC_VECTOR_START_OFFSET:] for row in tamil_phonetic_data])
    phonetic_vector_length = all_phonetic_vectors.shape[1]

    return all_phonetic_data, tamil_phonetic_data, all_phonetic_vectors, tamil_phonetic_vectors, phonetic_vector_length
python
{ "resource": "" }
q34219
Syllabifier.orthographic_syllabify
train
def orthographic_syllabify(self, word):
    """Main syllabification function."""
    p_vectors = [self.get_phonetic_feature_vector(c, self.lang) for c in word]
    syllables = []
    for i in range(len(word)):
        v = p_vectors[i]
        syllables.append(word[i])
        if i + 1 < len(word) and \
                (not self.is_valid(p_vectors[i + 1]) or self.is_misc(p_vectors[i + 1])):
            syllables.append(u' ')
        elif not self.is_valid(v) or self.is_misc(v):
            syllables.append(u' ')
        elif self.is_vowel(v):
            anu_nonplos = (i + 2 < len(word) and
                           self.is_anusvaar(p_vectors[i + 1]) and
                           not self.is_plosive(p_vectors[i + 2]))
            anu_eow = (i + 2 == len(word) and
                       self.is_anusvaar(p_vectors[i + 1]))
            if not (anu_nonplos or anu_eow):
                syllables.append(u' ')
        elif i + 1 < len(word) and (self.is_consonant(v) or self.is_nukta(v)):
            if self.is_consonant(p_vectors[i + 1]):
                syllables.append(u' ')
            elif self.is_vowel(p_vectors[i + 1]) and \
                    not self.is_dependent_vowel(p_vectors[i + 1]):
                syllables.append(u' ')
            elif self.is_anusvaar(p_vectors[i + 1]):
                anu_nonplos = (i + 2 < len(word) and not self.is_plosive(p_vectors[i + 2]))
                anu_eow = i + 2 == len(word)
                if not (anu_nonplos or anu_eow):
                    syllables.append(u' ')
    return u''.join(syllables).strip().split(u' ')
python
{ "resource": "" }
q34220
read_file
train
def read_file(filepath: str) -> str:
    """Read a file and return it as a string."""
    # ? Check this is ok if absolute paths passed in
    filepath = os.path.expanduser(filepath)
    with open(filepath) as opened_file:  # type: IO
        file_read = opened_file.read()  # type: str
    return file_read
python
{ "resource": "" }
q34221
ConcordanceIndex.return_concordance_all
train
def return_concordance_all(self, tokens: List[str]) -> List[List[str]]:
    """Take a list of tokens, iteratively run each word through
    return_concordance_word and build a list of all. This returns
    a list of lists.
    """
    coll = pyuca.Collator()  # type: pyuca.Collator
    tokens = sorted(tokens, key=coll.sort_key)  # ! is the list order preserved?
    concordance_list = []  # type: List[List[str]]
    for token in tokens:
        concordance_list_for_word = self.return_concordance_word(token)  # List[str]
        if concordance_list_for_word:
            concordance_list.append(concordance_list_for_word)
    return concordance_list
python
{ "resource": "" }
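The pyuca collation used above sorts Unicode text in collation order rather than by raw code point, which matters for polytonic Greek. A small standalone check:

    import pyuca  # pip install pyuca

    coll = pyuca.Collator()
    words = ['ὥρα', 'ἀγορά', 'ζῷον']
    print(sorted(words, key=coll.sort_key))  # Unicode-collated, not code-point, order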
q34222
ScansionFormatter.hexameter
train
def hexameter(self, line: str) -> str:
    """
    Format a string of hexameter metrical stress patterns into foot divisions.

    :param line: the scansion pattern
    :return: the scansion string formatted with foot breaks

    >>> print(ScansionFormatter().hexameter("-UU-UU-UU---UU--"))
    -UU|-UU|-UU|--|-UU|--
    """
    mylist = list(line)
    items = len(mylist)
    idx_start = items - 2
    idx_end = items
    while idx_start > 0:
        potential_foot = "".join(mylist[idx_start: idx_end])
        if potential_foot == self.constants.HEXAMETER_ENDING or \
                potential_foot == self.constants.SPONDEE:
            mylist.insert(idx_start, self.constants.FOOT_SEPARATOR)
            idx_start -= 1
            idx_end -= 2
        if potential_foot == self.constants.DACTYL:
            mylist.insert(idx_start, "|")
            idx_start -= 1
            idx_end -= 3
        idx_start -= 1
    return "".join(mylist)
python
{ "resource": "" }
q34223
ScansionFormatter.merge_line_scansion
train
def merge_line_scansion(self, line: str, scansion: str) -> str:
    """
    Merge a line of verse with its scansion string. Do not accent diphthongs.

    :param line: the original Latin verse line
    :param scansion: the scansion pattern
    :return: the original line with the scansion pattern applied via macrons

    >>> print(ScansionFormatter().merge_line_scansion(
    ... "Arma virumque cano, Troiae qui prīmus ab ōrīs",
    ... "- U U - U U - UU- - - U U - -"))
    Ārma virūmque canō, Troiae quī prīmus ab ōrīs
    >>> print(ScansionFormatter().merge_line_scansion(
    ... "lītora, multum ille et terrīs iactātus et alto",
    ... " - U U - - - - - - - U U - U"))
    lītora, mūltum īlle ēt tērrīs iāctātus et ālto
    >>> print(ScansionFormatter().merge_line_scansion(
    ... 'aut facere, haec a te dictaque factaque sunt',
    ... ' - U U - - - - U U - U U - '))
    aut facere, haec ā tē dīctaque fāctaque sūnt
    """
    letters = list(line)
    marks = list(scansion)
    if len(scansion) < len(line):
        # pad the marks out to the length of the line
        marks += [" "] * (len(line) - len(scansion))
    for idx in range(0, len(marks)):
        if marks[idx] == self.constants.STRESSED:
            vowel = letters[idx]
            if vowel not in self.stress_accent_dict:
                LOG.error("problem! vowel: {} not in dict for line {}".format(vowel, line))
            else:
                if idx > 1:
                    if (letters[idx - 2] + letters[idx - 1]).lower() == "qu":
                        letters[idx] = self.stress_accent_dict[vowel]
                        continue
                if idx > 0:
                    if letters[idx - 1] + vowel in self.constants.DIPTHONGS:
                        continue
                    letters[idx] = self.stress_accent_dict[vowel]
                else:
                    letters[idx] = self.stress_accent_dict[vowel]
    return "".join(letters).rstrip()
python
{ "resource": "" }
q34224
arabicrange
train
def arabicrange():
    u"""Return a list of Arabic characters.

    Return a list of characters between \u0600 and \u0652.
    @return: list of Arabic characters.
    @rtype: unicode
    """
    mylist = []
    for i in range(0x0600, 0x0653):
        try:
            mylist.append(unichr(i))
        except NameError:
            # Python 3 compatible
            mylist.append(chr(i))
        except ValueError:
            pass
    return mylist
python
{ "resource": "" }
q34225
is_vocalized
train
def is_vocalized(word):
    """Checks if the Arabic word is vocalized.

    The word must not have any spaces or punctuation.
    @param word: arabic unicode char
    @type word: unicode
    @return: if the word is vocalized
    @rtype: Boolean
    """
    if word.isalpha():
        return False
    for char in word:
        if is_tashkeel(char):
            return True
    else:
        return False
python
{ "resource": "" }
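Note that the `else` above belongs to the `for` loop, not to the `if`: it runs only when the loop finishes without hitting `return True`. A tiny standalone illustration of the construct:

    def any_digit(s):
        for ch in s:
            if ch.isdigit():
                return True
        else:
            # reached only when the loop was not left early
            return False

    print(any_digit("abc"))  # False
    print(any_digit("a1c"))  # True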
q34226
is_arabicstring
train
def is_arabicstring(text):
    """Checks for Arabic standard Unicode block characters.

    An Arabic string can contain spaces, digits and punctuation,
    but only Arabic standard characters, not extended Arabic.
    @param text: input text
    @type text: unicode
    @return: True if all characters are in the Arabic block
    @rtype: Boolean
    """
    if re.search(u"([^\u0600-\u0652%s%s%s\s\d])" \
                 % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), text):
        return False
    return True
python
{ "resource": "" }
q34227
is_arabicword
train
def is_arabicword(word):
    """Checks for a valid Arabic word.

    An Arabic word must not contain spaces, digits or punctuation.
    To avoid some spelling errors, TEH_MARBUTA must be at the end.
    @param word: input word
    @type word: unicode
    @return: True if all characters are in the Arabic block
    @rtype: Boolean
    """
    if len(word) == 0:
        return False
    elif re.search(u"([^\u0600-\u0652%s%s%s])" \
                   % (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_MADDA_ABOVE), word):
        return False
    elif is_haraka(word[0]) or word[0] in (WAW_HAMZA, YEH_HAMZA):
        return False
    # if Teh Marbuta or Alef_Maksura are not at the end
    elif re.match(u"^(.)*[%s](.)+$" % ALEF_MAKSURA, word):
        return False
    elif re.match(u"^(.)*[%s]([^%s%s%s])(.)+$" % \
                  (TEH_MARBUTA, DAMMA, KASRA, FATHA), word):
        return False
    else:
        return True
python
{ "resource": "" }
q34228
normalize_hamza
train
def normalize_hamza(word):
    """Standardize the Hamzat into one form of hamza;
    replace Madda by hamza and alef.
    Replace the LamAlefs by simplified letters.
    @param word: arabic text.
    @type word: unicode.
    @return: return a converted text.
    @rtype: unicode.
    """
    if word.startswith(ALEF_MADDA):
        if len(word) >= 3 and (word[1] not in HARAKAT) and \
                (word[2] == SHADDA or len(word) == 3):
            word = HAMZA + ALEF + word[1:]
        else:
            word = HAMZA + HAMZA + word[1:]
    # convert all remaining hamza forms into one form
    word = word.replace(ALEF_MADDA, HAMZA + HAMZA)
    word = HAMZAT_PATTERN.sub(HAMZA, word)
    return word
python
{ "resource": "" }
q34229
joint
train
def joint(letters, marks):
    """Join the letters with the marks; the lengths of letters and marks
    must be equal. Return the joined word.

    @param letters: the word letters
    @type letters: unicode
    @param marks: the word marks
    @type marks: unicode
    @return: word
    @rtype: unicode
    """
    # The lengths of letters and marks must be equal
    if len(letters) != len(marks):
        return ""
    stack_letter = stack.Stack(letters)
    stack_letter.items.reverse()
    stack_mark = stack.Stack(marks)
    stack_mark.items.reverse()
    word_stack = stack.Stack()
    last_letter = stack_letter.pop()
    last_mark = stack_mark.pop()
    vowels = HARAKAT
    while last_letter is not None and last_mark is not None:
        if last_letter == SHADDA:
            top = word_stack.pop()
            if top not in vowels:
                word_stack.push(top)
            word_stack.push(last_letter)
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        else:
            word_stack.push(last_letter)
            if last_mark != NOT_DEF_HARAKA:
                word_stack.push(last_mark)
        last_letter = stack_letter.pop()
        last_mark = stack_mark.pop()
    if not (stack_letter.is_empty() and stack_mark.is_empty()):
        return False
    else:
        return ''.join(word_stack.items)
python
{ "resource": "" }
q34230
shaddalike
train
def shaddalike(partial, fully):
    """
    If the two words have the same letters and the same harakats, this
    function returns True. The first word is partially vocalized, the
    second is fully vocalized. If the partial word contains a shadda,
    it must be at the same place in the fully vocalized word.

    @param partial: the partially vocalized word
    @type partial: unicode
    @param fully: the fully vocalized word
    @type fully: unicode
    @return: if contains shadda
    @rtype: Boolean
    """
    # The input has no shadda; no need to search.
    if not has_shadda(partial):
        return True
    # The input has a shadda but the result has none: invalid.
    elif not has_shadda(fully) and has_shadda(partial):
        return False
    # Both input and output have a shadda; verify their positions.
    partial = strip_harakat(partial)
    fully = strip_harakat(fully)
    pstack = stack.Stack(partial)
    vstack = stack.Stack(fully)
    plast = pstack.pop()
    vlast = vstack.pop()
    # if debug: print "+0", Pstack, Vstack
    while plast is not None and vlast is not None:
        if plast == vlast:
            plast = pstack.pop()
            vlast = vstack.pop()
        elif plast == SHADDA and vlast != SHADDA:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
        elif plast != SHADDA and vlast == SHADDA:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            vlast = vstack.pop()
        else:
            # if debug: print "+2", Pstack.items, Plast, Vstack.items, Vlast
            break
    if not (pstack.is_empty() and vstack.is_empty()):
        return False
    else:
        return True
python
{ "resource": "" }
q34231
reduce_tashkeel
train
def reduce_tashkeel(text):
    """Reduce the Tashkeel by deleting evident cases.

    @param text: the input text fully vocalized.
    @type text: unicode.
    @return: partially vocalized text.
    @rtype: unicode.
    """
    patterns = [
        # delete all fathat, except on waw and yeh
        u"(?<!(%s|%s))(%s|%s)" % (WAW, YEH, SUKUN, FATHA),
        # delete damma if followed by waw.
        u"%s(?=%s)" % (DAMMA, WAW),
        # delete kasra if followed by yeh.
        u"%s(?=%s)" % (KASRA, YEH),
        # delete fatha if followed by alef to reduce yeh maftouha
        # and waw maftouha before alef.
        u"%s(?=%s)" % (FATHA, ALEF),
        # delete fatha from yeh and waw if they are at the word beginning.
        u"(?<=\s(%s|%s))%s" % (WAW, YEH, FATHA),
        # delete kasra if preceded by Hamza below alef.
        u"(?<=%s)%s" % (ALEF_HAMZA_BELOW, KASRA),
    ]
    reduced = text
    for pat in patterns:
        reduced = re.sub(pat, '', reduced)
    return reduced
python
{ "resource": "" }
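Each pattern above deletes a diacritic only in a specific context, using lookbehind/lookahead so that the anchoring letter itself is untouched. The same idiom in a plain-ASCII sketch (the letters here are arbitrary stand-ins):

    import re

    # Delete 'u' only when it is followed by 'w' (cf. "delete damma if followed by waw").
    print(re.sub(r'u(?=w)', '', 'suwar'))   # 'swar'
    # Delete 'i' only when preceded by 'h' (cf. the hamza-below-alef rule).
    print(re.sub(r'(?<=h)i', '', 'hijab'))  # 'hjab'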
q34232
vocalized_similarity
train
def vocalized_similarity(word1, word2):
    """
    If the two words have the same letters and the same harakats, this
    function returns True. The two words can be fully or partially
    vocalized.

    @param word1: first word
    @type word1: unicode
    @param word2: second word
    @type word2: unicode
    @return: True if the words are similar, else the negative number of errors
    @rtype: Boolean / int
    """
    stack1 = stack.Stack(word1)
    stack2 = stack.Stack(word2)
    last1 = stack1.pop()
    last2 = stack2.pop()
    err_count = 0
    vowels = HARAKAT
    while last1 is not None and last2 is not None:
        if last1 == last2:
            last1 = stack1.pop()
            last2 = stack2.pop()
        elif last1 in vowels and last2 not in vowels:
            last1 = stack1.pop()
        elif last1 not in vowels and last2 in vowels:
            last2 = stack2.pop()
        else:
            # break
            if last1 == SHADDA:
                last1 = stack1.pop()
            elif last2 == SHADDA:
                last2 = stack2.pop()
            else:
                last1 = stack1.pop()
                last2 = stack2.pop()
                err_count += 1
    if err_count > 0:
        return -err_count
    else:
        return True
python
{ "resource": "" }
q34233
tokenize
train
def tokenize(text=""):
    """
    Tokenize text into words.

    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    if text == '':
        return []
    else:
        # split tokens
        mylist = TOKEN_PATTERN.split(text)
        # don't remove newline \n
        mylist = [TOKEN_REPLACE.sub('', x) for x in mylist if x]
        # remove empty substrings
        mylist = [x for x in mylist if x]
        return mylist
python
{ "resource": "" }
q34234
gen_docs
train
def gen_docs(corpus, lemmatize, rm_stops):
    """Open and process files from a corpus. Return a list of sentences for an author.
    Each sentence is itself a list of tokenized words.
    """
    assert corpus in ['phi5', 'tlg']
    if corpus == 'phi5':
        language = 'latin'
        filepaths = assemble_phi5_author_filepaths()
        jv_replacer = JVReplacer()
        text_cleaner = phi5_plaintext_cleanup
        word_tokenizer = nltk_tokenize_words
        if rm_stops:
            stops = latin_stops
        else:
            stops = None
    elif corpus == 'tlg':
        language = 'greek'
        filepaths = assemble_tlg_author_filepaths()
        text_cleaner = tlg_plaintext_cleanup
        word_tokenizer = nltk_tokenize_words
        if rm_stops:
            stops = latin_stops  # NOTE: reuses the Latin stoplist for Greek
        else:
            stops = None
    if lemmatize:
        lemmatizer = LemmaReplacer(language)
    sent_tokenizer = TokenizeSentence(language)
    for filepath in filepaths:
        with open(filepath) as f:
            text = f.read()
        # light first-pass cleanup, before sentence tokenization (which relies on punctuation)
        text = text_cleaner(text, rm_punctuation=False, rm_periods=False)
        sent_tokens = sent_tokenizer.tokenize_sentences(text)
        # doc_sentences = []
        for sentence in sent_tokens:
            # a second cleanup at sentence-level, to rm all punctuation
            sentence = text_cleaner(sentence, rm_punctuation=True, rm_periods=True)
            sentence = word_tokenizer(sentence)
            sentence = [s.lower() for s in sentence]
            sentence = [w for w in sentence if w]
            if language == 'latin':
                sentence = [w[1:] if w.startswith('-') else w for w in sentence]
            if stops:
                sentence = [w for w in sentence if w not in stops]
            sentence = [w for w in sentence if len(w) > 1]  # rm short words
            if sentence:
                sentence = sentence
            if lemmatize:
                sentence = lemmatizer.lemmatize(sentence)
            if sentence and language == 'latin':
                sentence = [jv_replacer.replace(word) for word in sentence]
            if sentence:
                yield sentence
python
{ "resource": "" }
q34235
make_model
train
def make_model(corpus, lemmatize=False, rm_stops=False, size=100, window=10,
               min_count=5, workers=4, sg=1, save_path=None):
    """Train W2V model."""
    # Simple training, with one large list
    t0 = time.time()
    sentences_stream = gen_docs(corpus, lemmatize=lemmatize, rm_stops=rm_stops)
    # sentences_list = []
    # for sent in sentences_stream:
    #     sentences_list.append(sent)
    model = Word2Vec(sentences=list(sentences_stream), size=size, window=window,
                     min_count=min_count, workers=workers, sg=sg)
    # "Trim" the model of unnecessary data. Model cannot be updated anymore.
    model.init_sims(replace=True)
    if save_path:
        save_path = os.path.expanduser(save_path)
        model.save(save_path)
    print('Total training time for {0}: {1} minutes'.format(save_path, (time.time() - t0) / 60))
python
{ "resource": "" }
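A hedged usage sketch: the keyword names here (size, sg, init_sims) match the older gensim 3.x API this code was written against; gensim 4+ renamed size to vector_size and removed init_sims.

    # Sketch, assuming gensim 3.x and a PHI5 corpus available under cltk_data.
    make_model('phi5', lemmatize=False, rm_stops=True, size=100, window=10,
               min_count=5, workers=4, sg=1,
               save_path='~/cltk_data/latin/model/latin_word2vec_cltk/test.model')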
q34236
get_sims
train
def get_sims(word, language, lemmatized=False, threshold=0.70):
    """Get similar Word2Vec terms from vocabulary or trained model.

    TODO: Add option to install corpus if not available.
    """
    # Normalize incoming word string
    jv_replacer = JVReplacer()
    if language == 'latin':
        # Note that casefold() seemingly does not work with diacritic
        # Greek, likely because it expects single code points, not
        # diacritics. Look into global string normalization to code points
        # for all languages, especially Greek.
        word = jv_replacer.replace(word).casefold()
    model_dirs = {'greek': '~/cltk_data/greek/model/greek_word2vec_cltk',
                  'latin': '~/cltk_data/latin/model/latin_word2vec_cltk'}
    assert language in model_dirs.keys(), \
        'Languages available with Word2Vec model: {}'.format(model_dirs.keys())
    if lemmatized:
        lemma_str = '_lemmed'
    else:
        lemma_str = ''
    model_name = '{0}_s100_w30_min5_sg{1}.model'.format(language, lemma_str)
    model_dir_abs = os.path.expanduser(model_dirs[language])
    model_path = os.path.join(model_dir_abs, model_name)
    try:
        model = Word2Vec.load(model_path)
    except FileNotFoundError as fnf_error:
        print(fnf_error)
        print("CLTK's Word2Vec models cannot be found. Please import '{}_word2vec_cltk'.".format(language))
        raise
    try:
        similars = model.most_similar(word)
    except KeyError as key_err:
        print(key_err)
        possible_matches = []
        for term in model.vocab:
            if term.startswith(word[:3]):
                possible_matches.append(term)
        print("The following terms in the Word2Vec model you may be looking for: '{}'.".format(possible_matches))
        return None
    returned_sims = []
    for similar in similars:
        if similar[1] > threshold:
            returned_sims.append(similar[0])
    if not returned_sims:
        print("Matches found, but below the threshold of 'threshold={}'. Lower it to see these results.".format(threshold))
    return returned_sims
python
{ "resource": "" }
q34237
HexameterScanner.invalid_foot_to_spondee
train
def invalid_foot_to_spondee(self, feet: list, foot: str, idx: int) -> str:
    """
    In hexameters, a single foot that is an unstressed-stressed syllable
    pattern is often just a double spondee, so here we coerce it to stressed.

    :param feet: list of string representations of metrical feet
    :param foot: the bad foot to correct
    :param idx: the index of the foot to correct
    :return: corrected scansion

    >>> print(HexameterScanner().invalid_foot_to_spondee(
    ... ['-UU', '--', '-U', 'U-', '--', '-UU'], '-U', 2))  # doctest: +NORMALIZE_WHITESPACE
    -UU----U----UU
    """
    new_foot = foot.replace(self.constants.UNSTRESSED, self.constants.STRESSED)
    feet[idx] = new_foot
    return "".join(feet)
python
{ "resource": "" }
q34238
HexameterScanner.correct_dactyl_chain
train
def correct_dactyl_chain(self, scansion: str) -> str:
    """
    Three or more unstressed accents in a row is a broken dactyl chain,
    best detected and processed backwards.

    Since this method takes a Procrustean approach to modifying the
    scansion pattern, it is not used by default in the scan method;
    however, it is available as an optional keyword parameter, and users
    looking to further automate the generation of scansion candidates
    should consider using this as a fall back.

    :param scansion: scansion with broken dactyl chain; inverted amphibrachs not allowed
    :return: corrected line of scansion

    >>> print(HexameterScanner().correct_dactyl_chain(
    ... "- U U - - U U - - - U U - x"))
    - - - - - U U - - - U U - x
    >>> print(HexameterScanner().correct_dactyl_chain(
    ... "- U U U U - - - - - U U - U"))  # doctest: +NORMALIZE_WHITESPACE
    - - - U U - - - - - U U - U
    """
    mark_list = string_utils.mark_list(scansion)
    vals = list(scansion.replace(" ", ""))
    # ignore last two positions, save them
    feet = [vals.pop(), vals.pop()]
    length = len(vals)
    idx = length - 1
    while idx > 0:
        one = vals[idx]
        two = vals[idx - 1]
        if idx > 1:
            three = vals[idx - 2]
        else:
            three = ""
        # Dactyl foot is okay, no corrections
        if one == self.constants.UNSTRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.STRESSED:
            feet += [one]
            feet += [two]
            feet += [three]
            idx -= 3
            continue
        # Spondee foot is okay, no corrections
        if one == self.constants.STRESSED and \
                two == self.constants.STRESSED:
            feet += [one]
            feet += [two]
            idx -= 2
            continue
        # handle "U U U" foot as "- U U"
        if one == self.constants.UNSTRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.UNSTRESSED:
            feet += [one]
            feet += [two]
            feet += [self.constants.STRESSED]
            idx -= 3
            continue
        # handle "U U -" foot as "- -"
        if one == self.constants.STRESSED and \
                two == self.constants.UNSTRESSED and \
                three == self.constants.UNSTRESSED:
            feet += [self.constants.STRESSED]
            feet += [self.constants.STRESSED]
            idx -= 2
            continue
        # handle "- U" foot as "- -"
        if one == self.constants.UNSTRESSED and \
                two == self.constants.STRESSED:
            feet += [self.constants.STRESSED]
            feet += [two]
            idx -= 2
            continue
    corrected = "".join(feet[::-1])
    new_line = list(" " * len(scansion))
    for idx, car in enumerate(corrected):
        new_line[mark_list[idx]] = car
    return "".join(new_line)
python
{ "resource": "" }
q34239
apply_raw_r_assimilation
train
def apply_raw_r_assimilation(last_syllable: str) -> str:
    """
    An -r ending preceded by -s, -l or -n assimilates, doubling the
    consonant to -ss, -ll or -nn respectively.

    >>> apply_raw_r_assimilation("arm")
    'armr'
    >>> apply_raw_r_assimilation("ás")
    'áss'
    >>> apply_raw_r_assimilation("stól")
    'stóll'
    >>> apply_raw_r_assimilation("stein")
    'steinn'
    >>> apply_raw_r_assimilation("vin")
    'vinn'

    :param last_syllable: last syllable of an Old Norse word
    :return: the syllable with the -r ending applied
    """
    if len(last_syllable) > 0:
        if last_syllable[-1] == "l":
            return last_syllable + "l"
        elif last_syllable[-1] == "s":
            return last_syllable + "s"
        elif last_syllable[-1] == "n":
            return last_syllable + "n"
    return last_syllable + "r"
python
{ "resource": "" }
q34240
add_r_ending_to_syllable
train
def add_r_ending_to_syllable(last_syllable: str, is_first=True) -> str:
    """
    Adds the -r ending to the last syllable of an Old Norse word.
    In some cases, it really adds an -r. In other cases, it only doubles
    the last character or leaves the syllable unchanged.

    >>> add_r_ending_to_syllable("arm", True)
    'armr'
    >>> add_r_ending_to_syllable("ás", True)
    'áss'
    >>> add_r_ending_to_syllable("stól", True)
    'stóll'
    >>> "jö"+add_r_ending_to_syllable("kul", False)
    'jökull'
    >>> add_r_ending_to_syllable("stein", True)
    'steinn'
    >>> 'mi'+add_r_ending_to_syllable('kil', False)
    'mikill'
    >>> add_r_ending_to_syllable('sæl', True)
    'sæll'
    >>> 'li'+add_r_ending_to_syllable('til', False)
    'litill'
    >>> add_r_ending_to_syllable('vænn', True)
    'vænn'
    >>> add_r_ending_to_syllable('lauss', True)
    'lauss'
    >>> add_r_ending_to_syllable("vin", True)
    'vinr'
    >>> add_r_ending_to_syllable("sel", True)
    'selr'
    >>> add_r_ending_to_syllable('fagr', True)
    'fagr'
    >>> add_r_ending_to_syllable('vitr', True)
    'vitr'
    >>> add_r_ending_to_syllable('vetr', True)
    'vetr'
    >>> add_r_ending_to_syllable('akr', True)
    'akr'
    >>> add_r_ending_to_syllable('Björn', True)
    'Björn'
    >>> add_r_ending_to_syllable('þurs', True)
    'þurs'
    >>> add_r_ending_to_syllable('karl', True)
    'karl'
    >>> add_r_ending_to_syllable('hrafn', True)
    'hrafn'

    :param last_syllable: last syllable of the word
    :param is_first: is it the first syllable of the word?
    :return: inflected syllable
    """
    if len(last_syllable) >= 2:
        if last_syllable[-1] in ['l', 'n', 's', 'r']:
            if last_syllable[-2] in CONSONANTS:
                # Apocope of r
                return last_syllable
            else:
                # Assimilation of r
                if len(last_syllable) >= 3 and last_syllable[-3:-1] in DIPHTHONGS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in SHORT_VOWELS and is_first:
                    # No assimilation when r is supposed to be added to a stressed
                    # syllable whose last letter is l, n or s and the penultimate
                    # letter is a short vowel
                    return last_syllable + "r"
                elif last_syllable[-2] in SHORT_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in LONG_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                return apply_raw_r_assimilation(last_syllable)
        else:
            return last_syllable + "r"
    else:
        return last_syllable + "r"
python
{ "resource": "" }
q34241
add_r_ending
train
def add_r_ending(stem: str) -> str:
    """
    Adds an -r ending to an Old Norse noun.

    >>> add_r_ending("arm")
    'armr'
    >>> add_r_ending("ás")
    'áss'
    >>> add_r_ending("stól")
    'stóll'
    >>> add_r_ending("jökul")
    'jökull'
    >>> add_r_ending("stein")
    'steinn'
    >>> add_r_ending('mikil')
    'mikill'
    >>> add_r_ending('sæl')
    'sæll'
    >>> add_r_ending('litil')
    'litill'
    >>> add_r_ending('vænn')
    'vænn'
    >>> add_r_ending('lauss')
    'lauss'
    >>> add_r_ending("vin")
    'vinr'
    >>> add_r_ending("sel")
    'selr'
    >>> add_r_ending('fagr')
    'fagr'
    >>> add_r_ending('vitr')
    'vitr'
    >>> add_r_ending('vetr')
    'vetr'
    >>> add_r_ending('akr')
    'akr'
    >>> add_r_ending('Björn')
    'björn'
    >>> add_r_ending('þurs')
    'þurs'
    >>> add_r_ending('karl')
    'karl'
    >>> add_r_ending('hrafn')
    'hrafn'

    :param stem: stem of an Old Norse noun
    :return: the stem with the -r ending applied
    """
    s_stem = s.syllabify_ssp(stem.lower())
    n_stem = len(s_stem)
    last_syllable = Syllable(s_stem[-1], VOWELS, CONSONANTS)
    return "".join(s_stem[:-1]) + add_r_ending_to_syllable(last_syllable.text, n_stem == 1)
python
{ "resource": "" }
q34242
apply_i_umlaut
train
def apply_i_umlaut(stem: str):
    """
    Changes the vowel of the last syllable of the given stem according
    to an i-umlaut.

    >>> apply_i_umlaut("mæl")
    'mæl'
    >>> apply_i_umlaut("lagð")
    'legð'
    >>> apply_i_umlaut("vak")
    'vek'
    >>> apply_i_umlaut("haf")
    'hef'
    >>> apply_i_umlaut("buð")
    'byð'
    >>> apply_i_umlaut("bár")
    'bær'
    >>> apply_i_umlaut("réð")
    'réð'
    >>> apply_i_umlaut("fór")
    'fœr'

    :param stem: stem of an Old Norse word
    :return: stem with i-umlaut applied to its last syllable
    """
    assert len(stem) > 0
    s_stem = s.syllabify_ssp(stem.lower())
    last_syllable = OldNorseSyllable(s_stem[-1], VOWELS, CONSONANTS)
    last_syllable.apply_i_umlaut()
    return "".join(s_stem[:-1]) + str(last_syllable)
python
{ "resource": "" }
q34243
HendecasyllableScanner.correct_invalid_start
train
def correct_invalid_start(self, scansion: str) -> str:
    """
    The third syllable of a hendecasyllabic line is long, so we will convert it.

    :param scansion: scansion string
    :return: scansion string with corrected start

    >>> print(HendecasyllableScanner().correct_invalid_start(
    ... "- U U U U - U - U - U").strip())
    - U - U U - U - U - U
    """
    mark_list = string_utils.mark_list(scansion)
    vals = list(scansion.replace(" ", ""))
    corrected = vals[:2] + [self.constants.STRESSED] + vals[3:]
    new_line = list(" " * len(scansion))
    for idx, car in enumerate(corrected):
        new_line[mark_list[idx]] = car
    return "".join(new_line)
python
{ "resource": "" }
q34244
SequentialBackoffLemmatizer.tag_one
train
def tag_one(self: object, tokens: List[str], index: int, history: List[str]):
    """
    Determine an appropriate tag for the specified token, and return that
    tag. If this tagger is unable to determine a tag for the specified
    token, then its backoff tagger is consulted.

    :rtype: tuple
    :type tokens: list
    :param tokens: The list of words that are being tagged.
    :type index: int
    :param index: The index of the word whose tag should be returned.
    :type history: list(str)
    :param history: A list of the tags for all words before index.
    """
    lemma = None
    for tagger in self._taggers:
        lemma = tagger.choose_tag(tokens, index, history)
        if lemma is not None:
            break
    return lemma, tagger
python
{ "resource": "" }
q34245
tokenize_akkadian_words
train
def tokenize_akkadian_words(line):
    """
    Operates on a single line of text, returns all words in the line as a
    tuple in a list.

    input: "1. isz-pur-ram a-na"
    output: [("isz-pur-ram", "akkadian"), ("a-na", "akkadian")]

    :param line: text string
    :return: list of tuples: (word, language)
    """
    # only match a string if it has a beginning underscore anywhere
    beginning_underscore = "_[^_]+(?!_)$"
    # only match a string if it has an ending underscore anywhere
    ending_underscore = "^(?<!_)[^_]+_"
    # only match a string if it has two underscores
    two_underscores = "_[^_]+_"

    # split the line on spaces, ignoring the first split (which is the line number)
    words = line.split()
    language = "akkadian"
    output_words = []
    for word in words:
        if re.search(two_underscores, word):
            # If the string has two underscores in it, then the word is
            # in Sumerian while the neighboring words are in Akkadian.
            output_words.append((word, "sumerian"))
        elif re.search(beginning_underscore, word):
            # If the word has an initial underscore somewhere but no other
            # underscores, then we're starting a block of Sumerian.
            language = "sumerian"
            output_words.append((word, language))
        elif re.search(ending_underscore, word):
            # If the word has an ending underscore somewhere but no other
            # underscores, then we're ending a block of Sumerian.
            output_words.append((word, language))
            language = "akkadian"
        else:
            # If there are no underscores, then we are continuing in
            # whatever language we're currently in.
            output_words.append((word, language))
    return output_words
python
{ "resource": "" }
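Usage sketch following the docstring's example, assuming the function is importable from CLTK's Akkadian word tokenizer module; note the docstring expects the leading line number to be excluded from the output:

    line = "1. isz-pur-ram a-na"
    print(tokenize_akkadian_words(line))
    # expected per the docstring: [('isz-pur-ram', 'akkadian'), ('a-na', 'akkadian')]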
q34246
tokenize_arabic_words
train
def tokenize_arabic_words(text):
    """
    Tokenize text into words.

    @param text: the input text.
    @type text: unicode.
    @return: list of words.
    @rtype: list.
    """
    specific_tokens = []
    if not text:
        return specific_tokens
    else:
        specific_tokens = araby.tokenize(text)
        return specific_tokens
python
{ "resource": "" }
q34247
tokenize_middle_high_german_words
train
def tokenize_middle_high_german_words(text):
    """Tokenizes MHG text."""
    assert isinstance(text, str)
    # As far as I know, hyphens were never used for compounds,
    # so the tokenizer treats all hyphens as line-breaks
    text = re.sub(r'-\n', r'-', text)
    text = re.sub(r'\n', r' ', text)
    text = re.sub(r'(?<=.)(?=[\.\";\,\:\[\]\(\)!&?])', r' ', text)
    text = re.sub(r'(?<=[\.\";\,\:\[\]\(\)!&?])(?=.)', r' ', text)
    text = re.sub(r'\s+', r' ', text)
    text = str.split(text)
    return text
python
{ "resource": "" }
q34248
WordTokenizer.tokenize
train
def tokenize(self, string):
    """Tokenize incoming string."""
    if self.language == 'akkadian':
        tokens = tokenize_akkadian_words(string)
    elif self.language == 'arabic':
        tokens = tokenize_arabic_words(string)
    elif self.language == 'french':
        tokens = tokenize_french_words(string)
    elif self.language == 'greek':
        tokens = tokenize_greek_words(string)
    elif self.language == 'latin':
        tokens = tokenize_latin_words(string)
    elif self.language == 'old_norse':
        tokens = tokenize_old_norse_words(string)
    elif self.language == 'middle_english':
        tokens = tokenize_middle_english_words(string)
    elif self.language == 'middle_high_german':
        tokens = tokenize_middle_high_german_words(string)
    else:
        tokens = nltk_tokenize_words(string)
    return tokens
python
{ "resource": "" }
q34249
WordTokenizer.tokenize_sign
train
def tokenize_sign(self, word):
    """This is for tokenizing cuneiform signs."""
    if self.language == 'akkadian':
        sign_tokens = tokenize_akkadian_signs(word)
    else:
        sign_tokens = 'Language must be written using cuneiform.'
    return sign_tokens
python
{ "resource": "" }
q34250
TLGU._check_import_source
train
def _check_import_source():
    """Check if tlgu is imported; if not, import it."""
    path_rel = '~/cltk_data/greek/software/greek_software_tlgu/tlgu.h'
    path = os.path.expanduser(path_rel)
    if not os.path.isfile(path):
        try:
            corpus_importer = CorpusImporter('greek')
            corpus_importer.import_corpus('greek_software_tlgu')
        except Exception as exc:
            logger.error('Failed to import TLGU: %s', exc)
            raise
python
{ "resource": "" }
q34251
TLGU._check_install
train
def _check_install(self):
    """Check if tlgu is installed; if not, install it."""
    try:
        subprocess.check_output(['which', 'tlgu'])
    except Exception as exc:
        logger.info('TLGU not installed: %s', exc)
        logger.info('Installing TLGU.')
        if not subprocess.check_output(['which', 'gcc']):
            logger.error('GCC seems not to be installed.')
        else:
            tlgu_path_rel = '~/cltk_data/greek/software/greek_software_tlgu'
            tlgu_path = os.path.expanduser(tlgu_path_rel)
            if not self.testing:
                print('Do you want to install TLGU?')
                print('To continue, press Return. To exit, Control-C.')
                input()
            else:
                print('Automated or test build, skipping keyboard input confirmation for installation of TLGU.')
            try:
                command = 'cd {0} && make install'.format(tlgu_path)
                print('Going to run command:', command)
                p_out = subprocess.call(command, shell=True)
                if p_out == 0:
                    logger.info('TLGU installed.')
                else:
                    logger.error('TLGU install without sudo failed.')
            except Exception as exc:
                logger.error('TLGU install failed: %s', exc)
            else:
                # for Linux needing root access to '/usr/local/bin'
                if not self.testing:
                    print('Could not install without root access. Do you want to install TLGU with sudo?')
                    command = 'cd {0} && sudo make install'.format(tlgu_path)
                    print('Going to run command:', command)
                    print('To continue, press Return. To exit, Control-C.')
                    input()
                    p_out = subprocess.call(command, shell=True)
                else:
                    command = 'cd {0} && sudo make install'.format(tlgu_path)
                    p_out = subprocess.call(command, shell=True)
                if p_out == 0:
                    logger.info('TLGU installed.')
                else:
                    logger.error('TLGU install with sudo failed.')
python
{ "resource": "" }
q34252
Syllabifier.syllabify
train
def syllabify(self, word):
    """Splits input Latin word into a list of syllables, based on
    the language syllables loaded for the Syllabifier instance.
    """
    prefixes = self.language['single_syllable_prefixes']
    prefixes.sort(key=len, reverse=True)
    # Check if word is in exception dictionary
    if word in self.language['exceptions']:
        syllables = self.language['exceptions'][word]
    # Else, break down syllables for word
    else:
        syllables = []
        # Remove prefixes
        for prefix in prefixes:
            if word.startswith(prefix):
                syllables.append(prefix)
                word = re.sub('^%s' % prefix, '', word)
                break
        # Initialize syllable to build by iterating over characters
        syllable = ''
        # Get word length for determining character position in word
        word_len = len(word)
        # Iterate over characters to build syllables
        for i, char in enumerate(word):
            # Build syllable
            syllable = syllable + char
            syllable_complete = False
            # Checks to process syllable logic
            char_is_vowel = self._is_vowel(char)
            has_next_char = i < word_len - 1
            has_prev_char = i > 0
            # If it's the end of the word, the syllable is complete
            if not has_next_char:
                syllable_complete = True
            else:
                next_char = word[i + 1]
                if has_prev_char:
                    prev_char = word[i - 1]
                # 'i' is a special case for a vowel: when i is at the
                # beginning of the word (Iesu) or i is between vowels
                # (alleluia), the i is treated as a consonant (y).
                # Note: what about compounds like 'adiungere'?
                if char == 'i' and has_next_char and self._is_vowel(next_char):
                    if i == 0:
                        char_is_vowel = False
                    elif self._is_vowel(prev_char):
                        char_is_vowel = False
                # Determine if the syllable is complete
                if char_is_vowel:
                    if (
                        (
                            # If the next character's a vowel
                            self._is_vowel(next_char)
                            # And it doesn't compose a diphthong with the current character
                            and not self._is_diphthong(char, next_char)
                            # And the current character isn't preceded by a q, unless followed by a u
                            and not (
                                has_prev_char
                                and prev_char == "q"
                                and char == "u"
                                and next_char != "u"
                            )
                        )
                        or (
                            # If the next character's a consonant but not a
                            # double consonant, unless it's a mute consonant
                            # followed by a liquid consonant
                            i < word_len - 2
                            and (
                                (
                                    (
                                        has_prev_char
                                        and prev_char != "q"
                                        and char == "u"
                                        and self._is_vowel(word[i + 2])
                                    )
                                    or (
                                        not has_prev_char
                                        and char == "u"
                                        and self._is_vowel(word[i + 2])
                                    )
                                )
                                or (
                                    char != "u"
                                    and self._is_vowel(word[i + 2])
                                    and not self._is_diphthong(char, next_char)
                                )
                                or (
                                    self._is_mute_consonant_or_f(next_char)
                                    and self._is_liquid_consonant(word[i + 2])
                                )
                            )
                        )
                    ):
                        syllable_complete = True
                # Otherwise, it's a consonant
                else:
                    if (
                        # If the next character's also a consonant (but it's not the last in the word)
                        (
                            not self._is_vowel(next_char)
                            and i < word_len - 2
                        )
                        # If the char's not a mute consonant followed by a liquid consonant
                        and not (
                            self._is_mute_consonant_or_f(char)
                            and self._is_liquid_consonant(next_char)
                        )
                        # If the char's not a c, p, or t followed by an h
                        and not (
                            (
                                has_prev_char
                                and not self._is_vowel(prev_char)
                                and char in ['c', 'p', 't']
                                and next_char == 'h'
                            )
                            or (
                                not has_prev_char
                                and char in ['c', 'p', 't']
                                and next_char == 'h'
                            )
                        )
                        # And it's not the only letter in the syllable
                        and not len(syllable) == 1
                    ):
                        syllable_complete = True
            # If it's a complete syllable, append it to syllables list and reset syllable
            if syllable_complete:
                syllables.append(syllable)
                syllable = ''
    return syllables
python
{ "resource": "" }
q34253
Scansion._clean_text
train
def _clean_text(self, text):
    """Clean the text of extraneous punctuation.

    By default, ':', ';', and '.' are defined as stops.
    :param text: raw text
    :return: clean text
    :rtype : string
    """
    clean = []
    for char in text:
        if char in self.punc_stops:
            clean += '.'
        elif char not in self.punc:
            clean += char
        else:
            pass
    return (''.join(clean)).lower()
python
{ "resource": "" }
q34254
Scansion._tokenize
train
def _tokenize(self, text):
    """Tokenize the text into a list of sentences with a list of words.

    :param text: raw text
    :return: tokenized text
    :rtype : list
    """
    sentences = []
    tokens = []
    for word in self._clean_accents(text).split(' '):
        tokens.append(word)
        if '.' in word:
            sentences.append(tokens)
            tokens = []
    return sentences
python
{ "resource": "" }
q34255
Scansion._long_by_nature
train
def _long_by_nature(self, syllable):
    """Check if syllable is long by nature.

    Long by nature includes:
    1) Syllable contains a diphthong
    2) Syllable contains a long vowel

    :param syllable: current syllable
    :return: True if long by nature
    :rtype : bool
    """
    # Find diphthongs
    vowel_group = []
    for char in syllable:
        if char in self.long_vowels:
            return True
        elif char not in self.sing_cons and char not in self.doub_cons:
            vowel_group += char
    if ''.join(vowel_group) in self.diphthongs:
        return True
python
{ "resource": "" }
q34256
Scansion._long_by_position
train
def _long_by_position(self, syllable, sentence):
    """Check if syllable is long by position.

    Long by position includes:
    1) Next syllable begins with two consonants, unless those consonants
       are a stop + liquid combination
    2) Next syllable begins with a double consonant
    3) Syllable ends with a consonant and the next syllable begins with a consonant

    :param syllable: Current syllable
    :param sentence: Current sentence
    :return: True if syllable is long by position
    :rtype : bool
    """
    try:
        next_syll = sentence[sentence.index(syllable) + 1]
        # Long by position by case 1
        if (next_syll[0] in self.sing_cons and next_syll[1] in self.sing_cons) and \
                (next_syll[0] not in self.stops and next_syll[1] not in self.liquids):
            return True
        # Long by position by case 2
        elif syllable[-1] in self.vowels and next_syll[0] in self.doub_cons:
            return True
        # Long by position by case 3
        elif syllable[-1] in self.sing_cons and (next_syll[0] in self.sing_cons):
            return True
        else:
            pass
    except IndexError:
        logger.info("IndexError while checking if syllable '%s' is long. Continuing.", syllable)
python
{ "resource": "" }
q34257
Scansion.scan_text
train
def scan_text(self, input_string):
    """The primary method for the class.

    :param input_string: A string of macronized text.
    :return: meter of text
    :rtype : list
    """
    syllables = self._make_syllables(input_string)
    sentence_syllables = self._syllable_condenser(syllables)
    meter = self._scansion(sentence_syllables)
    return meter
python
{ "resource": "" }
q34258
Stemmer.stem
train
def stem(self, text):
    """Stem each word of the Latin text."""
    stemmed_text = ''
    for word in text.split(' '):
        if word not in self.stops:
            # remove '-que' suffix
            word, in_que_pass_list = self._checkremove_que(word)
            if not in_que_pass_list:
                # remove the simple endings from the target word
                word, was_stemmed = self._matchremove_simple_endings(word)
                # if word didn't match the simple endings, try verb endings
                if not was_stemmed:
                    word = self._matchremove_verb_endings(word)
        # add the (possibly stemmed) word to the text
        stemmed_text += word + ' '
    return stemmed_text
python
{ "resource": "" }
q34259
Stemmer._checkremove_que
train
def _checkremove_que(self, word):
    """If word ends in -que and if word is not in pass list, strip -que."""
    in_que_pass_list = False
    que_pass_list = ['atque', 'quoque', 'neque', 'itaque', 'absque', 'apsque',
                     'abusque', 'adaeque', 'adusque', 'denique', 'deque',
                     'susque', 'oblique', 'peraeque', 'plenisque', 'quandoque',
                     'quisque', 'quaeque', 'cuiusque', 'cuique', 'quemque',
                     'quamque', 'quaque', 'quique', 'quorumque', 'quarumque',
                     'quibusque', 'quosque', 'quasque', 'quotusquisque',
                     'quousque', 'ubique', 'undique', 'usque', 'uterque',
                     'utique', 'utroque', 'utribique', 'torque', 'coque',
                     'concoque', 'contorque', 'detorque', 'decoque', 'excoque',
                     'extorque', 'obtorque', 'optorque', 'retorque', 'recoque',
                     'attorque', 'incoque', 'intorque', 'praetorque']
    if word not in que_pass_list:
        word = re.sub(r'que$', '', word)
    else:
        in_que_pass_list = True
    return word, in_que_pass_list
python
{ "resource": "" }
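The enclitic check above is a straight suffix strip guarded by an exception list; the same logic standalone, with an abridged pass list:

    import re

    que_pass_list = ['atque', 'quoque', 'neque']  # abridged from the full list above

    def strip_que(word):
        if word in que_pass_list:
            return word, True          # word legitimately ends in -que; leave it
        return re.sub(r'que$', '', word), False

    print(strip_que('virumque'))  # ('virum', False)
    print(strip_que('atque'))     # ('atque', True)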
q34260
Stemmer._matchremove_simple_endings
train
def _matchremove_simple_endings(self, word):
    """Remove the noun, adjective, adverb word endings."""
    was_stemmed = False
    # noun, adjective, and adverb word endings sorted by charlen, then alph
    simple_endings = ['ibus', 'ius', 'ae', 'am', 'as', 'em', 'es', 'ia',
                      'is', 'nt', 'os', 'ud', 'um', 'us', 'a', 'e', 'i',
                      'o', 'u']
    for ending in simple_endings:
        if word.endswith(ending):
            word = re.sub(r'{0}$'.format(ending), '', word)
            was_stemmed = True
            break
    return word, was_stemmed
python
{ "resource": "" }
q34261
Syllabifier._setup
train
def _setup(self, word) -> List[str]:
    """
    Prepares a word for syllable processing. If the word starts with a
    prefix, process it separately.

    :param word: the word to prepare
    :return: list of syllables
    """
    if len(word) == 1:
        return [word]
    for prefix in self.constants.PREFIXES:
        if word.startswith(prefix):
            (first, rest) = string_utils.split_on(word, prefix)
            if self._contains_vowels(rest):
                return string_utils.remove_blank_spaces(
                    self._process(first) + self._process(rest))
            # a word like pror can happen from elision
            return string_utils.remove_blank_spaces(self._process(word))
    if word in self.constants.UI_EXCEPTIONS.keys():
        return self.constants.UI_EXCEPTIONS[word]
    return string_utils.remove_blank_spaces(self._process(word))
python
{ "resource": "" }
q34262
Syllabifier.convert_consonantal_i
train
def convert_consonantal_i(self, word) -> str:
    """Convert i to j when at the start of a word."""
    match = list(self.consonantal_i_matcher.finditer(word))
    if match:
        if word[0].isupper():
            return "J" + word[1:]
        return "j" + word[1:]
    return word
python
{ "resource": "" }
q34263
Syllabifier._process
train
def _process(self, word: str) -> List[str]:
    """
    Process a word into a list of strings representing the syllables of the
    word. This method describes rules for consonant grouping behaviors and
    then iteratively applies those rules to the list of letters that
    comprise the word, until all the letters are grouped into appropriate
    syllable groups.

    :param word: the word to process
    :return: list of syllables
    """
    # if a blank arrives from splitting, just return an empty list
    if len(word.strip()) == 0:
        return []
    word = self.convert_consonantal_i(word)
    my_word = " " + word + " "
    letters = list(my_word)
    positions = []
    for dipth in self.diphthongs:
        if dipth in my_word:
            dipth_matcher = re.compile("{}".format(dipth))
            matches = dipth_matcher.finditer(my_word)
            for match in matches:
                (start, end) = match.span()
                positions.append(start)
    matches = self.kw_matcher.finditer(my_word)
    for match in matches:
        (start, end) = match.span()
        positions.append(start)
    letters = string_utils.merge_next(letters, positions)
    letters = string_utils.remove_blanks(letters)
    positions.clear()
    if not self._contains_vowels("".join(letters)):
        return ["".join(letters).strip()]  # occurs when only 'qu' appears by elision
    positions = self._starting_consonants_only(letters)
    while len(positions) > 0:
        letters = string_utils.move_consonant_right(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._starting_consonants_only(letters)
    positions = self._ending_consonants_only(letters)
    while len(positions) > 0:
        letters = string_utils.move_consonant_left(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._ending_consonants_only(letters)
    positions = self._find_solo_consonant(letters)
    while len(positions) > 0:
        letters = self._move_consonant(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._find_solo_consonant(letters)
    positions = self._find_consonant_cluster(letters)
    while len(positions) > 0:
        letters = self._move_consonant(letters, positions)
        letters = string_utils.remove_blanks(letters)
        positions = self._find_consonant_cluster(letters)
    return letters
python
{ "resource": "" }
q34264
Syllabifier._ends_with_vowel
train
def _ends_with_vowel(self, letter_group: str) -> bool:
        """Check if a string ends with a vowel."""
        if len(letter_group) == 0:
            return False
        return self._contains_vowels(letter_group[-1])
python
{ "resource": "" }
q34265
Syllabifier._starts_with_vowel
train
def _starts_with_vowel(self, letter_group: str) -> bool:
        """Check if a string starts with a vowel."""
        if len(letter_group) == 0:
            return False
        return self._contains_vowels(letter_group[0])
python
{ "resource": "" }
q34266
Syllabifier._starting_consonants_only
train
def _starting_consonants_only(self, letters: list) -> list:
        """Return a list of starting consonant positions."""
        for idx, letter in enumerate(letters):
            if not self._contains_vowels(letter) and self._contains_consonants(letter):
                return [idx]
            if self._contains_vowels(letter):
                return []
            if self._contains_vowels(letter) and self._contains_consonants(letter):
                return []
        return []
python
{ "resource": "" }
q34267
Syllabifier._ending_consonants_only
train
def _ending_consonants_only(self, letters: List[str]) -> List[int]:
        """Return a list of positions for ending consonants."""
        reversed_letters = list(reversed(letters))
        length = len(letters)
        for idx, letter in enumerate(reversed_letters):
            if not self._contains_vowels(letter) and self._contains_consonants(letter):
                return [(length - idx) - 1]
            if self._contains_vowels(letter):
                return []
            if self._contains_vowels(letter) and self._contains_consonants(letter):
                return []
        return []
python
{ "resource": "" }
q34268
Syllabifier._find_solo_consonant
train
def _find_solo_consonant(self, letters: List[str]) -> List[int]:
        """Find the positions of any solo consonants that are not yet paired with a vowel."""
        solos = []
        for idx, letter in enumerate(letters):
            if len(letter) == 1 and self._contains_consonants(letter):
                solos.append(idx)
        return solos
python
{ "resource": "" }
q34269
Syllabifier._move_consonant
train
def _move_consonant(self, letters: list, positions: List[int]) -> List[str]:
        """
        Given a list of consonant positions, move the consonants according to certain
        consonant syllable behavioral rules for gathering and grouping.

        :param letters:
        :param positions:
        :return:
        """
        for pos in positions:
            previous_letter = letters[pos - 1]
            consonant = letters[pos]
            next_letter = letters[pos + 1]
            if self._contains_vowels(next_letter) and self._starts_with_vowel(next_letter):
                return string_utils.move_consonant_right(letters, [pos])
            if self._contains_vowels(previous_letter) and self._ends_with_vowel(
                    previous_letter) and len(previous_letter) == 1:
                return string_utils.move_consonant_left(letters, [pos])
            if previous_letter + consonant in self.constants.ASPIRATES:
                return string_utils.move_consonant_left(letters, [pos])
            if consonant + next_letter in self.constants.ASPIRATES:
                return string_utils.move_consonant_right(letters, [pos])
            if next_letter[0] == consonant:
                return string_utils.move_consonant_left(letters, [pos])
            if consonant in self.constants.MUTES and next_letter[0] in self.constants.LIQUIDS:
                return string_utils.move_consonant_right(letters, [pos])
            if consonant in ['k', 'K'] and next_letter[0] in ['w', 'W']:
                return string_utils.move_consonant_right(letters, [pos])
            if self._contains_consonants(next_letter[0]) and self._starts_with_vowel(
                    previous_letter[-1]):
                return string_utils.move_consonant_left(letters, [pos])
            # fall through case
            if self._contains_consonants(next_letter[0]):
                return string_utils.move_consonant_right(letters, [pos])
        return letters
python
{ "resource": "" }
q34270
Syllabifier.get_syllable_count
train
def get_syllable_count(self, syllables: List[str]) -> int:
        """
        Counts the number of syllable groups that would occur after elision.

        Often we will want to preserve the position and separation of syllables so that
        they can be used to reconstitute a line, and apply stresses to the original word
        positions. However, we also want to be able to count the number of syllables
        accurately.

        :param syllables:
        :return:

        >>> syllabifier = Syllabifier()
        >>> print(syllabifier.get_syllable_count([
        ... 'Jām', 'tūm', 'c', 'au', 'sus', 'es', 'u', 'nus', 'I', 'ta', 'lo', 'rum']))
        11
        """
        tmp_syllables = copy.deepcopy(syllables)
        return len(string_utils.remove_blank_spaces(
            string_utils.move_consonant_right(tmp_syllables,
                                              self._find_solo_consonant(tmp_syllables))))
python
{ "resource": "" }
q34271
_unrecognised
train
def _unrecognised(achr):
    """Handle unrecognised characters."""
    if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
        return achr
    elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
        return options['substituteChar']
    else:
        raise KeyError(achr)
python
{ "resource": "" }
q34272
CharacterBlock._transliterate
train
def _transliterate(self, text, outFormat):
        """Transliterate the text to the target transliteration scheme."""
        result = []
        for c in text:
            if c.isspace():
                # whitespace passes through unchanged
                result.append(c)
            else:
                try:
                    result.append(self[c].equivalents[outFormat.name])
                except KeyError:
                    result.append(_unrecognised(c))
        return result
python
{ "resource": "" }
q34273
TransliterationScheme._setupParseTree
train
def _setupParseTree(self, rowFrom, rowTo, colIndex, tree):
        """Build the search tree for multi-character encodings."""
        if colIndex == self._longestEntry:
            return
        prevchar = None
        rowIndex = rowFrom
        while rowIndex <= rowTo:
            if colIndex < len(self._parsedata[rowIndex]):
                c = self._parsedata[rowIndex][colIndex]
                if c != prevchar:
                    tree[c] = {}
                    if prevchar is not None:
                        self._setupParseTree(rowFrom, rowIndex - 1, colIndex + 1, tree[prevchar])
                        rowFrom = rowIndex
                    prevchar = c
                if rowIndex == rowTo:
                    self._setupParseTree(rowFrom, rowIndex, colIndex + 1, tree[prevchar])
            rowIndex = rowIndex + 1
python
{ "resource": "" }
q34274
TransliterationScheme._transliterate
train
def _transliterate(self, text, outFormat):
        """Transliterate the text to Unicode."""
        result = []
        text = self._preprocess(text)
        i = 0
        while i < len(text):
            if text[i].isspace():
                result.append(text[i])
                i = i + 1
            else:
                # `chr` here shadows the builtin of the same name
                chr = self._getNextChar(text, i)
                try:
                    result.append(self[chr].unichr)
                except KeyError:
                    result.append(_unrecognised(chr))
                i = i + len(chr)
        return result
python
{ "resource": "" }
q34275
DevanagariTransliterationScheme._equivalent
train
def _equivalent(self, char, prev, next, implicitA):
        """Transliterate a Devanagari character to Latin.

        Add implicit As unless overridden by VIRAMA.
        """
        result = []
        if char.unichr != DevanagariCharacter._VIRAMA:
            result.append(char.equivalents[self.name])
        # append an implicit A to consonants if the next character isn't a vowel
        if implicitA and char.isConsonant \
                and ((next is not None
                      and next.unichr != DevanagariCharacter._VIRAMA
                      and not next.isVowel)
                     or next is None):
            result.append(characterBlocks['DEVANAGARI']
                          [DevanagariCharacter._LETTER_A].equivalents[self.name])
        return result
python
{ "resource": "" }
q34276
CorpusImporter.list_corpora
train
def list_corpora(self):
        """Show corpora available for the CLTK to download."""
        try:
            # corpora = LANGUAGE_CORPORA[self.language]
            corpora = self.all_corpora
            corpus_names = [corpus['name'] for corpus in corpora]
            return corpus_names
        except (NameError, KeyError) as error:
            msg = 'Corpus not available for language "{}": {}'.format(self.language, error)
            logger.error(msg)
            raise CorpusImportError(msg)
python
{ "resource": "" }
q34277
onekgreek_tei_xml_to_text
train
def onekgreek_tei_xml_to_text():
    """Convert the TEI XML files of the First 1k Years of Greek corpus to plaintext."""
    if not bs4_installed:
        logger.error('Install `bs4` and `lxml` to parse these TEI files.')
        raise ImportError
    xml_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek/data/*/*/*.xml')
    xml_paths = glob.glob(xml_dir)
    if not len(xml_paths):
        logger.error('1K Greek corpus not installed. Use CorpusInstaller to get `First1KGreek`.')
        raise FileNotFoundError
    xml_paths = [path for path in xml_paths if '__cts__' not in path]

    # new dir
    new_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek_plaintext/')
    if not os.path.isdir(new_dir):
        os.makedirs(new_dir)

    for xml_path in xml_paths:
        _, xml_name = os.path.split(xml_path)
        # slice off the '.xml' suffix; `rstrip('.xml')` would strip characters, not a suffix
        if xml_name.endswith('.xml'):
            xml_name = xml_name[:-len('.xml')]
        xml_name += '.txt'
        with open(xml_path) as file_open:
            soup = BeautifulSoup(file_open, 'lxml')
        body = soup.body
        text = body.get_text()
        new_plaintext_path = os.path.join(new_dir, xml_name)
        with open(new_plaintext_path, 'w') as file_open:
            file_open.write(text)
python
{ "resource": "" }
q34278
onekgreek_tei_xml_to_text_capitains
train
def onekgreek_tei_xml_to_text_capitains():
    """Use the MyCapytain library to convert TEI to plaintext."""
    xml_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek/data/*/*/*.xml')
    xml_paths = glob.glob(xml_dir)
    if not len(xml_paths):
        logger.error('1K Greek corpus not installed. Use CorpusInstaller to get `First1KGreek`.')
        raise FileNotFoundError
    xml_paths = [path for path in xml_paths if '__cts__' not in path]

    # new dir
    new_dir = os.path.expanduser('~/cltk_data/greek/text/greek_text_first1kgreek_plaintext/')
    if not os.path.isdir(new_dir):
        os.makedirs(new_dir)

    for xml_path in xml_paths:
        _, xml_name = os.path.split(xml_path)
        # slice off the '.xml' suffix; `rstrip('.xml')` would strip characters, not a suffix
        if xml_name.endswith('.xml'):
            xml_name = xml_name[:-len('.xml')]
        xml_name += '.txt'
        plain_text = ''
        with open(xml_path) as file_open:
            text = CapitainsCtsText(resource=file_open)
            for ref in text.getReffs(level=len(text.citation)):
                psg = text.getTextualNode(subreference=ref, simple=True)
                text_line = psg.export(Mimetypes.PLAINTEXT, exclude=["tei:note"])
                plain_text += text_line
        new_plaintext_path = os.path.join(new_dir, xml_name)
        with open(new_plaintext_path, 'w') as file_open:
            file_open.write(plain_text)
python
{ "resource": "" }
q34279
Lemmata.load_replacement_patterns
train
def load_replacement_patterns(self):
        """Check for availability of the specified dictionary."""
        filename = self.dictionary + '.py'
        models = self.language + '_models_cltk'
        rel_path = os.path.join('~/cltk_data',
                                self.language,
                                'model',
                                models,
                                'semantics',
                                filename)
        path = os.path.expanduser(rel_path)
        logger.info('Loading lemmata or synonyms. This may take a minute.')
        loader = importlib.machinery.SourceFileLoader(filename, path)
        module = types.ModuleType(loader.name)
        loader.exec_module(module)
        return module.DICTIONARY
python
{ "resource": "" }
q34280
Lemmata.lookup
train
def lookup(self, tokens):
        """Return a list of possible lemmata and their probabilities for each token."""
        lemmatized_tokens = []
        if isinstance(tokens, list):
            for token in tokens:
                # look for token in lemma dict keys
                if token.lower() in self.lemmata.keys():
                    # `lemmas` is a list of possible lemmata; probability values must be assigned.
                    # `lemmalist` is a list of the form [(LEMMA, PROBABILITY), (LEMMA, PROBABILITY)]
                    # `lemmaobj` is a tuple of the form (TOKEN, LEMMALIST)
                    lemmas = self.lemmata[token.lower()]
                    lemmalist = []
                    for lemma in lemmas:
                        lemmalist.append((lemma, 1 / len(lemmas)))
                    lemmaobj = (token, lemmalist)
                else:
                    # if the token is not found in the lemma-headword list, return the token itself
                    lemmalist = [(token, 1)]
                    lemmaobj = (token, lemmalist)
                lemmatized_tokens.append(lemmaobj)
        if isinstance(tokens, str):
            if tokens.lower() in self.lemmata.keys():
                lemmas = self.lemmata[tokens.lower()]
                lemmalist = []
                for lemma in lemmas:
                    lemmalist.append((lemma, 1 / len(lemmas)))
                lemmaobj = (tokens, lemmalist)
            else:
                # if the token is not found in the lemma-headword list, return the token itself
                lemmalist = [(tokens, 1)]
                lemmaobj = (tokens, lemmalist)
            lemmatized_tokens.append(lemmaobj)
        return lemmatized_tokens
python
{ "resource": "" }
q34281
Lemmata.isolate
train
def isolate(obj):
        """Feed a standard semantic object in and receive a simple list of lemmata."""
        answers = []
        for token in obj:
            lemmata = token[1]
            for pair in lemmata:
                answers.append(pair[0])
        return answers
python
{ "resource": "" }
q34282
Syllabifier.set_hierarchy
train
def set_hierarchy(self, hierarchy):
        """
        Sets an alternative sonority hierarchy. Note that you will also need to specify
        the vowel set with `set_vowels` for the module to correctly identify each nucleus.

        The phonemes are listed in order of decreasing consonantality.

        Example:
            >>> s = Syllabifier()
            >>> s.set_hierarchy([['i', 'u'], ['e'], ['a'], ['r'], ['m', 'n'], ['f']])
            >>> s.set_vowels(['i', 'u', 'e', 'a'])
            >>> s.syllabify('feminarum')
            ['fe', 'mi', 'na', 'rum']
        """
        self.hierarchy = dict([(k, i) for i, j in enumerate(hierarchy) for k in j])
python
{ "resource": "" }
q34283
Syllabifier.syllabify_ssp
train
def syllabify_ssp(self, word):
        """
        Syllabifies a word according to the Sonority Sequencing Principle.

        :param word: Word to be syllabified
        :return: List consisting of syllables

        Example:
            First you need to define the manners of articulation:
            >>> high_vowels = ['a']
            >>> mid_vowels = ['e']
            >>> low_vowels = ['i', 'u']
            >>> flaps = ['r']
            >>> nasals = ['m', 'n']
            >>> fricatives = ['f']
            >>> s = Syllabifier(high_vowels=high_vowels, mid_vowels=mid_vowels, low_vowels=low_vowels, flaps=flaps, nasals=nasals, fricatives=fricatives)
            >>> s.syllabify("feminarum")
            ['fe', 'mi', 'na', 'rum']

            Not specifying your alphabet results in an error:
            >>> s.syllabify("foemina")
            Traceback (most recent call last):
            ...
            cltk.exceptions.InputError

            Additionally, you can utilize the language parameter:
            >>> s = Syllabifier(language='middle_high_german')
            >>> s.syllabify('lobebæren')
            ['lo', 'be', 'bæ', 'ren']

            >>> s = Syllabifier(language='middle_english')
            >>> s.syllabify("huntyng")
            ['hun', 'tyng']

            >>> s = Syllabifier(language='old_english')
            >>> s.syllabify("arcebiscop")
            ['ar', 'ce', 'bis', 'cop']

            The break_geminants parameter ensures a breakpoint is placed between geminants:
            >>> geminant_s = Syllabifier(break_geminants=True)
            >>> hierarchy = [["a", "á", "æ", "e", "é", "i", "í", "o", "ǫ", "ø", "ö", "œ", "ó", "u", "ú", "y", "ý"], ["j"], ["m"], ["n"], ["p", "b", "d", "g", "t", "k"], ["c", "f", "s", "h", "v", "x", "þ", "ð"], ["r"], ["l"]]
            >>> geminant_s.set_hierarchy(hierarchy)
            >>> geminant_s.set_vowels(hierarchy[0])
            >>> geminant_s.syllabify("ennitungl")
            ['en', 'ni', 'tungl']
        """
        # list indicating the syllable break indices
        syllables = []
        find_nucleus = True
        i = 0
        try:
            # replace each letter occurrence with its corresponding number
            # indicating its position in the sonority hierarchy
            encoded = list(map(lambda x: self.hierarchy[x], word))
        except KeyError:
            LOG.error("The given string contains invalid characters. "
                      "Make sure to define the manner of articulation for each phoneme.")
            raise InputError
        while i < len(word) - 1:
            # search for the nucleus
            while word[i] not in self.vowels and i < len(word) - 1 and find_nucleus:
                i += 1
            if find_nucleus is True:
                i += 1
            if i >= len(word) - 1:
                break
            else:
                # if the break_geminants parameter is set to True, prioritize geminants
                if self.break_geminants and word[i - 1] == word[i]:
                    syllables.append(i - 1)
                    find_nucleus = True
                # if a cluster of three phonemes with the same values exists, break the syllable
                elif encoded[i - 1] == encoded[i] == encoded[i + 1]:
                    syllables.append(i)
                    find_nucleus = True
                elif encoded[i] > encoded[i - 1] and encoded[i] > encoded[i + 1]:
                    syllables.append(i)
                    find_nucleus = True
                elif encoded[i] < encoded[i - 1] and encoded[i] < encoded[i + 1]:
                    syllables.append(i)
                    find_nucleus = True
                else:
                    find_nucleus = False
                i += 1
        for n, k in enumerate(syllables):
            word = word[:k + n + 1] + "." + word[k + n + 1:]
        word = word.split('.')
        # check whether the last syllable has a nucleus
        if sum([x in self.vowels for x in word[-1]]) == 0:
            word[-2] += word[-1]
            word = word[:-1]
        return self.onset_maximization(word)
python
{ "resource": "" }
q34284
PentameterScanner.make_spondaic
train
def make_spondaic(self, scansion: str) -> str:
        """
        If a pentameter line has 12 syllables, then it must start with double spondees.

        :param scansion: a string of scansion patterns
        :return: a scansion pattern string starting with two spondees

        >>> print(PentameterScanner().make_spondaic("U U U U U U U U U U U U"))
        - - - - - - U U - U U U
        """
        mark_list = string_utils.mark_list(scansion)
        vals = list(scansion.replace(" ", ""))
        new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]
        corrected = "".join(new_vals)
        new_line = list(" " * len(scansion))
        for idx, car in enumerate(corrected):
            new_line[mark_list[idx]] = car
        return "".join(new_line)
python
{ "resource": "" }
q34285
PentameterScanner.correct_penultimate_dactyl_chain
train
def correct_penultimate_dactyl_chain(self, scansion: str) -> str:
        """
        For pentameter the last two feet of the verse are predictable dactyls,
        and do not regularly allow substitutions.

        :param scansion: scansion line thus far
        :return: corrected line of scansion

        >>> print(PentameterScanner().correct_penultimate_dactyl_chain(
        ... "U U U U U U U U U U U U U U"))
        U U U U U U U - U U - U U U
        """
        mark_list = string_utils.mark_list(scansion)
        vals = list(scansion.replace(" ", ""))
        n_vals = vals[:-7] + [self.constants.DACTYL + self.constants.DACTYL] + [vals[-1]]
        corrected = "".join(n_vals)
        new_line = list(" " * len(scansion))
        for idx, car in enumerate(corrected):
            new_line[mark_list[idx]] = car
        return "".join(new_line)
python
{ "resource": "" }
q34286
eval_str_to_list
train
def eval_str_to_list(input_str: str) -> List[str]:
    """Safely evaluate a string literal into a list of strings."""
    inner_cast = ast.literal_eval(input_str)  # type: List[str]
    if isinstance(inner_cast, list):
        return inner_cast
    else:
        raise ValueError
python
{ "resource": "" }
q34287
get_authors
train
def get_authors(filepath: str) -> List[str]:
    """Open file and check for author info."""
    str_oneline = r'(^__author__ = )(\[.*?\])'  # type: str
    comp_oneline = re.compile(str_oneline, re.MULTILINE)  # type: Pattern[str]
    with open(filepath) as file_open:
        file_read = file_open.read()  # type: str
    match = comp_oneline.findall(file_read)
    if match:
        inner_list_as_str = match[0][1]  # type: str
        inner_list = eval_str_to_list(inner_list_as_str)  # type: List[str]
        return inner_list
    return list()
python
{ "resource": "" }
q34288
scantree
train
def scantree(path: str) -> Generator:
    """Recursively yield DirEntry objects for given directory."""
    for entry in os.scandir(path):
        if entry.is_dir(follow_symlinks=False):
            yield from scantree(entry.path)
        else:
            if entry.name.endswith('.py'):
                yield entry
python
{ "resource": "" }
q34289
write_contribs
train
def write_contribs(def_dict_list: Dict[str, List[str]]) -> None:
    """Write to file, in current dir, 'contributors.md'."""
    file_str = ''  # type: str
    note = '# Contributors\nCLTK Core authors, ordered alphabetically by first name\n\n'  # type: str  # pylint: disable=line-too-long
    file_str += note
    for contrib in def_dict_list:
        file_str += '## ' + contrib + '\n'
        for module in def_dict_list[contrib]:
            file_str += '* ' + module + '\n'
        file_str += '\n'
    file_name = 'contributors.md'  # type: str
    with open(file_name, 'w') as file_open:  # type: IO
        file_open.write(file_str)
    logger.info('Wrote contribs file at "%s".', file_name)
python
{ "resource": "" }
q34290
find_write_contribs
train
def find_write_contribs() -> None:
    """Look for files, find authors, sort, write file."""
    map_file_auth = {}  # type: Dict[str, List[str]]
    for filename in scantree('cltk'):
        filepath = filename.path  # type: str
        authors_list = get_authors(filepath)  # type: List[str]
        if authors_list:
            map_file_auth[filepath] = authors_list
    map_auth_file = defaultdict(list)  # type: Dict[str, List[str]]
    for file, authors_file in map_file_auth.items():
        for author in authors_file:
            map_auth_file[author].append(file)
    # now sort the str contents of the list value
    map_auth_file = sort_def_dict(map_auth_file)
    map_auth_file_sorted = sorted(map_auth_file.items())  # type: List[Tuple[str, List[str]]]
    map_auth_file = OrderedDict(map_auth_file_sorted)
    write_contribs(map_auth_file)
python
{ "resource": "" }
q34291
Metre.syllabify
train
def syllabify(self, hierarchy):
        """
        Syllables may play a role in verse classification.
        """
        if len(self.long_lines) == 0:
            logger.error("No text was imported")
            self.syllabified_text = []
        else:
            syllabifier = Syllabifier(language="old_norse", break_geminants=True)
            syllabifier.set_hierarchy(hierarchy)
            syllabified_text = []
            for i, long_line in enumerate(self.long_lines):
                syllabified_text.append([])
                for short_line in long_line:
                    assert isinstance(short_line, (ShortLine, LongLine))
                    short_line.syllabify(syllabifier)
                    syllabified_text[i].append(short_line.syllabified)
            self.syllabified_text = syllabified_text
python
{ "resource": "" }
q34292
Metre.to_phonetics
train
def to_phonetics(self):
        """
        Transcribing words in verse helps find alliteration.
        """
        if len(self.long_lines) == 0:
            logger.error("No text was imported")
            # reset the transcription attributes when there is nothing to transcribe
            self.transcribed_text = []
            self.phonological_features_text = []
        else:
            transcriber = Transcriber(DIPHTHONGS_IPA, DIPHTHONGS_IPA_class, IPA_class, old_norse_rules)
            transcribed_text = []
            phonological_features_text = []
            for i, long_line in enumerate(self.long_lines):
                transcribed_text.append([])
                phonological_features_text.append([])
                for short_line in long_line:
                    assert isinstance(short_line, (ShortLine, LongLine))
                    short_line.to_phonetics(transcriber)
                    transcribed_text[i].append(short_line.transcribed)
                    phonological_features_text[i].append(short_line.phonological_features_text)
            self.transcribed_text = transcribed_text
            self.phonological_features_text = phonological_features_text
python
{ "resource": "" }
q34293
PoeticWord.parse_word_with
train
def parse_word_with(self, poetry_tools: PoetryTools):
        """
        Compute the phonetic transcription of the word in IPA, its syllables,
        the length of each syllable, whether each syllable is stressed or not,
        and the POS category the word is in.

        :param poetry_tools: instance of PoetryTools
        :return:
        """
        phonemes = poetry_tools.tr.text_to_phonemes(self.text)
        self.syl = poetry_tools.syllabifier.syllabify_phonemes(phonemes)
        for i, syllable in enumerate(self.syl):
            self.ipa_transcription.append([])
            syl_len = measure_old_norse_syllable(syllable).value
            # only the first syllable is treated as stressed
            syl_stress = 1 if i == 0 else 0
            self.length.append(syl_len)
            self.stress.append(syl_stress)
            for c in syllable:
                self.ipa_transcription[i].append(c.ipar)
python
{ "resource": "" }
q34294
set_path
train
def set_path(dicts, keys, v):
    """
    Helper function for modifying nested dictionaries.

    :param dicts: dict: the given dictionary
    :param keys: list str: path to added value
    :param v: str: value to be added

    Example:
        >>> d = dict()
        >>> set_path(d, ['a', 'b', 'c'], 'd')
        >>> d
        {'a': {'b': {'c': ['d']}}}

        In case of duplicate paths, the additional value will be added to the
        leaf node rather than simply replace it:
        >>> set_path(d, ['a', 'b', 'c'], 'e')
        >>> d
        {'a': {'b': {'c': ['d', 'e']}}}
    """
    for key in keys[:-1]:
        dicts = dicts.setdefault(key, dict())
    dicts = dicts.setdefault(keys[-1], list())
    dicts.append(v)
python
{ "resource": "" }
q34295
get_paths
train
def get_paths(src):
    """
    Generates root-to-leaf paths, given a treebank in string format.

    Note that get_paths is a generator and does not return all the paths simultaneously.

    :param src: str: treebank

    Examples:
        >>> st = "((IP-MAT-SPE (' ') (INTJ Yes) (, ,) (' ') (IP-MAT-PRN (NP-SBJ (PRO he)) (VBD seyde)) (, ,) (' ') (NP-SBJ (PRO I)) (MD shall) (VB promyse) (NP-OB2 (PRO you)) (IP-INF (TO to) (VB fullfylle) (NP-OB1 (PRO$ youre) (N desyre))) (. .) (' '))"

        Get the sixth generated path:
        >>> list(get_paths(st))[5]
        ['IP-MAT-SPE', 'IP-MAT-PRN', 'VBD', 'seyde']
    """
    st = list()
    tmp = ''
    for let in src:
        if let == '(':
            if tmp != '':
                st.append(tmp)
                tmp = ''
        elif let == ')':
            if tmp != '':
                st.append(tmp)
            yield st
            st = st[:-1 - (tmp != '')]
            tmp = ''
        elif let == ' ':
            if tmp != '':
                st.append(tmp)
                tmp = ''
        else:
            tmp += let
python
{ "resource": "" }
q34296
Transliterate.transliterate
train
def transliterate(self, text, mode='Latin'):
        """
        Transliterates Anglo-Saxon runes into Latin and vice versa.

        Sources:
            http://www.arild-hauge.com/eanglor.htm
            https://en.wikipedia.org/wiki/Anglo-Saxon_runes

        :param text: str: The text to be transcribed
        :param mode: Specifies transliteration mode, options:

            Latin (default): Transliterates Anglo-Saxon runes into the Latin
            alphabet, using the Dickins system

            Anglo-Saxon/Anglo-Frisian: Transliterates Latin text into
            Anglo-Saxon runes

        Examples:
            >>> Transliterate().transliterate("Hƿæt Ƿe Gardena in geardagum", "Anglo-Saxon")
            'ᚻᚹᚫᛏ ᚹᛖ ᚷᚪᚱᛞᛖᚾᚪ ᛁᚾ ᚷᛠᚱᛞᚪᚷᚢᛗ'

            >>> Transliterate().transliterate("ᚩᚠᛏ ᛋᚳᚣᛚᛞ ᛋᚳᛖᚠᛁᛝ ᛋᚳᛠᚦᛖᚾᚪ ᚦᚱᛠᛏᚢᛗ", "Latin")
            'oft scyld scefin sceathena threatum'
        """
        if mode == 'Latin':
            return Transliterate.__transliterate_helper(text, L_Transliteration)
        elif mode in ['Anglo-Saxon', 'Anglo-Frisian']:
            return Transliterate.__transliterate_helper(text, R_Transliteration)
        else:
            LOG.error("The specified mode is currently not supported")
            raise InputError("The specified mode is currently not supported")
python
{ "resource": "" }
q34297
SawyerNutAssembly.clear_objects
train
def clear_objects(self, obj):
        """
        Clears all objects except the one named @obj out of the task space.

        This is useful for supporting task modes with single types of objects, as in
        @self.single_object_mode, without changing the model definition.
        """
        for obj_name, obj_mjcf in self.mujoco_objects.items():
            if obj_name == obj:
                continue
            else:
                sim_state = self.sim.get_state()
                # teleport the unwanted object far away along its free joint
                sim_state.qpos[self.sim.model.get_joint_qpos_addr(obj_name)[0]] = 10
                self.sim.set_state(sim_state)
                self.sim.forward()
python
{ "resource": "" }
q34298
SawyerNutAssembly._check_contact
train
def _check_contact(self):
        """
        Returns True if gripper is in contact with an object.
        """
        collision = False
        for contact in self.sim.data.contact[: self.sim.data.ncon]:
            if (
                self.sim.model.geom_id2name(contact.geom1) in self.finger_names
                or self.sim.model.geom_id2name(contact.geom2) in self.finger_names
            ):
                collision = True
                break
        return collision
python
{ "resource": "" }
q34299
SawyerNutAssembly._check_success
train
def _check_success(self):
        """
        Returns True if task has been completed.
        """
        # remember objects that are on the correct pegs
        gripper_site_pos = self.sim.data.site_xpos[self.eef_site_id]
        for i in range(len(self.ob_inits)):
            obj_str = str(self.item_names[i]) + "0"
            obj_pos = self.sim.data.body_xpos[self.obj_body_id[obj_str]]
            dist = np.linalg.norm(gripper_site_pos - obj_pos)
            # r_reach is near 1 when the gripper touches the object and decays toward 0
            # with distance; requiring r_reach < 0.6 means an object only counts as
            # placed once the gripper has moved away from it
            r_reach = 1 - np.tanh(10.0 * dist)
            self.objects_on_pegs[i] = int(self.on_peg(obj_pos, i) and r_reach < 0.6)

        if self.single_object_mode > 0:
            return np.sum(self.objects_on_pegs) > 0  # need one object on peg

        # returns True if all objects are on correct pegs
        return np.sum(self.objects_on_pegs) == len(self.ob_inits)
python
{ "resource": "" }