Dataset columns:
_id: string (2 to 7 chars)
title: string (1 to 88 chars)
partition: string (3 classes)
text: string (75 to 19.8k chars)
language: string (1 class)
meta_information: dict
q34100
Table.find_one
train
def find_one(self, *args, **kwargs): """Get a single result from the table. Works just like :py:meth:`find() <dataset.Table.find>` but returns one result, or ``None``. :: row = table.find_one(country='United States') """ if not self.exists: return None kwargs['_limit'] = 1 kwargs['_step'] = None resiter = self.find(*args, **kwargs) try: for row in resiter: return row finally: resiter.close()
python
{ "resource": "" }
q34101
Table.count
train
def count(self, *_clauses, **kwargs): """Return the count of results for the given filter set.""" # NOTE: this does not have support for limit and offset since I can't # see how this is useful. Still, there might be compatibility issues # with people using these flags. Let's see how it goes. if not self.exists: return 0 args = self._args_to_clause(kwargs, clauses=_clauses) query = select([func.count()], whereclause=args) query = query.select_from(self.table) rp = self.db.executable.execute(query) return rp.fetchone()[0]
python
{ "resource": "" }
q34102
connect
train
def connect(url=None, schema=None, reflect_metadata=True, engine_kwargs=None, reflect_views=True, ensure_schema=True, row_type=row_type): """ Opens a new connection to a database. *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not defined, the *DATABASE_URL* environment variable will be used. Returns an instance of :py:class:`Database <dataset.Database>`. Set *reflect_metadata* to False if you don't want the entire database schema to be pre-loaded. This significantly speeds up connecting to large databases with lots of tables. *reflect_views* can be set to False if you don't want views to be loaded. Additionally, *engine_kwargs* will be directly passed to SQLAlchemy, e.g. setting *engine_kwargs={'pool_recycle': 3600}* will avoid `DB connection timeout`_. Set *row_type* to an alternate dict-like class to change the type of container rows are stored in. :: db = dataset.connect('sqlite:///factbook.db') .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle """ if url is None: url = os.environ.get('DATABASE_URL', 'sqlite://') return Database(url, schema=schema, reflect_metadata=reflect_metadata, engine_kwargs=engine_kwargs, reflect_views=reflect_views, ensure_schema=ensure_schema, row_type=row_type)
python
{ "resource": "" }
q34103
Database.executable
train
def executable(self): """Connection against which statements will be executed.""" if not hasattr(self.local, 'conn'): self.local.conn = self.engine.connect() return self.local.conn
python
{ "resource": "" }
q34104
Database.in_transaction
train
def in_transaction(self): """Check if this database is in a transactional context.""" if not hasattr(self.local, 'tx'): return False return len(self.local.tx) > 0
python
{ "resource": "" }
q34105
Database.begin
train
def begin(self): """Enter a transaction explicitly. No data will be written until the transaction has been committed. """ if not hasattr(self.local, 'tx'): self.local.tx = [] self.local.tx.append(self.executable.begin())
python
{ "resource": "" }
q34106
Database.rollback
train
def rollback(self): """Roll back the current transaction. Discard all statements executed since the transaction was begun. """ if hasattr(self.local, 'tx') and self.local.tx: tx = self.local.tx.pop() tx.rollback() self._flush_tables()
python
{ "resource": "" }
q34107
Database.load_table
train
def load_table(self, table_name): """Load a table. This will fail if the table does not already exist in the database. If the table exists, its columns will be reflected and are available on the :py:class:`Table <dataset.Table>` object. Returns a :py:class:`Table <dataset.Table>` instance. :: table = db.load_table('population') """ table_name = normalize_table_name(table_name) with self.lock: if table_name not in self._tables: self._tables[table_name] = Table(self, table_name) return self._tables.get(table_name)
python
{ "resource": "" }
q34108
Database.get_table
train
def get_table(self, table_name, primary_id=None, primary_type=None): """Load or create a table. This is now the same as ``create_table``. :: table = db.get_table('population') # you can also use the short-hand syntax: table = db['population'] """ return self.create_table(table_name, primary_id, primary_type)
python
{ "resource": "" }
q34109
Database.query
train
def query(self, query, *args, **kwargs): """Run a statement on the database directly. Allows for the execution of arbitrary read/write queries. A query can either be a plain text string, or a `SQLAlchemy expression <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_. If a plain string is passed in, it will be converted to an expression automatically. Further positional and keyword arguments will be used for parameter binding. To include a positional argument in your query, use question marks in the query (e.g. ``SELECT * FROM tbl WHERE a = ?``). For keyword arguments, use a bind parameter (e.g. ``SELECT * FROM tbl WHERE a = :foo``). :: statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user' for row in db.query(statement): print(row['user'], row['c']) The returned iterator will yield each result sequentially. """ if isinstance(query, six.string_types): query = text(query) _step = kwargs.pop('_step', QUERY_STEP) rp = self.executable.execute(query, *args, **kwargs) return ResultIter(rp, row_type=self.row_type, step=_step)
python
{ "resource": "" }
q34110
printcolour
train
def printcolour(text, sameline=False, colour=get_colour("ENDC")): """ Print color text using escape codes """ if sameline: sep = '' else: sep = '\n' sys.stdout.write(get_colour(colour) + text + bcolours["ENDC"] + sep)
python
{ "resource": "" }
q34111
abbreviate
train
def abbreviate(labels, rfill=' '): """ Abbreviate labels without introducing ambiguities. """ max_len = max(len(l) for l in labels) for i in range(1, max_len): abbrev = [l[:i].ljust(i, rfill) for l in labels] if len(abbrev) == len(set(abbrev)): break return abbrev
python
{ "resource": "" }
q34112
box_text
train
def box_text(text, width, offset=0): """ Return text inside an ascii textbox """ box = " " * offset + "-" * (width+2) + "\n" box += " " * offset + "|" + text.center(width) + "|" + "\n" box += " " * offset + "-" * (width+2) return box
python
{ "resource": "" }
q34113
calc_bins
train
def calc_bins(n, min_val, max_val, h=None, binwidth=None): """ Calculate number of bins for the histogram """ if not h: h = max(10, math.log(n + 1, 2)) if binwidth == 0: binwidth = 0.1 if binwidth is None: binwidth = (max_val - min_val) / h for b in drange(min_val, max_val, step=binwidth, include_stop=True): if b.is_integer(): yield int(b) else: yield b
python
{ "resource": "" }
q34114
read_numbers
train
def read_numbers(numbers): """ Read the input data, either from an in-memory iterable or from a file path """ if isiterable(numbers): for number in numbers: yield float(str(number).strip()) else: with open(numbers) as fh: for number in fh: yield float(number.strip())
python
{ "resource": "" }
q34115
run_demo
train
def run_demo(): """ Run a demonstration """ module_dir = dirname(dirname(os.path.realpath(__file__))) demo_file = os.path.join(module_dir, 'examples/data/exp.txt') if not os.path.isfile(demo_file): sys.stderr.write("demo input file not found!\n") sys.stderr.write("run the downloaddata.sh script in the example first\n") sys.exit(1) # plotting a histogram print("plotting a basic histogram") print("plot_hist('%s')" % demo_file) print("hist -f %s" % demo_file) print("cat %s | hist" % demo_file) plot_hist(demo_file) print("*" * 80) # with colours print("histogram with colours") print("plot_hist('%s', colour='blue')" % demo_file) print("hist -f %s -c blue" % demo_file) plot_hist(demo_file, colour='blue') print("*" * 80) # changing the shape of the point print("changing the shape of the bars") print("plot_hist('%s', pch='.')" % demo_file) print("hist -f %s -p ." % demo_file) plot_hist(demo_file, pch='.') print("*" * 80) # changing the size of the plot print("changing the size of the plot") print("plot_hist('%s', height=35.0, bincount=40)" % demo_file) print("hist -f %s -s 35.0 -b 40" % demo_file) plot_hist(demo_file, height=35.0, bincount=40)
python
{ "resource": "" }
q34116
plot_scatter
train
def plot_scatter(f, xs, ys, size, pch, colour, title): """ Make a scatter plot from x,y coordinates. Arguments: f -- comma delimited file w/ x,y coordinates xs -- if f not specified this is a file w/ x coordinates ys -- if f not specified this is a file w/ y coordinates size -- size of the plot pch -- shape of the points (any character) colour -- colour of the points title -- title of the plot """ cs = None if f: if isinstance(f, str): with open(f) as fh: data = [tuple(line.strip().split(',')) for line in fh] else: data = [tuple(line.strip().split(',')) for line in f] xs = [float(i[0]) for i in data] ys = [float(i[1]) for i in data] if len(data[0]) > 2: cs = [i[2].strip() for i in data] elif isinstance(xs, list) and isinstance(ys, list): pass else: with open(xs) as fh: xs = [float(str(row).strip()) for row in fh] with open(ys) as fh: ys = [float(str(row).strip()) for row in fh] _plot_scatter(xs, ys, size, pch, colour, title, cs)
python
{ "resource": "" }
q34117
Word.syllabify
train
def syllabify(self): """ Syllabifier module for Middle High German. The algorithm works by applying the MOP (Maximal Onset Principle) on open syllables. For closed syllables, the legal partitions are checked and applied. The word is always returned in lowercase. Examples: >>> Word('entslâfen').syllabify() ['ent', 'slâ', 'fen'] >>> Word('fröude').syllabify() ['fröu', 'de'] >>> Word('füerest').syllabify() ['füe', 'rest'] """ # Array holding the index of each given syllable ind = [] i = 0 # Iterate through letters of word searching for the nuclei while i < len(self.word) - 1: if self.word[i] in SHORT_VOWELS + LONG_VOWELS: nucleus = '' # Find cluster of vowels while self.word[i] in SHORT_VOWELS + LONG_VOWELS and i < len(self.word) - 1: nucleus += self.word[i] i += 1 try: # Check whether it is succeeded by a geminate if self.word[i] == self.word[i + 1]: ind.append(i) i += 2 continue except IndexError: pass if nucleus in SHORT_VOWELS: ind.append(i + 2 if self.word[i:i+3] in TRIPHTHONGS else i + 1 if self.word[i:i + 2] in DIPHTHONGS else i) continue else: ind.append(i - 1) continue i += 1 self.syllabified = self.word for n, k in enumerate(ind): self.syllabified = self.syllabified[:k + n + 1] + "." + self.syllabified[k + n + 1:] # Check whether the last syllable lacks a vowel nucleus self.syllabified = self.syllabified.split(".") if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) == 0: self.syllabified[-2] += self.syllabified[-1] self.syllabified = self.syllabified[:-1] return self.syllabified
python
{ "resource": "" }
q34118
Word.ASCII_encoding
train
def ASCII_encoding(self): """Returns the ASCII encoding of a string""" w = unicodedata.normalize('NFKD', self.word).encode('ASCII', 'ignore') # Encode into ASCII, returns a bytestring w = w.decode('utf-8') # Convert back to string return w
python
{ "resource": "" }
q34119
ATFConverter._convert_consonant
train
def _convert_consonant(sign): """ Uses dictionary to replace ATF convention for unicode characters. input = ['as,', 'S,ATU', 'tet,', 'T,et', 'sza', 'ASZ'] output = ['aṣ', 'ṢATU', 'teṭ', 'Ṭet', 'ša', 'AŠ'] :param sign: string :return: string """ for key in TITTLES: sign = sign.replace(key, TITTLES[key]) return sign
python
{ "resource": "" }
q34120
ATFConverter._convert_number_to_subscript
train
def _convert_number_to_subscript(num): """ Converts number into subscript input = ["a", "a1", "a2", "a3", "be2", "be3", "bad2", "bad3"] output = ["a", "a₁", "a₂", "a₃", "be₂", "be₃", "bad₂", "bad₃"] :param num: number called after sign :return: number in subscript """ subscript = '' for character in str(num): subscript += chr(0x2080 + int(character)) return subscript
python
{ "resource": "" }
q34121
ATFConverter._convert_num
train
def _convert_num(self, sign): """ Converts number registered in get_number_from_sign. input = ["a2", "☉", "be3"] output = ["a₂", "☉", "be₃"] :param sign: string :return sign: string """ # Check if there's a number at the end new_sign, num = self._get_number_from_sign(sign) if num < 2: # "ab" -> "ab" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if num > 3: # "buru14" -> "buru₁₄" return new_sign.replace(str(num), self._convert_number_to_subscript(num)) if self.two_three: # pylint: disable=no-else-return return new_sign.replace(str(num), self._convert_number_to_subscript(num)) else: # "bad3" -> "bàd" for i, character in enumerate(new_sign): new_vowel = '' if character in VOWELS: if num == 2: # noinspection PyUnusedLocal new_vowel = character + chr(0x0301) elif num == 3: new_vowel = character + chr(0x0300) break return new_sign[:i] + normalize('NFC', new_vowel) + \ new_sign[i+1:].replace(str(num), '')
python
{ "resource": "" }
q34122
ATFConverter.process
train
def process(self, text_string): """ Expects a list of tokens; returns the list converted from ATF format to print-format. input = ["a", "a2", "a3", "geme2", "bad3", "buru14"] output = ["a", "á", "à", "géme", "bàd", "buru₁₄"] :param text_string: list of token strings :return: list of converted tokens """ output = [self._convert_num(self._convert_consonant(token)) for token in text_string] return output
python
{ "resource": "" }
q34123
Levenshtein.Levenshtein_Distance
train
def Levenshtein_Distance(w1, w2): """ Computes Levenshtein Distance between two words Args: :param w1: str :param w2: str :return: int Examples: >>> Levenshtein.Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 >>> Levenshtein.Levenshtein_Distance('orbis', 'robis') 2 """ m, n = len(w1), len(w2) v1 = [i for i in range(n + 1)] v2 = [0 for i in range(n + 1)] for i in range(m): v2[0] = i + 1 for j in range(n): delCost = v1[j + 1] + 1 insCost = v2[j] + 1 subCost = v1[j] if w1[i] != w2[j]: subCost += 1 v2[j + 1] = min(delCost, insCost, subCost) v1, v2 = v2, v1 return v1[-1]
python
{ "resource": "" }
q34124
Levenshtein.Damerau_Levenshtein_Distance
train
def Damerau_Levenshtein_Distance(w1, w2): """ Computes Damerau-Levenshtein Distance between two words Args: :param w1: str :param w2: str :return: int Examples: For the most part, Damerau-Levenshtein behaves identically to Levenshtein: >>> Levenshtein.Damerau_Levenshtein_Distance('noctis', 'noctem') 2 >>> Levenshtein.Levenshtein_Distance('nox', 'nochem') 4 The strength of DL lies in detecting transposition of characters: >>> Levenshtein.Damerau_Levenshtein_Distance('orbis', 'robis') 1 """ # Define alphabet alph = sorted(list(set(w1 + w2))) # Calculate alphabet size alph_s = len(alph) dam_ar = [0 for _ in range(alph_s)] mat = [[0 for _ in range(len(w2) + 2)] for _ in range(len(w1) + 2)] max_dist = len(w1) + len(w2) mat[0][0] = max_dist # Initialize matrix margin to the maximum possible distance (essentially inf) for ease of calculations (avoiding try blocks) for i in range(1, len(w1) + 2): mat[i][0] = max_dist mat[i][1] = i - 1 for i in range(1, len(w2) + 2): mat[0][i] = max_dist mat[1][i] = i - 1 for i in range(2, len(w1) + 2): tem = 0 for j in range(2, len(w2) + 2): k = dam_ar[alph.index(w2[j - 2])] l = tem if w1[i - 2] == w2[j - 2]: cost = 0 tem = j else: cost = 1 # The recurrence relation of DL is identical to that of Levenshtein with the addition of transposition mat[i][j] = min(mat[i - 1][j - 1] + cost, mat[i][j - 1] + 1, mat[i - 1][j] + 1, mat[k - 1][l - 1] + i + j - k - l - 1) dam_ar[alph.index(w1[i - 2])] = i return mat[-1][-1]
python
{ "resource": "" }
q34125
Frequency.counter_from_str
train
def counter_from_str(self, string): """Build word frequency list from incoming string.""" string_list = [chars for chars in string if chars not in self.punctuation] string_joined = ''.join(string_list) tokens = self.punkt.word_tokenize(string_joined) return Counter(tokens)
python
{ "resource": "" }
q34126
Frequency._assemble_corpus_string
train
def _assemble_corpus_string(self, corpus): """Takes a corpus name ('phi5' or 'tlg') and yields the cleaned, lower-cased contents of each author file.""" if corpus == 'phi5': filepaths = assemble_phi5_author_filepaths() file_cleaner = phi5_plaintext_cleanup elif corpus == 'tlg': filepaths = assemble_tlg_author_filepaths() file_cleaner = tlg_plaintext_cleanup for filepath in filepaths: with open(filepath) as file_open: file_read = file_open.read().lower() file_clean = file_cleaner(file_read) yield file_clean
python
{ "resource": "" }
q34127
remove_punctuation_dict
train
def remove_punctuation_dict() -> Dict[int, None]: """ Provide a dictionary for removing punctuation, swallowing spaces. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... remove_punctuation_dict()).lstrip()) Im ok Oh Fine """ tmp = dict((i, None) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')) return tmp
python
{ "resource": "" }
q34128
punctuation_for_spaces_dict
train
def punctuation_for_spaces_dict() -> Dict[int, str]: """ Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion to keep stress patterns in alignment with original vowel positions in the verse. :return dict with punctuation from the unicode table >>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate( ... punctuation_for_spaces_dict()).strip()) I m ok Oh Fine """ return dict((i, " ") for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))
python
{ "resource": "" }
q34129
differences
train
def differences(scansion: str, candidate: str) -> List[int]: """ Given two strings, return a list of index positions where the contents differ. :param scansion: :param candidate: :return: >>> differences("abc", "abz") [2] """ before = scansion.replace(" ", "") after = candidate.replace(" ", "") diffs = [] for idx, tmp in enumerate(before): if before[idx] != after[idx]: diffs.append(idx) return diffs
python
{ "resource": "" }
q34130
space_list
train
def space_list(line: str) -> List[int]: """ Given a string, return a list of index positions where a blank space occurs. :param line: :return: >>> space_list(" abc ") [0, 1, 2, 3, 7] """ spaces = [] for idx, car in enumerate(list(line)): if car == " ": spaces.append(idx) return spaces
python
{ "resource": "" }
q34131
to_syllables_with_trailing_spaces
train
def to_syllables_with_trailing_spaces(line: str, syllables: List[str]) -> List[str]: """ Given a line of syllables and spaces, and a list of syllables, produce a list of the syllables with trailing spaces attached as appropriate. :param line: :param syllables: :return: >>> to_syllables_with_trailing_spaces(' arma virumque cano ', ... ['ar', 'ma', 'vi', 'rum', 'que', 'ca', 'no' ]) [' ar', 'ma ', 'vi', 'rum', 'que ', 'ca', 'no '] """ syllabs_spaces = [] idx = 0 linelen = len(line) for position, syl in enumerate(syllables): start = line.index(syl, idx) idx = start + len(syl) if position == 0 and start > 0: # line starts with punctuation, substituted w/ spaces syl = (start * " ") + syl if idx + 1 > len(line): syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[idx] if nextchar != " ": syllabs_spaces.append(syl) continue else: tmpidx = idx while tmpidx < linelen and nextchar == " ": syl += " " tmpidx += 1 if tmpidx == linelen: syllabs_spaces.append(syl) return syllabs_spaces nextchar = line[tmpidx] idx = tmpidx - 1 syllabs_spaces.append(syl) return syllabs_spaces
python
{ "resource": "" }
q34132
join_syllables_spaces
train
def join_syllables_spaces(syllables: List[str], spaces: List[int]) -> str: """ Given a list of syllables, and a list of integers indicating the position of spaces, return a string that has a space inserted at the designated points. :param syllables: :param spaces: :return: >>> join_syllables_spaces(["won", "to", "tree", "dun"], [3, 6, 11]) 'won to tree dun' """ syllable_line = list("".join(syllables)) for space in spaces: syllable_line.insert(space, " ") return "".join(flatten(syllable_line))
python
{ "resource": "" }
q34133
stress_positions
train
def stress_positions(stress: str, scansion: str) -> List[int]: """ Given a stress value and a scansion line, return the index positions of the stresses. :param stress: :param scansion: :return: >>> stress_positions("-", " - U U - UU - U U") [0, 3, 6] """ line = scansion.replace(" ", "") stresses = [] for idx, char in enumerate(line): if char == stress: stresses.append(idx) return stresses
python
{ "resource": "" }
q34134
merge_elisions
train
def merge_elisions(elided: List[str]) -> str: """ Given a list of strings with different space swapping elisions applied, merge the elisions, taking the most without compounding the omissions. :param elided: :return: >>> merge_elisions([ ... "ignavae agua multum hiatus", "ignav agua multum hiatus" ,"ignavae agua mult hiatus"]) 'ignav agua mult hiatus' """ results = list(elided[0]) for line in elided: for idx, car in enumerate(line): if car == " ": results[idx] = " " return "".join(results)
python
{ "resource": "" }
q34135
move_consonant_right
train
def move_consonant_right(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the right, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_right(list("abbra"), [ 2, 3]) ['a', 'b', '', '', 'bra'] """ for pos in positions: letters[pos + 1] = letters[pos] + letters[pos + 1] letters[pos] = "" return letters
python
{ "resource": "" }
q34136
move_consonant_left
train
def move_consonant_left(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letters, and a list of consonant positions, move the consonant positions to the left, merging strings as necessary. :param letters: :param positions: :return: >>> move_consonant_left(['a', 'b', '', '', 'bra'], [1]) ['ab', '', '', '', 'bra'] """ for pos in positions: letters[pos - 1] = letters[pos - 1] + letters[pos] letters[pos] = "" return letters
python
{ "resource": "" }
q34137
merge_next
train
def merge_next(letters: List[str], positions: List[int]) -> List[str]: """ Given a list of letter positions, merge each letter with its next neighbor. :param letters: :param positions: :return: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2]) ['ab', '', 'ov', '', 'o'] >>> # Note: because it operates on the original list passed in, the effect is not cumulative: >>> merge_next(['a', 'b', 'o', 'v', 'o' ], [0, 2, 3]) ['ab', '', 'ov', 'o', ''] """ for pos in positions: letters[pos] = letters[pos] + letters[pos + 1] letters[pos + 1] = "" return letters
python
{ "resource": "" }
q34138
remove_blanks
train
def remove_blanks(letters: List[str]): """ Given a list of letters, remove any empty strings. :param letters: :return: >>> remove_blanks(['a', '', 'b', '', 'c']) ['a', 'b', 'c'] """ cleaned = [] for letter in letters: if letter != "": cleaned.append(letter) return cleaned
python
{ "resource": "" }
q34139
split_on
train
def split_on(word: str, section: str) -> Tuple[str, str]: """ Given a string, split on a section, and return the two sections as a tuple. :param word: :param section: :return: >>> split_on('hamrye', 'ham') ('ham', 'rye') """ return word[:word.index(section)] + section, word[word.index(section) + len(section):]
python
{ "resource": "" }
q34140
remove_blank_spaces
train
def remove_blank_spaces(syllables: List[str]) -> List[str]: """ Given a list of letters, remove any blank spaces or empty strings. :param syllables: :return: >>> remove_blank_spaces(['', 'a', ' ', 'b', ' ', 'c', '']) ['a', 'b', 'c'] """ cleaned = [] for syl in syllables: if syl == " " or syl == '': pass else: cleaned.append(syl) return cleaned
python
{ "resource": "" }
q34141
overwrite
train
def overwrite(char_list: List[str], regexp: str, quality: str, offset: int = 0) -> List[str]: """ Given a list of characters and spaces, a matching regular expression, and a quality character, overwrite the character at each match position (shifted by the optional offset) with the quality character. :param char_list: :param regexp: :param quality: :param offset: :return: >>> overwrite(list('multe igne'), r'e\s[aeiou]', ' ') ['m', 'u', 'l', 't', ' ', ' ', 'i', 'g', 'n', 'e'] """ long_matcher = re.compile(regexp) line = "".join(char_list) long_positions = long_matcher.finditer(line) for match in long_positions: (start, end) = match.span() # pylint: disable=unused-variable char_list[start + offset] = quality return char_list
python
{ "resource": "" }
q34142
get_unstresses
train
def get_unstresses(stresses: List[int], count: int) -> List[int]: """ Given a list of stressed positions, and count of possible positions, return a list of the unstressed positions. :param stresses: a list of stressed positions :param count: the number of possible positions :return: a list of unstressed positions >>> get_unstresses([0, 3, 6, 9, 12, 15], 17) [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16] """ return list(set(range(count)) - set(stresses))
python
{ "resource": "" }
q34143
decline_strong_masculine_noun
train
def decline_strong_masculine_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong masculine nouns. >>> decline_strong_masculine_noun("armr", "arms", "armar") armr arm armi arms armar arma örmum arma # >>> decline_strong_masculine_noun("ketill", "ketils", "katlar") # ketill # ketil # katli # ketils # katlar # katla # kötlum # katla >>> decline_strong_masculine_noun("mór", "mós", "móar") mór mó mói mós móar móa móum móa >>> decline_strong_masculine_noun("hirðir", "hirðis", "hirðar") hirðir hirð hirði hirðis hirðar hirða hirðum hirða >>> decline_strong_masculine_noun("söngr", "söngs", "söngvar") söngr söng söngvi söngs söngvar söngva söngvum söngva >>> decline_strong_masculine_noun("gestr", "gests", "gestir") gestr gest gesti gests gestir gesti gestum gesta >>> decline_strong_masculine_noun("staðr", "staðar", "staðir") staðr stað staði staðar staðir staði stöðum staða # >>> decline_strong_masculine_noun("skjöldr", "skjaldar", "skildir") # skjöldr # skjöld # skildi # skjaldar # skildir # skjöldu # skjöldum # skjalda # # >>> decline_strong_masculine_noun("völlr", "vallar", "vellir") # völlr # völl # velli # vallar # vellir # völlu # völlum # valla # # >>> decline_strong_masculine_noun("fögnuðr", "fagnaðar", "fagnaðir") # fögnuðr # fögnuð # fagnaði # fagnaðar # fagnaðir # fögnuðu # fögnuðum # fagnaða a-stem armr, arm, armi, arms; armar, arma, örmum, arma ketill, ketil, katli, ketils; katlar, katla, kötlum, katla mór, mó, mó, mós; móar, móa, móm, móa hirðir, hirði, hirði, hirðis; hirðar, hirða, hirðum, hirða söngr, söng, söngvi, söngs; söngvar, söngva, söngvum, söngva i-stem gestr, gest, gest, gests; gestir, gesti, gestum, gesta staðr, stað, stað, staðar; staðir, staði, stöðum, staða # u-stem # skjöldr, skjöld, skildi, skjaldar; skildir, skjöldu, skjöldum, skjalda # völlr, völl, velli, vallar; vellir, völlu, völlum, valla # fögnuðr, fögnuð, fagnaði, fagnaðar; fagnaðir, fögnuðu, fögnuðum, fagnaða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ np_syl = s.syllabify_ssp(np) last_np_syl = np_syl[-1] if last_np_syl.endswith("ar"): # a-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular print(common_stem) # dative singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural if last_np_syl.endswith("ar"): print(np[:-1]) elif last_np_syl.endswith("ir"): print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a") elif last_np_syl.endswith("ir"): # if has_u_umlaut(ns): # # u-stem # common_stem = ns[:-1] # # # nominative singular # print(ns) # # # accusative singular # print(common_stem) # # # dative singular # if np[len(common_stem):][0] == "v": # print(common_stem + "vi") # else: # print(common_stem + "i") # # # genitive singular # print(gs) # # common_stem_p = np[:-2] # # nominative plural # print(np) # # # accusative plural # print(apply_u_umlaut(common_stem_p)+"u") # # # dative plural # if np[len(common_stem):][0] == "v": # print(apply_u_umlaut(common_stem_p) + "vum") # # elif np[len(common_stem):][0] == "j": # print(apply_u_umlaut(common_stem_p) + "jum") # else: # print(apply_u_umlaut(common_stem_p) + "um") # # # genitive plural # if np[len(common_stem):][0] == "v": # print(common_stem_p + "va") # elif np[len(common_stem):][0] == "j": # print(common_stem_p + "ja") # else: # print(common_stem_p + "a") # else: # i-stem common_stem = extract_common_stem(ns, gs, np) # nominative singular print(ns) # accusative singular print(common_stem) # dative singular if np[len(common_stem):][0] == "v": print(common_stem + "vi") else: print(common_stem + "i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if np[len(common_stem):][0] == "v": print(apply_u_umlaut(common_stem) + "vum") elif np[len(common_stem):][0] == "j": print(apply_u_umlaut(common_stem) + "jum") else: print(apply_u_umlaut(common_stem) + "um") # genitive plural if np[len(common_stem):][0] == "v": print(common_stem + "va") elif np[len(common_stem):][0] == "j": print(common_stem + "ja") else: print(common_stem + "a")
python
{ "resource": "" }
q34144
decline_strong_feminine_noun
train
def decline_strong_feminine_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong feminine nouns. o macron-stem Most strong feminine nouns follow the declension of rún and för. >>> decline_strong_feminine_noun("rún", "rúnar", "rúnar") rún rún rún rúnar rúnar rúnar rúnum rúna >>> decline_strong_feminine_noun("för", "farar", "farar") för för för farar farar farar förum fara >>> decline_strong_feminine_noun("kerling", "kerlingar", "kerlingar") kerling kerling kerlingu kerlingar kerlingar kerlingar kerlingum kerlinga >>> decline_strong_feminine_noun("skel", "skeljar", "skeljar") skel skel skel skeljar skeljar skeljar skeljum skelja >>> decline_strong_feminine_noun("ör", "örvar", "örvar") ör ör ör örvar örvar örvar örum örva >>> decline_strong_feminine_noun("heiðr", "heiðar", "heiðar") heiðr heiði heiði heiðar heiðar heiðar heiðum heiða i-stem >>> decline_strong_feminine_noun("öxl", "axlar", "axlir") öxl öxl öxl axlar axlir axlir öxlum axla >>> decline_strong_feminine_noun("höfn", "hafnar", "hafnir") höfn höfn höfn hafnar hafnir hafnir höfnum hafna >>> decline_strong_feminine_noun("norn", "nornar", "nornir") norn norn norn nornar nornir nornir nornum norna >>> decline_strong_feminine_noun("jörð", "jarðar", "jarðir") jörð jörð jörð jarðar jarðir jarðir jörðum jarða >>> decline_strong_feminine_noun("borg", "borgar", "borgir") borg borg borgu borgar borgir borgir borgum borga :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") else: print(ns) # dative singular if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS: print(ns[:-1]+"i") elif ns.endswith("ing") or ns.endswith("rg"): print(ns + "u") else: print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural if np[len(np[:-3]):][0] == "v": print(apply_u_umlaut(np[:-2])[:-1]+"um") elif np[len(np[:-3]):][0] == "j": print(apply_u_umlaut(np[:-2])+"um") else: print(apply_u_umlaut(np[:-2])+"um") # genitive plural print(np[:-2]+"a")
python
{ "resource": "" }
q34145
decline_strong_neuter_noun
train
def decline_strong_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of strong neuter nouns. a-stem Most strong neuter nouns follow the declensions of skip, land and herað. >>> decline_strong_neuter_noun("skip", "skips", "skip") skip skip skipi skips skip skip skipum skipa >>> decline_strong_neuter_noun("land", "lands", "lönd") land land landi lands lönd lönd löndum landa >>> decline_strong_neuter_noun("herað", "heraðs", "heruð") herað herað heraði heraðs heruð heruð heruðum heraða # >>> decline_strong_neuter_noun("kyn", "kyns", "kyn") # kyn # kyn # kyni # kyns # kyn # kyn # kynjum # kynja # # >>> decline_strong_neuter_noun("högg", "höggs", "högg") # högg # högg # höggvi # höggs # högg # högg # höggum # höggva >>> decline_strong_neuter_noun("kvæði", "kvæðis", "kvæði") kvæði kvæði kvæði kvæðis kvæði kvæði kvæðum kvæða :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(ns) # dative singular if ns[-1] == "i": print(ns) # TODO +"vi" else: print(ns+"i") # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural if ns[-1] in CONSONANTS: print(apply_u_umlaut(np)+"um") else: print(apply_u_umlaut(np[:-1]) + "um") # TODO +"vum" # genitive plural if ns[-1] in CONSONANTS: print(ns+"a") # TODO + "va" else: print(ns[:-1]+"a")
python
{ "resource": "" }
q34146
decline_weak_masculine_noun
train
def decline_weak_masculine_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak masculine nouns. >>> decline_weak_masculine_noun("goði", "goða", "goðar") goði goða goða goða goðar goða goðum goða >>> decline_weak_masculine_noun("hluti", "hluta", "hlutar") hluti hluta hluta hluta hlutar hluta hlutum hluta >>> decline_weak_masculine_noun("arfi", "arfa", "arfar") arfi arfa arfa arfa arfar arfa örfum arfa >>> decline_weak_masculine_noun("bryti", "bryta", "brytjar") bryti bryta bryta bryta brytjar brytja brytjum brytja >>> decline_weak_masculine_noun("vöðvi", "vöðva", "vöðvar") vöðvi vöðva vöðva vöðva vöðvar vöðva vöðum vöðva The main pattern is: :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np[:-1]) # dative plural if len(np) > 3 and np[-3] == "v": print(apply_u_umlaut(np[:-3]) + "um") else: print(apply_u_umlaut(np[:-2]) + "um") # genitive plural print(np[:-1])
python
{ "resource": "" }
q34147
decline_weak_feminine_noun
train
def decline_weak_feminine_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak feminine nouns. >>> decline_weak_feminine_noun("saga", "sögu", "sögur") saga sögu sögu sögu sögur sögur sögum sagna >>> decline_weak_feminine_noun("kona", "konu", "konur") kona konu konu konu konur konur konum kvenna >>> decline_weak_feminine_noun("kirkja", "kirkju", "kirkjur") kirkja kirkju kirkju kirkju kirkjur kirkjur kirkjum kirkna >>> decline_weak_feminine_noun("völva", "völu", "völur") völva völu völu völu völur völur völum völna >>> decline_weak_feminine_noun("speki", "speki", "") speki speki speki speki >>> decline_weak_feminine_noun("reiði", "reiði", "") reiði reiði reiði reiði >>> decline_weak_feminine_noun("elli", "elli", "") elli elli elli elli >>> decline_weak_feminine_noun("frœði", "frœði", "") frœði frœði frœði frœði It is to note that the genitive plural of völva is not attested so the given form is analogously reconstructed. The main pattern is: -a -u -u -u -ur -ur -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ if ns[-1] == "i" and gs[-1] == "i" and not np: print(ns) print(ns) print(ns) print(ns) else: # nominative singular print(ns) # accusative singular print(gs) # dative singular print(gs) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np[:-1]+"m") # genitive plural if ns == "kona": print("kvenna") elif ns[-2] == "v" or ns[-2] == "j": print(ns[:-2]+"na") else: print(ns[:-1]+"na")
python
{ "resource": "" }
q34148
decline_weak_neuter_noun
train
def decline_weak_neuter_noun(ns: str, gs: str, np: str): """ Gives the full declension of weak neuter nouns. >>> decline_weak_neuter_noun("auga", "auga", "augu") auga auga auga auga augu augu augum augna >>> decline_weak_neuter_noun("hjarta", "hjarta", "hjörtu") hjarta hjarta hjarta hjarta hjörtu hjörtu hjörtum hjartna >>> decline_weak_neuter_noun("lunga", "lunga", "lungu") lunga lunga lunga lunga lungu lungu lungum lungna >>> decline_weak_neuter_noun("eyra", "eyra", "eyru") eyra eyra eyra eyra eyru eyru eyrum eyrna The main pattern is: -a -a -a -a -u -u -um -na :param ns: nominative singular :param gs: genitive singular :param np: nominative plural :return: """ # nominative singular print(ns) # accusative singular print(ns) # dative singular print(ns) # genitive singular print(gs) # nominative plural print(np) # accusative plural print(np) # dative plural print(np+"m") # genitive plural print(ns[:-1]+"na")
python
{ "resource": "" }
q34149
select_id_by_name
train
def select_id_by_name(query): """Do a case-insensitive regex match on author name; return a list of matching (TLG id, author) tuples.""" id_author = get_id_author() comp = regex.compile(r'{}'.format(query.casefold()), flags=regex.VERSION1) matches = [] for _id, author in id_author.items(): match = comp.findall(author.casefold()) if match: matches.append((_id, author)) return matches
python
{ "resource": "" }
q34150
get_date_of_author
train
def get_date_of_author(_id): """Pass author id and return the name of its associated date.""" _dict = get_date_author() for date, ids in _dict.items(): if _id in ids: return date return None
python
{ "resource": "" }
q34151
_get_epoch
train
def _get_epoch(_str): """Take incoming string, return its epoch.""" _return = None if _str.startswith('A.D. '): _return = 'ad' elif _str.startswith('a. A.D. '): _return = None #? elif _str.startswith('p. A.D. '): _return = 'ad' elif regex.match(r'^[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^a\. *[0-9]+ B\.C\. *', _str): _return = 'bc' elif regex.match(r'^p\. *[0-9]+ B\.C\. *', _str): _return = None #? elif _str == 'Incertum' or _str == 'Varia': _return = _str return _return
python
{ "resource": "" }
q34152
NaiveDecliner.decline_noun
train
def decline_noun(self, noun, gender, mimation=True): """Return a list of all possible declined forms given any form of a noun and its gender.""" stem = self.stemmer.get_stem(noun, gender) declension = [] for case in self.endings[gender]['singular']: if gender == 'm': form = stem + self.endings[gender]['singular'][case] else: form = stem + self.endings[gender]['singular'][case][1:] declension.append((form, {'case': case, 'number': 'singular'})) for case in self.endings[gender]['dual']: if gender == 'm': form = stem + self.endings[gender]['dual'][case] else: form = stem + self.endings[gender]['dual'][case][1:] declension.append((form, {'case': case, 'number': 'dual'})) for case in self.endings[gender]['plural']: if gender == 'm': form = stem + self.endings[gender]['plural'][case] else: if stem[-3] in self.akkadian['macron_vowels']: theme_vowel = stem[-3] else: theme_vowel = 'ā' ending = [x for x in self.endings[gender]['plural'][case] if x[0] == theme_vowel] if stem[-2] in self.akkadian['short_vowels']: form = stem[:-2] + ending[0] elif stem[-1] in self.akkadian['consonants'] and stem[-2] in self.akkadian['macron_vowels']: form = stem + ending[0] else: form = stem[:-1] + ending[0] declension.append((form, {'case': case, 'number': 'plural'})) return declension
python
{ "resource": "" }
q34153
stem
train
def stem(text): """Stem each word of the French text.""" text = text.lower() stemmed_text = '' word_tokenizer = WordTokenizer('french') tokenized_text = word_tokenizer.tokenize(text) for word in tokenized_text: """remove the simple endings from the target word""" word, was_stemmed = matchremove_noun_endings(word) """if word didn't match the simple endings, try verb endings""" if not was_stemmed: word = matchremove_verb_endings(word) """add the stemmed word to the text""" stemmed_text += word + ' ' return stemmed_text
python
{ "resource": "" }
q34154
VerseScanner.transform_i_to_j_optional
train
def transform_i_to_j_optional(self, line: str) -> str: """ Sometimes for the demands of meter a more permissive i to j transformation is warranted. :param line: :return: >>> print(VerseScanner().transform_i_to_j_optional("Italiam")) Italjam >>> print(VerseScanner().transform_i_to_j_optional("Lāvīniaque")) Lāvīnjaque >>> print(VerseScanner().transform_i_to_j_optional("omnium")) omnjum """ words = line.split(" ") space_list = string_utils.space_list(line) corrected_words = [] for word in words: found = False for prefix in self.constants.PREFIXES: if word.startswith(prefix) and word != prefix: corrected_words.append(self.syllabifier.convert_consonantal_i(prefix)) corrected_words.append( self.syllabifier.convert_consonantal_i(word[len(prefix):])) found = True break if not found: corrected_words.append(self.syllabifier.convert_consonantal_i(word)) new_line = string_utils.join_syllables_spaces(corrected_words, space_list) # the following two may be tunable and subject to improvement char_list = string_utils.overwrite(list(new_line), "[bcdfgjkmpqrstvwxzBCDFGHJKMPQRSTVWXZ][i][{}]".format( self.constants.VOWELS_WO_I), "j", 1) char_list = string_utils.overwrite(char_list, "[{}][iI][{}]".format(self.constants.LIQUIDS, self.constants.VOWELS_WO_I), "j", 1) return "".join(char_list)
python
{ "resource": "" }
q34155
VerseScanner.accent_by_position
train
def accent_by_position(self, verse_line: str) -> str: """ Accent vowels according to the rules of scansion. :param verse_line: a line of unaccented verse :return: the same line with vowels accented by position >>> print(VerseScanner().accent_by_position( ... "Arma virumque cano, Troiae qui primus ab oris").lstrip()) Ārma virūmque canō Trojae qui primus ab oris """ line = verse_line.translate(self.punctuation_substitutions) line = self.transform_i_to_j(line) marks = list(line) # locate and save dipthong positions since we don't want them being accented dipthong_positions = [] for dipth in self.constants.DIPTHONGS: if dipth in line: dipthong_positions.append(line.find(dipth)) # Vowels followed by 2 consonants # The digraphs ch, ph, th, qu and sometimes gu and su count as single consonants. # see http://people.virginia.edu/~jdk3t/epicintrog/scansion.htm marks = string_utils.overwrite(marks, "[{}][{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # one space (or more for 'dropped' punctuation may intervene) marks = string_utils.overwrite(marks, r"[{}][{}]\s*[{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # ... if both consonants are in the next word, the vowel may be long # .... but it could be short if the vowel is not on the thesis/emphatic part of the foot # ... see Gildersleeve and Lodge p.446 marks = string_utils.overwrite(marks, r"[{}]\s*[{}][{}]".format( self.constants.VOWELS, self.constants.CONSONANTS, self.constants.CONSONANTS_WO_H), self.constants.STRESSED) # x is considered as two letters marks = string_utils.overwrite(marks, "[{}][xX]".format(self.constants.VOWELS), self.constants.STRESSED) # z is considered as two letters marks = string_utils.overwrite(marks, r"[{}][zZ]".format(self.constants.VOWELS), self.constants.STRESSED) original_verse = list(line) for idx, word in enumerate(original_verse): if marks[idx] == self.constants.STRESSED: original_verse[idx] = self.constants.VOWELS_TO_ACCENTS[original_verse[idx]] # make sure dipthongs aren't accented for idx in dipthong_positions: if original_verse[idx + 1] in self.constants.ACCENTS_TO_VOWELS: original_verse[idx + 1] = self.constants.ACCENTS_TO_VOWELS[original_verse[idx + 1]] return "".join(original_verse)
python
{ "resource": "" }
q34156
VerseScanner.calc_offset
train
def calc_offset(self, syllables_spaces: List[str]) -> Dict[int, int]: """ Calculate a dictionary of accent positions from a list of syllables with spaces. :param syllables_spaces: :return: """ line = string_utils.flatten(syllables_spaces) mydict = {} # type: Dict[int, int] for idx, syl in enumerate(syllables_spaces): target_syllable = syllables_spaces[idx] skip_qu = string_utils.starts_with_qu(target_syllable) matches = list(self.syllable_matcher.finditer(target_syllable)) for position, possible in enumerate(matches): if skip_qu: skip_qu = False continue (start, end) = possible.span() if target_syllable[start:end] in self.constants.VOWELS + self.constants.ACCENTED_VOWELS: part = line[:len("".join(syllables_spaces[:idx]))] offset = len(part) + start if line[offset] not in self.constants.VOWELS + self.constants.ACCENTED_VOWELS: LOG.error("Problem at line {} offset {}".format(line, offset)) mydict[idx] = offset return mydict
python
{ "resource": "" }
q34157
VerseScanner.produce_scansion
train
def produce_scansion(self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int]) -> str: """ Create a scansion string that has stressed and unstressed syllable positions in locations that correspond with the original text's syllable vowels. :param stresses: list of syllable positions :param syllables_wspaces: list of syllables with spaces escaped for punctuation or elision :param offset_map: dictionary of syllable positions, and an offset amount which is the number of spaces to skip in the original line before inserting the accent. """ scansion = list(" " * len(string_utils.flatten(syllables_wspaces))) unstresses = string_utils.get_unstresses(stresses, len(syllables_wspaces)) try: for idx in unstresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.UNSTRESSED for idx in stresses: location = offset_map.get(idx) if location is not None: scansion[location] = self.constants.STRESSED except Exception as e: LOG.error("problem with syllables; check syllabification {}, {}".format( syllables_wspaces, e)) return "".join(scansion)
python
{ "resource": "" }
q34158
VerseScanner.flag_dipthongs
train
def flag_dipthongs(self, syllables: List[str]) -> List[int]: """ Return the positions of syllables that contain a dipthong. :param syllables: :return: list of syllable indexes """ long_positions = [] for idx, syl in enumerate(syllables): for dipthong in self.constants.DIPTHONGS: if dipthong in syllables[idx]: if not string_utils.starts_with_qu(syllables[idx]): long_positions.append(idx) return long_positions
python
{ "resource": "" }
q34159
VerseScanner.elide
train
def elide(self, line: str, regexp: str, quantity: int = 1, offset: int = 0) -> str: """ Erase a section of a line, matching on a regex, pushing in a quantity of blank spaces, and jumping forward with an offset if necessary. If the elided vowel was strong, the vowel merged with takes on the stress. :param line: :param regexp: :param quantity: :param offset: :return: >>> print(VerseScanner().elide("uvae avaritia", r"[e]\s*[a]")) uv āvaritia >>> print(VerseScanner().elide("mare avaritia", r"[e]\s*[a]")) mar avaritia """ matcher = re.compile(regexp) positions = matcher.finditer(line) new_line = line for match in positions: (start, end) = match.span() # pylint: disable=unused-variable if (start > 0) and new_line[start - 1: start + 1] in self.constants.DIPTHONGS: vowel_to_coerce = new_line[end - 1] new_line = new_line[:(start - 1) + offset] + (" " * (quantity + 2)) + \ self.constants.stress_accent_dict[vowel_to_coerce] + new_line[end:] else: new_line = new_line[:start + offset] + \ (" " * quantity) + new_line[start + quantity + offset:] return new_line
python
{ "resource": "" }
q34160
VerseScanner.assign_candidate
train
def assign_candidate(self, verse: Verse, candidate: str) -> Verse: """ Helper method; make sure that the verse object is properly packaged. :param verse: :param candidate: :return: """ verse.scansion = candidate verse.valid = True verse.accented = self.formatter.merge_line_scansion( verse.original, verse.scansion) return verse
python
{ "resource": "" }
q34161
CollatinusDecliner.__getRoots
train
def __getRoots(self, lemma, model=None): """ Retrieve the known roots of a lemma :param lemma: Canonical form of the word (lemma) :type lemma: str :param model: Model data from the loaded self.__data__. Can be passed by decline() :type model: dict :return: Dictionary of roots with their root identifier as key :rtype: dict """ if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) ROOT_IDS = { "K": "lemma", "1": "geninf", "2": "perf" } lemma_entry = self.__lemmas__[lemma] original_roots = { root_id: lemma_entry[root_name].split(",") for root_id, root_name in ROOT_IDS.items() if root_id != "K" and lemma_entry[root_name] } returned_roots = {} if not model: model = self.__models__[lemma_entry["model"]] # For each registered root in the model, for model_root_id, model_root_data in model["R"].items(): # If we have K, it's equivalent to canonical form if model_root_data[0] == "K": returned_roots[model_root_id] = [lemma_entry["lemma"]] # Otherwise we have deletion number and addition char else: deletion, addition = int(model_root_data[0]), model_root_data[1] or "" # If the root is declared already, # we retrieve the information if model_root_id != "1" and model_root_id in returned_roots: lemma_roots = returned_roots[model_root_id] else: lemma_roots = lemma_entry["lemma"].split(",") # We construct the roots returned_roots[model_root_id] = [ lemma_root[:-deletion] + addition for lemma_root in lemma_roots ] original_roots.update(returned_roots) return original_roots
python
{ "resource": "" }
q34162
CollatinusDecliner.decline
train
def decline(self, lemma, flatten=False, collatinus_dict=False): """ Decline a lemma .. warning:: POS are incomplete as we do not detect the type outside of verbs, participle and adjective. :raise UnknownLemma: When the lemma is unknown to our data :param lemma: Lemma (Canonical form) to decline :type lemma: str :param flatten: If set to True, returns a list of forms without natural language information about them :type flatten: bool :param collatinus_dict: If set to True, returns a dictionary of grammatically valid forms, including variants, with keys corresponding to morpho informations. :type collatinus_dict: bool :return: List of tuple where first value is the form and second the pos, i.e. [("sum", "v1ppip---")] :rtype: list or dict """ if lemma not in self.__lemmas__: raise UnknownLemma("%s is unknown" % lemma) # Get data information lemma_entry = self.__lemmas__[lemma] model = self.__models__[lemma_entry["model"]] # Get the roots roots = self.__getRoots(lemma, model=model) # Get the known forms in order keys = sorted([int(key) for key in model["des"].keys()]) forms_data = [(key, model["des"][str(key)]) for key in keys] # Generate the return dict forms = {key: [] for key in keys} for key, form_list in forms_data: for form in form_list: root_id, endings = tuple(form) for root in roots[root_id]: for ending in endings: forms[key].append(root + ending) # sufd means we have the original forms of the parent but we add a suffix if len(model["sufd"]): # For each existing form for key, iter_forms in forms.items(): new_forms = [] # We add the constant suffix for sufd in model["sufd"]: new_forms += [form+sufd for form in iter_forms] forms[key] = new_forms # If we need a secure version of the forms. For example, if we have variants if len(model["suf"]): cached_forms = {k: v+[] for k, v in forms.items()} # Making cache without using copy # For each suffix # The format is [suffix characters, [modified forms]] for suffixes in model["suf"]: suffix, modified_forms = suffixes[0], suffixes[1] for modified_form in modified_forms: forms[modified_form] += [f+suffix for f in cached_forms[modified_form]] # We update with the new roots # If some forms do not exist, we delete them preemptively if len(model["abs"]): for abs_form in model["abs"]: if abs_form in forms: del forms[abs_form] if flatten: return list([form for case_forms in forms.values() for form in case_forms]) elif collatinus_dict: return forms else: return list( [(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms] )
python
{ "resource": "" }
q34163
_sentence_context
train
def _sentence_context(match, language='latin', case_insensitive=True): """Take one incoming regex match object and return the sentence in which the match occurs. :rtype : str :param match: regex.match :param language: str """ language_punct = {'greek': r'\.|;', 'latin': r'\.|\?|!'} assert language in language_punct.keys(), \ 'Available punctuation schemes: {}'.format(language_punct.keys()) start = match.start() end = match.end() window = 1000 snippet_left = match.string[start - window:start + 1] snippet_right = match.string[end:end + window] re_match = match.string[match.start():match.end()] comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1) # Left left_punct = [] for punct in comp_sent_boundary.finditer(snippet_left): end = punct.end() left_punct.append(end) try: last_period = left_punct.pop() + 1 except IndexError: last_period = 0 # Right right_punct = [] for punct in comp_sent_boundary.finditer(snippet_right): end = punct.end() right_punct.append(end) try: first_period = right_punct.pop(0) except IndexError: first_period = 0 sentence = snippet_left[last_period:-1] + '*' + re_match + '*' + snippet_right[0:first_period] return sentence
python
{ "resource": "" }
q34164
match_regex
train
def match_regex(input_str, pattern, language, context, case_insensitive=True): """Take input string and a regex pattern, then yield generator of matches in desired format. TODO: Rename this `match_pattern` and incorporate the keyword expansion code currently in search_corpus. :param input_str: :param pattern: :param language: :param context: Integer or 'sentence' 'paragraph' :rtype : str """ if type(context) is str: contexts = ['sentence', 'paragraph'] assert context in contexts or type(context) is int, 'Available contexts: {}'.format(contexts) else: context = int(context) for match in _regex_span(pattern, input_str, case_insensitive=case_insensitive): if context == 'sentence': yield _sentence_context(match, language) elif context == 'paragraph': yield _paragraph_context(match) else: yield _window_match(match, context)
python
{ "resource": "" }
q34165
make_worlist_trie
train
def make_worlist_trie(wordlist): """ Creates a nested dictionary representing the trie created by the given word list. :param wordlist: str list: :return: nested dictionary >>> make_worlist_trie(['einander', 'einen', 'neben']) {'e': {'i': {'n': {'a': {'n': {'d': {'e': {'r': {'__end__': '__end__'}}}}}, 'e': {'n': {'__end__': '__end__'}}}}}, 'n': {'e': {'b': {'e': {'n': {'__end__': '__end__'}}}}}} """ dicts = dict() for w in wordlist: curr = dicts for l in w: curr = curr.setdefault(l, {}) curr['__end__'] = '__end__' return dicts
python
{ "resource": "" }
q34166
MetricalValidator.is_valid_hendecasyllables
train
def is_valid_hendecasyllables(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Hendecasyllables metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool >>> print(MetricalValidator().is_valid_hendecasyllables("-U-UU-U-U-U")) True """ line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 11: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return line in self.VALID_HENDECASYLLABLES
python
{ "resource": "" }
q34167
MetricalValidator.is_valid_pentameter
train
def is_valid_pentameter(self, scanned_line: str) -> bool: """Determine if a scansion pattern is one of the valid Pentameter metrical patterns :param scanned_line: a line containing a sequence of stressed and unstressed syllables :return bool: whether or not the scansion is a valid pentameter >>> print(MetricalValidator().is_valid_pentameter('-UU-UU--UU-UUX')) True """ line = scanned_line.replace(self.constants.FOOT_SEPARATOR, "") line = line.replace(" ", "") if len(line) < 10: return False line = line[:-1] + self.constants.OPTIONAL_ENDING return line in self.VALID_PENTAMETERS
python
{ "resource": "" }
q34168
MetricalValidator.hexameter_feet
train
def hexameter_feet(self, scansion: str) -> List[str]: """ Produces a list of hexameter feet, stressed and unstressed syllables with spaces intact. If the scansion line is not entirely correct, it will attempt to corral one or more improper patterns into one or more feet. :param: scansion: the scanned line :return list of strings, representing the feet of the hexameter, or if the scansion is wildly incorrect, the function will return an empty list. >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - - - - - - U U - U")).strip() ) - U U |- - |- - |- - |- U U |- U >>> print("|".join(MetricalValidator().hexameter_feet( ... "- U U - - U - - - - U U - U")).strip()) - U U |- - |U - |- - |- U U |- U """ backwards_scan = list(scansion.rstrip()) feet = [] candidates = [self.constants.STRESSED + self.constants.OPTIONAL_ENDING, self.constants.STRESSED + self.constants.STRESSED, self.constants.STRESSED + self.constants.UNSTRESSED, self.constants.UNSTRESSED + self.constants.STRESSED] incomplete_foot = self.constants.UNSTRESSED + self.constants.UNSTRESSED try: while len(backwards_scan) > 0: spaces = [] chunk1 = backwards_scan.pop() while len("".join(chunk1).replace(" ", "")) == 0: if len(backwards_scan) == 0: feet.append(chunk1) return feet[::-1] chunk1 = backwards_scan.pop() + "".join(chunk1) chunk2 = backwards_scan.pop() while chunk2 == " ": spaces.append(chunk2) if len(backwards_scan) == 0: feet.append(chunk2) return feet[::-1] chunk2 = backwards_scan.pop() new_candidate = "".join(chunk2) + "".join(spaces) + "".join(chunk1) if new_candidate.replace(" ", "") in candidates: feet.append(new_candidate) else: if new_candidate.replace(" ", "") == incomplete_foot: spaces2 = [] previous_mark = backwards_scan.pop() while previous_mark == " ": spaces2.append(previous_mark) previous_mark = backwards_scan.pop() if previous_mark == self.constants.STRESSED: new_candidate = "".join(previous_mark) + "".join( spaces2) + new_candidate feet.append(new_candidate) else: feet.append(new_candidate) # invalid foot spaces3 = [] next_mark = backwards_scan.pop() while next_mark == " ": spaces3.append(previous_mark) next_mark = backwards_scan.pop() feet.append("".join(next_mark) + "".join(spaces3) + previous_mark) except Exception as ex: LOG.error("err at: {}, {}".format(scansion, ex)) return list() return feet[::-1]
python
{ "resource": "" }
q34169
MetricalValidator.closest_hexameter_patterns
train
def closest_hexameter_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid hexameter patterns.

        :return: list of the closest valid hexameter patterns; only candidates with a matching
        length/number of syllables are considered.

        >>> print(MetricalValidator().closest_hexameter_patterns('-UUUUU-----UU--'))
        ['-UU-UU-----UU--']
        """
        return self._closest_patterns(self.VALID_HEXAMETERS, scansion)
python
{ "resource": "" }
q34170
MetricalValidator.closest_pentameter_patterns
train
def closest_pentameter_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid pentameter patterns.

        :return: list of the closest valid pentameter patterns; only candidates with a matching
        length/number of syllables are considered.

        >>> print(MetricalValidator().closest_pentameter_patterns('--UUU--UU-UUX'))
        ['---UU--UU-UUX']
        """
        return self._closest_patterns(self.VALID_PENTAMETERS, scansion)
python
{ "resource": "" }
q34171
MetricalValidator.closest_hendecasyllable_patterns
train
def closest_hendecasyllable_patterns(self, scansion: str) -> List[str]:
        """
        Find the closest group of matching valid hendecasyllable patterns.

        :return: list of the closest valid hendecasyllable patterns; only candidates with a
        matching length/number of syllables are considered.

        >>> print(MetricalValidator().closest_hendecasyllable_patterns('UU-UU-U-U-X'))
        ['-U-UU-U-U-X', 'U--UU-U-U-X']
        """
        return self._closest_patterns(self.VALID_HENDECASYLLABLES, scansion)
python
{ "resource": "" }
q34172
MetricalValidator._closest_patterns
train
def _closest_patterns(self, patterns: List[str], scansion: str) -> List[str]:
        """
        Find the closest group of matching valid patterns.

        :param patterns: a list of valid patterns
        :param scansion: the scansion pattern thus far
        :return: list of the closest valid patterns; only candidates with a matching
        length/number of syllables are considered.
        """
        pattern = scansion.replace(" ", "")
        pattern = pattern.replace(self.constants.FOOT_SEPARATOR, "")
        ending = pattern[-1]
        candidate = pattern[:len(pattern) - 1] + self.constants.OPTIONAL_ENDING
        cans = [(distance(candidate, x), x)
                for x in patterns if len(x) == len(candidate)]
        if cans:
            cans = sorted(cans, key=lambda tup: tup[0])
            top = cans[0][0]
            return [can[1][:-1] + ending for can in cans if can[0] == top]
        return []
python
{ "resource": "" }
q34173
MetricalValidator._build_pentameter_templates
train
def _build_pentameter_templates(self) -> List[str]:
        """Create pentameter templates."""
        return [
            # '-UU|-UU|-|-UU|-UU|X'
            self.constants.DACTYL +
            self.constants.DACTYL +
            self.constants.STRESSED +
            self.constants.DACTYL +
            self.constants.DACTYL +
            self.constants.OPTIONAL_ENDING,
            # '-UU|--|-|-UU|-UU|X'
            self.constants.DACTYL +
            self.constants.SPONDEE +
            self.constants.STRESSED +
            self.constants.DACTYL +
            self.constants.DACTYL +
            self.constants.OPTIONAL_ENDING,
            # '--|-UU|-|-UU|-UU|X'
            self.constants.SPONDEE +
            self.constants.DACTYL +
            self.constants.STRESSED +
            self.constants.DACTYL +
            self.constants.DACTYL +
            self.constants.OPTIONAL_ENDING,
            # '--|--|-|-UU|-UU|X'
            self.constants.SPONDEE +
            self.constants.SPONDEE +
            self.constants.STRESSED +
            self.constants.DACTYL +
            self.constants.DACTYL +
            self.constants.OPTIONAL_ENDING]
python
{ "resource": "" }
q34174
LemmaReplacer._load_replacement_patterns
train
def _load_replacement_patterns(self):
        """Check for availability of lemmatizer for a language."""
        if self.language == 'latin':
            warnings.warn(
                "LemmaReplacer is deprecated and will soon be removed from CLTK. Please use the BackoffLatinLemmatizer at cltk.lemmatize.latin.backoff.",
                DeprecationWarning,
                stacklevel=2)
            rel_path = os.path.join('~', 'cltk_data', self.language,
                                    'model', 'latin_models_cltk',
                                    'lemmata', 'latin_lemmata_cltk.py')
            path = os.path.expanduser(rel_path)
            # logger.info('Loading lemmata. This may take a minute.')
            loader = importlib.machinery.SourceFileLoader('latin_lemmata_cltk', path)
        elif self.language == 'greek':
            rel_path = os.path.join('~', 'cltk_data', self.language,
                                    'model', 'greek_models_cltk',
                                    'lemmata', 'greek_lemmata_cltk.py')
            path = os.path.expanduser(rel_path)
            # logger.info('Loading lemmata. This may take a minute.')
            loader = importlib.machinery.SourceFileLoader('greek_lemmata_cltk', path)
        module = loader.load_module()
        lemmata = module.LEMMATA
        return lemmata
python
{ "resource": "" }
q34175
Needleman_Wunsch
train
def Needleman_Wunsch(w1, w2, d=-1, alphabet="abcdefghijklmnopqrstuvwxyz",
                     S=Default_Matrix(26, 1, -1)):
    """
    Computes alignment using the Needleman-Wunsch algorithm. The alphabet parameter
    is used for specifying the alphabetical order of the similarity matrix. The
    similarity matrix is initialized to an unweighted matrix that returns 1 for a
    match and -1 for a substitution.

    Args:
        :param w1: str
        :param w2: str
        :param d: int/float
        :param alphabet: str
        :param S: list
        :return: str tuple

    Examples:
    NW calculates the optimal string alignment based on a weighted matrix M.
    By default, an unweighted similarity matrix is used to represent substitution
    cost (1 if match, -1 otherwise).

    >>> Needleman_Wunsch('piscis', 'pesce')
    ('piscis', 'pesc-e')

    You can also define your own alphabet and matrix:

    >>> Needleman_Wunsch('pescare', 'piscia', alphabet="aceiprs", S=Default_Matrix(7, 1, -1))
    ('pesc-are', 'piscia--')

    Clearly, a weighted matrix should be used over the default one if linguistic
    accuracy is desired. The matrix can be defined manually through matching of
    manners of articulation, or stochastically by detecting the most common
    substitutions. A simple example follows:

    First define the similarity matrix:

    >>> M = Default_Matrix(7, 1, -1)

    We now want to increase the score for matching a to i:

    >>> M[0][3] = 0.8
    >>> M[3][0] = 0.8
    >>> Needleman_Wunsch('pescare', 'piscia', alphabet="aceiprs", S=M)
    ('pescare', 'pisci-a')
    """
    # S must be a square matrix matching the length of your alphabet
    if len(S) != len(alphabet) or len(S[0]) != len(alphabet):
        raise AssertionError("Unexpected dimensions of Similarity matrix, S."
                             " S must be an n by n square matrix, where n is the"
                             " length of your predefined alphabet")
    m, n = len(w1), len(w2)
    F = [[0 for i in range(n + 1)] for j in range(m + 1)]
    for i in range(m + 1):
        F[i][0] = d * i
    for i in range(n + 1):
        F[0][i] = d * i
    # F[i][j] is given by the recurrence relation
    # F[i][j] = max(F[i-1][j-1] + S(A[i], B[j]), F[i][j-1] + d, F[i-1][j] + d)
    # where S is the similarity matrix and d the gap penalty
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            F[i][j] = max(F[i - 1][j - 1] + S[alphabet.index(w1[i - 1])][alphabet.index(w2[j - 1])],
                          F[i - 1][j] + d,
                          F[i][j - 1] + d)
    A1, A2 = "", ""
    i, j = m, n
    # Since F[m][n] gives the maximum score, we can now reconstruct the alignment by
    # determining whether the optimal move at each step was a match, insertion, or deletion.
    while i > 0 or j > 0:
        if i > 0 and j > 0 and F[i][j] == F[i - 1][j - 1] + S[alphabet.index(w1[i - 1])][alphabet.index(w2[j - 1])]:
            A1 = w1[i - 1] + A1
            A2 = w2[j - 1] + A2
            i -= 1
            j -= 1
        elif i > 0 and F[i][j] == F[i - 1][j] + d:
            A1 = w1[i - 1] + A1
            A2 = "-" + A2
            i -= 1
        else:
            A1 = "-" + A1
            A2 = w2[j - 1] + A2
            j -= 1
    return (A1, A2)
python
{ "resource": "" }
q34176
CDLICorpus.toc
train
def toc(self):
        """
        Returns a rich list of texts in the catalog.
        """
        output = []
        for key in sorted(self.catalog.keys()):
            edition = self.catalog[key]['edition']
            length = len(self.catalog[key]['transliteration'])
            output.append(
                "Pnum: {key}, Edition: {edition}, length: {length} line(s)".format(
                    key=key, edition=edition, length=length))
        return output
python
{ "resource": "" }
q34177
englishToPun_number
train
def englishToPun_number(number):
    """Convert a number written with English (Arabic) digits to the equivalent
    Punjabi digits. The input is an int and the output is a str.
    """
    output = ''
    number = list(str(number))
    for digit in number:
        output += DIGITS[int(digit)]
    return output
python
{ "resource": "" }
q34178
is_indiclang_char
train
def is_indiclang_char(c, lang):
    """
    Applicable to Brahmi-derived Indic scripts
    """
    o = get_offset(c, lang)
    return (0 <= o <= 0x7f) or ord(c) == DANDA or ord(c) == DOUBLE_DANDA
python
{ "resource": "" }
q34179
is_velar
train
def is_velar(c, lang):
    """
    Is the character a velar?
    """
    o = get_offset(c, lang)
    return VELAR_RANGE[0] <= o <= VELAR_RANGE[1]
python
{ "resource": "" }
q34180
is_palatal
train
def is_palatal(c, lang):
    """
    Is the character a palatal?
    """
    o = get_offset(c, lang)
    return PALATAL_RANGE[0] <= o <= PALATAL_RANGE[1]
python
{ "resource": "" }
q34181
is_retroflex
train
def is_retroflex(c, lang):
    """
    Is the character a retroflex?
    """
    o = get_offset(c, lang)
    return RETROFLEX_RANGE[0] <= o <= RETROFLEX_RANGE[1]
python
{ "resource": "" }
q34182
is_dental
train
def is_dental(c, lang):
    """
    Is the character a dental?
    """
    o = get_offset(c, lang)
    return DENTAL_RANGE[0] <= o <= DENTAL_RANGE[1]
python
{ "resource": "" }
q34183
is_labial
train
def is_labial(c, lang):
    """
    Is the character a labial?
    """
    o = get_offset(c, lang)
    return LABIAL_RANGE[0] <= o <= LABIAL_RANGE[1]
python
{ "resource": "" }
q34184
Verse.to_phonetics
train
def to_phonetics(self):
        """Transcribe phonetics."""
        tr = Transcriber()
        self.transcribed_phonetics = [tr.transcribe(line) for line in self.text]
python
{ "resource": "" }
q34185
PositionedPhoneme
train
def PositionedPhoneme(phoneme,
                      word_initial=False, word_final=False,
                      syllable_initial=False, syllable_final=False,
                      env_start=False, env_end=False):
    '''
    A decorator for phonemes, used in applying rules over words.
    Returns a copy of the input phoneme, with additional attributes specifying
    whether the phoneme occurs at a word or syllable boundary, or its position
    in an environment.
    '''
    pos_phoneme = deepcopy(phoneme)
    pos_phoneme.word_initial = word_initial
    pos_phoneme.word_final = word_final
    pos_phoneme.syllable_initial = syllable_initial
    pos_phoneme.syllable_final = syllable_final
    pos_phoneme.env_start = env_start
    pos_phoneme.env_end = env_end
    return pos_phoneme
python
{ "resource": "" }
q34186
PhonemeDisjunction.matches
train
def matches(self, other):
        '''
        A disjunctive list matches a phoneme if any of its members matches the phoneme.
        If other is also a disjunctive list, any match between this list and the other
        returns true.
        '''
        if other is None:
            return False
        if isinstance(other, PhonemeDisjunction):
            return any([phoneme.matches(other) for phoneme in self])
        if isinstance(other, (list, PhonologicalFeature)):
            other = phoneme(other)
        return any([phoneme <= other for phoneme in self])
python
{ "resource": "" }
q34187
Orthophonology.transcribe
train
def transcribe(self, text, as_phonemes=False):
        '''
        Transcribes a text, which is first tokenized for words, then each word is
        transcribed. If as_phonemes is true, returns a list of lists of phoneme
        objects; else returns a string concatenation of the IPA symbols of the
        phonemes.
        '''
        phoneme_words = [self.transcribe_word(word) for word in self._tokenize(text)]
        if not as_phonemes:
            words = [''.join([phoneme.ipa for phoneme in word]) for word in phoneme_words]
            return ' '.join(words)
        else:
            return phoneme_words
python
{ "resource": "" }
q34188
Orthophonology.transcribe_to_modern
train
def transcribe_to_modern(self, text):
        '''
        A very first attempt at transcribing from IPA to some modern orthography.
        The method is intended to provide the student with clues to the pronunciation
        of old orthographies.
        '''
        # first transcribe letter by letter
        phoneme_words = self.transcribe(text, as_phonemes=True)
        words = [''.join([self.to_modern[0][phoneme.ipa] for phoneme in word])
                 for word in phoneme_words]
        modern_text = ' '.join(words)
        # then apply phonotactic fixes
        for regexp, replacement in self.to_modern[1]:
            modern_text = re.sub(regexp, replacement, modern_text)
        return modern_text
python
{ "resource": "" }
q34189
Orthophonology.voice
train
def voice(self, consonant):
        '''
        Voices a consonant, by searching the sound inventory for a consonant having
        the same features as the argument, but +voice.
        '''
        voiced_consonant = deepcopy(consonant)
        voiced_consonant[Voiced] = Voiced.pos
        return self._find_sound(voiced_consonant)
python
{ "resource": "" }
q34190
Orthophonology.aspirate
train
def aspirate(self, consonant):
        '''
        Aspirates a consonant, by searching the sound inventory for a consonant having
        the same features as the argument, but +aspirated.
        '''
        aspirated_consonant = deepcopy(consonant)
        aspirated_consonant[Aspirated] = Aspirated.pos
        return self._find_sound(aspirated_consonant)
python
{ "resource": "" }
q34191
BaseSentenceTokenizerTrainer.train_sentence_tokenizer
train
def train_sentence_tokenizer(self: object, text: str):
        """
        Train sentence tokenizer.
        """
        language_punkt_vars = PunktLanguageVars

        # Set punctuation
        if self.punctuation:
            if self.strict:
                language_punkt_vars.sent_end_chars = self.punctuation + self.strict_punctuation
            else:
                language_punkt_vars.sent_end_chars = self.punctuation

        trainer = PunktTrainer(text, language_punkt_vars)
        trainer.INCLUDE_ALL_COLLOCS = True
        trainer.INCLUDE_ABBREV_COLLOCS = True

        tokenizer = PunktSentenceTokenizer(trainer.get_params())

        # Set abbreviations
        if self.abbreviations:
            for abbreviation in self.abbreviations:
                tokenizer._params.abbrev_types.add(abbreviation)

        return tokenizer
python
{ "resource": "" }
q34192
FilteredPlaintextCorpusReader.docs
train
def docs(self, fileids=None) -> Generator[str, str, None]:
        """
        Returns the complete text of a text document, closing the document after
        we are done reading it and yielding it in a memory-safe fashion.
        """
        if not fileids:
            fileids = self.fileids()

        # Create a generator, loading one document into memory at a time.
        for path, encoding in self.abspaths(fileids, include_encoding=True):
            with codecs.open(path, 'r', encoding=encoding) as reader:
                if self.skip_keywords:
                    tmp_data = []
                    for line in reader:
                        skip = False
                        for keyword in self.skip_keywords:
                            if keyword in line:
                                skip = True
                        if not skip:
                            tmp_data.append(line)
                    yield ''.join(tmp_data)
                else:
                    yield reader.read()
python
{ "resource": "" }
q34193
FilteredPlaintextCorpusReader.sizes
train
def sizes(self, fileids=None) -> Generator[int, int, None]:
        """
        Yields the size on disk of each file in the corpus. This function is
        used to detect oddly large files in the corpus.
        """
        if not fileids:
            fileids = self.fileids()

        # Create a generator, getting every path and computing filesize
        for path in self.abspaths(fileids):
            yield os.path.getsize(path)
python
{ "resource": "" }
q34194
TesseraeCorpusReader.docs
train
def docs(self: object, fileids: str):
        """
        Returns the complete text of a .tess file, closing the document after
        we are done reading it and yielding it in a memory-safe fashion.
        """
        for path, encoding in self.abspaths(fileids, include_encoding=True):
            with codecs.open(path, 'r', encoding=encoding) as f:
                yield f.read()
python
{ "resource": "" }
q34195
TesseraeCorpusReader.lines
train
def lines(self: object, fileids: str, plaintext: bool = True):
        """
        Tokenizes documents in the corpus by line
        """
        for text in self.texts(fileids, plaintext):
            # Remove blank lines; flags must be passed by keyword, since the
            # fourth positional parameter of re.sub is count, not flags.
            text = re.sub(r'\n\s*\n', '\n', text, flags=re.MULTILINE)
            for line in text.split('\n'):
                yield line
python
{ "resource": "" }
q34196
TesseraeCorpusReader.sents
train
def sents(self: object, fileids: str):
        """
        Tokenizes documents in the corpus by sentence
        """
        for para in self.paras(fileids):
            for sent in sent_tokenize(para):
                yield sent
python
{ "resource": "" }
q34197
TesseraeCorpusReader.words
train
def words(self: object, fileids: str):
        """
        Tokenizes documents in the corpus by word
        """
        for sent in self.sents(fileids):
            for token in word_tokenize(sent):
                yield token
python
{ "resource": "" }
q34198
TesseraeCorpusReader.pos_tokenize
train
def pos_tokenize(self: object, fileids: str):
        """
        Segments, tokenizes, and POS tags a document in the corpus.
        """
        for para in self.paras(fileids):
            yield [
                self.pos_tagger(word_tokenize(sent))
                for sent in sent_tokenize(para)
            ]
python
{ "resource": "" }
q34199
TesseraeCorpusReader.describe
train
def describe(self: object, fileids: str = None):
        """
        Performs a single pass of the corpus and returns a dictionary with a
        variety of metrics concerning the state of the corpus.

        based on (Bengfort et al, 2018: 46)
        """
        started = time.time()

        # Structures to perform counting
        counts = FreqDist()
        tokens = FreqDist()

        # Perform a single pass over paragraphs, tokenize, and count
        for para in self.paras(fileids):
            counts['paras'] += 1

            for sent in para:
                counts['sents'] += 1

                # Include POS at some point
                for word in sent:
                    counts['words'] += 1
                    tokens[word] += 1

        # Compute the number of files in the corpus
        n_fileids = len(self.fileids())

        # Return data structure with information
        return {
            'files': n_fileids,
            'paras': counts['paras'],
            'sents': counts['sents'],
            'words': counts['words'],
            'vocab': len(tokens),
            'lexdiv': round((counts['words'] / len(tokens)), 3),
            'ppdoc': round((counts['paras'] / n_fileids), 3),
            'sppar': round((counts['sents'] / counts['paras']), 3),
            'secs': round((time.time() - started), 3),
        }
python
{ "resource": "" }