Dataset columns (name, type, and value statistics):

| Column | Type | Values |
|---|---|---|
| repository_name | string | lengths 7 – 55 |
| func_path_in_repository | string | lengths 4 – 223 |
| func_name | string | lengths 1 – 134 |
| whole_func_string | string | lengths 75 – 104k |
| language | string | 1 distinct value |
| func_code_string | string | lengths 75 – 104k |
| func_code_tokens | list | lengths 19 – 28.4k |
| func_documentation_string | string | lengths 1 – 46.9k |
| func_documentation_tokens | list | lengths 1 – 1.97k |
| split_name | string | 1 distinct value |
| func_code_url | string | lengths 87 – 315 |
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
ngrams
```python
def ngrams(string, n=3, punctuation=PUNCTUATION, continuous=False):
    """ Returns a list of n-grams (tuples of n successive words) from the given string.
        Alternatively, you can supply a Text or Sentence object.
        With continuous=False, n-grams will not run over sentence markers (i.e., .!?).
        Punctuation marks are stripped from words.
    """
    def strip_punctuation(s, punctuation=set(punctuation)):
        return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation]
    if n <= 0:
        return []
    if isinstance(string, basestring):
        s = [strip_punctuation(s.split(" ")) for s in tokenize(string)]
    if isinstance(string, Sentence):
        s = [strip_punctuation(string)]
    if isinstance(string, Text):
        s = [strip_punctuation(s) for s in string]
    if continuous:
        s = [sum(s, [])]
    g = []
    for s in s:
        #s = [None] + s + [None]
        g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)])
    return g
```
python
Returns a list of n-grams (tuples of n successive words) from the given string. Alternatively, you can supply a Text or Sentence object. With continuous=False, n-grams will not run over sentence markers (i.e., .!?). Punctuation marks are stripped from words.
[ "Returns", "a", "list", "of", "n", "-", "grams", "(", "tuples", "of", "n", "successive", "words", ")", "from", "the", "given", "string", ".", "Alternatively", "you", "can", "supply", "a", "Text", "or", "Sentence", "object", ".", "With", "continuous", "=", "False", "n", "-", "grams", "will", "not", "run", "over", "sentence", "markers", "(", "i", ".", "e", ".", ".", "!?", ")", ".", "Punctuation", "marks", "are", "stripped", "from", "words", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L79-L101
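The heart of ngrams() is the sliding-window comprehension at the end of the function; everything before it only normalizes the input into lists of words. A minimal standalone sketch of that step, using a plain token list instead of pattern's Text/Sentence objects:

```python
# Sliding window over a plain list of tokens (illustrative only; the real
# function also strips punctuation and respects sentence boundaries).
tokens = ["the", "cat", "sat", "on", "the", "mat"]
n = 3
trigrams = [tuple(tokens[i:i+n]) for i in range(len(tokens) - n + 1)]
print(trigrams)
# [('the', 'cat', 'sat'), ('cat', 'sat', 'on'), ('sat', 'on', 'the'), ('on', 'the', 'mat')]
```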
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
deflood
```python
def deflood(s, n=3):
    """ Returns the string with no more than n repeated characters, e.g.,
        deflood("NIIIICE!!", n=1) => "Nice!"
        deflood("nice.....", n=3) => "nice..."
    """
    if n == 0:
        return s[0:0]
    return re.sub(r"((.)\2{%s,})" % (n-1), lambda m: m.group(1)[0] * n, s)
```
python
Returns the string with no more than n repeated characters, e.g., deflood("NIIIICE!!", n=1) => "Nice!" deflood("nice.....", n=3) => "nice..."
[ "Returns", "the", "string", "with", "no", "more", "than", "n", "repeated", "characters", "e", ".", "g", ".", "deflood", "(", "NIIIICE!!", "n", "=", "1", ")", "=", ">", "Nice!", "deflood", "(", "nice", ".....", "n", "=", "3", ")", "=", ">", "nice", "..." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L103-L110
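deflood() depends only on re, so its behaviour can be checked with a copy of the same regular expression. A small sketch (deflood_sketch is a name made up for this illustration; note that case is preserved):

```python
import re

def deflood_sketch(s, n=3):
    # Collapse any run of a repeated character longer than n down to exactly
    # n repetitions, using the same regex as the function above.
    if n == 0:
        return s[0:0]
    return re.sub(r"((.)\2{%s,})" % (n - 1), lambda m: m.group(1)[0] * n, s)

print(deflood_sketch("NIIIICE!!", n=1))  # NICE!
print(deflood_sketch("nice.....", n=3))  # nice...
```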
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
pprint
```python
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
    """ Pretty-prints the output of Parser.parse() as a table with outlined columns.
        Alternatively, you can supply a tree.Text or tree.Sentence object.
    """
    if isinstance(string, basestring):
        print("\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)]))
    if isinstance(string, Text):
        print("\n\n".join([table(sentence, fill=column) for sentence in string]))
    if isinstance(string, Sentence):
        print(table(string, fill=column))
```
python
Pretty-prints the output of Parser.parse() as a table with outlined columns. Alternatively, you can supply a tree.Text or tree.Sentence object.
[ "Pretty", "-", "prints", "the", "output", "of", "Parser", ".", "parse", "()", "as", "a", "table", "with", "outlined", "columns", ".", "Alternatively", "you", "can", "supply", "a", "tree", ".", "Text", "or", "tree", ".", "Sentence", "object", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L112-L121
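In the upstream CLiPS Pattern library this helper is exposed as pattern.en.pprint() and is typically fed the output of pattern.en.parse(); whether the vendored copy in textblob_de is meant to be imported the same way is an assumption here. A hedged usage sketch:

```python
# Hedged usage sketch: assumes the upstream pattern.en package is installed,
# not the vendored textblob_de copy shown in the record above.
from pattern.en import parse, pprint

# parse() returns a tagged string; pprint() prints it as an aligned table
# with one row per token (word, part-of-speech tag, chunk, ...).
pprint(parse("The cat sat on the mat."))
```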
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_read
```python
def _read(path, encoding="utf-8", comment=";;;"):
    """ Returns an iterator over the lines in the file at the given path,
        stripping comments and decoding each line to Unicode.
    """
    if path:
        if isinstance(path, basestring) and os.path.exists(path):
            # From file path.
            if PY2:
                f = codecs.open(path, 'r', encoding='utf-8')
            else:
                f = open(path, 'r', encoding='utf-8')
        elif isinstance(path, basestring):
            # From string.
            f = path.splitlines()
        else:
            # From file or buffer.
            f = path
        for i, line in enumerate(f):
            line = line.strip(codecs.BOM_UTF8) if i == 0 and isinstance(line, binary_type) else line
            line = line.strip()
            line = decode_utf8(line, encoding)
            if not line or (comment and line.startswith(comment)):
                continue
            yield line
    return
```
python
Returns an iterator over the lines in the file at the given path, stripping comments and decoding each line to Unicode.
[ "Returns", "an", "iterator", "over", "the", "lines", "in", "the", "file", "at", "the", "given", "path", "strippping", "comments", "and", "decoding", "each", "line", "to", "Unicode", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L218-L242
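Only the "from string" branch of _read() can be shown without touching the file system or the module's decode_utf8 helper. A minimal sketch of the comment- and blank-line filtering it describes (read_lines_sketch is a hypothetical name):

```python
def read_lines_sketch(text, comment=";;;"):
    # Yield non-empty lines that are not ;;;-comments, mirroring the
    # filtering step of _read() for input given as a plain string.
    for line in text.splitlines():
        line = line.strip()
        if not line or (comment and line.startswith(comment)):
            continue
        yield line

data = ";;; a comment line\nword1 NN\n\nword2 VB"
print(list(read_lines_sketch(data)))  # ['word1 NN', 'word2 VB']
```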
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
penntreebank2universal
```python
def penntreebank2universal(token, tag):
    """ Returns a (token, tag)-tuple with a simplified universal part-of-speech tag.
    """
    if tag.startswith(("NNP-", "NNPS-")):
        return (token, "%s-%s" % (NOUN, tag.split("-")[-1]))
    if tag in ("NN", "NNS", "NNP", "NNPS", "NP"):
        return (token, NOUN)
    if tag in ("MD", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"):
        return (token, VERB)
    if tag in ("JJ", "JJR", "JJS"):
        return (token, ADJ)
    if tag in ("RB", "RBR", "RBS", "WRB"):
        return (token, ADV)
    if tag in ("PRP", "PRP$", "WP", "WP$"):
        return (token, PRON)
    if tag in ("DT", "PDT", "WDT", "EX"):
        return (token, DET)
    if tag in ("IN",):
        return (token, PREP)
    if tag in ("CD",):
        return (token, NUM)
    if tag in ("CC",):
        return (token, CONJ)
    if tag in ("UH",):
        return (token, INTJ)
    if tag in ("POS", "RP", "TO"):
        return (token, PRT)
    if tag in ("SYM", "LS", ".", "!", "?", ",", ":", "(", ")", "\"", "#", "$"):
        return (token, PUNC)
    return (token, X)
```
python
Returns a (token, tag)-tuple with a simplified universal part-of-speech tag.
[ "Returns", "a", "(", "token", "tag", ")", "-", "tuple", "with", "a", "simplified", "universal", "part", "-", "of", "-", "speech", "tag", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L891-L920
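The function above is a cascade of membership tests against the module's tag constants (NOUN, VERB, ...). A condensed standalone sketch of the same grouping idea, with made-up literal labels instead of those constants:

```python
def ptb2universal_sketch(token, tag):
    # Map a Penn Treebank tag to a coarse label; the labels here are
    # illustrative stand-ins, not the module's NOUN/VERB/... constants,
    # and only a few of the original groups are listed.
    groups = {
        ("NN", "NNS", "NNP", "NNPS", "NP"): "NOUN",
        ("MD", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"): "VERB",
        ("JJ", "JJR", "JJS"): "ADJ",
        ("RB", "RBR", "RBS", "WRB"): "ADV",
    }
    for ptb_tags, coarse in groups.items():
        if tag in ptb_tags:
            return (token, coarse)
    return (token, "X")  # everything not covered by this shortened table

print(ptb2universal_sketch("cats", "NNS"))  # ('cats', 'NOUN')
```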
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_tokens
```python
def find_tokens(string, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace=replacements, linebreak=r"\n{2,}"):
    """ Returns a list of sentences. Each sentence is a space-separated string of tokens (words).
        Handles common cases of abbreviations (e.g., etc., ...).
        Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
        Headings without an ending period are inferred by line breaks.
    """
    # Handle periods separately.
    punctuation = tuple(punctuation.replace(".", ""))
    # Handle replacements (contractions).
    for a, b in replace.items():
        string = re.sub(a, b, string)
    # Handle Unicode quotes.
    if isinstance(string, unicode):
        string = string.replace(u"“", u" “ ")
        string = string.replace(u"”", u" ” ")
        string = string.replace(u"‘", u" ‘ ")
        string = string.replace(u"’", u" ’ ")
    # Collapse whitespace.
    string = re.sub("\r\n", "\n", string)
    string = re.sub(linebreak, " %s " % EOS, string)
    string = re.sub(r"\s+", " ", string)
    tokens = []
    # Handle punctuation marks.
    for t in TOKEN.findall(string+" "):
        if len(t) > 0:
            tail = []
            while t.startswith(punctuation) and \
                  not t in replace:
                # Split leading punctuation.
                if t.startswith(punctuation):
                    tokens.append(t[0]); t=t[1:]
            while t.endswith(punctuation+(".",)) and \
                  not t in replace:
                # Split trailing punctuation.
                if t.endswith(punctuation):
                    tail.append(t[-1]); t=t[:-1]
                # Split ellipsis (...) before splitting period.
                if t.endswith("..."):
                    tail.append("..."); t=t[:-3].rstrip(".")
                # Split period (if not an abbreviation).
                if t.endswith("."):
                    if t in abbreviations or \
                       RE_ABBR1.match(t) is not None or \
                       RE_ABBR2.match(t) is not None or \
                       RE_ABBR3.match(t) is not None:
                        break
                    else:
                        tail.append(t[-1]); t=t[:-1]
            if t != "":
                tokens.append(t)
            tokens.extend(reversed(tail))
    # Handle sentence breaks (periods, quotes, parenthesis).
    sentences, i, j = [[]], 0, 0
    while j < len(tokens):
        if tokens[j] in ("...", ".", "!", "?", EOS):
            while j < len(tokens) \
                  and tokens[j] in ("'", "\"", u"”", u"’", "...", ".", "!", "?", ")", EOS):
                if tokens[j] in ("'", "\"") and sentences[-1].count(tokens[j]) % 2 == 0:
                    break  # Balanced quotes.
                j += 1
            sentences[-1].extend(t for t in tokens[i:j] if t != EOS)
            sentences.append([])
            i = j
        j += 1
    # Handle emoticons.
    sentences[-1].extend(tokens[i:j])
    sentences = (" ".join(s) for s in sentences if len(s) > 0)
    sentences = (RE_SARCASM.sub("(!)", s) for s in sentences)
    sentences = [RE_EMOTICONS.sub(
        lambda m: m.group(1).replace(" ", "") + m.group(2), s) for s in sentences]
    return sentences
```
python
Returns a list of sentences. Each sentence is a space-separated string of tokens (words). Handles common cases of abbreviations (e.g., etc., ...). Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence. Headings without an ending period are inferred by line breaks.
[ "Returns", "a", "list", "of", "sentences", ".", "Each", "sentence", "is", "a", "space", "-", "separated", "string", "of", "tokens", "(", "words", ")", ".", "Handles", "common", "cases", "of", "abbreviations", "(", "e", ".", "g", ".", "etc", ".", "...", ")", ".", "Punctuation", "marks", "are", "split", "from", "other", "words", ".", "Periods", "(", "or", "?!", ")", "mark", "the", "end", "of", "a", "sentence", ".", "Headings", "without", "an", "ending", "period", "are", "inferred", "by", "line", "breaks", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L976-L1046
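find_tokens() interleaves several rules (contractions, Unicode quotes, leading and trailing punctuation, abbreviations, sentence breaks, emoticons). The rule that is easiest to isolate is the abbreviation check before splitting a trailing period; a toy sketch with a stand-in abbreviation set:

```python
ABBREVIATIONS_SKETCH = {"etc.", "e.g.", "i.e."}  # stand-in for the real ABBREVIATIONS set

def split_period_sketch(token):
    # Split a trailing period into its own token unless the word is a
    # known abbreviation, mirroring one branch of find_tokens().
    if token.endswith(".") and token not in ABBREVIATIONS_SKETCH:
        return [token[:-1], "."]
    return [token]

print(split_period_sketch("mat."))  # ['mat', '.']
print(split_period_sketch("etc."))  # ['etc.']
```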
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_suffix_rules
```python
def _suffix_rules(token, tag="NN"):
    """ Default morphological tagging rules for English, based on word suffixes.
    """
    if isinstance(token, (list, tuple)):
        token, tag = token
    if token.endswith("ing"):
        tag = "VBG"
    if token.endswith("ly"):
        tag = "RB"
    if token.endswith("s") and not token.endswith(("is", "ous", "ss")):
        tag = "NNS"
    if token.endswith(("able", "al", "ful", "ible", "ient", "ish", "ive", "less", "tic", "ous")) or "-" in token:
        tag = "JJ"
    if token.endswith("ed"):
        tag = "VBN"
    if token.endswith(("ate", "ify", "ise", "ize")):
        tag = "VBP"
    return [token, tag]
```
python
Default morphological tagging rules for English, based on word suffixes.
[ "Default", "morphological", "tagging", "rules", "for", "English", "based", "on", "word", "suffixes", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1053-L1070
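_suffix_rules() is plain Python with no external dependencies, so, assuming the function from the record above is in scope, it can be exercised directly; the expected results below are traced by hand from its rules:

```python
print(_suffix_rules("running"))           # ['running', 'VBG']
print(_suffix_rules("quickly"))           # ['quickly', 'RB']
print(_suffix_rules(["harmless", "NN"]))  # ['harmless', 'JJ'] ("-ss" blocks NNS, "-less" gives JJ)
```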
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_tags
```python
def find_tags(tokens, lexicon={}, model=None, morphology=None, context=None, entities=None, default=("NN", "NNP", "CD"), language="en", map=None, **kwargs):
    """ Returns a list of [token, tag]-items for the given list of tokens:
        ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]]
        Words are tagged using the given lexicon of (word, tag)-items.
        Unknown words are tagged NN by default.
        Unknown words that start with a capital letter are tagged NNP (unless language="de").
        Unknown words that consist only of digits and punctuation marks are tagged CD.
        Unknown words are then improved with morphological rules.
        All words are improved with contextual rules.
        If a model is given, uses model for unknown words instead of morphology and context.
        If map is a function, it is applied to each (token, tag) after applying all rules.
    """
    tagged = []
    # Tag known words.
    for i, token in enumerate(tokens):
        tagged.append([token, lexicon.get(token, i == 0 and lexicon.get(token.lower()) or None)])
    # Tag unknown words.
    for i, (token, tag) in enumerate(tagged):
        prev, next = (None, None), (None, None)
        if i > 0:
            prev = tagged[i-1]
        if i < len(tagged) - 1:
            next = tagged[i+1]
        if tag is None or token in (model is not None and model.unknown or ()):
            # Use language model (i.e., SLP).
            if model is not None:
                tagged[i] = model.apply([token, None], prev, next)
            # Use NNP for capitalized words (except in German).
            elif token.istitle() and language != "de":
                tagged[i] = [token, default[1]]
            # Use CD for digits and numbers.
            elif CD.match(token) is not None:
                tagged[i] = [token, default[2]]
            # Use suffix rules (e.g., -ly = RB).
            elif morphology is not None:
                tagged[i] = morphology.apply([token, default[0]], prev, next)
            # Use suffix rules (English default).
            elif language == "en":
                tagged[i] = _suffix_rules([token, default[0]])
            # Use most frequent tag (NN).
            else:
                tagged[i] = [token, default[0]]
    # Tag words by context.
    if context is not None and model is None:
        tagged = context.apply(tagged)
    # Tag named entities.
    if entities is not None:
        tagged = entities.apply(tagged)
    # Map tags with a custom function.
    if map is not None:
        tagged = [list(map(token, tag)) or [token, default[0]] for token, tag in tagged]
    return tagged
```
python
Returns a list of [token, tag]-items for the given list of tokens: ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] Words are tagged using the given lexicon of (word, tag)-items. Unknown words are tagged NN by default. Unknown words that start with a capital letter are tagged NNP (unless language="de"). Unknown words that consist only of digits and punctuation marks are tagged CD. Unknown words are then improved with morphological rules. All words are improved with contextual rules. If a model is given, uses model for unknown words instead of morphology and context. If map is a function, it is applied to each (token, tag) after applying all rules.
[ "Returns", "a", "list", "of", "[", "token", "tag", "]", "-", "items", "for", "the", "given", "list", "of", "tokens", ":", "[", "The", "cat", "purs", "]", "=", ">", "[[", "The", "DT", "]", "[", "cat", "NN", "]", "[", "purs", "VB", "]]", "Words", "are", "tagged", "using", "the", "given", "lexicon", "of", "(", "word", "tag", ")", "-", "items", ".", "Unknown", "words", "are", "tagged", "NN", "by", "default", ".", "Unknown", "words", "that", "start", "with", "a", "capital", "letter", "are", "tagged", "NNP", "(", "unless", "language", "=", "de", ")", ".", "Unknown", "words", "that", "consist", "only", "of", "digits", "and", "punctuation", "marks", "are", "tagged", "CD", ".", "Unknown", "words", "are", "then", "improved", "with", "morphological", "rules", ".", "All", "words", "are", "improved", "with", "contextual", "rules", ".", "If", "a", "model", "is", "given", "uses", "model", "for", "unknown", "words", "instead", "of", "morphology", "and", "context", ".", "If", "map", "is", "a", "function", "it", "is", "applied", "to", "each", "(", "token", "tag", ")", "after", "applying", "all", "rules", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1072-L1123
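Most of find_tags() is fallback logic for unknown words. A toy sketch of the two simplest rules (lexicon lookup, then NNP for capitalized unknowns and NN otherwise), with a hypothetical three-word lexicon:

```python
def tag_sketch(tokens, lexicon={"the": "DT", "cat": "NN", "sat": "VBD"}):
    # Look each token up in the lexicon, falling back to a lowercased lookup
    # (the original only does this for the sentence-initial token); unknown
    # capitalized words become NNP, the rest NN.
    tagged = []
    for token in tokens:
        tag = lexicon.get(token, lexicon.get(token.lower()))
        if tag is None:
            tag = "NNP" if token.istitle() else "NN"
        tagged.append([token, tag])
    return tagged

print(tag_sketch(["The", "cat", "sat", "on", "Mars"]))
# [['The', 'DT'], ['cat', 'NN'], ['sat', 'VBD'], ['on', 'NN'], ['Mars', 'NNP']]
```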
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_chunks
```python
def find_chunks(tagged, language="en"):
    """ The input is a list of [token, tag]-items.
        The output is a list of [token, tag, chunk]-items:
        The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. =>
        The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O
    """
    chunked = [x for x in tagged]
    tags = "".join("%s%s" % (tag, SEPARATOR) for token, tag in tagged)
    # Use Germanic or Romance chunking rules according to given language.
    for tag, rule in CHUNKS[int(language in ("ca", "es", "pt", "fr", "it", "pt", "ro"))]:
        for m in rule.finditer(tags):
            # Find the start of chunks inside the tags-string.
            # Number of preceding separators = number of preceding tokens.
            i = m.start()
            j = tags[:i].count(SEPARATOR)
            n = m.group(0).count(SEPARATOR)
            for k in range(j, j+n):
                if len(chunked[k]) == 3:
                    continue
                if len(chunked[k]) < 3:
                    # A conjunction or comma cannot be start of a chunk.
                    if k == j and chunked[k][1] in ("CC", "CJ", ","):
                        j += 1
                    # Mark first token in chunk with B-.
                    elif k == j:
                        chunked[k].append("B-" + tag)
                    # Mark other tokens in chunk with I-.
                    else:
                        chunked[k].append("I-" + tag)
    # Mark chinks (tokens outside of a chunk) with O-.
    for chink in filter(lambda x: len(x) < 3, chunked):
        chink.append("O")
    # Post-processing corrections.
    for i, (word, tag, chunk) in enumerate(chunked):
        if tag.startswith("RB") and chunk == "B-NP":
            # "Perhaps you" => ADVP + NP
            # "Really nice work" => NP
            # "Really, nice work" => ADVP + O + NP
            if i < len(chunked)-1 and not chunked[i+1][1].startswith("JJ"):
                chunked[i+0][2] = "B-ADVP"
                chunked[i+1][2] = "B-NP"
            if i < len(chunked)-1 and chunked[i+1][1] in ("CC", "CJ", ","):
                chunked[i+1][2] = "O"
            if i < len(chunked)-2 and chunked[i+1][2] == "O":
                chunked[i+2][2] = "B-NP"
    return chunked
```
python
The input is a list of [token, tag]-items. The output is a list of [token, tag, chunk]-items: The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. => The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ":", "The", "/", "DT", "nice", "/", "JJ", "fish", "/", "NN", "is", "/", "VBZ", "dead", "/", "JJ", ".", "/", ".", "=", ">", "The", "/", "DT", "/", "B", "-", "NP", "nice", "/", "JJ", "/", "I", "-", "NP", "fish", "/", "NN", "/", "I", "-", "NP", "is", "/", "VBZ", "/", "B", "-", "VP", "dead", "/", "JJ", "/", "B", "-", "ADJP", ".", "/", ".", "/", "O" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1171-L1216
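The docstring example above, written out as Python data, shows the shape of the transformation: find_chunks() takes [token, tag] pairs and appends an IOB chunk label to each.

```python
# Input and expected output of the docstring example, as plain lists.
tagged = [["The", "DT"], ["nice", "JJ"], ["fish", "NN"],
          ["is", "VBZ"], ["dead", "JJ"], [".", "."]]
expected = [["The", "DT", "B-NP"], ["nice", "JJ", "I-NP"], ["fish", "NN", "I-NP"],
            ["is", "VBZ", "B-VP"], ["dead", "JJ", "B-ADJP"], [".", ".", "O"]]
```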
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_prepositions
```python
def find_prepositions(chunked):
    """ The input is a list of [token, tag, chunk]-items.
        The output is a list of [token, tag, chunk, preposition]-items.
        PP-chunks followed by NP-chunks make up a PNP-chunk.
    """
    # Tokens that are not part of a preposition just get the O-tag.
    for ch in chunked:
        ch.append("O")
    for i, chunk in enumerate(chunked):
        if chunk[2].endswith("PP") and chunk[-1] == "O":
            # Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
            if i < len(chunked)-1 and \
               (chunked[i+1][2].endswith(("NP", "PP")) or \
                chunked[i+1][1] in ("VBG", "VBN")):
                chunk[-1] = "B-PNP"
                pp = True
                for ch in chunked[i+1:]:
                    if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
                        break
                    if ch[2].endswith("PP") and pp:
                        ch[-1] = "I-PNP"
                    if not ch[2].endswith("PP"):
                        ch[-1] = "I-PNP"
                        pp = False
    return chunked
```
python
The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, preposition]-items. PP-chunks followed by NP-chunks make up a PNP-chunk.
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "preposition", "]", "-", "items", ".", "PP", "-", "chunks", "followed", "by", "NP", "-", "chunks", "make", "up", "a", "PNP", "-", "chunk", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1218-L1242
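A worked illustration of the PNP rule described above, with hand-written chunk tags: a PP chunk followed by an NP chunk is merged into one prepositional noun phrase span (the example sentence and its tags are made up for this sketch).

```python
# "cat on the mat": the PP chunk "on" plus the following NP becomes a PNP.
chunked = [["cat", "NN", "B-NP"], ["on", "IN", "B-PP"],
           ["the", "DT", "B-NP"], ["mat", "NN", "I-NP"]]
expected = [["cat", "NN", "B-NP", "O"], ["on", "IN", "B-PP", "B-PNP"],
            ["the", "DT", "B-NP", "I-PNP"], ["mat", "NN", "I-NP", "I-PNP"]]
```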
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_relations
```python
def find_relations(chunked):
    """ The input is a list of [token, tag, chunk]-items.
        The output is a list of [token, tag, chunk, relation]-items.
        A noun phrase preceding a verb phrase is perceived as sentence subject.
        A noun phrase following a verb phrase is perceived as sentence object.
    """
    tag = lambda token: token[2].split("-")[-1] # B-NP => NP
    # Group successive tokens with the same chunk-tag.
    chunks = []
    for token in chunked:
        if len(chunks) == 0 \
        or token[2].startswith("B-") \
        or tag(token) != tag(chunks[-1][-1]):
            chunks.append([])
        chunks[-1].append(token+["O"])
    # If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id).
    # If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id).
    # Chunks that are not part of a relation get an O-tag.
    id = 0
    for i, chunk in enumerate(chunks):
        if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i-1][-1]) == "NP":
            if chunk[-1][-1] == "O":
                id += 1
            for token in chunk:
                token[-1] = "VP-" + str(id)
            for token in chunks[i-1]:
                token[-1] += "*NP-SBJ-" + str(id)
                token[-1] = token[-1].lstrip("O-*")
        if tag(chunk[-1]) == "VP" and i < len(chunks)-1 and tag(chunks[i+1][-1]) == "NP":
            if chunk[-1][-1] == "O":
                id += 1
            for token in chunk:
                token[-1] = "VP-" + str(id)
            for token in chunks[i+1]:
                token[-1] = "*NP-OBJ-" + str(id)
                token[-1] = token[-1].lstrip("O-*")
    # This is more a proof-of-concept than useful in practice:
    # PP-LOC = be + in|at + the|my
    # PP-DIR = go + to|towards + the|my
    for i, chunk in enumerate(chunks):
        if 0 < i < len(chunks)-1 and len(chunk) == 1 and chunk[-1][-1] == "O":
            t0, t1, t2 = chunks[i-1][-1], chunks[i][0], chunks[i+1][0] # previous / current / next
            if tag(t1) == "PP" and t2[1] in ("DT", "PR", "PRP$"):
                if t0[0] in BE and t1[0] in ("in", "at"):
                    t1[-1] = "PP-LOC"
                if t0[0] in GO and t1[0] in ("to", "towards"):
                    t1[-1] = "PP-DIR"
    related = []; [related.extend(chunk) for chunk in chunks]
    return related
```
python
The input is a list of [token, tag, chunk]-items. The output is a list of [token, tag, chunk, relation]-items. A noun phrase preceding a verb phrase is perceived as sentence subject. A noun phrase following a verb phrase is perceived as sentence object.
[ "The", "input", "is", "a", "list", "of", "[", "token", "tag", "chunk", "]", "-", "items", ".", "The", "output", "is", "a", "list", "of", "[", "token", "tag", "chunk", "relation", "]", "-", "items", ".", "A", "noun", "phrase", "preceding", "a", "verb", "phrase", "is", "perceived", "as", "sentence", "subject", ".", "A", "noun", "phrase", "following", "a", "verb", "phrase", "is", "perceived", "as", "sentence", "object", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1250-L1296
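A minimal usage sketch for find_relations (not part of the record above; the input tokens are invented and the import path assumes the vendored module at the path shown is importable — upstream the same function lives in pattern.text):

    from textblob_de.ext._pattern.text import find_relations

    chunked = [["the", "DT", "B-NP"], ["cat", "NN", "I-NP"],
               ["purs", "VB", "B-VP"], ["milk", "NN", "B-NP"]]
    print(find_relations(chunked))
    # [['the', 'DT', 'B-NP', 'NP-SBJ-1'], ['cat', 'NN', 'I-NP', 'NP-SBJ-1'],
    #  ['purs', 'VB', 'B-VP', 'VP-1'], ['milk', 'NN', 'B-NP', 'NP-OBJ-1']]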
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
find_keywords
def find_keywords(string, parser, top=10, frequency={}, **kwargs): """ Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...} """ lemmata = kwargs.pop("lemmata", kwargs.pop("stem", True)) # Parse the string and extract noun phrases (NP). chunks = [] wordcount = 0 for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split(): for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"] if w[2] == "B-NP": chunks.append([w]) wordcount += 1 elif w[2] == "I-NP" and w[1][:3] == chunks[-1][-1][1][:3] == "NNP": chunks[-1][-1][+0] += " " + w[+0] # Collapse NNPs: "Ms Kitty". chunks[-1][-1][-1] += " " + w[-1] elif w[2] == "I-NP": chunks[-1].append(w) wordcount += 1 # Rate the nouns in noun phrases. m = {} for i, chunk in enumerate(chunks): head = True if parser.language not in ("ca", "es", "pt", "fr", "it", "pt", "ro"): # Head of "cat hair" => "hair". # Head of "poils de chat" => "poils". chunk = list(reversed(chunk)) for w in chunk: if w[1].startswith("NN"): if lemmata: k = w[-1] else: k = w[0].lower() if not k in m: m[k] = [0.0, set(), 1.0, 1.0, 1.0] # Higher score for chunks that appear more frequently. m[k][0] += 1 / float(wordcount) # Higher score for chunks that appear in more contexts (semantic centrality). m[k][1].add(" ".join(map(lambda x: x[0], chunk)).lower()) # Higher score for chunks at the start (25%) of the text. m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0 # Higher score for chunks not in a prepositional phrase. m[k][3] += 1 if w[3] == "O" else 0 # Higher score for chunk head. m[k][4] += 1 if head else 0 head = False # Rate tf-idf if a frequency dict is given. for k in m: if frequency: df = frequency.get(k, 0.0) df = max(df, 1e-10) df = log(1.0 / df, 2.71828) else: df = 1.0 m[k][0] = max(1e-10, m[k][0] * df) m[k][1] = 1 + float(len(m[k][1])) # Sort candidates alphabetically by total score # The harmonic mean will emphasize tf-idf score. hmean = lambda a: len(a) / sum(1.0 / x for x in a) m = [(hmean(m[k]), k) for k in m] m = sorted(m, key=lambda x: x[1]) m = sorted(m, key=lambda x: x[0], reverse=True) m = [k for score, k in m] return m[:top]
python
def find_keywords(string, parser, top=10, frequency={}, **kwargs): """ Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...} """ lemmata = kwargs.pop("lemmata", kwargs.pop("stem", True)) # Parse the string and extract noun phrases (NP). chunks = [] wordcount = 0 for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split(): for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"] if w[2] == "B-NP": chunks.append([w]) wordcount += 1 elif w[2] == "I-NP" and w[1][:3] == chunks[-1][-1][1][:3] == "NNP": chunks[-1][-1][+0] += " " + w[+0] # Collapse NNPs: "Ms Kitty". chunks[-1][-1][-1] += " " + w[-1] elif w[2] == "I-NP": chunks[-1].append(w) wordcount += 1 # Rate the nouns in noun phrases. m = {} for i, chunk in enumerate(chunks): head = True if parser.language not in ("ca", "es", "pt", "fr", "it", "pt", "ro"): # Head of "cat hair" => "hair". # Head of "poils de chat" => "poils". chunk = list(reversed(chunk)) for w in chunk: if w[1].startswith("NN"): if lemmata: k = w[-1] else: k = w[0].lower() if not k in m: m[k] = [0.0, set(), 1.0, 1.0, 1.0] # Higher score for chunks that appear more frequently. m[k][0] += 1 / float(wordcount) # Higher score for chunks that appear in more contexts (semantic centrality). m[k][1].add(" ".join(map(lambda x: x[0], chunk)).lower()) # Higher score for chunks at the start (25%) of the text. m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0 # Higher score for chunks not in a prepositional phrase. m[k][3] += 1 if w[3] == "O" else 0 # Higher score for chunk head. m[k][4] += 1 if head else 0 head = False # Rate tf-idf if a frequency dict is given. for k in m: if frequency: df = frequency.get(k, 0.0) df = max(df, 1e-10) df = log(1.0 / df, 2.71828) else: df = 1.0 m[k][0] = max(1e-10, m[k][0] * df) m[k][1] = 1 + float(len(m[k][1])) # Sort candidates alphabetically by total score # The harmonic mean will emphasize tf-idf score. hmean = lambda a: len(a) / sum(1.0 / x for x in a) m = [(hmean(m[k]), k) for k in m] m = sorted(m, key=lambda x: x[1]) m = sorted(m, key=lambda x: x[0], reverse=True) m = [k for score, k in m] return m[:top]
[ "def", "find_keywords", "(", "string", ",", "parser", ",", "top", "=", "10", ",", "frequency", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "lemmata", "=", "kwargs", ".", "pop", "(", "\"lemmata\"", ",", "kwargs", ".", "pop", "(", "\"stem\"", ",", "True", ")", ")", "# Parse the string and extract noun phrases (NP).", "chunks", "=", "[", "]", "wordcount", "=", "0", "for", "sentence", "in", "parser", ".", "parse", "(", "string", ",", "chunks", "=", "True", ",", "lemmata", "=", "lemmata", ")", ".", "split", "(", ")", ":", "for", "w", "in", "sentence", ":", "# [\"cats\", \"NNS\", \"I-NP\", \"O\", \"cat\"]", "if", "w", "[", "2", "]", "==", "\"B-NP\"", ":", "chunks", ".", "append", "(", "[", "w", "]", ")", "wordcount", "+=", "1", "elif", "w", "[", "2", "]", "==", "\"I-NP\"", "and", "w", "[", "1", "]", "[", ":", "3", "]", "==", "chunks", "[", "-", "1", "]", "[", "-", "1", "]", "[", "1", "]", "[", ":", "3", "]", "==", "\"NNP\"", ":", "chunks", "[", "-", "1", "]", "[", "-", "1", "]", "[", "+", "0", "]", "+=", "\" \"", "+", "w", "[", "+", "0", "]", "# Collapse NNPs: \"Ms Kitty\".", "chunks", "[", "-", "1", "]", "[", "-", "1", "]", "[", "-", "1", "]", "+=", "\" \"", "+", "w", "[", "-", "1", "]", "elif", "w", "[", "2", "]", "==", "\"I-NP\"", ":", "chunks", "[", "-", "1", "]", ".", "append", "(", "w", ")", "wordcount", "+=", "1", "# Rate the nouns in noun phrases.", "m", "=", "{", "}", "for", "i", ",", "chunk", "in", "enumerate", "(", "chunks", ")", ":", "head", "=", "True", "if", "parser", ".", "language", "not", "in", "(", "\"ca\"", ",", "\"es\"", ",", "\"pt\"", ",", "\"fr\"", ",", "\"it\"", ",", "\"pt\"", ",", "\"ro\"", ")", ":", "# Head of \"cat hair\" => \"hair\".", "# Head of \"poils de chat\" => \"poils\".", "chunk", "=", "list", "(", "reversed", "(", "chunk", ")", ")", "for", "w", "in", "chunk", ":", "if", "w", "[", "1", "]", ".", "startswith", "(", "\"NN\"", ")", ":", "if", "lemmata", ":", "k", "=", "w", "[", "-", "1", "]", "else", ":", "k", "=", "w", "[", "0", "]", ".", "lower", "(", ")", "if", "not", "k", "in", "m", ":", "m", "[", "k", "]", "=", "[", "0.0", ",", "set", "(", ")", ",", "1.0", ",", "1.0", ",", "1.0", "]", "# Higher score for chunks that appear more frequently.", "m", "[", "k", "]", "[", "0", "]", "+=", "1", "/", "float", "(", "wordcount", ")", "# Higher score for chunks that appear in more contexts (semantic centrality).", "m", "[", "k", "]", "[", "1", "]", ".", "add", "(", "\" \"", ".", "join", "(", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "chunk", ")", ")", ".", "lower", "(", ")", ")", "# Higher score for chunks at the start (25%) of the text.", "m", "[", "k", "]", "[", "2", "]", "+=", "1", "if", "float", "(", "i", ")", "/", "len", "(", "chunks", ")", "<=", "0.25", "else", "0", "# Higher score for chunks not in a prepositional phrase.", "m", "[", "k", "]", "[", "3", "]", "+=", "1", "if", "w", "[", "3", "]", "==", "\"O\"", "else", "0", "# Higher score for chunk head.", "m", "[", "k", "]", "[", "4", "]", "+=", "1", "if", "head", "else", "0", "head", "=", "False", "# Rate tf-idf if a frequency dict is given.", "for", "k", "in", "m", ":", "if", "frequency", ":", "df", "=", "frequency", ".", "get", "(", "k", ",", "0.0", ")", "df", "=", "max", "(", "df", ",", "1e-10", ")", "df", "=", "log", "(", "1.0", "/", "df", ",", "2.71828", ")", "else", ":", "df", "=", "1.0", "m", "[", "k", "]", "[", "0", "]", "=", "max", "(", "1e-10", ",", "m", "[", "k", "]", "[", "0", "]", "*", "df", ")", "m", "[", "k", "]", "[", "1", "]", "=", "1", "+", "float", "(", "len", "(", "m", 
"[", "k", "]", "[", "1", "]", ")", ")", "# Sort candidates alphabetically by total score", "# The harmonic mean will emphasize tf-idf score.", "hmean", "=", "lambda", "a", ":", "len", "(", "a", ")", "/", "sum", "(", "1.0", "/", "x", "for", "x", "in", "a", ")", "m", "=", "[", "(", "hmean", "(", "m", "[", "k", "]", ")", ",", "k", ")", "for", "k", "in", "m", "]", "m", "=", "sorted", "(", "m", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "m", "=", "sorted", "(", "m", ",", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ",", "reverse", "=", "True", ")", "m", "=", "[", "k", "for", "score", ",", "k", "in", "m", "]", "return", "m", "[", ":", "top", "]" ]
Returns a sorted list of keywords in the given string. The given parser (e.g., pattern.en.parser) is used to identify noun phrases. The given frequency dictionary can be a reference corpus, with relative document frequency (df, 0.0-1.0) for each lemma, e.g., {"the": 0.8, "cat": 0.1, ...}
[ "Returns", "a", "sorted", "list", "of", "keywords", "in", "the", "given", "string", ".", "The", "given", "parser", "(", "e", ".", "g", ".", "pattern", ".", "en", ".", "parser", ")", "is", "used", "to", "identify", "noun", "phrases", ".", "The", "given", "frequency", "dictionary", "can", "be", "a", "reference", "corpus", "with", "relative", "document", "frequency", "(", "df", "0", ".", "0", "-", "1", ".", "0", ")", "for", "each", "lemma", "e", ".", "g", ".", "{", "the", ":", "0", ".", "8", "cat", ":", "0", ".", "1", "...", "}" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1300-L1366
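The final scoring step in find_keywords combines several per-candidate scores with a harmonic mean, which is dominated by the smallest component (typically the tf-idf score). A self-contained sketch of that idea; the score vectors below are invented for illustration:

    # Harmonic mean of [tf-idf, contexts, position, not-in-PP, head] scores.
    hmean = lambda a: len(a) / sum(1.0 / x for x in a)

    scores = {
        "cat":  [0.30, 3.0, 2.0, 2.0, 2.0],   # frequent, appears in many contexts
        "mat":  [0.10, 2.0, 2.0, 2.0, 2.0],
        "milk": [0.05, 2.0, 1.0, 1.0, 2.0],
    }
    ranked = sorted(scores, key=lambda k: hmean(scores[k]), reverse=True)
    print(ranked)   # ['cat', 'mat', 'milk']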
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
tense_id
def tense_id(*args, **kwargs): """ Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated. """ # Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)): if len(args) == 1 and isinstance(args[0], (list, tuple)): if args[0] not in ((PRESENT, PARTICIPLE), (PAST, PARTICIPLE)): args = args[0] # No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise. if len(args) == 0 and len(kwargs) == 0: t = INFINITIVE else: t = PRESENT # Set default values. tense = kwargs.get("tense" , args[0] if len(args) > 0 else t) person = kwargs.get("person" , args[1] if len(args) > 1 else 3) or None number = kwargs.get("number" , args[2] if len(args) > 2 else SINGULAR) mood = kwargs.get("mood" , args[3] if len(args) > 3 else INDICATIVE) aspect = kwargs.get("aspect" , args[4] if len(args) > 4 else IMPERFECTIVE) negated = kwargs.get("negated", args[5] if len(args) > 5 else False) # Disambiguate wrong order of parameters. if mood in (PERFECTIVE, IMPERFECTIVE): mood, aspect = INDICATIVE, mood # Disambiguate INFINITIVE. # Disambiguate PARTICIPLE, IMPERFECT, PRETERITE. # These are often considered to be tenses but are in fact tense + aspect. if tense == INFINITIVE: person = number = mood = aspect = None; negated=False if tense in ((PRESENT, PARTICIPLE), PRESENT+PARTICIPLE, PARTICIPLE, GERUND): tense, aspect = PRESENT, PROGRESSIVE if tense in ((PAST, PARTICIPLE), PAST+PARTICIPLE): tense, aspect = PAST, PROGRESSIVE if tense == IMPERFECT: tense, aspect = PAST, IMPERFECTIVE if tense == PRETERITE: tense, aspect = PAST, PERFECTIVE if aspect in (CONTINUOUS, PARTICIPLE, GERUND): aspect = PROGRESSIVE if aspect == PROGRESSIVE: person = number = None # Disambiguate CONDITIONAL. # In Spanish, the conditional is regarded as an indicative tense. if tense == CONDITIONAL and mood == INDICATIVE: tense, mood = PRESENT, CONDITIONAL # Disambiguate aliases: "pl" => # (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False). return TENSES_ID.get(tense.lower(), TENSES_ID.get((tense, person, number, mood, aspect, negated)))
python
def tense_id(*args, **kwargs): """ Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated. """ # Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)): if len(args) == 1 and isinstance(args[0], (list, tuple)): if args[0] not in ((PRESENT, PARTICIPLE), (PAST, PARTICIPLE)): args = args[0] # No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise. if len(args) == 0 and len(kwargs) == 0: t = INFINITIVE else: t = PRESENT # Set default values. tense = kwargs.get("tense" , args[0] if len(args) > 0 else t) person = kwargs.get("person" , args[1] if len(args) > 1 else 3) or None number = kwargs.get("number" , args[2] if len(args) > 2 else SINGULAR) mood = kwargs.get("mood" , args[3] if len(args) > 3 else INDICATIVE) aspect = kwargs.get("aspect" , args[4] if len(args) > 4 else IMPERFECTIVE) negated = kwargs.get("negated", args[5] if len(args) > 5 else False) # Disambiguate wrong order of parameters. if mood in (PERFECTIVE, IMPERFECTIVE): mood, aspect = INDICATIVE, mood # Disambiguate INFINITIVE. # Disambiguate PARTICIPLE, IMPERFECT, PRETERITE. # These are often considered to be tenses but are in fact tense + aspect. if tense == INFINITIVE: person = number = mood = aspect = None; negated=False if tense in ((PRESENT, PARTICIPLE), PRESENT+PARTICIPLE, PARTICIPLE, GERUND): tense, aspect = PRESENT, PROGRESSIVE if tense in ((PAST, PARTICIPLE), PAST+PARTICIPLE): tense, aspect = PAST, PROGRESSIVE if tense == IMPERFECT: tense, aspect = PAST, IMPERFECTIVE if tense == PRETERITE: tense, aspect = PAST, PERFECTIVE if aspect in (CONTINUOUS, PARTICIPLE, GERUND): aspect = PROGRESSIVE if aspect == PROGRESSIVE: person = number = None # Disambiguate CONDITIONAL. # In Spanish, the conditional is regarded as an indicative tense. if tense == CONDITIONAL and mood == INDICATIVE: tense, mood = PRESENT, CONDITIONAL # Disambiguate aliases: "pl" => # (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False). return TENSES_ID.get(tense.lower(), TENSES_ID.get((tense, person, number, mood, aspect, negated)))
[ "def", "tense_id", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)):", "if", "len", "(", "args", ")", "==", "1", "and", "isinstance", "(", "args", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "args", "[", "0", "]", "not", "in", "(", "(", "PRESENT", ",", "PARTICIPLE", ")", ",", "(", "PAST", ",", "PARTICIPLE", ")", ")", ":", "args", "=", "args", "[", "0", "]", "# No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise.", "if", "len", "(", "args", ")", "==", "0", "and", "len", "(", "kwargs", ")", "==", "0", ":", "t", "=", "INFINITIVE", "else", ":", "t", "=", "PRESENT", "# Set default values.", "tense", "=", "kwargs", ".", "get", "(", "\"tense\"", ",", "args", "[", "0", "]", "if", "len", "(", "args", ")", ">", "0", "else", "t", ")", "person", "=", "kwargs", ".", "get", "(", "\"person\"", ",", "args", "[", "1", "]", "if", "len", "(", "args", ")", ">", "1", "else", "3", ")", "or", "None", "number", "=", "kwargs", ".", "get", "(", "\"number\"", ",", "args", "[", "2", "]", "if", "len", "(", "args", ")", ">", "2", "else", "SINGULAR", ")", "mood", "=", "kwargs", ".", "get", "(", "\"mood\"", ",", "args", "[", "3", "]", "if", "len", "(", "args", ")", ">", "3", "else", "INDICATIVE", ")", "aspect", "=", "kwargs", ".", "get", "(", "\"aspect\"", ",", "args", "[", "4", "]", "if", "len", "(", "args", ")", ">", "4", "else", "IMPERFECTIVE", ")", "negated", "=", "kwargs", ".", "get", "(", "\"negated\"", ",", "args", "[", "5", "]", "if", "len", "(", "args", ")", ">", "5", "else", "False", ")", "# Disambiguate wrong order of parameters.", "if", "mood", "in", "(", "PERFECTIVE", ",", "IMPERFECTIVE", ")", ":", "mood", ",", "aspect", "=", "INDICATIVE", ",", "mood", "# Disambiguate INFINITIVE.", "# Disambiguate PARTICIPLE, IMPERFECT, PRETERITE.", "# These are often considered to be tenses but are in fact tense + aspect.", "if", "tense", "==", "INFINITIVE", ":", "person", "=", "number", "=", "mood", "=", "aspect", "=", "None", "negated", "=", "False", "if", "tense", "in", "(", "(", "PRESENT", ",", "PARTICIPLE", ")", ",", "PRESENT", "+", "PARTICIPLE", ",", "PARTICIPLE", ",", "GERUND", ")", ":", "tense", ",", "aspect", "=", "PRESENT", ",", "PROGRESSIVE", "if", "tense", "in", "(", "(", "PAST", ",", "PARTICIPLE", ")", ",", "PAST", "+", "PARTICIPLE", ")", ":", "tense", ",", "aspect", "=", "PAST", ",", "PROGRESSIVE", "if", "tense", "==", "IMPERFECT", ":", "tense", ",", "aspect", "=", "PAST", ",", "IMPERFECTIVE", "if", "tense", "==", "PRETERITE", ":", "tense", ",", "aspect", "=", "PAST", ",", "PERFECTIVE", "if", "aspect", "in", "(", "CONTINUOUS", ",", "PARTICIPLE", ",", "GERUND", ")", ":", "aspect", "=", "PROGRESSIVE", "if", "aspect", "==", "PROGRESSIVE", ":", "person", "=", "number", "=", "None", "# Disambiguate CONDITIONAL.", "# In Spanish, the conditional is regarded as an indicative tense.", "if", "tense", "==", "CONDITIONAL", "and", "mood", "==", "INDICATIVE", ":", "tense", ",", "mood", "=", "PRESENT", ",", "CONDITIONAL", "# Disambiguate aliases: \"pl\" =>", "# (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False).", "return", "TENSES_ID", ".", "get", "(", "tense", ".", "lower", "(", ")", ",", "TENSES_ID", ".", "get", "(", "(", "tense", ",", "person", ",", "number", ",", "mood", ",", "aspect", ",", "negated", ")", ")", ")" ]
Returns the tense id for a given (tense, person, number, mood, aspect, negated). Aliases and compound forms (e.g., IMPERFECT) are disambiguated.
[ "Returns", "the", "tense", "id", "for", "a", "given", "(", "tense", "person", "number", "mood", "aspect", "negated", ")", ".", "Aliases", "and", "compound", "forms", "(", "e", ".", "g", ".", "IMPERFECT", ")", "are", "disambiguated", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1594-L1641
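A hypothetical usage sketch for tense_id; the constant names (PRESENT, PAST, SINGULAR, PLURAL) are the ones referenced in the function body above, and the import path assumes the vendored module is importable:

    from textblob_de.ext._pattern.text import tense_id, PRESENT, PAST, SINGULAR, PLURAL

    print(tense_id(PRESENT, 3, SINGULAR))                 # integer id looked up in TENSES_ID
    print(tense_id(tense=PAST, person=1, number=PLURAL))
    print(tense_id("pl"))                                 # string aliases resolve via TENSES_ID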
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
_multilingual
def _multilingual(function, *args, **kwargs): """ Returns the value from the function with the given name in the given language module. By default, language="en". """ return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)
python
def _multilingual(function, *args, **kwargs): """ Returns the value from the function with the given name in the given language module. By default, language="en". """ return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)
[ "def", "_multilingual", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "_module", "(", "kwargs", ".", "pop", "(", "\"language\"", ",", "\"en\"", ")", ")", ",", "function", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Returns the value from the function with the given name in the given language module. By default, language="en".
[ "Returns", "the", "value", "from", "the", "function", "with", "the", "given", "name", "in", "the", "given", "language", "module", ".", "By", "default", "language", "=", "en", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2188-L2192
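A self-contained sketch of the dispatch pattern behind _multilingual: look up a language module, then call the requested function on it. The _module stand-in and the pattern.<lang> module names are assumptions made for illustration:

    import importlib

    def _module(lang):
        # Stand-in: the real helper returns the pattern.<lang> module.
        return importlib.import_module("pattern.%s" % lang)

    def multilingual(function, *args, **kwargs):
        return getattr(_module(kwargs.pop("language", "en")), function)(*args, **kwargs)

    # multilingual("pluralize", "Katze", language="de")  ->  pattern.de.pluralize("Katze")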
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
language
def language(s): """ Returns a (language, confidence)-tuple for the given string. """ s = decode_utf8(s) s = set(w.strip(PUNCTUATION) for w in s.replace("'", "' ").split()) n = float(len(s) or 1) p = {} for xx in LANGUAGES: lexicon = _module(xx).__dict__["lexicon"] p[xx] = sum(1 for w in s if w in lexicon) / n return max(p.items(), key=lambda kv: (kv[1], int(kv[0] == "en")))
python
def language(s): """ Returns a (language, confidence)-tuple for the given string. """ s = decode_utf8(s) s = set(w.strip(PUNCTUATION) for w in s.replace("'", "' ").split()) n = float(len(s) or 1) p = {} for xx in LANGUAGES: lexicon = _module(xx).__dict__["lexicon"] p[xx] = sum(1 for w in s if w in lexicon) / n return max(p.items(), key=lambda kv: (kv[1], int(kv[0] == "en")))
[ "def", "language", "(", "s", ")", ":", "s", "=", "decode_utf8", "(", "s", ")", "s", "=", "set", "(", "w", ".", "strip", "(", "PUNCTUATION", ")", "for", "w", "in", "s", ".", "replace", "(", "\"'\"", ",", "\"' \"", ")", ".", "split", "(", ")", ")", "n", "=", "float", "(", "len", "(", "s", ")", "or", "1", ")", "p", "=", "{", "}", "for", "xx", "in", "LANGUAGES", ":", "lexicon", "=", "_module", "(", "xx", ")", ".", "__dict__", "[", "\"lexicon\"", "]", "p", "[", "xx", "]", "=", "sum", "(", "1", "for", "w", "in", "s", "if", "w", "in", "lexicon", ")", "/", "n", "return", "max", "(", "p", ".", "items", "(", ")", ",", "key", "=", "lambda", "kv", ":", "(", "kv", "[", "1", "]", ",", "int", "(", "kv", "[", "0", "]", "==", "\"en\"", ")", ")", ")" ]
Returns a (language, confidence)-tuple for the given string.
[ "Returns", "a", "(", "language", "confidence", ")", "-", "tuple", "for", "the", "given", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2194-L2204
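A minimal usage sketch for language(); the import path assumes the vendored module and its bundled language lexicons are available, and the confidence values in the comments are illustrative, not measured:

    from textblob_de.ext._pattern.text import language

    print(language("Der Hund schläft auf dem Sofa."))   # e.g. ('de', 0.7)
    print(language("The quick brown fox."))             # e.g. ('en', 0.75)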
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
lazylist._lazy
def _lazy(self, method, *args): """ If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it. """ if list.__len__(self) == 0: self.load() setattr(self, method, types.MethodType(getattr(list, method), self)) return getattr(list, method)(self, *args)
python
def _lazy(self, method, *args): """ If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it. """ if list.__len__(self) == 0: self.load() setattr(self, method, types.MethodType(getattr(list, method), self)) return getattr(list, method)(self, *args)
[ "def", "_lazy", "(", "self", ",", "method", ",", "*", "args", ")", ":", "if", "list", ".", "__len__", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "setattr", "(", "self", ",", "method", ",", "types", ".", "MethodType", "(", "getattr", "(", "list", ",", "method", ")", ",", "self", ")", ")", "return", "getattr", "(", "list", ",", "method", ")", "(", "self", ",", "*", "args", ")" ]
If the list is empty, calls lazylist.load(). Replaces lazylist.method() with list.method() and calls it.
[ "If", "the", "list", "is", "empty", "calls", "lazylist", ".", "load", "()", ".", "Replaces", "lazylist", ".", "method", "()", "with", "list", ".", "method", "()", "and", "calls", "it", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L179-L186
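A self-contained sketch of the same lazy-loading idea: the first list operation triggers load(), after which the plain list method is used directly. The load() body here is a stand-in for reading a data file:

    import types

    class lazylist(list):
        def load(self):
            # Stand-in for reading a lexicon or rule file on first access.
            list.extend(self, ["loaded-1", "loaded-2"])

        def _lazy(self, method, *args):
            if list.__len__(self) == 0:
                self.load()
                setattr(self, method, types.MethodType(getattr(list, method), self))
            return getattr(list, method)(self, *args)

        def __len__(self):
            return self._lazy("__len__")

        def __iter__(self):
            return self._lazy("__iter__")

    lst = lazylist()
    print(len(lst))     # triggers load() -> 2
    print(list(lst))    # ['loaded-1', 'loaded-2']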
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.train
def train(self, token, tag, previous=None, next=None): """ Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples. """ self._classifier.train(self._v(token, previous, next), type=tag)
python
def train(self, token, tag, previous=None, next=None): """ Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples. """ self._classifier.train(self._v(token, previous, next), type=tag)
[ "def", "train", "(", "self", ",", "token", ",", "tag", ",", "previous", "=", "None", ",", "next", "=", "None", ")", ":", "self", ".", "_classifier", ".", "train", "(", "self", ".", "_v", "(", "token", ",", "previous", ",", "next", ")", ",", "type", "=", "tag", ")" ]
Trains the model to predict the given tag for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Trains", "the", "model", "to", "predict", "the", "given", "tag", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L323-L327
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.classify
def classify(self, token, previous=None, next=None, **kwargs): """ Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples. """ return self._classifier.classify(self._v(token, previous, next), **kwargs)
python
def classify(self, token, previous=None, next=None, **kwargs): """ Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples. """ return self._classifier.classify(self._v(token, previous, next), **kwargs)
[ "def", "classify", "(", "self", ",", "token", ",", "previous", "=", "None", ",", "next", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_classifier", ".", "classify", "(", "self", ".", "_v", "(", "token", ",", "previous", ",", "next", ")", ",", "*", "*", "kwargs", ")" ]
Returns the predicted tag for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Returns", "the", "predicted", "tag", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L329-L333
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model.apply
def apply(self, token, previous=(None, None), next=(None, None)): """ Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples. """ return [token[0], self._classifier.classify(self._v(token[0], previous, next))]
python
def apply(self, token, previous=(None, None), next=(None, None)): """ Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples. """ return [token[0], self._classifier.classify(self._v(token[0], previous, next))]
[ "def", "apply", "(", "self", ",", "token", ",", "previous", "=", "(", "None", ",", "None", ")", ",", "next", "=", "(", "None", ",", "None", ")", ")", ":", "return", "[", "token", "[", "0", "]", ",", "self", ".", "_classifier", ".", "classify", "(", "self", ".", "_v", "(", "token", "[", "0", "]", ",", "previous", ",", "next", ")", ")", "]" ]
Returns a (token, tag)-tuple for the given token, in context of the given previous and next (token, tag)-tuples.
[ "Returns", "a", "(", "token", "tag", ")", "-", "tuple", "for", "the", "given", "token", "in", "context", "of", "the", "given", "previous", "and", "next", "(", "token", "tag", ")", "-", "tuples", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L335-L339
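The three Model methods above delegate to an underlying classifier. A self-contained sketch of that train/classify cycle, with a toy vote-counting classifier standing in for the real one; the feature dicts mimic the kind of sparse vectors produced by Model._v (shown next):

    from collections import Counter, defaultdict

    class ToyClassifier:
        def __init__(self):
            self.counts = defaultdict(Counter)

        def train(self, v, type=None):
            for f in v:
                self.counts[f][type] += 1

        def classify(self, v):
            votes = Counter()
            for f in v:
                votes.update(self.counts.get(f, {}))
            return votes.most_common(1)[0][0] if votes else None

    clf = ToyClassifier()
    clf.train({"x ats": 1, "-t DT": 1}, type="NNS")
    print(clf.classify({"x ats": 1}))   # 'NNS'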
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Model._v
def _v(self, token, previous=None, next=None): """ Returns a training vector for the given (word, tag)-tuple and its context. """ def f(v, s1, s2): if s2: v[s1 + " " + s2] = 1 p, n = previous, next p = ("", "") if not p else (p[0] or "", p[1] or "") n = ("", "") if not n else (n[0] or "", n[1] or "") v = {} f(v, "b", "b") # Bias. f(v, "h", token[0]) # Capitalization. f(v, "w", token[-6:] if token not in self.known or token in self.unknown else "") f(v, "x", token[-3:]) # Word suffix. f(v, "-x", p[0][-3:]) # Word suffix left. f(v, "+x", n[0][-3:]) # Word suffix right. f(v, "-t", p[1]) # Tag left. f(v, "-+", p[1] + n[1]) # Tag left + right. f(v, "+t", n[1]) # Tag right. return v
python
def _v(self, token, previous=None, next=None): """ Returns a training vector for the given (word, tag)-tuple and its context. """ def f(v, s1, s2): if s2: v[s1 + " " + s2] = 1 p, n = previous, next p = ("", "") if not p else (p[0] or "", p[1] or "") n = ("", "") if not n else (n[0] or "", n[1] or "") v = {} f(v, "b", "b") # Bias. f(v, "h", token[0]) # Capitalization. f(v, "w", token[-6:] if token not in self.known or token in self.unknown else "") f(v, "x", token[-3:]) # Word suffix. f(v, "-x", p[0][-3:]) # Word suffix left. f(v, "+x", n[0][-3:]) # Word suffix right. f(v, "-t", p[1]) # Tag left. f(v, "-+", p[1] + n[1]) # Tag left + right. f(v, "+t", n[1]) # Tag right. return v
[ "def", "_v", "(", "self", ",", "token", ",", "previous", "=", "None", ",", "next", "=", "None", ")", ":", "def", "f", "(", "v", ",", "s1", ",", "s2", ")", ":", "if", "s2", ":", "v", "[", "s1", "+", "\" \"", "+", "s2", "]", "=", "1", "p", ",", "n", "=", "previous", ",", "next", "p", "=", "(", "\"\"", ",", "\"\"", ")", "if", "not", "p", "else", "(", "p", "[", "0", "]", "or", "\"\"", ",", "p", "[", "1", "]", "or", "\"\"", ")", "n", "=", "(", "\"\"", ",", "\"\"", ")", "if", "not", "n", "else", "(", "n", "[", "0", "]", "or", "\"\"", ",", "n", "[", "1", "]", "or", "\"\"", ")", "v", "=", "{", "}", "f", "(", "v", ",", "\"b\"", ",", "\"b\"", ")", "# Bias.", "f", "(", "v", ",", "\"h\"", ",", "token", "[", "0", "]", ")", "# Capitalization.", "f", "(", "v", ",", "\"w\"", ",", "token", "[", "-", "6", ":", "]", "if", "token", "not", "in", "self", ".", "known", "or", "token", "in", "self", ".", "unknown", "else", "\"\"", ")", "f", "(", "v", ",", "\"x\"", ",", "token", "[", "-", "3", ":", "]", ")", "# Word suffix.", "f", "(", "v", ",", "\"-x\"", ",", "p", "[", "0", "]", "[", "-", "3", ":", "]", ")", "# Word suffix left.", "f", "(", "v", ",", "\"+x\"", ",", "n", "[", "0", "]", "[", "-", "3", ":", "]", ")", "# Word suffix right.", "f", "(", "v", ",", "\"-t\"", ",", "p", "[", "1", "]", ")", "# Tag left.", "f", "(", "v", ",", "\"-+\"", ",", "p", "[", "1", "]", "+", "n", "[", "1", "]", ")", "# Tag left + right.", "f", "(", "v", ",", "\"+t\"", ",", "n", "[", "1", "]", ")", "# Tag right.", "return", "v" ]
Returns a training vector for the given (word, tag)-tuple and its context.
[ "Returns", "a", "training", "vector", "for", "the", "given", "(", "word", "tag", ")", "-", "tuple", "and", "its", "context", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L341-L360
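A simplified, self-contained illustration of the kind of sparse feature dict _v builds; the keys follow the same prefix convention (bias, capitalization, suffixes, neighbouring tags), but this is not the exact trained feature set:

    def features(word, prev=("", ""), next=("", "")):
        v = {"b b": 1}                        # bias
        v["h " + word[:1]] = 1                # capitalization hint (first character)
        v["x " + word[-3:]] = 1               # word suffix
        if prev[0]: v["-x " + prev[0][-3:]] = 1   # suffix of the word to the left
        if next[0]: v["+x " + next[0][-3:]] = 1   # suffix of the word to the right
        if prev[1]: v["-t " + prev[1]] = 1        # tag to the left
        if next[1]: v["+t " + next[1]] = 1        # tag to the right
        return v

    print(features("Purring", prev=("cat", "NN"), next=("loudly", "RB")))
    # {'b b': 1, 'h P': 1, 'x ing': 1, '-x cat': 1, '+x dly': 1, '-t NN': 1, '+t RB': 1}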
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Morphology.apply
def apply(self, token, previous=(None, None), next=(None, None)): """ Applies lexical rules to the given token, which is a [word, tag] list. """ w = token[0] for r in self: if r[1] in self._cmd: # Rule = ly hassuf 2 RB x f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower() if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f") if f and token[1] != r[0]: continue if (cmd == "word" and x == w) \ or (cmd == "char" and x in w) \ or (cmd == "haspref" and w.startswith(x)) \ or (cmd == "hassuf" and w.endswith(x)) \ or (cmd == "addpref" and x + w in self.known) \ or (cmd == "addsuf" and w + x in self.known) \ or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \ or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \ or (cmd == "goodleft" and x == next[0]) \ or (cmd == "goodright" and x == previous[0]): token[1] = pos return token
python
def apply(self, token, previous=(None, None), next=(None, None)): """ Applies lexical rules to the given token, which is a [word, tag] list. """ w = token[0] for r in self: if r[1] in self._cmd: # Rule = ly hassuf 2 RB x f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower() if r[2] in self._cmd: # Rule = NN s fhassuf 1 NNS x f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f") if f and token[1] != r[0]: continue if (cmd == "word" and x == w) \ or (cmd == "char" and x in w) \ or (cmd == "haspref" and w.startswith(x)) \ or (cmd == "hassuf" and w.endswith(x)) \ or (cmd == "addpref" and x + w in self.known) \ or (cmd == "addsuf" and w + x in self.known) \ or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.known) \ or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.known) \ or (cmd == "goodleft" and x == next[0]) \ or (cmd == "goodright" and x == previous[0]): token[1] = pos return token
[ "def", "apply", "(", "self", ",", "token", ",", "previous", "=", "(", "None", ",", "None", ")", ",", "next", "=", "(", "None", ",", "None", ")", ")", ":", "w", "=", "token", "[", "0", "]", "for", "r", "in", "self", ":", "if", "r", "[", "1", "]", "in", "self", ".", "_cmd", ":", "# Rule = ly hassuf 2 RB x", "f", ",", "x", ",", "pos", ",", "cmd", "=", "bool", "(", "0", ")", ",", "r", "[", "0", "]", ",", "r", "[", "-", "2", "]", ",", "r", "[", "1", "]", ".", "lower", "(", ")", "if", "r", "[", "2", "]", "in", "self", ".", "_cmd", ":", "# Rule = NN s fhassuf 1 NNS x", "f", ",", "x", ",", "pos", ",", "cmd", "=", "bool", "(", "1", ")", ",", "r", "[", "1", "]", ",", "r", "[", "-", "2", "]", ",", "r", "[", "2", "]", ".", "lower", "(", ")", ".", "lstrip", "(", "\"f\"", ")", "if", "f", "and", "token", "[", "1", "]", "!=", "r", "[", "0", "]", ":", "continue", "if", "(", "cmd", "==", "\"word\"", "and", "x", "==", "w", ")", "or", "(", "cmd", "==", "\"char\"", "and", "x", "in", "w", ")", "or", "(", "cmd", "==", "\"haspref\"", "and", "w", ".", "startswith", "(", "x", ")", ")", "or", "(", "cmd", "==", "\"hassuf\"", "and", "w", ".", "endswith", "(", "x", ")", ")", "or", "(", "cmd", "==", "\"addpref\"", "and", "x", "+", "w", "in", "self", ".", "known", ")", "or", "(", "cmd", "==", "\"addsuf\"", "and", "w", "+", "x", "in", "self", ".", "known", ")", "or", "(", "cmd", "==", "\"deletepref\"", "and", "w", ".", "startswith", "(", "x", ")", "and", "w", "[", "len", "(", "x", ")", ":", "]", "in", "self", ".", "known", ")", "or", "(", "cmd", "==", "\"deletesuf\"", "and", "w", ".", "endswith", "(", "x", ")", "and", "w", "[", ":", "-", "len", "(", "x", ")", "]", "in", "self", ".", "known", ")", "or", "(", "cmd", "==", "\"goodleft\"", "and", "x", "==", "next", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"goodright\"", "and", "x", "==", "previous", "[", "0", "]", ")", ":", "token", "[", "1", "]", "=", "pos", "return", "token" ]
Applies lexical rules to the given token, which is a [word, tag] list.
[ "Applies", "lexical", "rules", "to", "the", "given", "token", "which", "is", "a", "[", "word", "tag", "]", "list", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L403-L425
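A self-contained sketch of how a single "hassuf" rule retags a token, mirroring the "ly hassuf 2 RB x" rule format quoted in the comments above (the rule list is invented for illustration):

    rules = [["ly", "hassuf", "2", "RB", "x"]]   # words ending in -ly become RB

    def apply_rules(token):
        w = token[0]
        for x, cmd, _, pos, _ in rules:
            if cmd == "hassuf" and w.endswith(x):
                token[1] = pos
        return token

    print(apply_rules(["quickly", "NN"]))   # ['quickly', 'RB']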
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Morphology.insert
def insert(self, i, tag, affix, cmd="hassuf", tagged=None): """ Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly"). """ if affix.startswith("-") and affix.endswith("-"): affix, cmd = affix[+1:-1], "char" if affix.startswith("-"): affix, cmd = affix[+1:-0], "hassuf" if affix.endswith("-"): affix, cmd = affix[+0:-1], "haspref" if tagged: r = [tagged, affix, "f"+cmd.lstrip("f"), tag, "x"] else: r = [affix, cmd.lstrip("f"), tag, "x"] lazylist.insert(self, i, r)
python
def insert(self, i, tag, affix, cmd="hassuf", tagged=None): """ Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly"). """ if affix.startswith("-") and affix.endswith("-"): affix, cmd = affix[+1:-1], "char" if affix.startswith("-"): affix, cmd = affix[+1:-0], "hassuf" if affix.endswith("-"): affix, cmd = affix[+0:-1], "haspref" if tagged: r = [tagged, affix, "f"+cmd.lstrip("f"), tag, "x"] else: r = [affix, cmd.lstrip("f"), tag, "x"] lazylist.insert(self, i, r)
[ "def", "insert", "(", "self", ",", "i", ",", "tag", ",", "affix", ",", "cmd", "=", "\"hassuf\"", ",", "tagged", "=", "None", ")", ":", "if", "affix", ".", "startswith", "(", "\"-\"", ")", "and", "affix", ".", "endswith", "(", "\"-\"", ")", ":", "affix", ",", "cmd", "=", "affix", "[", "+", "1", ":", "-", "1", "]", ",", "\"char\"", "if", "affix", ".", "startswith", "(", "\"-\"", ")", ":", "affix", ",", "cmd", "=", "affix", "[", "+", "1", ":", "-", "0", "]", ",", "\"hassuf\"", "if", "affix", ".", "endswith", "(", "\"-\"", ")", ":", "affix", ",", "cmd", "=", "affix", "[", "+", "0", ":", "-", "1", "]", ",", "\"haspref\"", "if", "tagged", ":", "r", "=", "[", "tagged", ",", "affix", ",", "\"f\"", "+", "cmd", ".", "lstrip", "(", "\"f\"", ")", ",", "tag", ",", "\"x\"", "]", "else", ":", "r", "=", "[", "affix", ",", "cmd", ".", "lstrip", "(", "\"f\"", ")", ",", "tag", ",", "\"x\"", "]", "lazylist", ".", "insert", "(", "self", ",", "i", ",", "r", ")" ]
Inserts a new rule that assigns the given tag to words with the given affix, e.g., Morphology.append("RB", "-ly").
[ "Inserts", "a", "new", "rule", "that", "assigns", "the", "given", "tag", "to", "words", "with", "the", "given", "affix", "e", ".", "g", ".", "Morphology", ".", "append", "(", "RB", "-", "ly", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L427-L441
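A small sketch of the affix shorthand handled above: a leading dash means suffix, a trailing dash means prefix, and dashes on both sides mean "contains". The helper name is hypothetical; the output matches the untagged rule layout [affix, cmd, tag, "x"]:

    def affix_to_rule(tag, affix):
        cmd = "hassuf"
        if affix.startswith("-") and affix.endswith("-"):
            affix, cmd = affix[1:-1], "char"
        elif affix.startswith("-"):
            affix, cmd = affix[1:], "hassuf"
        elif affix.endswith("-"):
            affix, cmd = affix[:-1], "haspref"
        return [affix, cmd, tag, "x"]

    print(affix_to_rule("RB", "-ly"))   # ['ly', 'hassuf', 'RB', 'x']
    print(affix_to_rule("JJ", "un-"))   # ['un', 'haspref', 'JJ', 'x']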
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Context.apply
def apply(self, tokens): """ Applies contextual rules to the given list of tokens, where each token is a [word, tag] list. """ o = [("STAART", "STAART")] * 3 # Empty delimiters for look ahead/back. t = o + tokens + o for i, token in enumerate(t): for r in self: if token[1] == "STAART": continue if token[1] != r[0] and r[0] != "*": continue cmd, x, y = r[2], r[3], r[4] if len(r) > 4 else "" cmd = cmd.lower() if (cmd == "prevtag" and x == t[i-1][1]) \ or (cmd == "nexttag" and x == t[i+1][1]) \ or (cmd == "prev2tag" and x == t[i-2][1]) \ or (cmd == "next2tag" and x == t[i+2][1]) \ or (cmd == "prev1or2tag" and x in (t[i-1][1], t[i-2][1])) \ or (cmd == "next1or2tag" and x in (t[i+1][1], t[i+2][1])) \ or (cmd == "prev1or2or3tag" and x in (t[i-1][1], t[i-2][1], t[i-3][1])) \ or (cmd == "next1or2or3tag" and x in (t[i+1][1], t[i+2][1], t[i+3][1])) \ or (cmd == "surroundtag" and x == t[i-1][1] and y == t[i+1][1]) \ or (cmd == "curwd" and x == t[i+0][0]) \ or (cmd == "prevwd" and x == t[i-1][0]) \ or (cmd == "nextwd" and x == t[i+1][0]) \ or (cmd == "prev1or2wd" and x in (t[i-1][0], t[i-2][0])) \ or (cmd == "next1or2wd" and x in (t[i+1][0], t[i+2][0])) \ or (cmd == "prevwdtag" and x == t[i-1][0] and y == t[i-1][1]) \ or (cmd == "nextwdtag" and x == t[i+1][0] and y == t[i+1][1]) \ or (cmd == "wdprevtag" and x == t[i-1][1] and y == t[i+0][0]) \ or (cmd == "wdnexttag" and x == t[i+0][0] and y == t[i+1][1]) \ or (cmd == "wdand2aft" and x == t[i+0][0] and y == t[i+2][0]) \ or (cmd == "wdand2tagbfr" and x == t[i-2][1] and y == t[i+0][0]) \ or (cmd == "wdand2tagaft" and x == t[i+0][0] and y == t[i+2][1]) \ or (cmd == "lbigram" and x == t[i-1][0] and y == t[i+0][0]) \ or (cmd == "rbigram" and x == t[i+0][0] and y == t[i+1][0]) \ or (cmd == "prevbigram" and x == t[i-2][1] and y == t[i-1][1]) \ or (cmd == "nextbigram" and x == t[i+1][1] and y == t[i+2][1]): t[i] = [t[i][0], r[1]] return t[len(o):-len(o)]
python
def apply(self, tokens): """ Applies contextual rules to the given list of tokens, where each token is a [word, tag] list. """ o = [("STAART", "STAART")] * 3 # Empty delimiters for look ahead/back. t = o + tokens + o for i, token in enumerate(t): for r in self: if token[1] == "STAART": continue if token[1] != r[0] and r[0] != "*": continue cmd, x, y = r[2], r[3], r[4] if len(r) > 4 else "" cmd = cmd.lower() if (cmd == "prevtag" and x == t[i-1][1]) \ or (cmd == "nexttag" and x == t[i+1][1]) \ or (cmd == "prev2tag" and x == t[i-2][1]) \ or (cmd == "next2tag" and x == t[i+2][1]) \ or (cmd == "prev1or2tag" and x in (t[i-1][1], t[i-2][1])) \ or (cmd == "next1or2tag" and x in (t[i+1][1], t[i+2][1])) \ or (cmd == "prev1or2or3tag" and x in (t[i-1][1], t[i-2][1], t[i-3][1])) \ or (cmd == "next1or2or3tag" and x in (t[i+1][1], t[i+2][1], t[i+3][1])) \ or (cmd == "surroundtag" and x == t[i-1][1] and y == t[i+1][1]) \ or (cmd == "curwd" and x == t[i+0][0]) \ or (cmd == "prevwd" and x == t[i-1][0]) \ or (cmd == "nextwd" and x == t[i+1][0]) \ or (cmd == "prev1or2wd" and x in (t[i-1][0], t[i-2][0])) \ or (cmd == "next1or2wd" and x in (t[i+1][0], t[i+2][0])) \ or (cmd == "prevwdtag" and x == t[i-1][0] and y == t[i-1][1]) \ or (cmd == "nextwdtag" and x == t[i+1][0] and y == t[i+1][1]) \ or (cmd == "wdprevtag" and x == t[i-1][1] and y == t[i+0][0]) \ or (cmd == "wdnexttag" and x == t[i+0][0] and y == t[i+1][1]) \ or (cmd == "wdand2aft" and x == t[i+0][0] and y == t[i+2][0]) \ or (cmd == "wdand2tagbfr" and x == t[i-2][1] and y == t[i+0][0]) \ or (cmd == "wdand2tagaft" and x == t[i+0][0] and y == t[i+2][1]) \ or (cmd == "lbigram" and x == t[i-1][0] and y == t[i+0][0]) \ or (cmd == "rbigram" and x == t[i+0][0] and y == t[i+1][0]) \ or (cmd == "prevbigram" and x == t[i-2][1] and y == t[i-1][1]) \ or (cmd == "nextbigram" and x == t[i+1][1] and y == t[i+2][1]): t[i] = [t[i][0], r[1]] return t[len(o):-len(o)]
[ "def", "apply", "(", "self", ",", "tokens", ")", ":", "o", "=", "[", "(", "\"STAART\"", ",", "\"STAART\"", ")", "]", "*", "3", "# Empty delimiters for look ahead/back.", "t", "=", "o", "+", "tokens", "+", "o", "for", "i", ",", "token", "in", "enumerate", "(", "t", ")", ":", "for", "r", "in", "self", ":", "if", "token", "[", "1", "]", "==", "\"STAART\"", ":", "continue", "if", "token", "[", "1", "]", "!=", "r", "[", "0", "]", "and", "r", "[", "0", "]", "!=", "\"*\"", ":", "continue", "cmd", ",", "x", ",", "y", "=", "r", "[", "2", "]", ",", "r", "[", "3", "]", ",", "r", "[", "4", "]", "if", "len", "(", "r", ")", ">", "4", "else", "\"\"", "cmd", "=", "cmd", ".", "lower", "(", ")", "if", "(", "cmd", "==", "\"prevtag\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"nexttag\"", "and", "x", "==", "t", "[", "i", "+", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"prev2tag\"", "and", "x", "==", "t", "[", "i", "-", "2", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"next2tag\"", "and", "x", "==", "t", "[", "i", "+", "2", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"prev1or2tag\"", "and", "x", "in", "(", "t", "[", "i", "-", "1", "]", "[", "1", "]", ",", "t", "[", "i", "-", "2", "]", "[", "1", "]", ")", ")", "or", "(", "cmd", "==", "\"next1or2tag\"", "and", "x", "in", "(", "t", "[", "i", "+", "1", "]", "[", "1", "]", ",", "t", "[", "i", "+", "2", "]", "[", "1", "]", ")", ")", "or", "(", "cmd", "==", "\"prev1or2or3tag\"", "and", "x", "in", "(", "t", "[", "i", "-", "1", "]", "[", "1", "]", ",", "t", "[", "i", "-", "2", "]", "[", "1", "]", ",", "t", "[", "i", "-", "3", "]", "[", "1", "]", ")", ")", "or", "(", "cmd", "==", "\"next1or2or3tag\"", "and", "x", "in", "(", "t", "[", "i", "+", "1", "]", "[", "1", "]", ",", "t", "[", "i", "+", "2", "]", "[", "1", "]", ",", "t", "[", "i", "+", "3", "]", "[", "1", "]", ")", ")", "or", "(", "cmd", "==", "\"surroundtag\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "1", "]", "and", "y", "==", "t", "[", "i", "+", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"curwd\"", "and", "x", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"prevwd\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"nextwd\"", "and", "x", "==", "t", "[", "i", "+", "1", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"prev1or2wd\"", "and", "x", "in", "(", "t", "[", "i", "-", "1", "]", "[", "0", "]", ",", "t", "[", "i", "-", "2", "]", "[", "0", "]", ")", ")", "or", "(", "cmd", "==", "\"next1or2wd\"", "and", "x", "in", "(", "t", "[", "i", "+", "1", "]", "[", "0", "]", ",", "t", "[", "i", "+", "2", "]", "[", "0", "]", ")", ")", "or", "(", "cmd", "==", "\"prevwdtag\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "-", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"nextwdtag\"", "and", "x", "==", "t", "[", "i", "+", "1", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "+", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"wdprevtag\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "1", "]", "and", "y", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"wdnexttag\"", "and", "x", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "+", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"wdand2aft\"", "and", "x", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", "and", "y", "==", "t", 
"[", "i", "+", "2", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"wdand2tagbfr\"", "and", "x", "==", "t", "[", "i", "-", "2", "]", "[", "1", "]", "and", "y", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"wdand2tagaft\"", "and", "x", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "+", "2", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"lbigram\"", "and", "x", "==", "t", "[", "i", "-", "1", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"rbigram\"", "and", "x", "==", "t", "[", "i", "+", "0", "]", "[", "0", "]", "and", "y", "==", "t", "[", "i", "+", "1", "]", "[", "0", "]", ")", "or", "(", "cmd", "==", "\"prevbigram\"", "and", "x", "==", "t", "[", "i", "-", "2", "]", "[", "1", "]", "and", "y", "==", "t", "[", "i", "-", "1", "]", "[", "1", "]", ")", "or", "(", "cmd", "==", "\"nextbigram\"", "and", "x", "==", "t", "[", "i", "+", "1", "]", "[", "1", "]", "and", "y", "==", "t", "[", "i", "+", "2", "]", "[", "1", "]", ")", ":", "t", "[", "i", "]", "=", "[", "t", "[", "i", "]", "[", "0", "]", ",", "r", "[", "1", "]", "]", "return", "t", "[", "len", "(", "o", ")", ":", "-", "len", "(", "o", ")", "]" ]
Applies contextual rules to the given list of tokens, where each token is a [word, tag] list.
[ "Applies", "contextual", "rules", "to", "the", "given", "list", "of", "tokens", "where", "each", "token", "is", "a", "[", "word", "tag", "]", "list", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L498-L538
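A self-contained sketch of one contextual rule in the same [tag1, tag2, cmd, x, y] layout used above ("retag VB as NN if the previous tag is DT"); the rule and tokens are invented for illustration and only the prevtag command is handled:

    rules = [["VB", "NN", "PREVTAG", "DT", ""]]

    def apply_context(tokens):
        padded = [("STAART", "STAART")] + [list(t) for t in tokens] + [("STAART", "STAART")]
        for i in range(1, len(padded) - 1):
            for tag1, tag2, cmd, x, _ in rules:
                if padded[i][1] == tag1 and cmd.lower() == "prevtag" and padded[i - 1][1] == x:
                    padded[i] = [padded[i][0], tag2]
        return padded[1:-1]

    print(apply_context([["the", "DT"], ["purr", "VB"]]))
    # [['the', 'DT'], ['purr', 'NN']]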
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Context.insert
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None): """ Inserts a new rule that updates words with tag1 to tag2, given constraints x and y, e.g., Context.append("TO < NN", "VB") """ if " < " in tag1 and not x and not y: tag1, x = tag1.split(" < "); cmd="prevtag" if " > " in tag1 and not x and not y: x, tag1 = tag1.split(" > "); cmd="nexttag" lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
python
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None): """ Inserts a new rule that updates words with tag1 to tag2, given constraints x and y, e.g., Context.append("TO < NN", "VB") """ if " < " in tag1 and not x and not y: tag1, x = tag1.split(" < "); cmd="prevtag" if " > " in tag1 and not x and not y: x, tag1 = tag1.split(" > "); cmd="nexttag" lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
[ "def", "insert", "(", "self", ",", "i", ",", "tag1", ",", "tag2", ",", "cmd", "=", "\"prevtag\"", ",", "x", "=", "None", ",", "y", "=", "None", ")", ":", "if", "\" < \"", "in", "tag1", "and", "not", "x", "and", "not", "y", ":", "tag1", ",", "x", "=", "tag1", ".", "split", "(", "\" < \"", ")", "cmd", "=", "\"prevtag\"", "if", "\" > \"", "in", "tag1", "and", "not", "x", "and", "not", "y", ":", "x", ",", "tag1", "=", "tag1", ".", "split", "(", "\" > \"", ")", "cmd", "=", "\"nexttag\"", "lazylist", ".", "insert", "(", "self", ",", "i", ",", "[", "tag1", ",", "tag2", ",", "cmd", ",", "x", "or", "\"\"", ",", "y", "or", "\"\"", "]", ")" ]
Inserts a new rule that updates words with tag1 to tag2, given constraints x and y, e.g., Context.append("TO < NN", "VB")
[ "Inserts", "a", "new", "rule", "that", "updates", "words", "with", "tag1", "to", "tag2", "given", "constraints", "x", "and", "y", "e", ".", "g", ".", "Context", ".", "append", "(", "TO", "<", "NN", "VB", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L540-L548
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Entities.apply
def apply(self, tokens): """ Applies the named entity recognizer to the given list of tokens, where each token is a [word, tag] list. """ # Note: we could also scan for patterns, e.g., # "my|his|her name is|was *" => NNP-PERS. i = 0 while i < len(tokens): w = tokens[i][0].lower() if RE_ENTITY1.match(w) \ or RE_ENTITY2.match(w) \ or RE_ENTITY3.match(w): tokens[i][1] = self.tag if w in self: for e in self[w]: # Look ahead to see if successive words match the named entity. e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self._cmd else (e, "") b = True for j, e in enumerate(e): if i + j >= len(tokens) or tokens[i+j][0].lower() != e: b = False; break if b: for token in tokens[i:i+j+1]: token[1] = token[1] if token[1].startswith(self.tag) else self.tag token[1] += tag i += j break i += 1 return tokens
python
def apply(self, tokens): """ Applies the named entity recognizer to the given list of tokens, where each token is a [word, tag] list. """ # Note: we could also scan for patterns, e.g., # "my|his|her name is|was *" => NNP-PERS. i = 0 while i < len(tokens): w = tokens[i][0].lower() if RE_ENTITY1.match(w) \ or RE_ENTITY2.match(w) \ or RE_ENTITY3.match(w): tokens[i][1] = self.tag if w in self: for e in self[w]: # Look ahead to see if successive words match the named entity. e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self._cmd else (e, "") b = True for j, e in enumerate(e): if i + j >= len(tokens) or tokens[i+j][0].lower() != e: b = False; break if b: for token in tokens[i:i+j+1]: token[1] = token[1] if token[1].startswith(self.tag) else self.tag token[1] += tag i += j break i += 1 return tokens
[ "def", "apply", "(", "self", ",", "tokens", ")", ":", "# Note: we could also scan for patterns, e.g.,", "# \"my|his|her name is|was *\" => NNP-PERS.", "i", "=", "0", "while", "i", "<", "len", "(", "tokens", ")", ":", "w", "=", "tokens", "[", "i", "]", "[", "0", "]", ".", "lower", "(", ")", "if", "RE_ENTITY1", ".", "match", "(", "w", ")", "or", "RE_ENTITY2", ".", "match", "(", "w", ")", "or", "RE_ENTITY3", ".", "match", "(", "w", ")", ":", "tokens", "[", "i", "]", "[", "1", "]", "=", "self", ".", "tag", "if", "w", "in", "self", ":", "for", "e", "in", "self", "[", "w", "]", ":", "# Look ahead to see if successive words match the named entity.", "e", ",", "tag", "=", "(", "e", "[", ":", "-", "1", "]", ",", "\"-\"", "+", "e", "[", "-", "1", "]", ".", "upper", "(", ")", ")", "if", "e", "[", "-", "1", "]", "in", "self", ".", "_cmd", "else", "(", "e", ",", "\"\"", ")", "b", "=", "True", "for", "j", ",", "e", "in", "enumerate", "(", "e", ")", ":", "if", "i", "+", "j", ">=", "len", "(", "tokens", ")", "or", "tokens", "[", "i", "+", "j", "]", "[", "0", "]", ".", "lower", "(", ")", "!=", "e", ":", "b", "=", "False", "break", "if", "b", ":", "for", "token", "in", "tokens", "[", "i", ":", "i", "+", "j", "+", "1", "]", ":", "token", "[", "1", "]", "=", "token", "[", "1", "]", "if", "token", "[", "1", "]", ".", "startswith", "(", "self", ".", "tag", ")", "else", "self", ".", "tag", "token", "[", "1", "]", "+=", "tag", "i", "+=", "j", "break", "i", "+=", "1", "return", "tokens" ]
Applies the named entity recognizer to the given list of tokens, where each token is a [word, tag] list.
[ "Applies", "the", "named", "entity", "recognizer", "to", "the", "given", "list", "of", "tokens", "where", "each", "token", "is", "a", "[", "word", "tag", "]", "list", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L588-L616
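A simplified, self-contained sketch of the multi-word look-ahead: the lexicon maps an entity's first word to the full lower-cased word sequence plus a class label. The entity, tags, and helper name are invented, and the regex-matched entities handled by the real method are omitted:

    entities = {"arthur": [["arthur", "dent", "pers"]]}

    def tag_entities(tokens, tag="NNP"):
        i = 0
        while i < len(tokens):
            w = tokens[i][0].lower()
            for entry in entities.get(w, []):
                words, label = entry[:-1], entry[-1]
                if [t[0].lower() for t in tokens[i:i + len(words)]] == words:
                    for t in tokens[i:i + len(words)]:
                        t[1] = tag + "-" + label.upper()
                    i += len(words) - 1
                    break
            i += 1
        return tokens

    print(tag_entities([["Arthur", "NNP"], ["Dent", "NNP"], ["slept", "VBD"]]))
    # [['Arthur', 'NNP-PERS'], ['Dent', 'NNP-PERS'], ['slept', 'VBD']]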
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Entities.append
def append(self, entity, name="pers"): """ Appends a named entity to the lexicon, e.g., Entities.append("Hooloovoo", "PERS") """ e = map(lambda s: s.lower(), entity.split(" ") + [name]) self.setdefault(e[0], []).append(e)
python
def append(self, entity, name="pers"): """ Appends a named entity to the lexicon, e.g., Entities.append("Hooloovoo", "PERS") """ e = map(lambda s: s.lower(), entity.split(" ") + [name]) self.setdefault(e[0], []).append(e)
[ "def", "append", "(", "self", ",", "entity", ",", "name", "=", "\"pers\"", ")", ":", "e", "=", "map", "(", "lambda", "s", ":", "s", ".", "lower", "(", ")", ",", "entity", ".", "split", "(", "\" \"", ")", "+", "[", "name", "]", ")", "self", ".", "setdefault", "(", "e", "[", "0", "]", ",", "[", "]", ")", ".", "append", "(", "e", ")" ]
Appends a named entity to the lexicon, e.g., Entities.append("Hooloovoo", "PERS")
[ "Appends", "a", "named", "entity", "to", "the", "lexicon", "e", ".", "g", ".", "Entities", ".", "append", "(", "Hooloovoo", "PERS", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L618-L623
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Parser.find_keywords
def find_keywords(self, string, **kwargs): """ Returns a sorted list of keywords in the given string. """ return find_keywords(string, parser = self, top = kwargs.pop("top", 10), frequency = kwargs.pop("frequency", {}), **kwargs )
python
def find_keywords(self, string, **kwargs): """ Returns a sorted list of keywords in the given string. """ return find_keywords(string, parser = self, top = kwargs.pop("top", 10), frequency = kwargs.pop("frequency", {}), **kwargs )
[ "def", "find_keywords", "(", "self", ",", "string", ",", "*", "*", "kwargs", ")", ":", "return", "find_keywords", "(", "string", ",", "parser", "=", "self", ",", "top", "=", "kwargs", ".", "pop", "(", "\"top\"", ",", "10", ")", ",", "frequency", "=", "kwargs", ".", "pop", "(", "\"frequency\"", ",", "{", "}", ")", ",", "*", "*", "kwargs", ")" ]
Returns a sorted list of keywords in the given string.
[ "Returns", "a", "sorted", "list", "of", "keywords", "in", "the", "given", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L710-L717
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Parser.find_tokens
def find_tokens(self, string, **kwargs): """ Returns a list of sentences from the given string. Punctuation marks are separated from each word by a space. """ # "The cat purs." => ["The cat purs ."] return find_tokens(string, punctuation = kwargs.get( "punctuation", PUNCTUATION), abbreviations = kwargs.get("abbreviations", ABBREVIATIONS), replace = kwargs.get( "replace", replacements), linebreak = r"\n{2,}")
python
def find_tokens(self, string, **kwargs): """ Returns a list of sentences from the given string. Punctuation marks are separated from each word by a space. """ # "The cat purs." => ["The cat purs ."] return find_tokens(string, punctuation = kwargs.get( "punctuation", PUNCTUATION), abbreviations = kwargs.get("abbreviations", ABBREVIATIONS), replace = kwargs.get( "replace", replacements), linebreak = r"\n{2,}")
[ "def", "find_tokens", "(", "self", ",", "string", ",", "*", "*", "kwargs", ")", ":", "# \"The cat purs.\" => [\"The cat purs .\"]", "return", "find_tokens", "(", "string", ",", "punctuation", "=", "kwargs", ".", "get", "(", "\"punctuation\"", ",", "PUNCTUATION", ")", ",", "abbreviations", "=", "kwargs", ".", "get", "(", "\"abbreviations\"", ",", "ABBREVIATIONS", ")", ",", "replace", "=", "kwargs", ".", "get", "(", "\"replace\"", ",", "replacements", ")", ",", "linebreak", "=", "r\"\\n{2,}\"", ")" ]
Returns a list of sentences from the given string. Punctuation marks are separated from each word by a space.
[ "Returns", "a", "list", "of", "sentences", "from", "the", "given", "string", ".", "Punctuation", "marks", "are", "separated", "from", "each", "word", "by", "a", "space", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L719-L728
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Parser.find_tags
def find_tags(self, tokens, **kwargs): """ Annotates the given list of tokens with part-of-speech tags. Returns a list of tokens, where each token is now a [word, tag]-list. """ # ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] return find_tags(tokens, lexicon = kwargs.get( "lexicon", self.lexicon or {}), model = kwargs.get( "model", self.model), morphology = kwargs.get("morphology", self.morphology), context = kwargs.get( "context", self.context), entities = kwargs.get( "entities", self.entities), language = kwargs.get( "language", self.language), default = kwargs.get( "default", self.default), map = kwargs.get( "map", None))
python
def find_tags(self, tokens, **kwargs): """ Annotates the given list of tokens with part-of-speech tags. Returns a list of tokens, where each token is now a [word, tag]-list. """ # ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] return find_tags(tokens, lexicon = kwargs.get( "lexicon", self.lexicon or {}), model = kwargs.get( "model", self.model), morphology = kwargs.get("morphology", self.morphology), context = kwargs.get( "context", self.context), entities = kwargs.get( "entities", self.entities), language = kwargs.get( "language", self.language), default = kwargs.get( "default", self.default), map = kwargs.get( "map", None))
[ "def", "find_tags", "(", "self", ",", "tokens", ",", "*", "*", "kwargs", ")", ":", "# [\"The\", \"cat\", \"purs\"] => [[\"The\", \"DT\"], [\"cat\", \"NN\"], [\"purs\", \"VB\"]]", "return", "find_tags", "(", "tokens", ",", "lexicon", "=", "kwargs", ".", "get", "(", "\"lexicon\"", ",", "self", ".", "lexicon", "or", "{", "}", ")", ",", "model", "=", "kwargs", ".", "get", "(", "\"model\"", ",", "self", ".", "model", ")", ",", "morphology", "=", "kwargs", ".", "get", "(", "\"morphology\"", ",", "self", ".", "morphology", ")", ",", "context", "=", "kwargs", ".", "get", "(", "\"context\"", ",", "self", ".", "context", ")", ",", "entities", "=", "kwargs", ".", "get", "(", "\"entities\"", ",", "self", ".", "entities", ")", ",", "language", "=", "kwargs", ".", "get", "(", "\"language\"", ",", "self", ".", "language", ")", ",", "default", "=", "kwargs", ".", "get", "(", "\"default\"", ",", "self", ".", "default", ")", ",", "map", "=", "kwargs", ".", "get", "(", "\"map\"", ",", "None", ")", ")" ]
Annotates the given list of tokens with part-of-speech tags. Returns a list of tokens, where each token is now a [word, tag]-list.
[ "Annotates", "the", "given", "list", "of", "tokens", "with", "part", "-", "of", "-", "speech", "tags", ".", "Returns", "a", "list", "of", "tokens", "where", "each", "token", "is", "now", "a", "[", "word", "tag", "]", "-", "list", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L730-L743
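A sketch of the tagging step (hypothetical `parser`; the concrete tags depend on the lexicon, model and language that were loaded):

    tagged = parser.find_tags(["The", "cat", "sat"])
    # => [["The", "DT"], ["cat", "NN"], ["sat", "VBD"]]  (tags illustrative)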
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Parser.find_chunks
def find_chunks(self, tokens, **kwargs): """ Annotates the given list of tokens with chunk tags. Several tags can be added, for example chunk + preposition tags. """ # [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] => # [["The", "DT", "B-NP"], ["cat", "NN", "I-NP"], ["purs", "VB", "B-VP"]] return find_prepositions( find_chunks(tokens, language = kwargs.get("language", self.language)))
python
def find_chunks(self, tokens, **kwargs): """ Annotates the given list of tokens with chunk tags. Several tags can be added, for example chunk + preposition tags. """ # [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] => # [["The", "DT", "B-NP"], ["cat", "NN", "I-NP"], ["purs", "VB", "B-VP"]] return find_prepositions( find_chunks(tokens, language = kwargs.get("language", self.language)))
[ "def", "find_chunks", "(", "self", ",", "tokens", ",", "*", "*", "kwargs", ")", ":", "# [[\"The\", \"DT\"], [\"cat\", \"NN\"], [\"purs\", \"VB\"]] =>", "# [[\"The\", \"DT\", \"B-NP\"], [\"cat\", \"NN\", \"I-NP\"], [\"purs\", \"VB\", \"B-VP\"]]", "return", "find_prepositions", "(", "find_chunks", "(", "tokens", ",", "language", "=", "kwargs", ".", "get", "(", "\"language\"", ",", "self", ".", "language", ")", ")", ")" ]
Annotates the given list of tokens with chunk tags. Several tags can be added, for example chunk + preposition tags.
[ "Annotates", "the", "given", "list", "of", "tokens", "with", "chunk", "tags", ".", "Several", "tags", "can", "be", "added", "for", "example", "chunk", "+", "preposition", "tags", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L745-L753
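A sketch of the chunking step on tagger output (hypothetical `parser`; the exact preposition column appended by find_prepositions is an assumption here):

    chunked = parser.find_chunks([["The", "DT"], ["cat", "NN"], ["sat", "VBD"]])
    # => roughly [["The", "DT", "B-NP", "O"], ["cat", "NN", "I-NP", "O"], ["sat", "VBD", "B-VP", "O"]]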
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Parser.parse
def parse(self, s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs): """ Takes a string (sentences) and returns a tagged Unicode string (TaggedString). Sentences in the output are separated by newlines. With tokenize=True, punctuation is split from words and sentences are separated by \n. With tags=True, part-of-speech tags are parsed (NN, VB, IN, ...). With chunks=True, phrase chunk tags are parsed (NP, VP, PP, PNP, ...). With relations=True, semantic role labels are parsed (SBJ, OBJ). With lemmata=True, word lemmata are parsed. Optional parameters are passed to the tokenizer, tagger, chunker, labeler and lemmatizer. """ # Tokenizer. if tokenize is True: s = self.find_tokens(s, **kwargs) if isinstance(s, (list, tuple)): s = [isinstance(s, basestring) and s.split(" ") or s for s in s] if isinstance(s, basestring): s = [s.split(" ") for s in s.split("\n")] # Unicode. for i in range(len(s)): for j in range(len(s[i])): if isinstance(s[i][j], str): s[i][j] = decode_string(s[i][j], encoding) # Tagger (required by chunker, labeler & lemmatizer). if tags or chunks or relations or lemmata: s[i] = self.find_tags(s[i], **kwargs) else: s[i] = [[w] for w in s[i]] # Chunker. if chunks or relations: s[i] = self.find_chunks(s[i], **kwargs) # Labeler. if relations: s[i] = self.find_labels(s[i], **kwargs) # Lemmatizer. if lemmata: s[i] = self.find_lemmata(s[i], **kwargs) # Slash-formatted tagged string. # With collapse=False (or split=True), returns raw list # (this output is not usable by tree.Text). if not kwargs.get("collapse", True) \ or kwargs.get("split", False): return s # Construct TaggedString.format. # (this output is usable by tree.Text). format = ["word"] if tags: format.append("part-of-speech") if chunks: format.extend(("chunk", "preposition")) if relations: format.append("relation") if lemmata: format.append("lemma") # Collapse raw list. # Sentences are separated by newlines, tokens by spaces, tags by slashes. # Slashes in words are encoded with &slash; for i in range(len(s)): for j in range(len(s[i])): s[i][j][0] = s[i][j][0].replace("/", "&slash;") s[i][j] = "/".join(s[i][j]) s[i] = " ".join(s[i]) s = "\n".join(s) s = TaggedString(s, format, language=kwargs.get("language", self.language)) return s
python
def parse(self, s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs): """ Takes a string (sentences) and returns a tagged Unicode string (TaggedString). Sentences in the output are separated by newlines. With tokenize=True, punctuation is split from words and sentences are separated by \n. With tags=True, part-of-speech tags are parsed (NN, VB, IN, ...). With chunks=True, phrase chunk tags are parsed (NP, VP, PP, PNP, ...). With relations=True, semantic role labels are parsed (SBJ, OBJ). With lemmata=True, word lemmata are parsed. Optional parameters are passed to the tokenizer, tagger, chunker, labeler and lemmatizer. """ # Tokenizer. if tokenize is True: s = self.find_tokens(s, **kwargs) if isinstance(s, (list, tuple)): s = [isinstance(s, basestring) and s.split(" ") or s for s in s] if isinstance(s, basestring): s = [s.split(" ") for s in s.split("\n")] # Unicode. for i in range(len(s)): for j in range(len(s[i])): if isinstance(s[i][j], str): s[i][j] = decode_string(s[i][j], encoding) # Tagger (required by chunker, labeler & lemmatizer). if tags or chunks or relations or lemmata: s[i] = self.find_tags(s[i], **kwargs) else: s[i] = [[w] for w in s[i]] # Chunker. if chunks or relations: s[i] = self.find_chunks(s[i], **kwargs) # Labeler. if relations: s[i] = self.find_labels(s[i], **kwargs) # Lemmatizer. if lemmata: s[i] = self.find_lemmata(s[i], **kwargs) # Slash-formatted tagged string. # With collapse=False (or split=True), returns raw list # (this output is not usable by tree.Text). if not kwargs.get("collapse", True) \ or kwargs.get("split", False): return s # Construct TaggedString.format. # (this output is usable by tree.Text). format = ["word"] if tags: format.append("part-of-speech") if chunks: format.extend(("chunk", "preposition")) if relations: format.append("relation") if lemmata: format.append("lemma") # Collapse raw list. # Sentences are separated by newlines, tokens by spaces, tags by slashes. # Slashes in words are encoded with &slash; for i in range(len(s)): for j in range(len(s[i])): s[i][j][0] = s[i][j][0].replace("/", "&slash;") s[i][j] = "/".join(s[i][j]) s[i] = " ".join(s[i]) s = "\n".join(s) s = TaggedString(s, format, language=kwargs.get("language", self.language)) return s
[ "def", "parse", "(", "self", ",", "s", ",", "tokenize", "=", "True", ",", "tags", "=", "True", ",", "chunks", "=", "True", ",", "relations", "=", "False", ",", "lemmata", "=", "False", ",", "encoding", "=", "\"utf-8\"", ",", "*", "*", "kwargs", ")", ":", "# Tokenizer.", "if", "tokenize", "is", "True", ":", "s", "=", "self", ".", "find_tokens", "(", "s", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "s", ",", "(", "list", ",", "tuple", ")", ")", ":", "s", "=", "[", "isinstance", "(", "s", ",", "basestring", ")", "and", "s", ".", "split", "(", "\" \"", ")", "or", "s", "for", "s", "in", "s", "]", "if", "isinstance", "(", "s", ",", "basestring", ")", ":", "s", "=", "[", "s", ".", "split", "(", "\" \"", ")", "for", "s", "in", "s", ".", "split", "(", "\"\\n\"", ")", "]", "# Unicode.", "for", "i", "in", "range", "(", "len", "(", "s", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "s", "[", "i", "]", ")", ")", ":", "if", "isinstance", "(", "s", "[", "i", "]", "[", "j", "]", ",", "str", ")", ":", "s", "[", "i", "]", "[", "j", "]", "=", "decode_string", "(", "s", "[", "i", "]", "[", "j", "]", ",", "encoding", ")", "# Tagger (required by chunker, labeler & lemmatizer).", "if", "tags", "or", "chunks", "or", "relations", "or", "lemmata", ":", "s", "[", "i", "]", "=", "self", ".", "find_tags", "(", "s", "[", "i", "]", ",", "*", "*", "kwargs", ")", "else", ":", "s", "[", "i", "]", "=", "[", "[", "w", "]", "for", "w", "in", "s", "[", "i", "]", "]", "# Chunker.", "if", "chunks", "or", "relations", ":", "s", "[", "i", "]", "=", "self", ".", "find_chunks", "(", "s", "[", "i", "]", ",", "*", "*", "kwargs", ")", "# Labeler.", "if", "relations", ":", "s", "[", "i", "]", "=", "self", ".", "find_labels", "(", "s", "[", "i", "]", ",", "*", "*", "kwargs", ")", "# Lemmatizer.", "if", "lemmata", ":", "s", "[", "i", "]", "=", "self", ".", "find_lemmata", "(", "s", "[", "i", "]", ",", "*", "*", "kwargs", ")", "# Slash-formatted tagged string.", "# With collapse=False (or split=True), returns raw list", "# (this output is not usable by tree.Text).", "if", "not", "kwargs", ".", "get", "(", "\"collapse\"", ",", "True", ")", "or", "kwargs", ".", "get", "(", "\"split\"", ",", "False", ")", ":", "return", "s", "# Construct TaggedString.format.", "# (this output is usable by tree.Text).", "format", "=", "[", "\"word\"", "]", "if", "tags", ":", "format", ".", "append", "(", "\"part-of-speech\"", ")", "if", "chunks", ":", "format", ".", "extend", "(", "(", "\"chunk\"", ",", "\"preposition\"", ")", ")", "if", "relations", ":", "format", ".", "append", "(", "\"relation\"", ")", "if", "lemmata", ":", "format", ".", "append", "(", "\"lemma\"", ")", "# Collapse raw list.", "# Sentences are separated by newlines, tokens by spaces, tags by slashes.", "# Slashes in words are encoded with &slash;", "for", "i", "in", "range", "(", "len", "(", "s", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "s", "[", "i", "]", ")", ")", ":", "s", "[", "i", "]", "[", "j", "]", "[", "0", "]", "=", "s", "[", "i", "]", "[", "j", "]", "[", "0", "]", ".", "replace", "(", "\"/\"", ",", "\"&slash;\"", ")", "s", "[", "i", "]", "[", "j", "]", "=", "\"/\"", ".", "join", "(", "s", "[", "i", "]", "[", "j", "]", ")", "s", "[", "i", "]", "=", "\" \"", ".", "join", "(", "s", "[", "i", "]", ")", "s", "=", "\"\\n\"", ".", "join", "(", "s", ")", "s", "=", "TaggedString", "(", "s", ",", "format", ",", "language", "=", "kwargs", ".", "get", "(", "\"language\"", ",", "self", ".", "language", ")", ")", "return", "s" ]
Takes a string (sentences) and returns a tagged Unicode string (TaggedString). Sentences in the output are separated by newlines. With tokenize=True, punctuation is split from words and sentences are separated by \n. With tags=True, part-of-speech tags are parsed (NN, VB, IN, ...). With chunks=True, phrase chunk tags are parsed (NP, VP, PP, PNP, ...). With relations=True, semantic role labels are parsed (SBJ, OBJ). With lemmata=True, word lemmata are parsed. Optional parameters are passed to the tokenizer, tagger, chunker, labeler and lemmatizer.
[ "Takes", "a", "string", "(", "sentences", ")", "and", "returns", "a", "tagged", "Unicode", "string", "(", "TaggedString", ")", ".", "Sentences", "in", "the", "output", "are", "separated", "by", "newlines", ".", "With", "tokenize", "=", "True", "punctuation", "is", "split", "from", "words", "and", "sentences", "are", "separated", "by", "\\", "n", ".", "With", "tags", "=", "True", "part", "-", "of", "-", "speech", "tags", "are", "parsed", "(", "NN", "VB", "IN", "...", ")", ".", "With", "chunks", "=", "True", "phrase", "chunk", "tags", "are", "parsed", "(", "NP", "VP", "PP", "PNP", "...", ")", ".", "With", "relations", "=", "True", "semantic", "role", "labels", "are", "parsed", "(", "SBJ", "OBJ", ")", ".", "With", "lemmata", "=", "True", "word", "lemmata", "are", "parsed", ".", "Optional", "parameters", "are", "passed", "to", "the", "tokenizer", "tagger", "chunker", "labeler", "and", "lemmatizer", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L770-L834
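A sketch of the full pipeline and its slash-formatted TaggedString output (hypothetical `parser`; tags and chunks are illustrative, the column order follows the format list built above):

    s = parser.parse("The cat sat on the mat.")
    print(s)
    # e.g. "The/DT/B-NP/O cat/NN/I-NP/O sat/VBD/B-VP/O on/IN/B-PP/B-PNP the/DT/B-NP/I-PNP mat/NN/I-NP/I-PNP ././O/O"
    # word / part-of-speech / chunk / preposition; tokens separated by spaces, sentences by newlines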
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
TaggedString.split
def split(self, sep=TOKENS): """ Returns a list of sentences, where each sentence is a list of tokens, where each token is a list of word + tags. """ if sep != TOKENS: return unicode.split(self, sep) if len(self) == 0: return [] return [[[x.replace("&slash;", "/") for x in token.split("/")] for token in sentence.split(" ")] for sentence in unicode.split(self, "\n")]
python
def split(self, sep=TOKENS): """ Returns a list of sentences, where each sentence is a list of tokens, where each token is a list of word + tags. """ if sep != TOKENS: return unicode.split(self, sep) if len(self) == 0: return [] return [[[x.replace("&slash;", "/") for x in token.split("/")] for token in sentence.split(" ")] for sentence in unicode.split(self, "\n")]
[ "def", "split", "(", "self", ",", "sep", "=", "TOKENS", ")", ":", "if", "sep", "!=", "TOKENS", ":", "return", "unicode", ".", "split", "(", "self", ",", "sep", ")", "if", "len", "(", "self", ")", "==", "0", ":", "return", "[", "]", "return", "[", "[", "[", "x", ".", "replace", "(", "\"&slash;\"", ",", "\"/\"", ")", "for", "x", "in", "token", ".", "split", "(", "\"/\"", ")", "]", "for", "token", "in", "sentence", ".", "split", "(", "\" \"", ")", "]", "for", "sentence", "in", "unicode", ".", "split", "(", "self", ",", "\"\\n\"", ")", "]" ]
Returns a list of sentences, where each sentence is a list of tokens, where each token is a list of word + tags.
[ "Returns", "a", "list", "of", "sentences", "where", "each", "sentence", "is", "a", "list", "of", "tokens", "where", "each", "token", "is", "a", "list", "of", "word", "+", "tags", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L861-L871
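Round-tripping the tagged string back into nested lists (assuming `s` is a TaggedString with the default word/part-of-speech/chunk/preposition columns, e.g. from the parse() sketch above):

    for sentence in s.split():
        for word, pos, chunk, pnp in sentence:
            print(word, pos, chunk, pnp)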
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.TENSES
def TENSES(self): """ Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple. """ a = set(TENSES[id] for id in self._format) a = a.union(set(TENSES[id] for id in self._default.keys())) a = a.union(set(TENSES[id] for id in self._default.values())) a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation. return a
python
def TENSES(self): """ Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple. """ a = set(TENSES[id] for id in self._format) a = a.union(set(TENSES[id] for id in self._default.keys())) a = a.union(set(TENSES[id] for id in self._default.values())) a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation. return a
[ "def", "TENSES", "(", "self", ")", ":", "a", "=", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_format", ")", "a", "=", "a", ".", "union", "(", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_default", ".", "keys", "(", ")", ")", ")", "a", "=", "a", ".", "union", "(", "set", "(", "TENSES", "[", "id", "]", "for", "id", "in", "self", ".", "_default", ".", "values", "(", ")", ")", ")", "a", "=", "sorted", "(", "x", "[", ":", "-", "2", "]", "for", "x", "in", "a", "if", "x", "[", "-", "2", "]", "is", "False", ")", "# Exclude negation.", "return", "a" ]
Yields a list of tenses for this language, excluding negations. Each tense is a (tense, person, number, mood, aspect)-tuple.
[ "Yields", "a", "list", "of", "tenses", "for", "this", "language", "excluding", "negations", ".", "Each", "tense", "is", "a", "(", "tense", "person", "number", "mood", "aspect", ")", "-", "tuple", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1699-L1707
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.lemma
def lemma(self, verb, parse=True): """ Returns the infinitive form of the given verb, or None. """ if dict.__len__(self) == 0: self.load() if verb.lower() in self._inverse: return self._inverse[verb.lower()] if verb in self._inverse: return self._inverse[verb] if parse is True: # rule-based return self.find_lemma(verb)
python
def lemma(self, verb, parse=True): """ Returns the infinitive form of the given verb, or None. """ if dict.__len__(self) == 0: self.load() if verb.lower() in self._inverse: return self._inverse[verb.lower()] if verb in self._inverse: return self._inverse[verb] if parse is True: # rule-based return self.find_lemma(verb)
[ "def", "lemma", "(", "self", ",", "verb", ",", "parse", "=", "True", ")", ":", "if", "dict", ".", "__len__", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "if", "verb", ".", "lower", "(", ")", "in", "self", ".", "_inverse", ":", "return", "self", ".", "_inverse", "[", "verb", ".", "lower", "(", ")", "]", "if", "verb", "in", "self", ".", "_inverse", ":", "return", "self", ".", "_inverse", "[", "verb", "]", "if", "parse", "is", "True", ":", "# rule-based", "return", "self", ".", "find_lemma", "(", "verb", ")" ]
Returns the infinitive form of the given verb, or None.
[ "Returns", "the", "infinitive", "form", "of", "the", "given", "verb", "or", "None", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1709-L1719
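A sketch against a hypothetical, already loaded language-specific Verbs instance (`verbs`); the lexicon is consulted first, then the rule-based find_lemma():

    verbs.lemma("was")                    # => "be" (illustrative, English-style lexicon)
    verbs.lemma("zorbled", parse=False)   # => None: unknown form and rule-based parsing disabled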
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.lexeme
def lexeme(self, verb, parse=True): """ Returns a list of all possible inflections of the given verb. """ a = [] b = self.lemma(verb, parse=parse) if b in self: a = [x for x in self[b] if x != ""] elif parse is True: # rule-based a = self.find_lexeme(b) u = []; [u.append(x) for x in a if x not in u] return u
python
def lexeme(self, verb, parse=True): """ Returns a list of all possible inflections of the given verb. """ a = [] b = self.lemma(verb, parse=parse) if b in self: a = [x for x in self[b] if x != ""] elif parse is True: # rule-based a = self.find_lexeme(b) u = []; [u.append(x) for x in a if x not in u] return u
[ "def", "lexeme", "(", "self", ",", "verb", ",", "parse", "=", "True", ")", ":", "a", "=", "[", "]", "b", "=", "self", ".", "lemma", "(", "verb", ",", "parse", "=", "parse", ")", "if", "b", "in", "self", ":", "a", "=", "[", "x", "for", "x", "in", "self", "[", "b", "]", "if", "x", "!=", "\"\"", "]", "elif", "parse", "is", "True", ":", "# rule-based", "a", "=", "self", ".", "find_lexeme", "(", "b", ")", "u", "=", "[", "]", "[", "u", ".", "append", "(", "x", ")", "for", "x", "in", "a", "if", "x", "not", "in", "u", "]", "return", "u" ]
Returns a list of all possible inflections of the given verb.
[ "Returns", "a", "list", "of", "all", "possible", "inflections", "of", "the", "given", "verb", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1721-L1731
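Listing all known inflections of a verb with the same hypothetical `verbs` instance (duplicates are removed, order preserved):

    verbs.lexeme("be")   # => e.g. ["be", "am", "are", "is", "was", "were", "been", ...]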
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.conjugate
def conjugate(self, verb, *args, **kwargs): """ Inflects the verb and returns the given tense (or None). For example: be - Verbs.conjugate("is", INFINITVE) => be - Verbs.conjugate("be", PRESENT, 1, SINGULAR) => I am - Verbs.conjugate("be", PRESENT, 1, PLURAL) => we are - Verbs.conjugate("be", PAST, 3, SINGULAR) => he was - Verbs.conjugate("be", PAST, aspect=PROGRESSIVE) => been - Verbs.conjugate("be", PAST, person=1, negated=True) => I wasn't """ id = tense_id(*args, **kwargs) # Get the tense index from the format description (or a default). i1 = self._format.get(id) i2 = self._format.get(self._default.get(id)) i3 = self._format.get(self._default.get(self._default.get(id))) b = self.lemma(verb, parse=kwargs.get("parse", True)) v = [] # Get the verb lexeme and return the requested index. if b in self: v = self[b] for i in (i1, i2, i3): if i is not None and 0 <= i < len(v) and v[i]: return v[i] if kwargs.get("parse", True) is True: # rule-based v = self.find_lexeme(b) for i in (i1, i2, i3): if i is not None and 0 <= i < len(v) and v[i]: return v[i]
python
def conjugate(self, verb, *args, **kwargs): """ Inflects the verb and returns the given tense (or None). For example: be - Verbs.conjugate("is", INFINITVE) => be - Verbs.conjugate("be", PRESENT, 1, SINGULAR) => I am - Verbs.conjugate("be", PRESENT, 1, PLURAL) => we are - Verbs.conjugate("be", PAST, 3, SINGULAR) => he was - Verbs.conjugate("be", PAST, aspect=PROGRESSIVE) => been - Verbs.conjugate("be", PAST, person=1, negated=True) => I wasn't """ id = tense_id(*args, **kwargs) # Get the tense index from the format description (or a default). i1 = self._format.get(id) i2 = self._format.get(self._default.get(id)) i3 = self._format.get(self._default.get(self._default.get(id))) b = self.lemma(verb, parse=kwargs.get("parse", True)) v = [] # Get the verb lexeme and return the requested index. if b in self: v = self[b] for i in (i1, i2, i3): if i is not None and 0 <= i < len(v) and v[i]: return v[i] if kwargs.get("parse", True) is True: # rule-based v = self.find_lexeme(b) for i in (i1, i2, i3): if i is not None and 0 <= i < len(v) and v[i]: return v[i]
[ "def", "conjugate", "(", "self", ",", "verb", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "id", "=", "tense_id", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# Get the tense index from the format description (or a default).", "i1", "=", "self", ".", "_format", ".", "get", "(", "id", ")", "i2", "=", "self", ".", "_format", ".", "get", "(", "self", ".", "_default", ".", "get", "(", "id", ")", ")", "i3", "=", "self", ".", "_format", ".", "get", "(", "self", ".", "_default", ".", "get", "(", "self", ".", "_default", ".", "get", "(", "id", ")", ")", ")", "b", "=", "self", ".", "lemma", "(", "verb", ",", "parse", "=", "kwargs", ".", "get", "(", "\"parse\"", ",", "True", ")", ")", "v", "=", "[", "]", "# Get the verb lexeme and return the requested index.", "if", "b", "in", "self", ":", "v", "=", "self", "[", "b", "]", "for", "i", "in", "(", "i1", ",", "i2", ",", "i3", ")", ":", "if", "i", "is", "not", "None", "and", "0", "<=", "i", "<", "len", "(", "v", ")", "and", "v", "[", "i", "]", ":", "return", "v", "[", "i", "]", "if", "kwargs", ".", "get", "(", "\"parse\"", ",", "True", ")", "is", "True", ":", "# rule-based", "v", "=", "self", ".", "find_lexeme", "(", "b", ")", "for", "i", "in", "(", "i1", ",", "i2", ",", "i3", ")", ":", "if", "i", "is", "not", "None", "and", "0", "<=", "i", "<", "len", "(", "v", ")", "and", "v", "[", "i", "]", ":", "return", "v", "[", "i", "]" ]
Inflects the verb and returns the given tense (or None). For example: be - Verbs.conjugate("is", INFINITVE) => be - Verbs.conjugate("be", PRESENT, 1, SINGULAR) => I am - Verbs.conjugate("be", PRESENT, 1, PLURAL) => we are - Verbs.conjugate("be", PAST, 3, SINGULAR) => he was - Verbs.conjugate("be", PAST, aspect=PROGRESSIVE) => been - Verbs.conjugate("be", PAST, person=1, negated=True) => I wasn't
[ "Inflects", "the", "verb", "and", "returns", "the", "given", "tense", "(", "or", "None", ")", ".", "For", "example", ":", "be", "-", "Verbs", ".", "conjugate", "(", "is", "INFINITVE", ")", "=", ">", "be", "-", "Verbs", ".", "conjugate", "(", "be", "PRESENT", "1", "SINGULAR", ")", "=", ">", "I", "am", "-", "Verbs", ".", "conjugate", "(", "be", "PRESENT", "1", "PLURAL", ")", "=", ">", "we", "are", "-", "Verbs", ".", "conjugate", "(", "be", "PAST", "3", "SINGULAR", ")", "=", ">", "he", "was", "-", "Verbs", ".", "conjugate", "(", "be", "PAST", "aspect", "=", "PROGRESSIVE", ")", "=", ">", "been", "-", "Verbs", ".", "conjugate", "(", "be", "PAST", "person", "=", "1", "negated", "=", "True", ")", "=", ">", "I", "wasn", "t" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1733-L1760
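The docstring's examples, written out as a sketch (tense constants such as INFINITIVE, PRESENT, PAST, SINGULAR and PLURAL come from this module; `verbs` is the hypothetical instance from above):

    verbs.conjugate("be", PRESENT, 1, SINGULAR)   # => "am"
    verbs.conjugate("be", PAST, 3, SINGULAR)      # => "was"
    verbs.conjugate("googled", INFINITIVE)        # unknown verbs fall back to the rule-based lexeme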
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Verbs.tenses
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ verb = verb.lower() a = set() b = self.lemma(verb, parse=parse) v = [] if b in self: v = self[b] elif parse is True: # rule-based v = self.find_lexeme(b) # For each tense in the verb lexeme that matches the given tense, # 1) retrieve the tense tuple, # 2) retrieve the tense tuples for which that tense is a default. for i, tense in enumerate(v): if tense == verb: for id, index in self._format.items(): if i == index: a.add(id) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) a = (TENSES[id][:-2] for id in a) a = Tenses(sorted(a)) return a
python
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ verb = verb.lower() a = set() b = self.lemma(verb, parse=parse) v = [] if b in self: v = self[b] elif parse is True: # rule-based v = self.find_lexeme(b) # For each tense in the verb lexeme that matches the given tense, # 1) retrieve the tense tuple, # 2) retrieve the tense tuples for which that tense is a default. for i, tense in enumerate(v): if tense == verb: for id, index in self._format.items(): if i == index: a.add(id) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) a = (TENSES[id][:-2] for id in a) a = Tenses(sorted(a)) return a
[ "def", "tenses", "(", "self", ",", "verb", ",", "parse", "=", "True", ")", ":", "verb", "=", "verb", ".", "lower", "(", ")", "a", "=", "set", "(", ")", "b", "=", "self", ".", "lemma", "(", "verb", ",", "parse", "=", "parse", ")", "v", "=", "[", "]", "if", "b", "in", "self", ":", "v", "=", "self", "[", "b", "]", "elif", "parse", "is", "True", ":", "# rule-based", "v", "=", "self", ".", "find_lexeme", "(", "b", ")", "# For each tense in the verb lexeme that matches the given tense,", "# 1) retrieve the tense tuple,", "# 2) retrieve the tense tuples for which that tense is a default.", "for", "i", ",", "tense", "in", "enumerate", "(", "v", ")", ":", "if", "tense", "==", "verb", ":", "for", "id", ",", "index", "in", "self", ".", "_format", ".", "items", "(", ")", ":", "if", "i", "==", "index", ":", "a", ".", "add", "(", "id", ")", "for", "id1", ",", "id2", "in", "self", ".", "_default", ".", "items", "(", ")", ":", "if", "id2", "in", "a", ":", "a", ".", "add", "(", "id1", ")", "for", "id1", ",", "id2", "in", "self", ".", "_default", ".", "items", "(", ")", ":", "if", "id2", "in", "a", ":", "a", ".", "add", "(", "id1", ")", "a", "=", "(", "TENSES", "[", "id", "]", "[", ":", "-", "2", "]", "for", "id", "in", "a", ")", "a", "=", "Tenses", "(", "sorted", "(", "a", ")", ")", "return", "a" ]
Returns a list of possible tenses for the given inflected verb.
[ "Returns", "a", "list", "of", "possible", "tenses", "for", "the", "given", "inflected", "verb", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1762-L1789
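Asking which tenses an inflected form can realize (hypothetical `verbs` instance; the tuples follow the (tense, person, number, mood, aspect) layout described for TENSES above, and the values shown are illustrative):

    verbs.tenses("was")
    # => e.g. [("past", 1, "singular", "indicative", "imperfective"),
    #          ("past", 3, "singular", "indicative", "imperfective")]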
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Sentiment.load
def load(self, path=None): """ Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded. """ # <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" /> # <word form="damnmit" polarity="-0.75" subjectivity="1.0" label="profanity" /> if not path: path = self._path if not os.path.exists(path): return words, synsets, labels = {}, {}, {} xml = cElementTree.parse(path) xml = xml.getroot() for w in xml.findall("word"): if self._confidence is None \ or self._confidence <= float(w.attrib.get("confidence", 0.0)): w, pos, p, s, i, label, synset = ( w.attrib.get("form"), w.attrib.get("pos"), w.attrib.get("polarity", 0.0), w.attrib.get("subjectivity", 0.0), w.attrib.get("intensity", 1.0), w.attrib.get("label"), w.attrib.get(self._synset) # wordnet_id, cornetto_id, ... ) psi = (float(p), float(s), float(i)) if w: words.setdefault(w, {}).setdefault(pos, []).append(psi) if w and label: labels[w] = label if synset: synsets.setdefault(synset, []).append(psi) self._language = xml.attrib.get("language", self._language) # Average scores of all word senses per part-of-speech tag. for w in words: words[w] = dict((pos, [avg(each) for each in zip(*psi)]) for pos, psi in words[w].items()) # Average scores of all part-of-speech tags. for w, pos in list(words.items()): words[w][None] = [avg(each) for each in zip(*pos.values())] # Average scores of all synonyms per synset. for id, psi in synsets.items(): synsets[id] = [avg(each) for each in zip(*psi)] dict.update(self, words) dict.update(self.labeler, labels) dict.update(self._synsets, synsets)
python
def load(self, path=None): """ Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded. """ # <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" /> # <word form="damnmit" polarity="-0.75" subjectivity="1.0" label="profanity" /> if not path: path = self._path if not os.path.exists(path): return words, synsets, labels = {}, {}, {} xml = cElementTree.parse(path) xml = xml.getroot() for w in xml.findall("word"): if self._confidence is None \ or self._confidence <= float(w.attrib.get("confidence", 0.0)): w, pos, p, s, i, label, synset = ( w.attrib.get("form"), w.attrib.get("pos"), w.attrib.get("polarity", 0.0), w.attrib.get("subjectivity", 0.0), w.attrib.get("intensity", 1.0), w.attrib.get("label"), w.attrib.get(self._synset) # wordnet_id, cornetto_id, ... ) psi = (float(p), float(s), float(i)) if w: words.setdefault(w, {}).setdefault(pos, []).append(psi) if w and label: labels[w] = label if synset: synsets.setdefault(synset, []).append(psi) self._language = xml.attrib.get("language", self._language) # Average scores of all word senses per part-of-speech tag. for w in words: words[w] = dict((pos, [avg(each) for each in zip(*psi)]) for pos, psi in words[w].items()) # Average scores of all part-of-speech tags. for w, pos in list(words.items()): words[w][None] = [avg(each) for each in zip(*pos.values())] # Average scores of all synonyms per synset. for id, psi in synsets.items(): synsets[id] = [avg(each) for each in zip(*psi)] dict.update(self, words) dict.update(self.labeler, labels) dict.update(self._synsets, synsets)
[ "def", "load", "(", "self", ",", "path", "=", "None", ")", ":", "# <word form=\"great\" wordnet_id=\"a-01123879\" pos=\"JJ\" polarity=\"1.0\" subjectivity=\"1.0\" intensity=\"1.0\" />", "# <word form=\"damnmit\" polarity=\"-0.75\" subjectivity=\"1.0\" label=\"profanity\" />", "if", "not", "path", ":", "path", "=", "self", ".", "_path", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "words", ",", "synsets", ",", "labels", "=", "{", "}", ",", "{", "}", ",", "{", "}", "xml", "=", "cElementTree", ".", "parse", "(", "path", ")", "xml", "=", "xml", ".", "getroot", "(", ")", "for", "w", "in", "xml", ".", "findall", "(", "\"word\"", ")", ":", "if", "self", ".", "_confidence", "is", "None", "or", "self", ".", "_confidence", "<=", "float", "(", "w", ".", "attrib", ".", "get", "(", "\"confidence\"", ",", "0.0", ")", ")", ":", "w", ",", "pos", ",", "p", ",", "s", ",", "i", ",", "label", ",", "synset", "=", "(", "w", ".", "attrib", ".", "get", "(", "\"form\"", ")", ",", "w", ".", "attrib", ".", "get", "(", "\"pos\"", ")", ",", "w", ".", "attrib", ".", "get", "(", "\"polarity\"", ",", "0.0", ")", ",", "w", ".", "attrib", ".", "get", "(", "\"subjectivity\"", ",", "0.0", ")", ",", "w", ".", "attrib", ".", "get", "(", "\"intensity\"", ",", "1.0", ")", ",", "w", ".", "attrib", ".", "get", "(", "\"label\"", ")", ",", "w", ".", "attrib", ".", "get", "(", "self", ".", "_synset", ")", "# wordnet_id, cornetto_id, ...", ")", "psi", "=", "(", "float", "(", "p", ")", ",", "float", "(", "s", ")", ",", "float", "(", "i", ")", ")", "if", "w", ":", "words", ".", "setdefault", "(", "w", ",", "{", "}", ")", ".", "setdefault", "(", "pos", ",", "[", "]", ")", ".", "append", "(", "psi", ")", "if", "w", "and", "label", ":", "labels", "[", "w", "]", "=", "label", "if", "synset", ":", "synsets", ".", "setdefault", "(", "synset", ",", "[", "]", ")", ".", "append", "(", "psi", ")", "self", ".", "_language", "=", "xml", ".", "attrib", ".", "get", "(", "\"language\"", ",", "self", ".", "_language", ")", "# Average scores of all word senses per part-of-speech tag.", "for", "w", "in", "words", ":", "words", "[", "w", "]", "=", "dict", "(", "(", "pos", ",", "[", "avg", "(", "each", ")", "for", "each", "in", "zip", "(", "*", "psi", ")", "]", ")", "for", "pos", ",", "psi", "in", "words", "[", "w", "]", ".", "items", "(", ")", ")", "# Average scores of all part-of-speech tags.", "for", "w", ",", "pos", "in", "list", "(", "words", ".", "items", "(", ")", ")", ":", "words", "[", "w", "]", "[", "None", "]", "=", "[", "avg", "(", "each", ")", "for", "each", "in", "zip", "(", "*", "pos", ".", "values", "(", ")", ")", "]", "# Average scores of all synonyms per synset.", "for", "id", ",", "psi", "in", "synsets", ".", "items", "(", ")", ":", "synsets", "[", "id", "]", "=", "[", "avg", "(", "each", ")", "for", "each", "in", "zip", "(", "*", "psi", ")", "]", "dict", ".", "update", "(", "self", ",", "words", ")", "dict", ".", "update", "(", "self", ".", "labeler", ",", "labels", ")", "dict", ".", "update", "(", "self", ".", "_synsets", ",", "synsets", ")" ]
Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded.
[ "Loads", "the", "XML", "-", "file", "(", "with", "sentiment", "annotations", ")", "from", "the", "given", "path", ".", "By", "default", "Sentiment", ".", "path", "is", "lazily", "loaded", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1878-L1922
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Sentiment.synset
def synset(self, id, pos=ADJECTIVE): """ Returns a (polarity, subjectivity)-tuple for the given synset id. For example, the adjective "horrible" has id 193480 in WordNet: Sentiment.synset(193480, pos="JJ") => (-0.6, 1.0, 1.0). """ id = str(id).zfill(8) if not id.startswith(("n-", "v-", "a-", "r-")): if pos == NOUN: id = "n-" + id if pos == VERB: id = "v-" + id if pos == ADJECTIVE: id = "a-" + id if pos == ADVERB: id = "r-" + id if dict.__len__(self) == 0: self.load() try: return tuple(self._synsets[id])[:2] except KeyError: # Some WordNet id's are not zero padded. return tuple(self._synsets.get(re.sub(r"-0+", "-", id), (0.0, 0.0))[:2])
python
def synset(self, id, pos=ADJECTIVE): """ Returns a (polarity, subjectivity)-tuple for the given synset id. For example, the adjective "horrible" has id 193480 in WordNet: Sentiment.synset(193480, pos="JJ") => (-0.6, 1.0, 1.0). """ id = str(id).zfill(8) if not id.startswith(("n-", "v-", "a-", "r-")): if pos == NOUN: id = "n-" + id if pos == VERB: id = "v-" + id if pos == ADJECTIVE: id = "a-" + id if pos == ADVERB: id = "r-" + id if dict.__len__(self) == 0: self.load() try: return tuple(self._synsets[id])[:2] except KeyError: # Some WordNet id's are not zero padded. return tuple(self._synsets.get(re.sub(r"-0+", "-", id), (0.0, 0.0))[:2])
[ "def", "synset", "(", "self", ",", "id", ",", "pos", "=", "ADJECTIVE", ")", ":", "id", "=", "str", "(", "id", ")", ".", "zfill", "(", "8", ")", "if", "not", "id", ".", "startswith", "(", "(", "\"n-\"", ",", "\"v-\"", ",", "\"a-\"", ",", "\"r-\"", ")", ")", ":", "if", "pos", "==", "NOUN", ":", "id", "=", "\"n-\"", "+", "id", "if", "pos", "==", "VERB", ":", "id", "=", "\"v-\"", "+", "id", "if", "pos", "==", "ADJECTIVE", ":", "id", "=", "\"a-\"", "+", "id", "if", "pos", "==", "ADVERB", ":", "id", "=", "\"r-\"", "+", "id", "if", "dict", ".", "__len__", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "try", ":", "return", "tuple", "(", "self", ".", "_synsets", "[", "id", "]", ")", "[", ":", "2", "]", "except", "KeyError", ":", "# Some WordNet id's are not zero padded.", "return", "tuple", "(", "self", ".", "_synsets", ".", "get", "(", "re", ".", "sub", "(", "r\"-0+\"", ",", "\"-\"", ",", "id", ")", ",", "(", "0.0", ",", "0.0", ")", ")", "[", ":", "2", "]", ")" ]
Returns a (polarity, subjectivity)-tuple for the given synset id. For example, the adjective "horrible" has id 193480 in WordNet: Sentiment.synset(193480, pos="JJ") => (-0.6, 1.0, 1.0).
[ "Returns", "a", "(", "polarity", "subjectivity", ")", "-", "tuple", "for", "the", "given", "synset", "id", ".", "For", "example", "the", "adjective", "horrible", "has", "id", "193480", "in", "WordNet", ":", "Sentiment", ".", "synset", "(", "193480", "pos", "=", "JJ", ")", "=", ">", "(", "-", "0", ".", "6", "1", ".", "0", "1", ".", "0", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L1924-L1944
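The docstring's example as a sketch (hypothetical `sentiment` instance backed by a WordNet-aligned XML file; note that the method returns only the first two values, i.e. a (polarity, subjectivity) pair):

    sentiment.synset(193480, pos="JJ")   # => (-0.6, 1.0) for the adjective "horrible"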
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Sentiment.assessments
def assessments(self, words=[], negation=True): """ Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words: where chunk is a list of successive words: a known word optionally preceded by a modifier ("very good") or a negation ("not good"). """ a = [] m = None # Preceding modifier (i.e., adverb or adjective). n = None # Preceding negation (e.g., "not beautiful"). for w, pos in words: # Only assess known words, preferably by part-of-speech tag. # Including unknown words (polarity 0.0 and subjectivity 0.0) lowers the average. if w is None: continue if w in self and pos in self[w]: p, s, i = self[w][pos] # Known word not preceded by a modifier ("good"). if m is None: a.append(dict(w=[w], p=p, s=s, i=i, n=1, x=self.labeler.get(w))) # Known word preceded by a modifier ("really good"). if m is not None: a[-1]["w"].append(w) a[-1]["p"] = max(-1.0, min(p * a[-1]["i"], +1.0)) a[-1]["s"] = max(-1.0, min(s * a[-1]["i"], +1.0)) a[-1]["i"] = i a[-1]["x"] = self.labeler.get(w) # Known word preceded by a negation ("not really good"). if n is not None: a[-1]["w"].insert(0, n) a[-1]["i"] = 1.0 / a[-1]["i"] a[-1]["n"] = -1 # Known word may be a negation. # Known word may be modifying the next word (i.e., it is a known adverb). m = None n = None if pos and pos in self.modifiers or any(map(self[w].__contains__, self.modifiers)): m = (w, pos) if negation and w in self.negations: n = w else: # Unknown word may be a negation ("not good"). if negation and w in self.negations: n = w # Unknown word. Retain negation across small words ("not a good"). elif n and len(w.strip("'")) > 1: n = None # Unknown word may be a negation preceded by a modifier ("really not good"). if n is not None and m is not None and (pos in self.modifiers or self.modifier(m[0])): a[-1]["w"].append(n) a[-1]["n"] = -1 n = None # Unknown word. Retain modifier across small words ("really is a good"). elif m and len(w) > 2: m = None # Exclamation marks boost previous word. if w == "!" and len(a) > 0: a[-1]["w"].append("!") a[-1]["p"] = max(-1.0, min(a[-1]["p"] * 1.25, +1.0)) # Exclamation marks in parentheses indicate sarcasm. if w == "(!)": a.append(dict(w=[w], p=0.0, s=1.0, i=1.0, n=1, x=IRONY)) # EMOTICONS: {("grin", +1.0): set((":-D", ":D"))} if w.isalpha() is False and len(w) <= 5 and w not in PUNCTUATION: # speedup for (type, p), e in EMOTICONS.items(): if w in map(lambda e: e.lower(), e): a.append(dict(w=[w], p=p, s=1.0, i=1.0, n=1, x=MOOD)) break for i in range(len(a)): w = a[i]["w"] p = a[i]["p"] s = a[i]["s"] n = a[i]["n"] x = a[i]["x"] # "not good" = slightly bad, "not bad" = slightly good. a[i] = (w, p * -0.5 if n < 0 else p, s, x) return a
python
def assessments(self, words=[], negation=True): """ Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words: where chunk is a list of successive words: a known word optionally preceded by a modifier ("very good") or a negation ("not good"). """ a = [] m = None # Preceding modifier (i.e., adverb or adjective). n = None # Preceding negation (e.g., "not beautiful"). for w, pos in words: # Only assess known words, preferably by part-of-speech tag. # Including unknown words (polarity 0.0 and subjectivity 0.0) lowers the average. if w is None: continue if w in self and pos in self[w]: p, s, i = self[w][pos] # Known word not preceded by a modifier ("good"). if m is None: a.append(dict(w=[w], p=p, s=s, i=i, n=1, x=self.labeler.get(w))) # Known word preceded by a modifier ("really good"). if m is not None: a[-1]["w"].append(w) a[-1]["p"] = max(-1.0, min(p * a[-1]["i"], +1.0)) a[-1]["s"] = max(-1.0, min(s * a[-1]["i"], +1.0)) a[-1]["i"] = i a[-1]["x"] = self.labeler.get(w) # Known word preceded by a negation ("not really good"). if n is not None: a[-1]["w"].insert(0, n) a[-1]["i"] = 1.0 / a[-1]["i"] a[-1]["n"] = -1 # Known word may be a negation. # Known word may be modifying the next word (i.e., it is a known adverb). m = None n = None if pos and pos in self.modifiers or any(map(self[w].__contains__, self.modifiers)): m = (w, pos) if negation and w in self.negations: n = w else: # Unknown word may be a negation ("not good"). if negation and w in self.negations: n = w # Unknown word. Retain negation across small words ("not a good"). elif n and len(w.strip("'")) > 1: n = None # Unknown word may be a negation preceded by a modifier ("really not good"). if n is not None and m is not None and (pos in self.modifiers or self.modifier(m[0])): a[-1]["w"].append(n) a[-1]["n"] = -1 n = None # Unknown word. Retain modifier across small words ("really is a good"). elif m and len(w) > 2: m = None # Exclamation marks boost previous word. if w == "!" and len(a) > 0: a[-1]["w"].append("!") a[-1]["p"] = max(-1.0, min(a[-1]["p"] * 1.25, +1.0)) # Exclamation marks in parentheses indicate sarcasm. if w == "(!)": a.append(dict(w=[w], p=0.0, s=1.0, i=1.0, n=1, x=IRONY)) # EMOTICONS: {("grin", +1.0): set((":-D", ":D"))} if w.isalpha() is False and len(w) <= 5 and w not in PUNCTUATION: # speedup for (type, p), e in EMOTICONS.items(): if w in map(lambda e: e.lower(), e): a.append(dict(w=[w], p=p, s=1.0, i=1.0, n=1, x=MOOD)) break for i in range(len(a)): w = a[i]["w"] p = a[i]["p"] s = a[i]["s"] n = a[i]["n"] x = a[i]["x"] # "not good" = slightly bad, "not bad" = slightly good. a[i] = (w, p * -0.5 if n < 0 else p, s, x) return a
[ "def", "assessments", "(", "self", ",", "words", "=", "[", "]", ",", "negation", "=", "True", ")", ":", "a", "=", "[", "]", "m", "=", "None", "# Preceding modifier (i.e., adverb or adjective).", "n", "=", "None", "# Preceding negation (e.g., \"not beautiful\").", "for", "w", ",", "pos", "in", "words", ":", "# Only assess known words, preferably by part-of-speech tag.", "# Including unknown words (polarity 0.0 and subjectivity 0.0) lowers the average.", "if", "w", "is", "None", ":", "continue", "if", "w", "in", "self", "and", "pos", "in", "self", "[", "w", "]", ":", "p", ",", "s", ",", "i", "=", "self", "[", "w", "]", "[", "pos", "]", "# Known word not preceded by a modifier (\"good\").", "if", "m", "is", "None", ":", "a", ".", "append", "(", "dict", "(", "w", "=", "[", "w", "]", ",", "p", "=", "p", ",", "s", "=", "s", ",", "i", "=", "i", ",", "n", "=", "1", ",", "x", "=", "self", ".", "labeler", ".", "get", "(", "w", ")", ")", ")", "# Known word preceded by a modifier (\"really good\").", "if", "m", "is", "not", "None", ":", "a", "[", "-", "1", "]", "[", "\"w\"", "]", ".", "append", "(", "w", ")", "a", "[", "-", "1", "]", "[", "\"p\"", "]", "=", "max", "(", "-", "1.0", ",", "min", "(", "p", "*", "a", "[", "-", "1", "]", "[", "\"i\"", "]", ",", "+", "1.0", ")", ")", "a", "[", "-", "1", "]", "[", "\"s\"", "]", "=", "max", "(", "-", "1.0", ",", "min", "(", "s", "*", "a", "[", "-", "1", "]", "[", "\"i\"", "]", ",", "+", "1.0", ")", ")", "a", "[", "-", "1", "]", "[", "\"i\"", "]", "=", "i", "a", "[", "-", "1", "]", "[", "\"x\"", "]", "=", "self", ".", "labeler", ".", "get", "(", "w", ")", "# Known word preceded by a negation (\"not really good\").", "if", "n", "is", "not", "None", ":", "a", "[", "-", "1", "]", "[", "\"w\"", "]", ".", "insert", "(", "0", ",", "n", ")", "a", "[", "-", "1", "]", "[", "\"i\"", "]", "=", "1.0", "/", "a", "[", "-", "1", "]", "[", "\"i\"", "]", "a", "[", "-", "1", "]", "[", "\"n\"", "]", "=", "-", "1", "# Known word may be a negation.", "# Known word may be modifying the next word (i.e., it is a known adverb).", "m", "=", "None", "n", "=", "None", "if", "pos", "and", "pos", "in", "self", ".", "modifiers", "or", "any", "(", "map", "(", "self", "[", "w", "]", ".", "__contains__", ",", "self", ".", "modifiers", ")", ")", ":", "m", "=", "(", "w", ",", "pos", ")", "if", "negation", "and", "w", "in", "self", ".", "negations", ":", "n", "=", "w", "else", ":", "# Unknown word may be a negation (\"not good\").", "if", "negation", "and", "w", "in", "self", ".", "negations", ":", "n", "=", "w", "# Unknown word. Retain negation across small words (\"not a good\").", "elif", "n", "and", "len", "(", "w", ".", "strip", "(", "\"'\"", ")", ")", ">", "1", ":", "n", "=", "None", "# Unknown word may be a negation preceded by a modifier (\"really not good\").", "if", "n", "is", "not", "None", "and", "m", "is", "not", "None", "and", "(", "pos", "in", "self", ".", "modifiers", "or", "self", ".", "modifier", "(", "m", "[", "0", "]", ")", ")", ":", "a", "[", "-", "1", "]", "[", "\"w\"", "]", ".", "append", "(", "n", ")", "a", "[", "-", "1", "]", "[", "\"n\"", "]", "=", "-", "1", "n", "=", "None", "# Unknown word. 
Retain modifier across small words (\"really is a good\").", "elif", "m", "and", "len", "(", "w", ")", ">", "2", ":", "m", "=", "None", "# Exclamation marks boost previous word.", "if", "w", "==", "\"!\"", "and", "len", "(", "a", ")", ">", "0", ":", "a", "[", "-", "1", "]", "[", "\"w\"", "]", ".", "append", "(", "\"!\"", ")", "a", "[", "-", "1", "]", "[", "\"p\"", "]", "=", "max", "(", "-", "1.0", ",", "min", "(", "a", "[", "-", "1", "]", "[", "\"p\"", "]", "*", "1.25", ",", "+", "1.0", ")", ")", "# Exclamation marks in parentheses indicate sarcasm.", "if", "w", "==", "\"(!)\"", ":", "a", ".", "append", "(", "dict", "(", "w", "=", "[", "w", "]", ",", "p", "=", "0.0", ",", "s", "=", "1.0", ",", "i", "=", "1.0", ",", "n", "=", "1", ",", "x", "=", "IRONY", ")", ")", "# EMOTICONS: {(\"grin\", +1.0): set((\":-D\", \":D\"))}", "if", "w", ".", "isalpha", "(", ")", "is", "False", "and", "len", "(", "w", ")", "<=", "5", "and", "w", "not", "in", "PUNCTUATION", ":", "# speedup", "for", "(", "type", ",", "p", ")", ",", "e", "in", "EMOTICONS", ".", "items", "(", ")", ":", "if", "w", "in", "map", "(", "lambda", "e", ":", "e", ".", "lower", "(", ")", ",", "e", ")", ":", "a", ".", "append", "(", "dict", "(", "w", "=", "[", "w", "]", ",", "p", "=", "p", ",", "s", "=", "1.0", ",", "i", "=", "1.0", ",", "n", "=", "1", ",", "x", "=", "MOOD", ")", ")", "break", "for", "i", "in", "range", "(", "len", "(", "a", ")", ")", ":", "w", "=", "a", "[", "i", "]", "[", "\"w\"", "]", "p", "=", "a", "[", "i", "]", "[", "\"p\"", "]", "s", "=", "a", "[", "i", "]", "[", "\"s\"", "]", "n", "=", "a", "[", "i", "]", "[", "\"n\"", "]", "x", "=", "a", "[", "i", "]", "[", "\"x\"", "]", "# \"not good\" = slightly bad, \"not bad\" = slightly good.", "a", "[", "i", "]", "=", "(", "w", ",", "p", "*", "-", "0.5", "if", "n", "<", "0", "else", "p", ",", "s", ",", "x", ")", "return", "a" ]
Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words: where chunk is a list of successive words: a known word optionally preceded by a modifier ("very good") or a negation ("not good").
[ "Returns", "a", "list", "of", "(", "chunk", "polarity", "subjectivity", "label", ")", "-", "tuples", "for", "the", "given", "list", "of", "words", ":", "where", "chunk", "is", "a", "list", "of", "successive", "words", ":", "a", "known", "word", "optionally", "preceded", "by", "a", "modifier", "(", "very", "good", ")", "or", "a", "negation", "(", "not", "good", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2004-L2078
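A sketch of assessment chunking on pre-tagged (word, tag) pairs (hypothetical `sentiment` instance; the numbers are purely illustrative and depend on the loaded lexicon):

    sentiment.assessments([("not", "RB"), ("very", "RB"), ("good", "JJ"), ("!", ".")])
    # => e.g. [(["not", "very", "good", "!"], -0.35, 0.6, None)]
    #    one (chunk, polarity, subjectivity, label) tuple per detected chunk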
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Sentiment.annotate
def annotate(self, word, pos=None, polarity=0.0, subjectivity=0.0, intensity=1.0, label=None): """ Annotates the given word with polarity, subjectivity and intensity scores, and optionally a semantic label (e.g., MOOD for emoticons, IRONY for "(!)"). """ w = self.setdefault(word, {}) w[pos] = w[None] = (polarity, subjectivity, intensity) if label: self.labeler[word] = label
python
def annotate(self, word, pos=None, polarity=0.0, subjectivity=0.0, intensity=1.0, label=None): """ Annotates the given word with polarity, subjectivity and intensity scores, and optionally a semantic label (e.g., MOOD for emoticons, IRONY for "(!)"). """ w = self.setdefault(word, {}) w[pos] = w[None] = (polarity, subjectivity, intensity) if label: self.labeler[word] = label
[ "def", "annotate", "(", "self", ",", "word", ",", "pos", "=", "None", ",", "polarity", "=", "0.0", ",", "subjectivity", "=", "0.0", ",", "intensity", "=", "1.0", ",", "label", "=", "None", ")", ":", "w", "=", "self", ".", "setdefault", "(", "word", ",", "{", "}", ")", "w", "[", "pos", "]", "=", "w", "[", "None", "]", "=", "(", "polarity", ",", "subjectivity", ",", "intensity", ")", "if", "label", ":", "self", ".", "labeler", "[", "word", "]", "=", "label" ]
Annotates the given word with polarity, subjectivity and intensity scores, and optionally a semantic label (e.g., MOOD for emoticons, IRONY for "(!)").
[ "Annotates", "the", "given", "word", "with", "polarity", "subjectivity", "and", "intensity", "scores", "and", "optionally", "a", "semantic", "label", "(", "e", ".", "g", ".", "MOOD", "for", "emoticons", "IRONY", "for", "(", "!", ")", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2080-L2087
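Adding a custom entry at runtime, e.g. a domain-specific interjection (the "UH" tag and the scores are assumptions for the sake of the example; MOOD is the module-level label constant):

    sentiment.annotate("meh", pos="UH", polarity=-0.25, subjectivity=0.9, intensity=1.0, label=MOOD)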
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Spelling.train
def train(self, s, path="spelling.txt"): """ Counts the words in the given string and saves the probabilities at the given path. This can be used to generate a new model for the Spelling() constructor. """ model = {} for w in re.findall("[a-z]+", s.lower()): model[w] = w in model and model[w] + 1 or 1 model = ("%s %s" % (k, v) for k, v in sorted(model.items())) model = "\n".join(model) f = open(path, "w") f.write(model) f.close()
python
def train(self, s, path="spelling.txt"): """ Counts the words in the given string and saves the probabilities at the given path. This can be used to generate a new model for the Spelling() constructor. """ model = {} for w in re.findall("[a-z]+", s.lower()): model[w] = w in model and model[w] + 1 or 1 model = ("%s %s" % (k, v) for k, v in sorted(model.items())) model = "\n".join(model) f = open(path, "w") f.write(model) f.close()
[ "def", "train", "(", "self", ",", "s", ",", "path", "=", "\"spelling.txt\"", ")", ":", "model", "=", "{", "}", "for", "w", "in", "re", ".", "findall", "(", "\"[a-z]+\"", ",", "s", ".", "lower", "(", ")", ")", ":", "model", "[", "w", "]", "=", "w", "in", "model", "and", "model", "[", "w", "]", "+", "1", "or", "1", "model", "=", "(", "\"%s %s\"", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "sorted", "(", "model", ".", "items", "(", ")", ")", ")", "model", "=", "\"\\n\"", ".", "join", "(", "model", ")", "f", "=", "open", "(", "path", ",", "\"w\"", ")", "f", ".", "write", "(", "model", ")", "f", ".", "close", "(", ")" ]
Counts the words in the given string and saves the probabilities at the given path. This can be used to generate a new model for the Spelling() constructor.
[ "Counts", "the", "words", "in", "the", "given", "string", "and", "saves", "the", "probabilities", "at", "the", "given", "path", ".", "This", "can", "be", "used", "to", "generate", "a", "new", "model", "for", "the", "Spelling", "()", "constructor", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2113-L2124
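Building a new frequency model from raw text (file names are illustrative; per the docstring, the written file can then be handed to the Spelling() constructor):

    speller = Spelling()                    # empty model
    with open("corpus.txt") as f:
        speller.train(f.read(), path="my-spelling.txt")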
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Spelling._edit1
def _edit1(self, w): """ Returns a set of words with edit distance 1 from the given word. """ # Of all spelling errors, 80% is covered by edit distance 1. # Edit distance 1 = one character deleted, swapped, replaced or inserted. split = [(w[:i], w[i:]) for i in range(len(w) + 1)] delete, transpose, replace, insert = ( [a + b[1:] for a, b in split if b], [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1], [a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b], [a + c + b[0:] for a, b in split for c in Spelling.ALPHA] ) return set(delete + transpose + replace + insert)
python
def _edit1(self, w): """ Returns a set of words with edit distance 1 from the given word. """ # Of all spelling errors, 80% is covered by edit distance 1. # Edit distance 1 = one character deleted, swapped, replaced or inserted. split = [(w[:i], w[i:]) for i in range(len(w) + 1)] delete, transpose, replace, insert = ( [a + b[1:] for a, b in split if b], [a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1], [a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b], [a + c + b[0:] for a, b in split for c in Spelling.ALPHA] ) return set(delete + transpose + replace + insert)
[ "def", "_edit1", "(", "self", ",", "w", ")", ":", "# Of all spelling errors, 80% is covered by edit distance 1.", "# Edit distance 1 = one character deleted, swapped, replaced or inserted.", "split", "=", "[", "(", "w", "[", ":", "i", "]", ",", "w", "[", "i", ":", "]", ")", "for", "i", "in", "range", "(", "len", "(", "w", ")", "+", "1", ")", "]", "delete", ",", "transpose", ",", "replace", ",", "insert", "=", "(", "[", "a", "+", "b", "[", "1", ":", "]", "for", "a", ",", "b", "in", "split", "if", "b", "]", ",", "[", "a", "+", "b", "[", "1", "]", "+", "b", "[", "0", "]", "+", "b", "[", "2", ":", "]", "for", "a", ",", "b", "in", "split", "if", "len", "(", "b", ")", ">", "1", "]", ",", "[", "a", "+", "c", "+", "b", "[", "1", ":", "]", "for", "a", ",", "b", "in", "split", "for", "c", "in", "Spelling", ".", "ALPHA", "if", "b", "]", ",", "[", "a", "+", "c", "+", "b", "[", "0", ":", "]", "for", "a", ",", "b", "in", "split", "for", "c", "in", "Spelling", ".", "ALPHA", "]", ")", "return", "set", "(", "delete", "+", "transpose", "+", "replace", "+", "insert", ")" ]
Returns a set of words with edit distance 1 from the given word.
[ "Returns", "a", "set", "of", "words", "with", "edit", "distance", "1", "from", "the", "given", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2126-L2138
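The candidate generation needs no trained model, so it can be sketched directly; for "teh" the returned set contains, among many others, the transposition "the", the deletions "eh", "th" and "te", and insertions such as "tech":

    candidates = Spelling()._edit1("teh")
    "the" in candidates   # => True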
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Spelling._edit2
def _edit2(self, w): """ Returns a set of words with edit distance 2 from the given word """ # Of all spelling errors, 99% is covered by edit distance 2. # Only keep candidates that are actually known words (20% speedup). return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
python
def _edit2(self, w): """ Returns a set of words with edit distance 2 from the given word """ # Of all spelling errors, 99% is covered by edit distance 2. # Only keep candidates that are actually known words (20% speedup). return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
[ "def", "_edit2", "(", "self", ",", "w", ")", ":", "# Of all spelling errors, 99% is covered by edit distance 2.", "# Only keep candidates that are actually known words (20% speedup).", "return", "set", "(", "e2", "for", "e1", "in", "self", ".", "_edit1", "(", "w", ")", "for", "e2", "in", "self", ".", "_edit1", "(", "e1", ")", "if", "e2", "in", "self", ")" ]
Returns a set of words with edit distance 2 from the given word
[ "Returns", "a", "set", "of", "words", "with", "edit", "distance", "2", "from", "the", "given", "word" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2140-L2145
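The same idea taken one step further, restricted to words that exist in a known vocabulary. A sketch that reuses the edit1() helper from the previous sketch, with a hand-made word set standing in for the loaded model:

# Requires the edit1() helper defined in the sketch above.
known_words = {"spelling", "dwelling", "swelling", "smelling"}

def edit2(w, known=known_words):
    return set(e2 for e1 in edit1(w) for e2 in edit1(e1) if e2 in known)

print(sorted(edit2("spewling")))
# ['smelling', 'spelling', 'swelling']  ("dwelling" needs three edits, so it is filtered out)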
markuskiller/textblob-de
textblob_de/ext/_pattern/text/__init__.py
Spelling.suggest
def suggest(self, w): """ Return a list of (word, confidence) spelling corrections for the given word, based on the probability of known words with edit distance 1-2 from the given word. """ if len(self) == 0: self.load() if len(w) == 1: return [(w, 1.0)] # I if w in PUNCTUATION: return [(w, 1.0)] # .?! if w.replace(".", "").isdigit(): return [(w, 1.0)] # 1.5 candidates = self._known([w]) \ or self._known(self._edit1(w)) \ or self._known(self._edit2(w)) \ or [w] candidates = [(self.get(c, 0.0), c) for c in candidates] s = float(sum(p for p, w in candidates) or 1) candidates = sorted(((p / s, w) for p, w in candidates), reverse=True) candidates = [(w.istitle() and x.title() or x, p) for p, x in candidates] # case-sensitive return candidates
python
def suggest(self, w): """ Return a list of (word, confidence) spelling corrections for the given word, based on the probability of known words with edit distance 1-2 from the given word. """ if len(self) == 0: self.load() if len(w) == 1: return [(w, 1.0)] # I if w in PUNCTUATION: return [(w, 1.0)] # .?! if w.replace(".", "").isdigit(): return [(w, 1.0)] # 1.5 candidates = self._known([w]) \ or self._known(self._edit1(w)) \ or self._known(self._edit2(w)) \ or [w] candidates = [(self.get(c, 0.0), c) for c in candidates] s = float(sum(p for p, w in candidates) or 1) candidates = sorted(((p / s, w) for p, w in candidates), reverse=True) candidates = [(w.istitle() and x.title() or x, p) for p, x in candidates] # case-sensitive return candidates
[ "def", "suggest", "(", "self", ",", "w", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "self", ".", "load", "(", ")", "if", "len", "(", "w", ")", "==", "1", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# I", "if", "w", "in", "PUNCTUATION", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# .?!", "if", "w", ".", "replace", "(", "\".\"", ",", "\"\"", ")", ".", "isdigit", "(", ")", ":", "return", "[", "(", "w", ",", "1.0", ")", "]", "# 1.5", "candidates", "=", "self", ".", "_known", "(", "[", "w", "]", ")", "or", "self", ".", "_known", "(", "self", ".", "_edit1", "(", "w", ")", ")", "or", "self", ".", "_known", "(", "self", ".", "_edit2", "(", "w", ")", ")", "or", "[", "w", "]", "candidates", "=", "[", "(", "self", ".", "get", "(", "c", ",", "0.0", ")", ",", "c", ")", "for", "c", "in", "candidates", "]", "s", "=", "float", "(", "sum", "(", "p", "for", "p", ",", "w", "in", "candidates", ")", "or", "1", ")", "candidates", "=", "sorted", "(", "(", "(", "p", "/", "s", ",", "w", ")", "for", "p", ",", "w", "in", "candidates", ")", ",", "reverse", "=", "True", ")", "candidates", "=", "[", "(", "w", ".", "istitle", "(", ")", "and", "x", ".", "title", "(", ")", "or", "x", ",", "p", ")", "for", "p", ",", "x", "in", "candidates", "]", "# case-sensitive", "return", "candidates" ]
Return a list of (word, confidence) spelling corrections for the given word, based on the probability of known words with edit distance 1-2 from the given word.
[ "Return", "a", "list", "of", "(", "word", "confidence", ")", "spelling", "corrections", "for", "the", "given", "word", "based", "on", "the", "probability", "of", "known", "words", "with", "edit", "distance", "1", "-", "2", "from", "the", "given", "word", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L2152-L2172
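The ranking step can be pictured with a hand-made frequency dict: the counts of the known candidates are normalized so the returned confidences sum to 1.0, highest first. A simplified sketch of that step only (model loading and case handling left out):

# Sketch of the ranking inside suggest(): normalize counts to confidences.
counts = {"spelling": 80, "swelling": 15, "smelling": 5}   # hypothetical model counts

def rank(candidates, counts=counts):
    scored = [(counts.get(c, 0.0), c) for c in candidates]
    total = float(sum(p for p, _ in scored) or 1)
    return [(w, p / total) for p, w in sorted(scored, reverse=True)]

print(rank(["spelling", "swelling", "smelling"]))
# [('spelling', 0.8), ('swelling', 0.15), ('smelling', 0.05)]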
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
zip
def zip(*args, **kwargs): """ Returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables (or default if too short). """ args = [list(iterable) for iterable in args] n = max(map(len, args)) v = kwargs.get("default", None) return _zip(*[i + [v] * (n - len(i)) for i in args])
python
def zip(*args, **kwargs): """ Returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables (or default if too short). """ args = [list(iterable) for iterable in args] n = max(map(len, args)) v = kwargs.get("default", None) return _zip(*[i + [v] * (n - len(i)) for i in args])
[ "def", "zip", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "list", "(", "iterable", ")", "for", "iterable", "in", "args", "]", "n", "=", "max", "(", "map", "(", "len", ",", "args", ")", ")", "v", "=", "kwargs", ".", "get", "(", "\"default\"", ",", "None", ")", "return", "_zip", "(", "*", "[", "i", "+", "[", "v", "]", "*", "(", "n", "-", "len", "(", "i", ")", ")", "for", "i", "in", "args", "]", ")" ]
Returns a list of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables (or default if too short).
[ "Returns", "a", "list", "of", "tuples", "where", "the", "i", "-", "th", "tuple", "contains", "the", "i", "-", "th", "element", "from", "each", "of", "the", "argument", "sequences", "or", "iterables", "(", "or", "default", "if", "too", "short", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L89-L96
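This padded zip() behaves like itertools.zip_longest with a fillvalue, which may be the easier mental model:

# Equivalent behaviour with the standard library (izip_longest on Python 2):
from itertools import zip_longest

print(list(zip_longest([1, 2, 3], ["a"], fillvalue=None)))
# [(1, 'a'), (2, None), (3, None)]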
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
chunked
def chunked(sentence): """ Returns a list of Chunk and Chink objects from the given sentence. Chink is a subclass of Chunk used for words that have Word.chunk == None (e.g., punctuation marks, conjunctions). """ # For example, to construct a training vector with the head of previous chunks as a feature. # Doing this with Sentence.chunks would discard the punctuation marks and conjunctions # (Sentence.chunks only yields Chunk objects), which may be useful features. chunks = [] for word in sentence: if word.chunk is not None: if len(chunks) == 0 or chunks[-1] != word.chunk: chunks.append(word.chunk) else: ch = Chink(sentence) ch.append(word.copy(ch)) chunks.append(ch) return chunks
python
def chunked(sentence): """ Returns a list of Chunk and Chink objects from the given sentence. Chink is a subclass of Chunk used for words that have Word.chunk == None (e.g., punctuation marks, conjunctions). """ # For example, to construct a training vector with the head of previous chunks as a feature. # Doing this with Sentence.chunks would discard the punctuation marks and conjunctions # (Sentence.chunks only yields Chunk objects), which may be useful features. chunks = [] for word in sentence: if word.chunk is not None: if len(chunks) == 0 or chunks[-1] != word.chunk: chunks.append(word.chunk) else: ch = Chink(sentence) ch.append(word.copy(ch)) chunks.append(ch) return chunks
[ "def", "chunked", "(", "sentence", ")", ":", "# For example, to construct a training vector with the head of previous chunks as a feature.", "# Doing this with Sentence.chunks would discard the punctuation marks and conjunctions", "# (Sentence.chunks only yields Chunk objects), which amy be useful features.", "chunks", "=", "[", "]", "for", "word", "in", "sentence", ":", "if", "word", ".", "chunk", "is", "not", "None", ":", "if", "len", "(", "chunks", ")", "==", "0", "or", "chunks", "[", "-", "1", "]", "!=", "word", ".", "chunk", ":", "chunks", ".", "append", "(", "word", ".", "chunk", ")", "else", ":", "ch", "=", "Chink", "(", "sentence", ")", "ch", ".", "append", "(", "word", ".", "copy", "(", "ch", ")", ")", "chunks", ".", "append", "(", "ch", ")", "return", "chunks" ]
Returns a list of Chunk and Chink objects from the given sentence. Chink is a subclass of Chunk used for words that have Word.chunk == None (e.g., punctuation marks, conjunctions).
[ "Returns", "a", "list", "of", "Chunk", "and", "Chink", "objects", "from", "the", "given", "sentence", ".", "Chink", "is", "a", "subclass", "of", "Chunk", "used", "for", "words", "that", "have", "Word", ".", "chunk", "==", "None", "(", "e", ".", "g", ".", "punctuation", "marks", "conjunctions", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1100-L1117
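The grouping logic can be pictured without the tree classes: consecutive words that share a chunk collapse into one group, and each word without a chunk (punctuation, conjunctions) becomes its own single-word group. A plain-Python stand-in with (word, chunk id) pairs instead of Word objects:

# Stand-in for the chunked() grouping logic; None marks a word outside any chunk.
tagged = [("the", 1), ("black", 1), ("cat", 1), ("sleeps", 2), (".", None)]

groups = []
for word, chunk_id in tagged:
    if chunk_id is not None and groups and groups[-1][0] == chunk_id:
        groups[-1][1].append(word)            # continue the current chunk
    else:
        groups.append((chunk_id, [word]))     # start a new chunk (or a "chink")

print(groups)
# [(1, ['the', 'black', 'cat']), (2, ['sleeps']), (None, ['.'])]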
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
tree
def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Transforms the output of parse() into a Text object. The token parameter lists the order of tags in each token in the input string. """ return Text(string, token)
python
def tree(string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Transforms the output of parse() into a Text object. The token parameter lists the order of tags in each token in the input string. """ return Text(string, token)
[ "def", "tree", "(", "string", ",", "token", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "return", "Text", "(", "string", ",", "token", ")" ]
Transforms the output of parse() into a Text object. The token parameter lists the order of tags in each token in the input string.
[ "Transforms", "the", "output", "of", "parse", "()", "into", "a", "Text", "object", ".", "The", "token", "parameter", "lists", "the", "order", "of", "tags", "in", "each", "token", "in", "the", "input", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1207-L1211
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
xml_encode
def xml_encode(string): """ Returns the string with XML-safe special characters. """ string = string.replace("&", "&amp;") string = string.replace("<", "&lt;") string = string.replace(">", "&gt;") string = string.replace("\"","&quot;") string = string.replace(SLASH, "/") return string
python
def xml_encode(string): """ Returns the string with XML-safe special characters. """ string = string.replace("&", "&amp;") string = string.replace("<", "&lt;") string = string.replace(">", "&gt;") string = string.replace("\"","&quot;") string = string.replace(SLASH, "/") return string
[ "def", "xml_encode", "(", "string", ")", ":", "string", "=", "string", ".", "replace", "(", "\"&\"", ",", "\"&amp;\"", ")", "string", "=", "string", ".", "replace", "(", "\"<\"", ",", "\"&lt;\"", ")", "string", "=", "string", ".", "replace", "(", "\">\"", ",", "\"&gt;\"", ")", "string", "=", "string", ".", "replace", "(", "\"\\\"\"", ",", "\"&quot;\"", ")", "string", "=", "string", ".", "replace", "(", "SLASH", ",", "\"/\"", ")", "return", "string" ]
Returns the string with XML-safe special characters.
[ "Returns", "the", "string", "with", "XML", "-", "safe", "special", "characters", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1253-L1261
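A quick look at what the generic part of the escaping produces (SLASH is the module's internal placeholder for "/" and is left out of this sketch):

# Same replacements as xml_encode(), minus the SLASH placeholder handling:
s = 'He said "pizza & <fork>"'
encoded = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace('"', "&quot;")
print(encoded)   # He said &quot;pizza &amp; &lt;fork&gt;&quot;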
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
xml_decode
def xml_decode(string): """ Returns the string with special characters decoded. """ string = string.replace("&amp;", "&") string = string.replace("&lt;", "<") string = string.replace("&gt;", ">") string = string.replace("&quot;","\"") string = string.replace("/", SLASH) return string
python
def xml_decode(string): """ Returns the string with special characters decoded. """ string = string.replace("&amp;", "&") string = string.replace("&lt;", "<") string = string.replace("&gt;", ">") string = string.replace("&quot;","\"") string = string.replace("/", SLASH) return string
[ "def", "xml_decode", "(", "string", ")", ":", "string", "=", "string", ".", "replace", "(", "\"&amp;\"", ",", "\"&\"", ")", "string", "=", "string", ".", "replace", "(", "\"&lt;\"", ",", "\"<\"", ")", "string", "=", "string", ".", "replace", "(", "\"&gt;\"", ",", "\">\"", ")", "string", "=", "string", ".", "replace", "(", "\"&quot;\"", ",", "\"\\\"\"", ")", "string", "=", "string", ".", "replace", "(", "\"/\"", ",", "SLASH", ")", "return", "string" ]
Returns the string with special characters decoded.
[ "Returns", "the", "string", "with", "special", "characters", "decoded", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1263-L1271
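xml_decode() reverses xml_encode(), so strings without "/" or pre-escaped entities survive a round trip unchanged. A sketch assuming the original pattern package is importable; in textblob-de the same module ships as textblob_de.ext._pattern.text.tree:

# Round trip, assuming pattern (or the bundled copy) is on the path:
from pattern.text.tree import xml_encode, xml_decode

s = 'pizza & "fork"'
assert xml_decode(xml_encode(s)) == s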
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
parse_xml
def parse_xml(sentence, tab="\t", id=""): """ Returns the given Sentence object as an XML-string (plain bytestring, UTF-8 encoded). The tab delimiter is used as indendation for nested elements. The id can be used as a unique identifier per sentence for chunk id's and anchors. For example: "I eat pizza with a fork." => <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma" language="en"> <chunk type="NP" relation="SBJ" of="1"> <word type="PRP" lemma="i">I</word> </chunk> <chunk type="VP" relation="VP" id="1" anchor="A1"> <word type="VBP" lemma="eat">eat</word> </chunk> <chunk type="NP" relation="OBJ" of="1"> <word type="NN" lemma="pizza">pizza</word> </chunk> <chunk type="PNP" of="A1"> <chunk type="PP"> <word type="IN" lemma="with">with</word> </chunk> <chunk type="NP"> <word type="DT" lemma="a">a</word> <word type="NN" lemma="fork">fork</word> </chunk> </chunk> <chink> <word type="." lemma=".">.</word> </chink> </sentence> """ uid = lambda *parts: "".join([str(id), _UID_SEPARATOR ]+[str(x) for x in parts]).lstrip(_UID_SEPARATOR) push = lambda indent: indent+tab # push() increases the indentation. pop = lambda indent: indent[:-len(tab)] # pop() decreases the indentation. indent = tab xml = [] # Start the sentence element: # <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma"> xml.append('<%s%s %s="%s" %s="%s">' % ( XML_SENTENCE, XML_ID and " %s=\"%s\"" % (XML_ID, str(id)) or "", XML_TOKEN, ", ".join(sentence.token), XML_LANGUAGE, sentence.language )) # Collect chunks that are PNP anchors and assign id. anchors = {} for chunk in sentence.chunks: if chunk.attachments: anchors[chunk.start] = len(anchors) + 1 # Traverse all words in the sentence. for word in sentence.words: chunk = word.chunk pnp = word.chunk and word.chunk.pnp or None # Start the PNP element if the chunk is the first chunk in PNP: # <chunk type="PNP" of="A1"> if pnp and pnp.start == chunk.start: a = pnp.anchor and ' %s="%s"' % (XML_OF, uid("A", anchors.get(pnp.anchor.start, ""))) or "" xml.append(indent + '<%s %s="PNP"%s>' % (XML_CHUNK, XML_TYPE, a)) indent = push(indent) # Start the chunk element if the word is the first word in the chunk: # <chunk type="VP" relation="VP" id="1" anchor="A1"> if chunk and chunk.start == word.index: if chunk.relations: # Create the shortest possible attribute values for multiple relations, # e.g., [(1,"OBJ"),(2,"OBJ")]) => relation="OBJ" id="1|2" r1 = unzip(0, chunk.relations) # Relation id's. r2 = unzip(1, chunk.relations) # Relation roles. 
r1 = [x is None and "-" or uid(x) for x in r1] r2 = [x is None and "-" or x for x in r2] r1 = not len(unique(r1)) == 1 and "|".join(r1) or (r1+[None])[0] r2 = not len(unique(r2)) == 1 and "|".join(r2) or (r2+[None])[0] xml.append(indent + '<%s%s%s%s%s%s>' % ( XML_CHUNK, chunk.type and ' %s="%s"' % (XML_TYPE, chunk.type) or "", chunk.relations and chunk.role != None and ' %s="%s"' % (XML_RELATION, r2) or "", chunk.relation and chunk.type == "VP" and ' %s="%s"' % (XML_ID, uid(chunk.relation)) or "", chunk.relation and chunk.type != "VP" and ' %s="%s"' % (XML_OF, r1) or "", chunk.attachments and ' %s="%s"' % (XML_ANCHOR, uid("A",anchors[chunk.start])) or "" )) indent = push(indent) # Words outside of a chunk are wrapped in a <chink> tag: # <chink> if not chunk: xml.append(indent + '<%s>' % XML_CHINK) indent = push(indent) # Add the word element: # <word type="VBP" lemma="eat">eat</word> xml.append(indent + '<%s%s%s%s>%s</%s>' % ( XML_WORD, word.type and ' %s="%s"' % (XML_TYPE, xml_encode(word.type)) or '', word.lemma and ' %s="%s"' % (XML_LEMMA, xml_encode(word.lemma)) or '', (" "+" ".join(['%s="%s"' % (k,v) for k,v in word.custom_tags.items() if v != None])).rstrip(), xml_encode(unicode(word)), XML_WORD )) if not chunk: # Close the <chink> element if outside of a chunk. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHINK) if chunk and chunk.stop-1 == word.index: # Close the <chunk> element if this is the last word in the chunk. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK) if pnp and pnp.stop-1 == word.index: # Close the PNP element if this is the last word in the PNP. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK) xml.append("</%s>" % XML_SENTENCE) # Return as a plain str. return "\n".join(xml).encode("utf-8")
python
def parse_xml(sentence, tab="\t", id=""): """ Returns the given Sentence object as an XML-string (plain bytestring, UTF-8 encoded). The tab delimiter is used as indendation for nested elements. The id can be used as a unique identifier per sentence for chunk id's and anchors. For example: "I eat pizza with a fork." => <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma" language="en"> <chunk type="NP" relation="SBJ" of="1"> <word type="PRP" lemma="i">I</word> </chunk> <chunk type="VP" relation="VP" id="1" anchor="A1"> <word type="VBP" lemma="eat">eat</word> </chunk> <chunk type="NP" relation="OBJ" of="1"> <word type="NN" lemma="pizza">pizza</word> </chunk> <chunk type="PNP" of="A1"> <chunk type="PP"> <word type="IN" lemma="with">with</word> </chunk> <chunk type="NP"> <word type="DT" lemma="a">a</word> <word type="NN" lemma="fork">fork</word> </chunk> </chunk> <chink> <word type="." lemma=".">.</word> </chink> </sentence> """ uid = lambda *parts: "".join([str(id), _UID_SEPARATOR ]+[str(x) for x in parts]).lstrip(_UID_SEPARATOR) push = lambda indent: indent+tab # push() increases the indentation. pop = lambda indent: indent[:-len(tab)] # pop() decreases the indentation. indent = tab xml = [] # Start the sentence element: # <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma"> xml.append('<%s%s %s="%s" %s="%s">' % ( XML_SENTENCE, XML_ID and " %s=\"%s\"" % (XML_ID, str(id)) or "", XML_TOKEN, ", ".join(sentence.token), XML_LANGUAGE, sentence.language )) # Collect chunks that are PNP anchors and assign id. anchors = {} for chunk in sentence.chunks: if chunk.attachments: anchors[chunk.start] = len(anchors) + 1 # Traverse all words in the sentence. for word in sentence.words: chunk = word.chunk pnp = word.chunk and word.chunk.pnp or None # Start the PNP element if the chunk is the first chunk in PNP: # <chunk type="PNP" of="A1"> if pnp and pnp.start == chunk.start: a = pnp.anchor and ' %s="%s"' % (XML_OF, uid("A", anchors.get(pnp.anchor.start, ""))) or "" xml.append(indent + '<%s %s="PNP"%s>' % (XML_CHUNK, XML_TYPE, a)) indent = push(indent) # Start the chunk element if the word is the first word in the chunk: # <chunk type="VP" relation="VP" id="1" anchor="A1"> if chunk and chunk.start == word.index: if chunk.relations: # Create the shortest possible attribute values for multiple relations, # e.g., [(1,"OBJ"),(2,"OBJ")]) => relation="OBJ" id="1|2" r1 = unzip(0, chunk.relations) # Relation id's. r2 = unzip(1, chunk.relations) # Relation roles. 
r1 = [x is None and "-" or uid(x) for x in r1] r2 = [x is None and "-" or x for x in r2] r1 = not len(unique(r1)) == 1 and "|".join(r1) or (r1+[None])[0] r2 = not len(unique(r2)) == 1 and "|".join(r2) or (r2+[None])[0] xml.append(indent + '<%s%s%s%s%s%s>' % ( XML_CHUNK, chunk.type and ' %s="%s"' % (XML_TYPE, chunk.type) or "", chunk.relations and chunk.role != None and ' %s="%s"' % (XML_RELATION, r2) or "", chunk.relation and chunk.type == "VP" and ' %s="%s"' % (XML_ID, uid(chunk.relation)) or "", chunk.relation and chunk.type != "VP" and ' %s="%s"' % (XML_OF, r1) or "", chunk.attachments and ' %s="%s"' % (XML_ANCHOR, uid("A",anchors[chunk.start])) or "" )) indent = push(indent) # Words outside of a chunk are wrapped in a <chink> tag: # <chink> if not chunk: xml.append(indent + '<%s>' % XML_CHINK) indent = push(indent) # Add the word element: # <word type="VBP" lemma="eat">eat</word> xml.append(indent + '<%s%s%s%s>%s</%s>' % ( XML_WORD, word.type and ' %s="%s"' % (XML_TYPE, xml_encode(word.type)) or '', word.lemma and ' %s="%s"' % (XML_LEMMA, xml_encode(word.lemma)) or '', (" "+" ".join(['%s="%s"' % (k,v) for k,v in word.custom_tags.items() if v != None])).rstrip(), xml_encode(unicode(word)), XML_WORD )) if not chunk: # Close the <chink> element if outside of a chunk. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHINK) if chunk and chunk.stop-1 == word.index: # Close the <chunk> element if this is the last word in the chunk. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK) if pnp and pnp.stop-1 == word.index: # Close the PNP element if this is the last word in the PNP. indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK) xml.append("</%s>" % XML_SENTENCE) # Return as a plain str. return "\n".join(xml).encode("utf-8")
[ "def", "parse_xml", "(", "sentence", ",", "tab", "=", "\"\\t\"", ",", "id", "=", "\"\"", ")", ":", "uid", "=", "lambda", "*", "parts", ":", "\"\"", ".", "join", "(", "[", "str", "(", "id", ")", ",", "_UID_SEPARATOR", "]", "+", "[", "str", "(", "x", ")", "for", "x", "in", "parts", "]", ")", ".", "lstrip", "(", "_UID_SEPARATOR", ")", "push", "=", "lambda", "indent", ":", "indent", "+", "tab", "# push() increases the indentation.", "pop", "=", "lambda", "indent", ":", "indent", "[", ":", "-", "len", "(", "tab", ")", "]", "# pop() decreases the indentation.", "indent", "=", "tab", "xml", "=", "[", "]", "# Start the sentence element:", "# <sentence token=\"word, part-of-speech, chunk, preposition, relation, anchor, lemma\">", "xml", ".", "append", "(", "'<%s%s %s=\"%s\" %s=\"%s\">'", "%", "(", "XML_SENTENCE", ",", "XML_ID", "and", "\" %s=\\\"%s\\\"\"", "%", "(", "XML_ID", ",", "str", "(", "id", ")", ")", "or", "\"\"", ",", "XML_TOKEN", ",", "\", \"", ".", "join", "(", "sentence", ".", "token", ")", ",", "XML_LANGUAGE", ",", "sentence", ".", "language", ")", ")", "# Collect chunks that are PNP anchors and assign id.", "anchors", "=", "{", "}", "for", "chunk", "in", "sentence", ".", "chunks", ":", "if", "chunk", ".", "attachments", ":", "anchors", "[", "chunk", ".", "start", "]", "=", "len", "(", "anchors", ")", "+", "1", "# Traverse all words in the sentence.", "for", "word", "in", "sentence", ".", "words", ":", "chunk", "=", "word", ".", "chunk", "pnp", "=", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "pnp", "or", "None", "# Start the PNP element if the chunk is the first chunk in PNP:", "# <chunk type=\"PNP\" of=\"A1\">", "if", "pnp", "and", "pnp", ".", "start", "==", "chunk", ".", "start", ":", "a", "=", "pnp", ".", "anchor", "and", "' %s=\"%s\"'", "%", "(", "XML_OF", ",", "uid", "(", "\"A\"", ",", "anchors", ".", "get", "(", "pnp", ".", "anchor", ".", "start", ",", "\"\"", ")", ")", ")", "or", "\"\"", "xml", ".", "append", "(", "indent", "+", "'<%s %s=\"PNP\"%s>'", "%", "(", "XML_CHUNK", ",", "XML_TYPE", ",", "a", ")", ")", "indent", "=", "push", "(", "indent", ")", "# Start the chunk element if the word is the first word in the chunk:", "# <chunk type=\"VP\" relation=\"VP\" id=\"1\" anchor=\"A1\">", "if", "chunk", "and", "chunk", ".", "start", "==", "word", ".", "index", ":", "if", "chunk", ".", "relations", ":", "# Create the shortest possible attribute values for multiple relations, ", "# e.g., [(1,\"OBJ\"),(2,\"OBJ\")]) => relation=\"OBJ\" id=\"1|2\"", "r1", "=", "unzip", "(", "0", ",", "chunk", ".", "relations", ")", "# Relation id's.", "r2", "=", "unzip", "(", "1", ",", "chunk", ".", "relations", ")", "# Relation roles.", "r1", "=", "[", "x", "is", "None", "and", "\"-\"", "or", "uid", "(", "x", ")", "for", "x", "in", "r1", "]", "r2", "=", "[", "x", "is", "None", "and", "\"-\"", "or", "x", "for", "x", "in", "r2", "]", "r1", "=", "not", "len", "(", "unique", "(", "r1", ")", ")", "==", "1", "and", "\"|\"", ".", "join", "(", "r1", ")", "or", "(", "r1", "+", "[", "None", "]", ")", "[", "0", "]", "r2", "=", "not", "len", "(", "unique", "(", "r2", ")", ")", "==", "1", "and", "\"|\"", ".", "join", "(", "r2", ")", "or", "(", "r2", "+", "[", "None", "]", ")", "[", "0", "]", "xml", ".", "append", "(", "indent", "+", "'<%s%s%s%s%s%s>'", "%", "(", "XML_CHUNK", ",", "chunk", ".", "type", "and", "' %s=\"%s\"'", "%", "(", "XML_TYPE", ",", "chunk", ".", "type", ")", "or", "\"\"", ",", "chunk", ".", "relations", "and", "chunk", ".", "role", "!=", "None", "and", "' %s=\"%s\"'", "%", "(", 
"XML_RELATION", ",", "r2", ")", "or", "\"\"", ",", "chunk", ".", "relation", "and", "chunk", ".", "type", "==", "\"VP\"", "and", "' %s=\"%s\"'", "%", "(", "XML_ID", ",", "uid", "(", "chunk", ".", "relation", ")", ")", "or", "\"\"", ",", "chunk", ".", "relation", "and", "chunk", ".", "type", "!=", "\"VP\"", "and", "' %s=\"%s\"'", "%", "(", "XML_OF", ",", "r1", ")", "or", "\"\"", ",", "chunk", ".", "attachments", "and", "' %s=\"%s\"'", "%", "(", "XML_ANCHOR", ",", "uid", "(", "\"A\"", ",", "anchors", "[", "chunk", ".", "start", "]", ")", ")", "or", "\"\"", ")", ")", "indent", "=", "push", "(", "indent", ")", "# Words outside of a chunk are wrapped in a <chink> tag:", "# <chink>", "if", "not", "chunk", ":", "xml", ".", "append", "(", "indent", "+", "'<%s>'", "%", "XML_CHINK", ")", "indent", "=", "push", "(", "indent", ")", "# Add the word element:", "# <word type=\"VBP\" lemma=\"eat\">eat</word>", "xml", ".", "append", "(", "indent", "+", "'<%s%s%s%s>%s</%s>'", "%", "(", "XML_WORD", ",", "word", ".", "type", "and", "' %s=\"%s\"'", "%", "(", "XML_TYPE", ",", "xml_encode", "(", "word", ".", "type", ")", ")", "or", "''", ",", "word", ".", "lemma", "and", "' %s=\"%s\"'", "%", "(", "XML_LEMMA", ",", "xml_encode", "(", "word", ".", "lemma", ")", ")", "or", "''", ",", "(", "\" \"", "+", "\" \"", ".", "join", "(", "[", "'%s=\"%s\"'", "%", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "word", ".", "custom_tags", ".", "items", "(", ")", "if", "v", "!=", "None", "]", ")", ")", ".", "rstrip", "(", ")", ",", "xml_encode", "(", "unicode", "(", "word", ")", ")", ",", "XML_WORD", ")", ")", "if", "not", "chunk", ":", "# Close the <chink> element if outside of a chunk.", "indent", "=", "pop", "(", "indent", ")", "xml", ".", "append", "(", "indent", "+", "\"</%s>\"", "%", "XML_CHINK", ")", "if", "chunk", "and", "chunk", ".", "stop", "-", "1", "==", "word", ".", "index", ":", "# Close the <chunk> element if this is the last word in the chunk.", "indent", "=", "pop", "(", "indent", ")", "xml", ".", "append", "(", "indent", "+", "\"</%s>\"", "%", "XML_CHUNK", ")", "if", "pnp", "and", "pnp", ".", "stop", "-", "1", "==", "word", ".", "index", ":", "# Close the PNP element if this is the last word in the PNP.", "indent", "=", "pop", "(", "indent", ")", "xml", ".", "append", "(", "indent", "+", "\"</%s>\"", "%", "XML_CHUNK", ")", "xml", ".", "append", "(", "\"</%s>\"", "%", "XML_SENTENCE", ")", "# Return as a plain str.", "return", "\"\\n\"", ".", "join", "(", "xml", ")", ".", "encode", "(", "\"utf-8\"", ")" ]
Returns the given Sentence object as an XML-string (plain bytestring, UTF-8 encoded). The tab delimiter is used as indendation for nested elements. The id can be used as a unique identifier per sentence for chunk id's and anchors. For example: "I eat pizza with a fork." => <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma" language="en"> <chunk type="NP" relation="SBJ" of="1"> <word type="PRP" lemma="i">I</word> </chunk> <chunk type="VP" relation="VP" id="1" anchor="A1"> <word type="VBP" lemma="eat">eat</word> </chunk> <chunk type="NP" relation="OBJ" of="1"> <word type="NN" lemma="pizza">pizza</word> </chunk> <chunk type="PNP" of="A1"> <chunk type="PP"> <word type="IN" lemma="with">with</word> </chunk> <chunk type="NP"> <word type="DT" lemma="a">a</word> <word type="NN" lemma="fork">fork</word> </chunk> </chunk> <chink> <word type="." lemma=".">.</word> </chink> </sentence>
[ "Returns", "the", "given", "Sentence", "object", "as", "an", "XML", "-", "string", "(", "plain", "bytestring", "UTF", "-", "8", "encoded", ")", ".", "The", "tab", "delimiter", "is", "used", "as", "indendation", "for", "nested", "elements", ".", "The", "id", "can", "be", "used", "as", "a", "unique", "identifier", "per", "sentence", "for", "chunk", "id", "s", "and", "anchors", ".", "For", "example", ":", "I", "eat", "pizza", "with", "a", "fork", ".", "=", ">", "<sentence", "token", "=", "word", "part", "-", "of", "-", "speech", "chunk", "preposition", "relation", "anchor", "lemma", "language", "=", "en", ">", "<chunk", "type", "=", "NP", "relation", "=", "SBJ", "of", "=", "1", ">", "<word", "type", "=", "PRP", "lemma", "=", "i", ">", "I<", "/", "word", ">", "<", "/", "chunk", ">", "<chunk", "type", "=", "VP", "relation", "=", "VP", "id", "=", "1", "anchor", "=", "A1", ">", "<word", "type", "=", "VBP", "lemma", "=", "eat", ">", "eat<", "/", "word", ">", "<", "/", "chunk", ">", "<chunk", "type", "=", "NP", "relation", "=", "OBJ", "of", "=", "1", ">", "<word", "type", "=", "NN", "lemma", "=", "pizza", ">", "pizza<", "/", "word", ">", "<", "/", "chunk", ">", "<chunk", "type", "=", "PNP", "of", "=", "A1", ">", "<chunk", "type", "=", "PP", ">", "<word", "type", "=", "IN", "lemma", "=", "with", ">", "with<", "/", "word", ">", "<", "/", "chunk", ">", "<chunk", "type", "=", "NP", ">", "<word", "type", "=", "DT", "lemma", "=", "a", ">", "a<", "/", "word", ">", "<word", "type", "=", "NN", "lemma", "=", "fork", ">", "fork<", "/", "word", ">", "<", "/", "chunk", ">", "<", "/", "chunk", ">", "<chink", ">", "<word", "type", "=", ".", "lemma", "=", ".", ">", ".", "<", "/", "word", ">", "<", "/", "chink", ">", "<", "/", "sentence", ">" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1279-L1384
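A usage sketch, assuming the original pattern 2.6 API (pattern.en.parse and the tree classes) is installed; in textblob-de the same modules live under textblob_de.ext._pattern:

# Parse a sentence with relations and lemmata, then serialize it to XML:
from pattern.en import parse
from pattern.text.tree import Sentence, parse_xml

s = Sentence(parse("I eat pizza with a fork.", relations=True, lemmata=True))
print(parse_xml(s, tab="  ", id="s1"))   # the <sentence>...</sentence> markup, UTF-8 encoded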
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
parse_string
def parse_string(xml): """ Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern). """ string = "" # Traverse all the <sentence> elements in the XML. dom = XML(xml) for sentence in dom(XML_SENTENCE): _anchors.clear() # Populated by calling _parse_tokens(). _attachments.clear() # Populated by calling _parse_tokens(). # Parse the language from <sentence language="">. language = sentence.get(XML_LANGUAGE, "en") # Parse the token tag format from <sentence token="">. # This information is returned in TokenString.tags, # so the format and order of the token tags is retained when exporting/importing as XML. format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]) format = not isinstance(format, basestring) and format or format.replace(" ","").split(",") # Traverse all <chunk> and <chink> elements in the sentence. # Find the <word> elements inside and create tokens. tokens = [] for chunk in sentence: tokens.extend(_parse_tokens(chunk, format)) # Attach PNP's to their anchors. # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens). # The keys correspond to the keys in _attachments, which have linked PNP chunks. if ANCHOR in format: A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR) for id in sorted(A.keys()): for token in A[id]: token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))]) token[i] = token[i].strip("O-") for p, pnp in enumerate(P[id]): for token in pnp: token[i] += "-"+"P"+str(a+p) token[i] = token[i].strip("O-") a += len(P[id]) # Collapse the tokens to string. # Separate multiple sentences with a new line. tokens = ["/".join([tag for tag in token]) for token in tokens] tokens = " ".join(tokens) string += tokens + "\n" # Return a TokenString, which is a unicode string that transforms easily # into a plain str, a list of tokens, or a Sentence. try: if MBSP: from mbsp import TokenString return TokenString(string.strip(), tags=format, language=language) except: return TaggedString(string.strip(), tags=format, language=language)
python
def parse_string(xml): """ Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern). """ string = "" # Traverse all the <sentence> elements in the XML. dom = XML(xml) for sentence in dom(XML_SENTENCE): _anchors.clear() # Populated by calling _parse_tokens(). _attachments.clear() # Populated by calling _parse_tokens(). # Parse the language from <sentence language="">. language = sentence.get(XML_LANGUAGE, "en") # Parse the token tag format from <sentence token="">. # This information is returned in TokenString.tags, # so the format and order of the token tags is retained when exporting/importing as XML. format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]) format = not isinstance(format, basestring) and format or format.replace(" ","").split(",") # Traverse all <chunk> and <chink> elements in the sentence. # Find the <word> elements inside and create tokens. tokens = [] for chunk in sentence: tokens.extend(_parse_tokens(chunk, format)) # Attach PNP's to their anchors. # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens). # The keys correspond to the keys in _attachments, which have linked PNP chunks. if ANCHOR in format: A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR) for id in sorted(A.keys()): for token in A[id]: token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))]) token[i] = token[i].strip("O-") for p, pnp in enumerate(P[id]): for token in pnp: token[i] += "-"+"P"+str(a+p) token[i] = token[i].strip("O-") a += len(P[id]) # Collapse the tokens to string. # Separate multiple sentences with a new line. tokens = ["/".join([tag for tag in token]) for token in tokens] tokens = " ".join(tokens) string += tokens + "\n" # Return a TokenString, which is a unicode string that transforms easily # into a plain str, a list of tokens, or a Sentence. try: if MBSP: from mbsp import TokenString return TokenString(string.strip(), tags=format, language=language) except: return TaggedString(string.strip(), tags=format, language=language)
[ "def", "parse_string", "(", "xml", ")", ":", "string", "=", "\"\"", "# Traverse all the <sentence> elements in the XML.", "dom", "=", "XML", "(", "xml", ")", "for", "sentence", "in", "dom", "(", "XML_SENTENCE", ")", ":", "_anchors", ".", "clear", "(", ")", "# Populated by calling _parse_tokens().", "_attachments", ".", "clear", "(", ")", "# Populated by calling _parse_tokens().", "# Parse the language from <sentence language=\"\">.", "language", "=", "sentence", ".", "get", "(", "XML_LANGUAGE", ",", "\"en\"", ")", "# Parse the token tag format from <sentence token=\"\">.", "# This information is returned in TokenString.tags,", "# so the format and order of the token tags is retained when exporting/importing as XML.", "format", "=", "sentence", ".", "get", "(", "XML_TOKEN", ",", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", "format", "=", "not", "isinstance", "(", "format", ",", "basestring", ")", "and", "format", "or", "format", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "# Traverse all <chunk> and <chink> elements in the sentence.", "# Find the <word> elements inside and create tokens.", "tokens", "=", "[", "]", "for", "chunk", "in", "sentence", ":", "tokens", ".", "extend", "(", "_parse_tokens", "(", "chunk", ",", "format", ")", ")", "# Attach PNP's to their anchors.", "# Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).", "# The keys correspond to the keys in _attachments, which have linked PNP chunks.", "if", "ANCHOR", "in", "format", ":", "A", ",", "P", ",", "a", ",", "i", "=", "_anchors", ",", "_attachments", ",", "1", ",", "format", ".", "index", "(", "ANCHOR", ")", "for", "id", "in", "sorted", "(", "A", ".", "keys", "(", ")", ")", ":", "for", "token", "in", "A", "[", "id", "]", ":", "token", "[", "i", "]", "+=", "\"-\"", "+", "\"-\"", ".", "join", "(", "[", "\"A\"", "+", "str", "(", "a", "+", "p", ")", "for", "p", "in", "range", "(", "len", "(", "P", "[", "id", "]", ")", ")", "]", ")", "token", "[", "i", "]", "=", "token", "[", "i", "]", ".", "strip", "(", "\"O-\"", ")", "for", "p", ",", "pnp", "in", "enumerate", "(", "P", "[", "id", "]", ")", ":", "for", "token", "in", "pnp", ":", "token", "[", "i", "]", "+=", "\"-\"", "+", "\"P\"", "+", "str", "(", "a", "+", "p", ")", "token", "[", "i", "]", "=", "token", "[", "i", "]", ".", "strip", "(", "\"O-\"", ")", "a", "+=", "len", "(", "P", "[", "id", "]", ")", "# Collapse the tokens to string.", "# Separate multiple sentences with a new line.", "tokens", "=", "[", "\"/\"", ".", "join", "(", "[", "tag", "for", "tag", "in", "token", "]", ")", "for", "token", "in", "tokens", "]", "tokens", "=", "\" \"", ".", "join", "(", "tokens", ")", "string", "+=", "tokens", "+", "\"\\n\"", "# Return a TokenString, which is a unicode string that transforms easily", "# into a plain str, a list of tokens, or a Sentence.", "try", ":", "if", "MBSP", ":", "from", "mbsp", "import", "TokenString", "return", "TokenString", "(", "string", ".", "strip", "(", ")", ",", "tags", "=", "format", ",", "language", "=", "language", ")", "except", ":", "return", "TaggedString", "(", "string", ".", "strip", "(", ")", ",", "tags", "=", "format", ",", "language", "=", "language", ")" ]
Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
[ "Returns", "a", "slash", "-", "formatted", "string", "from", "the", "given", "XML", "representation", ".", "The", "return", "value", "is", "a", "TokenString", "(", "for", "MBSP", ")", "or", "TaggedString", "(", "for", "Pattern", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1440-L1487
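parse_string() is the inverse of parse_xml(), so a sentence serialized to XML can be read back as a slash-formatted tagged string. Same assumptions as the parse_xml() sketch above (original pattern 2.6 API under Python 2):

# Round trip: Sentence -> XML -> TaggedString.
from pattern.en import parse
from pattern.text.tree import Sentence, parse_xml, parse_string

xml = parse_xml(Sentence(parse("I eat pizza with a fork.", relations=True, lemmata=True)))
tagged = parse_string(xml)
print(tagged.tags)   # the token tag order, e.g. ['word', 'part-of-speech', 'chunk', ...]
print(tagged)        # I/PRP/B-NP/... style token string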
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
_parse_tokens
def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Parses tokens from <word> elements in the given XML <chunk> element. Returns a flat list of tokens, in which each token is [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]. If a <chunk type="PNP"> is encountered, traverses all of the chunks in the PNP. """ tokens = [] # Only process <chunk> and <chink> elements, # text nodes in between return an empty list. if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK): return [] type = chunk.get(XML_TYPE, "O") if type == "PNP": # For, <chunk type="PNP">, recurse all the child chunks inside the PNP. for ch in chunk: tokens.extend(_parse_tokens(ch, format)) # Tag each of them as part of the PNP. if PNP in format: i = format.index(PNP) for j, token in enumerate(tokens): token[i] = (j==0 and "B-" or "I-") + "PNP" # Store attachments so we can construct anchor id's in parse_string(). # This has to be done at the end, when all the chunks have been found. a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1] if a: _attachments.setdefault(a, []) _attachments[a].append(tokens) return tokens # For <chunk type-"VP" id="1">, the relation is VP-1. # For <chunk type="NP" relation="OBJ" of="1">, the relation is NP-OBJ-1. relation = _parse_relation(chunk, type) # Process all of the <word> elements in the chunk, for example: # <word type="NN" lemma="pizza">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza] for word in filter(lambda n: n.tag == XML_WORD, chunk): tokens.append(_parse_token(word, chunk=type, relation=relation, format=format)) # Add the IOB chunk tags: # words at the start of a chunk are marked with B-, words inside with I-. if CHUNK in format: i = format.index(CHUNK) for j, token in enumerate(tokens): token[i] = token[i] != "O" and ((j==0 and "B-" or "I-") + token[i]) or "O" # The chunk can be the anchor of one or more PNP chunks. # Store anchors so we can construct anchor id's in parse_string(). a = chunk.get(XML_ANCHOR, "").split(_UID_SEPARATOR)[-1] if a: _anchors[a] = tokens return tokens
python
def _parse_tokens(chunk, format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Parses tokens from <word> elements in the given XML <chunk> element. Returns a flat list of tokens, in which each token is [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]. If a <chunk type="PNP"> is encountered, traverses all of the chunks in the PNP. """ tokens = [] # Only process <chunk> and <chink> elements, # text nodes in between return an empty list. if not (chunk.tag == XML_CHUNK or chunk.tag == XML_CHINK): return [] type = chunk.get(XML_TYPE, "O") if type == "PNP": # For, <chunk type="PNP">, recurse all the child chunks inside the PNP. for ch in chunk: tokens.extend(_parse_tokens(ch, format)) # Tag each of them as part of the PNP. if PNP in format: i = format.index(PNP) for j, token in enumerate(tokens): token[i] = (j==0 and "B-" or "I-") + "PNP" # Store attachments so we can construct anchor id's in parse_string(). # This has to be done at the end, when all the chunks have been found. a = chunk.get(XML_OF).split(_UID_SEPARATOR)[-1] if a: _attachments.setdefault(a, []) _attachments[a].append(tokens) return tokens # For <chunk type-"VP" id="1">, the relation is VP-1. # For <chunk type="NP" relation="OBJ" of="1">, the relation is NP-OBJ-1. relation = _parse_relation(chunk, type) # Process all of the <word> elements in the chunk, for example: # <word type="NN" lemma="pizza">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza] for word in filter(lambda n: n.tag == XML_WORD, chunk): tokens.append(_parse_token(word, chunk=type, relation=relation, format=format)) # Add the IOB chunk tags: # words at the start of a chunk are marked with B-, words inside with I-. if CHUNK in format: i = format.index(CHUNK) for j, token in enumerate(tokens): token[i] = token[i] != "O" and ((j==0 and "B-" or "I-") + token[i]) or "O" # The chunk can be the anchor of one or more PNP chunks. # Store anchors so we can construct anchor id's in parse_string(). a = chunk.get(XML_ANCHOR, "").split(_UID_SEPARATOR)[-1] if a: _anchors[a] = tokens return tokens
[ "def", "_parse_tokens", "(", "chunk", ",", "format", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "tokens", "=", "[", "]", "# Only process <chunk> and <chink> elements, ", "# text nodes in between return an empty list.", "if", "not", "(", "chunk", ".", "tag", "==", "XML_CHUNK", "or", "chunk", ".", "tag", "==", "XML_CHINK", ")", ":", "return", "[", "]", "type", "=", "chunk", ".", "get", "(", "XML_TYPE", ",", "\"O\"", ")", "if", "type", "==", "\"PNP\"", ":", "# For, <chunk type=\"PNP\">, recurse all the child chunks inside the PNP.", "for", "ch", "in", "chunk", ":", "tokens", ".", "extend", "(", "_parse_tokens", "(", "ch", ",", "format", ")", ")", "# Tag each of them as part of the PNP.", "if", "PNP", "in", "format", ":", "i", "=", "format", ".", "index", "(", "PNP", ")", "for", "j", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "token", "[", "i", "]", "=", "(", "j", "==", "0", "and", "\"B-\"", "or", "\"I-\"", ")", "+", "\"PNP\"", "# Store attachments so we can construct anchor id's in parse_string().", "# This has to be done at the end, when all the chunks have been found.", "a", "=", "chunk", ".", "get", "(", "XML_OF", ")", ".", "split", "(", "_UID_SEPARATOR", ")", "[", "-", "1", "]", "if", "a", ":", "_attachments", ".", "setdefault", "(", "a", ",", "[", "]", ")", "_attachments", "[", "a", "]", ".", "append", "(", "tokens", ")", "return", "tokens", "# For <chunk type-\"VP\" id=\"1\">, the relation is VP-1.", "# For <chunk type=\"NP\" relation=\"OBJ\" of=\"1\">, the relation is NP-OBJ-1.", "relation", "=", "_parse_relation", "(", "chunk", ",", "type", ")", "# Process all of the <word> elements in the chunk, for example:", "# <word type=\"NN\" lemma=\"pizza\">pizza</word> => [pizza, NN, I-NP, O, NP-OBJ-1, O, pizza]", "for", "word", "in", "filter", "(", "lambda", "n", ":", "n", ".", "tag", "==", "XML_WORD", ",", "chunk", ")", ":", "tokens", ".", "append", "(", "_parse_token", "(", "word", ",", "chunk", "=", "type", ",", "relation", "=", "relation", ",", "format", "=", "format", ")", ")", "# Add the IOB chunk tags:", "# words at the start of a chunk are marked with B-, words inside with I-.", "if", "CHUNK", "in", "format", ":", "i", "=", "format", ".", "index", "(", "CHUNK", ")", "for", "j", ",", "token", "in", "enumerate", "(", "tokens", ")", ":", "token", "[", "i", "]", "=", "token", "[", "i", "]", "!=", "\"O\"", "and", "(", "(", "j", "==", "0", "and", "\"B-\"", "or", "\"I-\"", ")", "+", "token", "[", "i", "]", ")", "or", "\"O\"", "# The chunk can be the anchor of one or more PNP chunks.", "# Store anchors so we can construct anchor id's in parse_string().", "a", "=", "chunk", ".", "get", "(", "XML_ANCHOR", ",", "\"\"", ")", ".", "split", "(", "_UID_SEPARATOR", ")", "[", "-", "1", "]", "if", "a", ":", "_anchors", "[", "a", "]", "=", "tokens", "return", "tokens" ]
Parses tokens from <word> elements in the given XML <chunk> element. Returns a flat list of tokens, in which each token is [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]. If a <chunk type="PNP"> is encountered, traverses all of the chunks in the PNP.
[ "Parses", "tokens", "from", "<word", ">", "elements", "in", "the", "given", "XML", "<chunk", ">", "element", ".", "Returns", "a", "flat", "list", "of", "tokens", "in", "which", "each", "token", "is", "[", "WORD", "POS", "CHUNK", "PNP", "RELATION", "ANCHOR", "LEMMA", "]", ".", "If", "a", "<chunk", "type", "=", "PNP", ">", "is", "encountered", "traverses", "all", "of", "the", "chunks", "in", "the", "PNP", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1489-L1534
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
_parse_relation
def _parse_relation(chunk, type="O"): """ Returns a string of the roles and relations parsed from the given <chunk> element. The chunk type (which is part of the relation string) can be given as parameter. """ r1 = chunk.get(XML_RELATION) r2 = chunk.get(XML_ID, chunk.get(XML_OF)) r1 = [x != "-" and x or None for x in r1.split("|")] or [None] r2 = [x != "-" and x or None for x in r2.split("|")] or [None] r2 = [x is not None and x.split(_UID_SEPARATOR )[-1] or x for x in r2] if len(r1) < len(r2): r1 = r1 + r1 * (len(r2)-len(r1)) # [1] ["SBJ", "OBJ"] => "SBJ-1;OBJ-1" if len(r2) < len(r1): r2 = r2 + r2 * (len(r1)-len(r2)) # [2,4] ["OBJ"] => "OBJ-2;OBJ-4" return ";".join(["-".join([x for x in (type, r1, r2) if x]) for r1, r2 in zip(r1, r2)])
python
def _parse_relation(chunk, type="O"): """ Returns a string of the roles and relations parsed from the given <chunk> element. The chunk type (which is part of the relation string) can be given as parameter. """ r1 = chunk.get(XML_RELATION) r2 = chunk.get(XML_ID, chunk.get(XML_OF)) r1 = [x != "-" and x or None for x in r1.split("|")] or [None] r2 = [x != "-" and x or None for x in r2.split("|")] or [None] r2 = [x is not None and x.split(_UID_SEPARATOR )[-1] or x for x in r2] if len(r1) < len(r2): r1 = r1 + r1 * (len(r2)-len(r1)) # [1] ["SBJ", "OBJ"] => "SBJ-1;OBJ-1" if len(r2) < len(r1): r2 = r2 + r2 * (len(r1)-len(r2)) # [2,4] ["OBJ"] => "OBJ-2;OBJ-4" return ";".join(["-".join([x for x in (type, r1, r2) if x]) for r1, r2 in zip(r1, r2)])
[ "def", "_parse_relation", "(", "chunk", ",", "type", "=", "\"O\"", ")", ":", "r1", "=", "chunk", ".", "get", "(", "XML_RELATION", ")", "r2", "=", "chunk", ".", "get", "(", "XML_ID", ",", "chunk", ".", "get", "(", "XML_OF", ")", ")", "r1", "=", "[", "x", "!=", "\"-\"", "and", "x", "or", "None", "for", "x", "in", "r1", ".", "split", "(", "\"|\"", ")", "]", "or", "[", "None", "]", "r2", "=", "[", "x", "!=", "\"-\"", "and", "x", "or", "None", "for", "x", "in", "r2", ".", "split", "(", "\"|\"", ")", "]", "or", "[", "None", "]", "r2", "=", "[", "x", "is", "not", "None", "and", "x", ".", "split", "(", "_UID_SEPARATOR", ")", "[", "-", "1", "]", "or", "x", "for", "x", "in", "r2", "]", "if", "len", "(", "r1", ")", "<", "len", "(", "r2", ")", ":", "r1", "=", "r1", "+", "r1", "*", "(", "len", "(", "r2", ")", "-", "len", "(", "r1", ")", ")", "# [1] [\"SBJ\", \"OBJ\"] => \"SBJ-1;OBJ-1\"", "if", "len", "(", "r2", ")", "<", "len", "(", "r1", ")", ":", "r2", "=", "r2", "+", "r2", "*", "(", "len", "(", "r1", ")", "-", "len", "(", "r2", ")", ")", "# [2,4] [\"OBJ\"] => \"OBJ-2;OBJ-4\"", "return", "\";\"", ".", "join", "(", "[", "\"-\"", ".", "join", "(", "[", "x", "for", "x", "in", "(", "type", ",", "r1", ",", "r2", ")", "if", "x", "]", ")", "for", "r1", ",", "r2", "in", "zip", "(", "r1", ",", "r2", ")", "]", ")" ]
Returns a string of the roles and relations parsed from the given <chunk> element. The chunk type (which is part of the relation string) can be given as parameter.
[ "Returns", "a", "string", "of", "the", "roles", "and", "relations", "parsed", "from", "the", "given", "<chunk", ">", "element", ".", "The", "chunk", "type", "(", "which", "is", "part", "of", "the", "relation", "string", ")", "can", "be", "given", "as", "parameter", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1536-L1547
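The relation-string format is easier to see with plain values: the role(s) and id(s) are padded to the same length and joined into TYPE-ROLE-ID parts separated by ";". A simplified stand-in that takes strings directly instead of reading them from an XML element:

# Simplified stand-in for _parse_relation(); mirrors the padding and joining only.
def relation_string(type, roles, ids):
    roles, ids = roles.split("|"), ids.split("|")
    if len(roles) < len(ids): roles = roles + roles * (len(ids) - len(roles))
    if len(ids) < len(roles): ids = ids + ids * (len(roles) - len(ids))
    return ";".join("-".join(x for x in (type, r, i) if x) for r, i in zip(roles, ids))

print(relation_string("NP", "SBJ|OBJ", "1"))   # NP-SBJ-1;NP-OBJ-1
print(relation_string("NP", "OBJ", "2|4"))     # NP-OBJ-2;NP-OBJ-4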
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
_parse_token
def _parse_token(word, chunk="O", pnp="O", relation="O", anchor="O", format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns a list of token tags parsed from the given <word> element. Tags that are not attributes in a <word> (e.g., relation) can be given as parameters. """ tags = [] for tag in format: if tag == WORD : tags.append(xml_decode(word.value)) elif tag == POS : tags.append(xml_decode(word.get(XML_TYPE, "O"))) elif tag == CHUNK : tags.append(chunk) elif tag == PNP : tags.append(pnp) elif tag == REL : tags.append(relation) elif tag == ANCHOR : tags.append(anchor) elif tag == LEMMA : tags.append(xml_decode(word.get(XML_LEMMA, ""))) else: # Custom tags when the parser has been extended, see also Word.custom_tags{}. tags.append(xml_decode(word.get(tag, "O"))) return tags
python
def _parse_token(word, chunk="O", pnp="O", relation="O", anchor="O", format=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns a list of token tags parsed from the given <word> element. Tags that are not attributes in a <word> (e.g., relation) can be given as parameters. """ tags = [] for tag in format: if tag == WORD : tags.append(xml_decode(word.value)) elif tag == POS : tags.append(xml_decode(word.get(XML_TYPE, "O"))) elif tag == CHUNK : tags.append(chunk) elif tag == PNP : tags.append(pnp) elif tag == REL : tags.append(relation) elif tag == ANCHOR : tags.append(anchor) elif tag == LEMMA : tags.append(xml_decode(word.get(XML_LEMMA, ""))) else: # Custom tags when the parser has been extended, see also Word.custom_tags{}. tags.append(xml_decode(word.get(tag, "O"))) return tags
[ "def", "_parse_token", "(", "word", ",", "chunk", "=", "\"O\"", ",", "pnp", "=", "\"O\"", ",", "relation", "=", "\"O\"", ",", "anchor", "=", "\"O\"", ",", "format", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "tags", "=", "[", "]", "for", "tag", "in", "format", ":", "if", "tag", "==", "WORD", ":", "tags", ".", "append", "(", "xml_decode", "(", "word", ".", "value", ")", ")", "elif", "tag", "==", "POS", ":", "tags", ".", "append", "(", "xml_decode", "(", "word", ".", "get", "(", "XML_TYPE", ",", "\"O\"", ")", ")", ")", "elif", "tag", "==", "CHUNK", ":", "tags", ".", "append", "(", "chunk", ")", "elif", "tag", "==", "PNP", ":", "tags", ".", "append", "(", "pnp", ")", "elif", "tag", "==", "REL", ":", "tags", ".", "append", "(", "relation", ")", "elif", "tag", "==", "ANCHOR", ":", "tags", ".", "append", "(", "anchor", ")", "elif", "tag", "==", "LEMMA", ":", "tags", ".", "append", "(", "xml_decode", "(", "word", ".", "get", "(", "XML_LEMMA", ",", "\"\"", ")", ")", ")", "else", ":", "# Custom tags when the parser has been extended, see also Word.custom_tags{}.", "tags", ".", "append", "(", "xml_decode", "(", "word", ".", "get", "(", "tag", ",", "\"O\"", ")", ")", ")", "return", "tags" ]
Returns a list of token tags parsed from the given <word> element. Tags that are not attributes in a <word> (e.g., relation) can be given as parameters.
[ "Returns", "a", "list", "of", "token", "tags", "parsed", "from", "the", "given", "<word", ">", "element", ".", "Tags", "that", "are", "not", "attributes", "in", "a", "<word", ">", "(", "e", ".", "g", ".", "relation", ")", "can", "be", "given", "as", "parameters", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1549-L1566
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
nltk_tree
def nltk_tree(sentence): """ Returns an NLTK nltk.tree.Tree object from the given Sentence. The NLTK module should be on the search path somewhere. """ from nltk import tree def do_pnp(pnp): # Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format. s = ' '.join([do_chunk(ch) for ch in pnp.chunks]) return '(PNP %s)' % s def do_chunk(ch): # Returns the Chunk in NLTK bracket format. Recurse attached PNP's. s = ' '.join(['(%s %s)' % (w.pos, w.string) for w in ch.words]) s+= ' '.join([do_pnp(pnp) for pnp in ch.attachments]) return '(%s %s)' % (ch.type, s) T = ['(S'] v = [] # PNP's already visited. for ch in sentence.chunked(): if not ch.pnp and isinstance(ch, Chink): T.append('(%s %s)' % (ch.words[0].pos, ch.words[0].string)) elif not ch.pnp: T.append(do_chunk(ch)) #elif ch.pnp not in v: elif ch.pnp.anchor is None and ch.pnp not in v: # The chunk is part of a PNP without an anchor. T.append(do_pnp(ch.pnp)) v.append(ch.pnp) T.append(')') return tree.bracket_parse(' '.join(T))
python
def nltk_tree(sentence): """ Returns an NLTK nltk.tree.Tree object from the given Sentence. The NLTK module should be on the search path somewhere. """ from nltk import tree def do_pnp(pnp): # Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format. s = ' '.join([do_chunk(ch) for ch in pnp.chunks]) return '(PNP %s)' % s def do_chunk(ch): # Returns the Chunk in NLTK bracket format. Recurse attached PNP's. s = ' '.join(['(%s %s)' % (w.pos, w.string) for w in ch.words]) s+= ' '.join([do_pnp(pnp) for pnp in ch.attachments]) return '(%s %s)' % (ch.type, s) T = ['(S'] v = [] # PNP's already visited. for ch in sentence.chunked(): if not ch.pnp and isinstance(ch, Chink): T.append('(%s %s)' % (ch.words[0].pos, ch.words[0].string)) elif not ch.pnp: T.append(do_chunk(ch)) #elif ch.pnp not in v: elif ch.pnp.anchor is None and ch.pnp not in v: # The chunk is part of a PNP without an anchor. T.append(do_pnp(ch.pnp)) v.append(ch.pnp) T.append(')') return tree.bracket_parse(' '.join(T))
[ "def", "nltk_tree", "(", "sentence", ")", ":", "from", "nltk", "import", "tree", "def", "do_pnp", "(", "pnp", ")", ":", "# Returns the PNPChunk (and the contained Chunk objects) in NLTK bracket format.", "s", "=", "' '", ".", "join", "(", "[", "do_chunk", "(", "ch", ")", "for", "ch", "in", "pnp", ".", "chunks", "]", ")", "return", "'(PNP %s)'", "%", "s", "def", "do_chunk", "(", "ch", ")", ":", "# Returns the Chunk in NLTK bracket format. Recurse attached PNP's.", "s", "=", "' '", ".", "join", "(", "[", "'(%s %s)'", "%", "(", "w", ".", "pos", ",", "w", ".", "string", ")", "for", "w", "in", "ch", ".", "words", "]", ")", "s", "+=", "' '", ".", "join", "(", "[", "do_pnp", "(", "pnp", ")", "for", "pnp", "in", "ch", ".", "attachments", "]", ")", "return", "'(%s %s)'", "%", "(", "ch", ".", "type", ",", "s", ")", "T", "=", "[", "'(S'", "]", "v", "=", "[", "]", "# PNP's already visited.", "for", "ch", "in", "sentence", ".", "chunked", "(", ")", ":", "if", "not", "ch", ".", "pnp", "and", "isinstance", "(", "ch", ",", "Chink", ")", ":", "T", ".", "append", "(", "'(%s %s)'", "%", "(", "ch", ".", "words", "[", "0", "]", ".", "pos", ",", "ch", ".", "words", "[", "0", "]", ".", "string", ")", ")", "elif", "not", "ch", ".", "pnp", ":", "T", ".", "append", "(", "do_chunk", "(", "ch", ")", ")", "#elif ch.pnp not in v:", "elif", "ch", ".", "pnp", ".", "anchor", "is", "None", "and", "ch", ".", "pnp", "not", "in", "v", ":", "# The chunk is part of a PNP without an anchor.", "T", ".", "append", "(", "do_pnp", "(", "ch", ".", "pnp", ")", ")", "v", ".", "append", "(", "ch", ".", "pnp", ")", "T", ".", "append", "(", "')'", ")", "return", "tree", ".", "bracket_parse", "(", "' '", ".", "join", "(", "T", ")", ")" ]
Returns an NLTK nltk.tree.Tree object from the given Sentence. The NLTK module should be on the search path somewhere.
[ "Returns", "an", "NLTK", "nltk", ".", "tree", ".", "Tree", "object", "from", "the", "given", "Sentence", ".", "The", "NLTK", "module", "should", "be", "on", "the", "search", "path", "somewhere", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1570-L1599
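A minimal usage sketch for the nltk_tree() record above. Assumptions not confirmed by the record itself: that the vendored module can be imported as textblob_de.ext._pattern.text.tree, that an empty Sentence() can be constructed and filled with append() as documented further down in this file, and that NLTK is installed; the words and tags are invented for illustration.

from textblob_de.ext._pattern.text.tree import Sentence, nltk_tree

# Build a tiny two-chunk sentence by hand (see the Sentence.append record further down).
s = Sentence()
s.append("the",    lemma="the",   type="DT",  chunk="NP")
s.append("cat",    lemma="cat",   type="NN",  chunk="NP")
s.append("sleeps", lemma="sleep", type="VBZ", chunk="VP")

t = nltk_tree(s)   # an nltk.tree.Tree built from a bracket string like "(S (NP ...) (VP ...))"
print(t)

Note that nltk_tree() ends with tree.bracket_parse(), which recent NLTK releases appear to have replaced with Tree.fromstring(), so the helper may need a small adjustment on a modern NLTK install.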
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
graphviz_dot
def graphviz_dot(sentence, font="Arial", colors=BLUE): """ Returns a dot-formatted string that can be visualized as a graph in GraphViz. """ s = 'digraph sentence {\n' s += '\tranksep=0.75;\n' s += '\tnodesep=0.15;\n' s += '\tnode [penwidth=1, fontname="%s", shape=record, margin=0.1, height=0.35];\n' % font s += '\tedge [penwidth=1];\n' s += '\t{ rank=same;\n' # Create node groups for words, chunks and PNP chunks. for w in sentence.words: s += '\t\tword%s [label="<f0>%s|<f1>%s"%s];\n' % (w.index, w.string, w.type, _colorize(w, colors)) for w in sentence.words[:-1]: # Invisible edges forces the words into the right order: s += '\t\tword%s -> word%s [color=none];\n' % (w.index, w.index+1) s += '\t}\n' s += '\t{ rank=same;\n' for i, ch in enumerate(sentence.chunks): s += '\t\tchunk%s [label="<f0>%s"%s];\n' % (i+1, "-".join([x for x in ( ch.type, ch.role, str(ch.relation or '')) if x]) or '-', _colorize(ch, colors)) for i, ch in enumerate(sentence.chunks[:-1]): # Invisible edges forces the chunks into the right order: s += '\t\tchunk%s -> chunk%s [color=none];\n' % (i+1, i+2) s += '}\n' s += '\t{ rank=same;\n' for i, ch in enumerate(sentence.pnp): s += '\t\tpnp%s [label="<f0>PNP"%s];\n' % (i+1, _colorize(ch, colors)) s += '\t}\n' s += '\t{ rank=same;\n S [shape=circle, margin=0.25, penwidth=2]; }\n' # Connect words to chunks. # Connect chunks to PNP or S. for i, ch in enumerate(sentence.chunks): for w in ch: s += '\tword%s -> chunk%s;\n' % (w.index, i+1) if ch.pnp: s += '\tchunk%s -> pnp%s;\n' % (i+1, sentence.pnp.index(ch.pnp)+1) else: s += '\tchunk%s -> S;\n' % (i+1) if ch.type == 'VP': # Indicate related chunks with a dotted for r in ch.related: s += '\tchunk%s -> chunk%s [style=dotted, arrowhead=none];\n' % ( i+1, sentence.chunks.index(r)+1) # Connect PNP to anchor chunk or S. for i, ch in enumerate(sentence.pnp): if ch.anchor: s += '\tpnp%s -> chunk%s;\n' % (i+1, sentence.chunks.index(ch.anchor)+1) s += '\tpnp%s -> S [color=none];\n' % (i+1) else: s += '\tpnp%s -> S;\n' % (i+1) s += "}" return s
python
def graphviz_dot(sentence, font="Arial", colors=BLUE): """ Returns a dot-formatted string that can be visualized as a graph in GraphViz. """ s = 'digraph sentence {\n' s += '\tranksep=0.75;\n' s += '\tnodesep=0.15;\n' s += '\tnode [penwidth=1, fontname="%s", shape=record, margin=0.1, height=0.35];\n' % font s += '\tedge [penwidth=1];\n' s += '\t{ rank=same;\n' # Create node groups for words, chunks and PNP chunks. for w in sentence.words: s += '\t\tword%s [label="<f0>%s|<f1>%s"%s];\n' % (w.index, w.string, w.type, _colorize(w, colors)) for w in sentence.words[:-1]: # Invisible edges forces the words into the right order: s += '\t\tword%s -> word%s [color=none];\n' % (w.index, w.index+1) s += '\t}\n' s += '\t{ rank=same;\n' for i, ch in enumerate(sentence.chunks): s += '\t\tchunk%s [label="<f0>%s"%s];\n' % (i+1, "-".join([x for x in ( ch.type, ch.role, str(ch.relation or '')) if x]) or '-', _colorize(ch, colors)) for i, ch in enumerate(sentence.chunks[:-1]): # Invisible edges forces the chunks into the right order: s += '\t\tchunk%s -> chunk%s [color=none];\n' % (i+1, i+2) s += '}\n' s += '\t{ rank=same;\n' for i, ch in enumerate(sentence.pnp): s += '\t\tpnp%s [label="<f0>PNP"%s];\n' % (i+1, _colorize(ch, colors)) s += '\t}\n' s += '\t{ rank=same;\n S [shape=circle, margin=0.25, penwidth=2]; }\n' # Connect words to chunks. # Connect chunks to PNP or S. for i, ch in enumerate(sentence.chunks): for w in ch: s += '\tword%s -> chunk%s;\n' % (w.index, i+1) if ch.pnp: s += '\tchunk%s -> pnp%s;\n' % (i+1, sentence.pnp.index(ch.pnp)+1) else: s += '\tchunk%s -> S;\n' % (i+1) if ch.type == 'VP': # Indicate related chunks with a dotted for r in ch.related: s += '\tchunk%s -> chunk%s [style=dotted, arrowhead=none];\n' % ( i+1, sentence.chunks.index(r)+1) # Connect PNP to anchor chunk or S. for i, ch in enumerate(sentence.pnp): if ch.anchor: s += '\tpnp%s -> chunk%s;\n' % (i+1, sentence.chunks.index(ch.anchor)+1) s += '\tpnp%s -> S [color=none];\n' % (i+1) else: s += '\tpnp%s -> S;\n' % (i+1) s += "}" return s
[ "def", "graphviz_dot", "(", "sentence", ",", "font", "=", "\"Arial\"", ",", "colors", "=", "BLUE", ")", ":", "s", "=", "'digraph sentence {\\n'", "s", "+=", "'\\tranksep=0.75;\\n'", "s", "+=", "'\\tnodesep=0.15;\\n'", "s", "+=", "'\\tnode [penwidth=1, fontname=\"%s\", shape=record, margin=0.1, height=0.35];\\n'", "%", "font", "s", "+=", "'\\tedge [penwidth=1];\\n'", "s", "+=", "'\\t{ rank=same;\\n'", "# Create node groups for words, chunks and PNP chunks.", "for", "w", "in", "sentence", ".", "words", ":", "s", "+=", "'\\t\\tword%s [label=\"<f0>%s|<f1>%s\"%s];\\n'", "%", "(", "w", ".", "index", ",", "w", ".", "string", ",", "w", ".", "type", ",", "_colorize", "(", "w", ",", "colors", ")", ")", "for", "w", "in", "sentence", ".", "words", "[", ":", "-", "1", "]", ":", "# Invisible edges forces the words into the right order:", "s", "+=", "'\\t\\tword%s -> word%s [color=none];\\n'", "%", "(", "w", ".", "index", ",", "w", ".", "index", "+", "1", ")", "s", "+=", "'\\t}\\n'", "s", "+=", "'\\t{ rank=same;\\n'", "for", "i", ",", "ch", "in", "enumerate", "(", "sentence", ".", "chunks", ")", ":", "s", "+=", "'\\t\\tchunk%s [label=\"<f0>%s\"%s];\\n'", "%", "(", "i", "+", "1", ",", "\"-\"", ".", "join", "(", "[", "x", "for", "x", "in", "(", "ch", ".", "type", ",", "ch", ".", "role", ",", "str", "(", "ch", ".", "relation", "or", "''", ")", ")", "if", "x", "]", ")", "or", "'-'", ",", "_colorize", "(", "ch", ",", "colors", ")", ")", "for", "i", ",", "ch", "in", "enumerate", "(", "sentence", ".", "chunks", "[", ":", "-", "1", "]", ")", ":", "# Invisible edges forces the chunks into the right order:", "s", "+=", "'\\t\\tchunk%s -> chunk%s [color=none];\\n'", "%", "(", "i", "+", "1", ",", "i", "+", "2", ")", "s", "+=", "'}\\n'", "s", "+=", "'\\t{ rank=same;\\n'", "for", "i", ",", "ch", "in", "enumerate", "(", "sentence", ".", "pnp", ")", ":", "s", "+=", "'\\t\\tpnp%s [label=\"<f0>PNP\"%s];\\n'", "%", "(", "i", "+", "1", ",", "_colorize", "(", "ch", ",", "colors", ")", ")", "s", "+=", "'\\t}\\n'", "s", "+=", "'\\t{ rank=same;\\n S [shape=circle, margin=0.25, penwidth=2]; }\\n'", "# Connect words to chunks.", "# Connect chunks to PNP or S.", "for", "i", ",", "ch", "in", "enumerate", "(", "sentence", ".", "chunks", ")", ":", "for", "w", "in", "ch", ":", "s", "+=", "'\\tword%s -> chunk%s;\\n'", "%", "(", "w", ".", "index", ",", "i", "+", "1", ")", "if", "ch", ".", "pnp", ":", "s", "+=", "'\\tchunk%s -> pnp%s;\\n'", "%", "(", "i", "+", "1", ",", "sentence", ".", "pnp", ".", "index", "(", "ch", ".", "pnp", ")", "+", "1", ")", "else", ":", "s", "+=", "'\\tchunk%s -> S;\\n'", "%", "(", "i", "+", "1", ")", "if", "ch", ".", "type", "==", "'VP'", ":", "# Indicate related chunks with a dotted", "for", "r", "in", "ch", ".", "related", ":", "s", "+=", "'\\tchunk%s -> chunk%s [style=dotted, arrowhead=none];\\n'", "%", "(", "i", "+", "1", ",", "sentence", ".", "chunks", ".", "index", "(", "r", ")", "+", "1", ")", "# Connect PNP to anchor chunk or S.", "for", "i", ",", "ch", "in", "enumerate", "(", "sentence", ".", "pnp", ")", ":", "if", "ch", ".", "anchor", ":", "s", "+=", "'\\tpnp%s -> chunk%s;\\n'", "%", "(", "i", "+", "1", ",", "sentence", ".", "chunks", ".", "index", "(", "ch", ".", "anchor", ")", "+", "1", ")", "s", "+=", "'\\tpnp%s -> S [color=none];\\n'", "%", "(", "i", "+", "1", ")", "else", ":", "s", "+=", "'\\tpnp%s -> S;\\n'", "%", "(", "i", "+", "1", ")", "s", "+=", "\"}\"", "return", "s" ]
Returns a dot-formatted string that can be visualized as a graph in GraphViz.
[ "Returns", "a", "dot", "-", "formatted", "string", "that", "can", "be", "visualized", "as", "a", "graph", "in", "GraphViz", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1621-L1672
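A short sketch for the graphviz_dot() record above, reusing the hand-built Sentence s from the nltk_tree sketch; the function only returns DOT text, so the extra assumption here is simply that the GraphViz command-line tools are available to render it.

from textblob_de.ext._pattern.text.tree import graphviz_dot

dot = graphviz_dot(s)                  # default font "Arial", default BLUE colour map
with open("sentence.dot", "w") as f:
    f.write(dot)
# Render outside Python, e.g.:  dot -Tpng sentence.dot -o sentence.png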
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
table
def table(sentence, fill=1, placeholder="-"): """ Returns a string where the tags of tokens in the sentence are organized in outlined columns. """ tags = [WORD, POS, IOB, CHUNK, ROLE, REL, PNP, ANCHOR, LEMMA] tags += [tag for tag in sentence.token if tag not in tags] def format(token, tag): # Returns the token tag as a string. if tag == WORD : s = token.string elif tag == POS : s = token.type elif tag == IOB : s = token.chunk and (token.index == token.chunk.start and "B" or "I") elif tag == CHUNK : s = token.chunk and token.chunk.type elif tag == ROLE : s = token.chunk and token.chunk.role elif tag == REL : s = token.chunk and token.chunk.relation and str(token.chunk.relation) elif tag == PNP : s = token.chunk and token.chunk.pnp and token.chunk.pnp.type elif tag == ANCHOR : s = token.chunk and token.chunk.anchor_id elif tag == LEMMA : s = token.lemma else : s = token.custom_tags.get(tag) return s or placeholder def outline(column, fill=1, padding=3, align="left"): # Add spaces to each string in the column so they line out to the highest width. n = max([len(x) for x in column]+[fill]) if align == "left" : return [x+" "*(n-len(x))+" "*padding for x in column] if align == "right" : return [" "*(n-len(x))+x+" "*padding for x in column] # Gather the tags of the tokens in the sentece per column. # If the IOB-tag is I-, mark the chunk tag with "^". # Add the tag names as headers in each column. columns = [[format(token, tag) for token in sentence] for tag in tags] columns[3] = [columns[3][i]+(iob == "I" and " ^" or "") for i, iob in enumerate(columns[2])] del columns[2] for i, header in enumerate(['word', 'tag', 'chunk', 'role', 'id', 'pnp', 'anchor', 'lemma']+tags[9:]): columns[i].insert(0, "") columns[i].insert(0, header.upper()) # The left column (the word itself) is outlined to the right, # and has extra spacing so that words across sentences line out nicely below each other. for i, column in enumerate(columns): columns[i] = outline(column, fill+10*(i==0), align=("left","right")[i==0]) # Anchor column is useful in MBSP but not in pattern.en. if not MBSP: del columns[6] # Create a string with one row (i.e., one token) per line. return "\n".join(["".join([x[i] for x in columns]) for i in range(len(columns[0]))])
python
def table(sentence, fill=1, placeholder="-"): """ Returns a string where the tags of tokens in the sentence are organized in outlined columns. """ tags = [WORD, POS, IOB, CHUNK, ROLE, REL, PNP, ANCHOR, LEMMA] tags += [tag for tag in sentence.token if tag not in tags] def format(token, tag): # Returns the token tag as a string. if tag == WORD : s = token.string elif tag == POS : s = token.type elif tag == IOB : s = token.chunk and (token.index == token.chunk.start and "B" or "I") elif tag == CHUNK : s = token.chunk and token.chunk.type elif tag == ROLE : s = token.chunk and token.chunk.role elif tag == REL : s = token.chunk and token.chunk.relation and str(token.chunk.relation) elif tag == PNP : s = token.chunk and token.chunk.pnp and token.chunk.pnp.type elif tag == ANCHOR : s = token.chunk and token.chunk.anchor_id elif tag == LEMMA : s = token.lemma else : s = token.custom_tags.get(tag) return s or placeholder def outline(column, fill=1, padding=3, align="left"): # Add spaces to each string in the column so they line out to the highest width. n = max([len(x) for x in column]+[fill]) if align == "left" : return [x+" "*(n-len(x))+" "*padding for x in column] if align == "right" : return [" "*(n-len(x))+x+" "*padding for x in column] # Gather the tags of the tokens in the sentece per column. # If the IOB-tag is I-, mark the chunk tag with "^". # Add the tag names as headers in each column. columns = [[format(token, tag) for token in sentence] for tag in tags] columns[3] = [columns[3][i]+(iob == "I" and " ^" or "") for i, iob in enumerate(columns[2])] del columns[2] for i, header in enumerate(['word', 'tag', 'chunk', 'role', 'id', 'pnp', 'anchor', 'lemma']+tags[9:]): columns[i].insert(0, "") columns[i].insert(0, header.upper()) # The left column (the word itself) is outlined to the right, # and has extra spacing so that words across sentences line out nicely below each other. for i, column in enumerate(columns): columns[i] = outline(column, fill+10*(i==0), align=("left","right")[i==0]) # Anchor column is useful in MBSP but not in pattern.en. if not MBSP: del columns[6] # Create a string with one row (i.e., one token) per line. return "\n".join(["".join([x[i] for x in columns]) for i in range(len(columns[0]))])
[ "def", "table", "(", "sentence", ",", "fill", "=", "1", ",", "placeholder", "=", "\"-\"", ")", ":", "tags", "=", "[", "WORD", ",", "POS", ",", "IOB", ",", "CHUNK", ",", "ROLE", ",", "REL", ",", "PNP", ",", "ANCHOR", ",", "LEMMA", "]", "tags", "+=", "[", "tag", "for", "tag", "in", "sentence", ".", "token", "if", "tag", "not", "in", "tags", "]", "def", "format", "(", "token", ",", "tag", ")", ":", "# Returns the token tag as a string.", "if", "tag", "==", "WORD", ":", "s", "=", "token", ".", "string", "elif", "tag", "==", "POS", ":", "s", "=", "token", ".", "type", "elif", "tag", "==", "IOB", ":", "s", "=", "token", ".", "chunk", "and", "(", "token", ".", "index", "==", "token", ".", "chunk", ".", "start", "and", "\"B\"", "or", "\"I\"", ")", "elif", "tag", "==", "CHUNK", ":", "s", "=", "token", ".", "chunk", "and", "token", ".", "chunk", ".", "type", "elif", "tag", "==", "ROLE", ":", "s", "=", "token", ".", "chunk", "and", "token", ".", "chunk", ".", "role", "elif", "tag", "==", "REL", ":", "s", "=", "token", ".", "chunk", "and", "token", ".", "chunk", ".", "relation", "and", "str", "(", "token", ".", "chunk", ".", "relation", ")", "elif", "tag", "==", "PNP", ":", "s", "=", "token", ".", "chunk", "and", "token", ".", "chunk", ".", "pnp", "and", "token", ".", "chunk", ".", "pnp", ".", "type", "elif", "tag", "==", "ANCHOR", ":", "s", "=", "token", ".", "chunk", "and", "token", ".", "chunk", ".", "anchor_id", "elif", "tag", "==", "LEMMA", ":", "s", "=", "token", ".", "lemma", "else", ":", "s", "=", "token", ".", "custom_tags", ".", "get", "(", "tag", ")", "return", "s", "or", "placeholder", "def", "outline", "(", "column", ",", "fill", "=", "1", ",", "padding", "=", "3", ",", "align", "=", "\"left\"", ")", ":", "# Add spaces to each string in the column so they line out to the highest width.", "n", "=", "max", "(", "[", "len", "(", "x", ")", "for", "x", "in", "column", "]", "+", "[", "fill", "]", ")", "if", "align", "==", "\"left\"", ":", "return", "[", "x", "+", "\" \"", "*", "(", "n", "-", "len", "(", "x", ")", ")", "+", "\" \"", "*", "padding", "for", "x", "in", "column", "]", "if", "align", "==", "\"right\"", ":", "return", "[", "\" \"", "*", "(", "n", "-", "len", "(", "x", ")", ")", "+", "x", "+", "\" \"", "*", "padding", "for", "x", "in", "column", "]", "# Gather the tags of the tokens in the sentece per column.", "# If the IOB-tag is I-, mark the chunk tag with \"^\".", "# Add the tag names as headers in each column.", "columns", "=", "[", "[", "format", "(", "token", ",", "tag", ")", "for", "token", "in", "sentence", "]", "for", "tag", "in", "tags", "]", "columns", "[", "3", "]", "=", "[", "columns", "[", "3", "]", "[", "i", "]", "+", "(", "iob", "==", "\"I\"", "and", "\" ^\"", "or", "\"\"", ")", "for", "i", ",", "iob", "in", "enumerate", "(", "columns", "[", "2", "]", ")", "]", "del", "columns", "[", "2", "]", "for", "i", ",", "header", "in", "enumerate", "(", "[", "'word'", ",", "'tag'", ",", "'chunk'", ",", "'role'", ",", "'id'", ",", "'pnp'", ",", "'anchor'", ",", "'lemma'", "]", "+", "tags", "[", "9", ":", "]", ")", ":", "columns", "[", "i", "]", ".", "insert", "(", "0", ",", "\"\"", ")", "columns", "[", "i", "]", ".", "insert", "(", "0", ",", "header", ".", "upper", "(", ")", ")", "# The left column (the word itself) is outlined to the right,", "# and has extra spacing so that words across sentences line out nicely below each other.", "for", "i", ",", "column", "in", "enumerate", "(", "columns", ")", ":", "columns", "[", "i", "]", "=", "outline", "(", "column", ",", "fill", "+", 
"10", "*", "(", "i", "==", "0", ")", ",", "align", "=", "(", "\"left\"", ",", "\"right\"", ")", "[", "i", "==", "0", "]", ")", "# Anchor column is useful in MBSP but not in pattern.en.", "if", "not", "MBSP", ":", "del", "columns", "[", "6", "]", "# Create a string with one row (i.e., one token) per line.", "return", "\"\\n\"", ".", "join", "(", "[", "\"\"", ".", "join", "(", "[", "x", "[", "i", "]", "for", "x", "in", "columns", "]", ")", "for", "i", "in", "range", "(", "len", "(", "columns", "[", "0", "]", ")", ")", "]", ")" ]
Returns a string where the tags of tokens in the sentence are organized in outlined columns.
[ "Returns", "a", "string", "where", "the", "tags", "of", "tokens", "in", "the", "sentence", "are", "organized", "in", "outlined", "columns", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1676-L1717
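The table() record above produces a plain-text tag matrix. A one-line sketch, again reusing the Sentence s built in the nltk_tree sketch (the anchor column is dropped automatically outside MBSP, as the function's final comment notes):

from textblob_de.ext._pattern.text.tree import table

print(table(s))   # header row (WORD, TAG, CHUNK, ...) followed by one aligned row per token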
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Word.tags
def tags(self): """ Yields a list of all the token tags as they appeared when the word was parsed. For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"] """ # See also. Sentence.__repr__(). ch, I,O,B = self.chunk, INSIDE+"-", OUTSIDE, BEGIN+"-" tags = [OUTSIDE for i in range(len(self.sentence.token))] for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA] if tag == WORD: tags[i] = encode_entities(self.string) elif tag == POS and self.type: tags[i] = self.type elif tag == CHUNK and ch and ch.type: tags[i] = (self == ch[0] and B or I) + ch.type elif tag == PNP and self.pnp: tags[i] = (self == self.pnp[0] and B or I) + "PNP" elif tag == REL and ch and len(ch.relations) > 0: tags[i] = ["-".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations] tags[i] = "*".join(tags[i]) elif tag == ANCHOR and ch: tags[i] = ch.anchor_id or OUTSIDE elif tag == LEMMA: tags[i] = encode_entities(self.lemma or "") elif tag in self.custom_tags: tags[i] = self.custom_tags.get(tag) or OUTSIDE return tags
python
def tags(self): """ Yields a list of all the token tags as they appeared when the word was parsed. For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"] """ # See also. Sentence.__repr__(). ch, I,O,B = self.chunk, INSIDE+"-", OUTSIDE, BEGIN+"-" tags = [OUTSIDE for i in range(len(self.sentence.token))] for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA] if tag == WORD: tags[i] = encode_entities(self.string) elif tag == POS and self.type: tags[i] = self.type elif tag == CHUNK and ch and ch.type: tags[i] = (self == ch[0] and B or I) + ch.type elif tag == PNP and self.pnp: tags[i] = (self == self.pnp[0] and B or I) + "PNP" elif tag == REL and ch and len(ch.relations) > 0: tags[i] = ["-".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations] tags[i] = "*".join(tags[i]) elif tag == ANCHOR and ch: tags[i] = ch.anchor_id or OUTSIDE elif tag == LEMMA: tags[i] = encode_entities(self.lemma or "") elif tag in self.custom_tags: tags[i] = self.custom_tags.get(tag) or OUTSIDE return tags
[ "def", "tags", "(", "self", ")", ":", "# See also. Sentence.__repr__().", "ch", ",", "I", ",", "O", ",", "B", "=", "self", ".", "chunk", ",", "INSIDE", "+", "\"-\"", ",", "OUTSIDE", ",", "BEGIN", "+", "\"-\"", "tags", "=", "[", "OUTSIDE", "for", "i", "in", "range", "(", "len", "(", "self", ".", "sentence", ".", "token", ")", ")", "]", "for", "i", ",", "tag", "in", "enumerate", "(", "self", ".", "sentence", ".", "token", ")", ":", "# Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]", "if", "tag", "==", "WORD", ":", "tags", "[", "i", "]", "=", "encode_entities", "(", "self", ".", "string", ")", "elif", "tag", "==", "POS", "and", "self", ".", "type", ":", "tags", "[", "i", "]", "=", "self", ".", "type", "elif", "tag", "==", "CHUNK", "and", "ch", "and", "ch", ".", "type", ":", "tags", "[", "i", "]", "=", "(", "self", "==", "ch", "[", "0", "]", "and", "B", "or", "I", ")", "+", "ch", ".", "type", "elif", "tag", "==", "PNP", "and", "self", ".", "pnp", ":", "tags", "[", "i", "]", "=", "(", "self", "==", "self", ".", "pnp", "[", "0", "]", "and", "B", "or", "I", ")", "+", "\"PNP\"", "elif", "tag", "==", "REL", "and", "ch", "and", "len", "(", "ch", ".", "relations", ")", ">", "0", ":", "tags", "[", "i", "]", "=", "[", "\"-\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "[", "ch", ".", "type", "]", "+", "list", "(", "reversed", "(", "r", ")", ")", "if", "x", "]", ")", "for", "r", "in", "ch", ".", "relations", "]", "tags", "[", "i", "]", "=", "\"*\"", ".", "join", "(", "tags", "[", "i", "]", ")", "elif", "tag", "==", "ANCHOR", "and", "ch", ":", "tags", "[", "i", "]", "=", "ch", ".", "anchor_id", "or", "OUTSIDE", "elif", "tag", "==", "LEMMA", ":", "tags", "[", "i", "]", "=", "encode_entities", "(", "self", ".", "lemma", "or", "\"\"", ")", "elif", "tag", "in", "self", ".", "custom_tags", ":", "tags", "[", "i", "]", "=", "self", ".", "custom_tags", ".", "get", "(", "tag", ")", "or", "OUTSIDE", "return", "tags" ]
Yields a list of all the token tags as they appeared when the word was parsed. For example: ["was", "VBD", "B-VP", "O", "VP-1", "A1", "be"]
[ "Yields", "a", "list", "of", "all", "the", "token", "tags", "as", "they", "appeared", "when", "the", "word", "was", "parsed", ".", "For", "example", ":", "[", "was", "VBD", "B", "-", "VP", "O", "VP", "-", "1", "A1", "be", "]" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L190-L215
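Word.tags (record above) reconstructs the per-token tag list a word was parsed with. A hedged sketch, reusing s from the earlier sketches; in the upstream pattern library tags is exposed as a property, so if this vendored copy keeps it as a plain method, call it as w.tags() instead.

w = s.words[1]     # "cat"
print(w.tags)      # roughly ['cat', 'NN', 'I-NP', 'O', 'O', 'O', 'cat'] for the default token order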
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Word.next
def next(self, type=None): """ Returns the next word in the sentence with the given type. """ i = self.index + 1 s = self.sentence while i < len(s): if type in (s[i].type, None): return s[i] i += 1
python
def next(self, type=None): """ Returns the next word in the sentence with the given type. """ i = self.index + 1 s = self.sentence while i < len(s): if type in (s[i].type, None): return s[i] i += 1
[ "def", "next", "(", "self", ",", "type", "=", "None", ")", ":", "i", "=", "self", ".", "index", "+", "1", "s", "=", "self", ".", "sentence", "while", "i", "<", "len", "(", "s", ")", ":", "if", "type", "in", "(", "s", "[", "i", "]", ".", "type", ",", "None", ")", ":", "return", "s", "[", "i", "]", "i", "+=", "1" ]
Returns the next word in the sentence with the given type.
[ "Returns", "the", "next", "word", "in", "the", "sentence", "with", "the", "given", "type", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L222-L230
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Word.previous
def previous(self, type=None): """ Returns the next previous word in the sentence with the given type. """ i = self.index - 1 s = self.sentence while i > 0: if type in (s[i].type, None): return s[i] i -= 1
python
def previous(self, type=None): """ Returns the next previous word in the sentence with the given type. """ i = self.index - 1 s = self.sentence while i > 0: if type in (s[i].type, None): return s[i] i -= 1
[ "def", "previous", "(", "self", ",", "type", "=", "None", ")", ":", "i", "=", "self", ".", "index", "-", "1", "s", "=", "self", ".", "sentence", "while", "i", ">", "0", ":", "if", "type", "in", "(", "s", "[", "i", "]", ".", "type", ",", "None", ")", ":", "return", "s", "[", "i", "]", "i", "-=", "1" ]
Returns the next previous word in the sentence with the given type.
[ "Returns", "the", "next", "previous", "word", "in", "the", "sentence", "with", "the", "given", "type", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L232-L240
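The Word.next() and Word.previous() records above walk the word list by part-of-speech. A small sketch, reusing s; note that, as written, previous() loops while i > 0, so the sentence-initial word (index 0) is never returned.

w  = s.words[0]                 # "the"
nn = w.next("NN")               # the following word tagged NN -> "cat", or None if absent
pw = s.words[-1].previous()     # nearest preceding word of any type -> "cat"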
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.head
def head(self): """ Yields the head of the chunk (usually, the last word in the chunk). """ if self.type == "NP" and any(w.type.startswith("NNP") for w in self): w = find(lambda w: w.type.startswith("NNP"), reversed(self)) elif self.type == "NP": # "the cat" => "cat" w = find(lambda w: w.type.startswith("NN"), reversed(self)) elif self.type == "VP": # "is watching" => "watching" w = find(lambda w: w.type.startswith("VB"), reversed(self)) elif self.type == "PP": # "from up on" => "from" w = find(lambda w: w.type.startswith(("IN", "PP")), self) elif self.type == "PNP": # "from up on the roof" => "roof" w = find(lambda w: w.type.startswith("NN"), reversed(self)) else: w = None if w is None: w = self[-1] return w
python
def head(self): """ Yields the head of the chunk (usually, the last word in the chunk). """ if self.type == "NP" and any(w.type.startswith("NNP") for w in self): w = find(lambda w: w.type.startswith("NNP"), reversed(self)) elif self.type == "NP": # "the cat" => "cat" w = find(lambda w: w.type.startswith("NN"), reversed(self)) elif self.type == "VP": # "is watching" => "watching" w = find(lambda w: w.type.startswith("VB"), reversed(self)) elif self.type == "PP": # "from up on" => "from" w = find(lambda w: w.type.startswith(("IN", "PP")), self) elif self.type == "PNP": # "from up on the roof" => "roof" w = find(lambda w: w.type.startswith("NN"), reversed(self)) else: w = None if w is None: w = self[-1] return w
[ "def", "head", "(", "self", ")", ":", "if", "self", ".", "type", "==", "\"NP\"", "and", "any", "(", "w", ".", "type", ".", "startswith", "(", "\"NNP\"", ")", "for", "w", "in", "self", ")", ":", "w", "=", "find", "(", "lambda", "w", ":", "w", ".", "type", ".", "startswith", "(", "\"NNP\"", ")", ",", "reversed", "(", "self", ")", ")", "elif", "self", ".", "type", "==", "\"NP\"", ":", "# \"the cat\" => \"cat\"", "w", "=", "find", "(", "lambda", "w", ":", "w", ".", "type", ".", "startswith", "(", "\"NN\"", ")", ",", "reversed", "(", "self", ")", ")", "elif", "self", ".", "type", "==", "\"VP\"", ":", "# \"is watching\" => \"watching\"", "w", "=", "find", "(", "lambda", "w", ":", "w", ".", "type", ".", "startswith", "(", "\"VB\"", ")", ",", "reversed", "(", "self", ")", ")", "elif", "self", ".", "type", "==", "\"PP\"", ":", "# \"from up on\" => \"from\"", "w", "=", "find", "(", "lambda", "w", ":", "w", ".", "type", ".", "startswith", "(", "(", "\"IN\"", ",", "\"PP\"", ")", ")", ",", "self", ")", "elif", "self", ".", "type", "==", "\"PNP\"", ":", "# \"from up on the roof\" => \"roof\"", "w", "=", "find", "(", "lambda", "w", ":", "w", ".", "type", ".", "startswith", "(", "\"NN\"", ")", ",", "reversed", "(", "self", ")", ")", "else", ":", "w", "=", "None", "if", "w", "is", "None", ":", "w", "=", "self", "[", "-", "1", "]", "return", "w" ]
Yields the head of the chunk (usually, the last word in the chunk).
[ "Yields", "the", "head", "of", "the", "chunk", "(", "usually", "the", "last", "word", "in", "the", "chunk", ")", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L365-L382
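Chunk.head (record above) picks the most informative word for each chunk type and falls back to the last word. A sketch, reusing s and assuming head is a property as in the upstream pattern library:

np = s.chunks[0]            # the NP "the cat"
print(np.head.string)       # "cat": for NP chunks the last NN-like word wins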
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.related
def related(self): """ Yields a list of all chunks in the sentence with the same relation id. """ return [ch for ch in self.sentence.chunks if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]
python
def related(self): """ Yields a list of all chunks in the sentence with the same relation id. """ return [ch for ch in self.sentence.chunks if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]
[ "def", "related", "(", "self", ")", ":", "return", "[", "ch", "for", "ch", "in", "self", ".", "sentence", ".", "chunks", "if", "ch", "!=", "self", "and", "intersects", "(", "unzip", "(", "0", ",", "ch", ".", "relations", ")", ",", "unzip", "(", "0", ",", "self", ".", "relations", ")", ")", "]" ]
Yields a list of all chunks in the sentence with the same relation id.
[ "Yields", "a", "list", "of", "all", "chunks", "in", "the", "sentence", "with", "the", "same", "relation", "id", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L414-L418
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.anchor_id
def anchor_id(self): """ Yields the anchor tag as parsed from the original token. Chunks that are anchors have a tag with an "A" prefix (e.g., "A1"). Chunks that are PNP attachmens (or chunks inside a PNP) have "P" (e.g., "P1"). Chunks inside a PNP can be both anchor and attachment (e.g., "P1-A2"), as in: "clawed/A1 at/P1 mice/P1-A2 in/P2 the/P2 wall/P2" """ id = "" f = lambda ch: filter(lambda k: self.sentence._anchors[k] == ch, self.sentence._anchors) if self.pnp and self.pnp.anchor: id += "-" + "-".join(f(self.pnp)) if self.anchor: id += "-" + "-".join(f(self)) if self.attachments: id += "-" + "-".join(f(self)) return id.strip("-") or None
python
def anchor_id(self): """ Yields the anchor tag as parsed from the original token. Chunks that are anchors have a tag with an "A" prefix (e.g., "A1"). Chunks that are PNP attachmens (or chunks inside a PNP) have "P" (e.g., "P1"). Chunks inside a PNP can be both anchor and attachment (e.g., "P1-A2"), as in: "clawed/A1 at/P1 mice/P1-A2 in/P2 the/P2 wall/P2" """ id = "" f = lambda ch: filter(lambda k: self.sentence._anchors[k] == ch, self.sentence._anchors) if self.pnp and self.pnp.anchor: id += "-" + "-".join(f(self.pnp)) if self.anchor: id += "-" + "-".join(f(self)) if self.attachments: id += "-" + "-".join(f(self)) return id.strip("-") or None
[ "def", "anchor_id", "(", "self", ")", ":", "id", "=", "\"\"", "f", "=", "lambda", "ch", ":", "filter", "(", "lambda", "k", ":", "self", ".", "sentence", ".", "_anchors", "[", "k", "]", "==", "ch", ",", "self", ".", "sentence", ".", "_anchors", ")", "if", "self", ".", "pnp", "and", "self", ".", "pnp", ".", "anchor", ":", "id", "+=", "\"-\"", "+", "\"-\"", ".", "join", "(", "f", "(", "self", ".", "pnp", ")", ")", "if", "self", ".", "anchor", ":", "id", "+=", "\"-\"", "+", "\"-\"", ".", "join", "(", "f", "(", "self", ")", ")", "if", "self", ".", "attachments", ":", "id", "+=", "\"-\"", "+", "\"-\"", ".", "join", "(", "f", "(", "self", ")", ")", "return", "id", ".", "strip", "(", "\"-\"", ")", "or", "None" ]
Yields the anchor tag as parsed from the original token. Chunks that are anchors have a tag with an "A" prefix (e.g., "A1"). Chunks that are PNP attachmens (or chunks inside a PNP) have "P" (e.g., "P1"). Chunks inside a PNP can be both anchor and attachment (e.g., "P1-A2"), as in: "clawed/A1 at/P1 mice/P1-A2 in/P2 the/P2 wall/P2"
[ "Yields", "the", "anchor", "tag", "as", "parsed", "from", "the", "original", "token", ".", "Chunks", "that", "are", "anchors", "have", "a", "tag", "with", "an", "A", "prefix", "(", "e", ".", "g", ".", "A1", ")", ".", "Chunks", "that", "are", "PNP", "attachmens", "(", "or", "chunks", "inside", "a", "PNP", ")", "have", "P", "(", "e", ".", "g", ".", "P1", ")", ".", "Chunks", "inside", "a", "PNP", "can", "be", "both", "anchor", "and", "attachment", "(", "e", ".", "g", ".", "P1", "-", "A2", ")", "as", "in", ":", "clawed", "/", "A1", "at", "/", "P1", "mice", "/", "P1", "-", "A2", "in", "/", "P2", "the", "/", "P2", "wall", "/", "P2" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L427-L442
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.modifiers
def modifiers(self): """ For verb phrases (VP), yields a list of the nearest adjectives and adverbs. """ if self._modifiers is None: # Iterate over all the chunks and attach modifiers to their VP-anchor. is_modifier = lambda ch: ch.type in ("ADJP", "ADVP") and ch.relation is None for chunk in self.sentence.chunks: chunk._modifiers = [] for chunk in filter(is_modifier, self.sentence.chunks): anchor = chunk.nearest("VP") if anchor: anchor._modifiers.append(chunk) return self._modifiers
python
def modifiers(self): """ For verb phrases (VP), yields a list of the nearest adjectives and adverbs. """ if self._modifiers is None: # Iterate over all the chunks and attach modifiers to their VP-anchor. is_modifier = lambda ch: ch.type in ("ADJP", "ADVP") and ch.relation is None for chunk in self.sentence.chunks: chunk._modifiers = [] for chunk in filter(is_modifier, self.sentence.chunks): anchor = chunk.nearest("VP") if anchor: anchor._modifiers.append(chunk) return self._modifiers
[ "def", "modifiers", "(", "self", ")", ":", "if", "self", ".", "_modifiers", "is", "None", ":", "# Iterate over all the chunks and attach modifiers to their VP-anchor.", "is_modifier", "=", "lambda", "ch", ":", "ch", ".", "type", "in", "(", "\"ADJP\"", ",", "\"ADVP\"", ")", "and", "ch", ".", "relation", "is", "None", "for", "chunk", "in", "self", ".", "sentence", ".", "chunks", ":", "chunk", ".", "_modifiers", "=", "[", "]", "for", "chunk", "in", "filter", "(", "is_modifier", ",", "self", ".", "sentence", ".", "chunks", ")", ":", "anchor", "=", "chunk", ".", "nearest", "(", "\"VP\"", ")", "if", "anchor", ":", "anchor", ".", "_modifiers", ".", "append", "(", "chunk", ")", "return", "self", ".", "_modifiers" ]
For verb phrases (VP), yields a list of the nearest adjectives and adverbs.
[ "For", "verb", "phrases", "(", "VP", ")", "yields", "a", "list", "of", "the", "nearest", "adjectives", "and", "adverbs", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L450-L461
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.nearest
def nearest(self, type="VP"): """ Returns the nearest chunk in the sentence with the given type. This can be used (for example) to find adverbs and adjectives related to verbs, as in: "the cat is ravenous" => is what? => "ravenous". """ candidate, d = None, len(self.sentence.chunks) if isinstance(self, PNPChunk): i = self.sentence.chunks.index(self.chunks[0]) else: i = self.sentence.chunks.index(self) for j, chunk in enumerate(self.sentence.chunks): if chunk.type.startswith(type) and abs(i-j) < d: candidate, d = chunk, abs(i-j) return candidate
python
def nearest(self, type="VP"): """ Returns the nearest chunk in the sentence with the given type. This can be used (for example) to find adverbs and adjectives related to verbs, as in: "the cat is ravenous" => is what? => "ravenous". """ candidate, d = None, len(self.sentence.chunks) if isinstance(self, PNPChunk): i = self.sentence.chunks.index(self.chunks[0]) else: i = self.sentence.chunks.index(self) for j, chunk in enumerate(self.sentence.chunks): if chunk.type.startswith(type) and abs(i-j) < d: candidate, d = chunk, abs(i-j) return candidate
[ "def", "nearest", "(", "self", ",", "type", "=", "\"VP\"", ")", ":", "candidate", ",", "d", "=", "None", ",", "len", "(", "self", ".", "sentence", ".", "chunks", ")", "if", "isinstance", "(", "self", ",", "PNPChunk", ")", ":", "i", "=", "self", ".", "sentence", ".", "chunks", ".", "index", "(", "self", ".", "chunks", "[", "0", "]", ")", "else", ":", "i", "=", "self", ".", "sentence", ".", "chunks", ".", "index", "(", "self", ")", "for", "j", ",", "chunk", "in", "enumerate", "(", "self", ".", "sentence", ".", "chunks", ")", ":", "if", "chunk", ".", "type", ".", "startswith", "(", "type", ")", "and", "abs", "(", "i", "-", "j", ")", "<", "d", ":", "candidate", ",", "d", "=", "chunk", ",", "abs", "(", "i", "-", "j", ")", "return", "candidate" ]
Returns the nearest chunk in the sentence with the given type. This can be used (for example) to find adverbs and adjectives related to verbs, as in: "the cat is ravenous" => is what? => "ravenous".
[ "Returns", "the", "nearest", "chunk", "in", "the", "sentence", "with", "the", "given", "type", ".", "This", "can", "be", "used", "(", "for", "example", ")", "to", "find", "adverbs", "and", "adjectives", "related", "to", "verbs", "as", "in", ":", "the", "cat", "is", "ravenous", "=", ">", "is", "what?", "=", ">", "ravenous", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L463-L476
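Chunk.nearest (record above) is a purely positional lookup, and it is the mechanism Chunk.modifiers relies on to attach ADJP/ADVP chunks to the closest verb phrase. A sketch, reusing s:

np = s.chunks[0]
vp = np.nearest("VP")       # the chunk closest to np whose type starts with "VP"
print(vp.type)              # "VP"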
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.next
def next(self, type=None): """ Returns the next chunk in the sentence with the given type. """ i = self.stop s = self.sentence while i < len(s): if s[i].chunk is not None and type in (s[i].chunk.type, None): return s[i].chunk i += 1
python
def next(self, type=None): """ Returns the next chunk in the sentence with the given type. """ i = self.stop s = self.sentence while i < len(s): if s[i].chunk is not None and type in (s[i].chunk.type, None): return s[i].chunk i += 1
[ "def", "next", "(", "self", ",", "type", "=", "None", ")", ":", "i", "=", "self", ".", "stop", "s", "=", "self", ".", "sentence", "while", "i", "<", "len", "(", "s", ")", ":", "if", "s", "[", "i", "]", ".", "chunk", "is", "not", "None", "and", "type", "in", "(", "s", "[", "i", "]", ".", "chunk", ".", "type", ",", "None", ")", ":", "return", "s", "[", "i", "]", ".", "chunk", "i", "+=", "1" ]
Returns the next chunk in the sentence with the given type.
[ "Returns", "the", "next", "chunk", "in", "the", "sentence", "with", "the", "given", "type", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L478-L486
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Chunk.previous
def previous(self, type=None): """ Returns the next previous chunk in the sentence with the given type. """ i = self.start - 1 s = self.sentence while i > 0: if s[i].chunk is not None and type in (s[i].chunk.type, None): return s[i].chunk i -= 1
python
def previous(self, type=None): """ Returns the next previous chunk in the sentence with the given type. """ i = self.start - 1 s = self.sentence while i > 0: if s[i].chunk is not None and type in (s[i].chunk.type, None): return s[i].chunk i -= 1
[ "def", "previous", "(", "self", ",", "type", "=", "None", ")", ":", "i", "=", "self", ".", "start", "-", "1", "s", "=", "self", ".", "sentence", "while", "i", ">", "0", ":", "if", "s", "[", "i", "]", ".", "chunk", "is", "not", "None", "and", "type", "in", "(", "s", "[", "i", "]", ".", "chunk", ".", "type", ",", "None", ")", ":", "return", "s", "[", "i", "]", ".", "chunk", "i", "-=", "1" ]
Returns the next previous chunk in the sentence with the given type.
[ "Returns", "the", "next", "previous", "chunk", "in", "the", "sentence", "with", "the", "given", "type", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L488-L496
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.append
def append(self, word, lemma=None, type=None, chunk=None, role=None, relation=None, pnp=None, anchor=None, iob=None, custom={}): """ Appends the next word to the sentence / chunk / preposition. For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1) - word : the current word, - lemma : the canonical form of the word, - type : part-of-speech tag for the word (NN, JJ, ...), - chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...), - role : the chunk's grammatical role (SBJ, OBJ, ...), - relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1), - pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional), - iob : BEGIN if the word marks the start of a new chunk, INSIDE (optional) if the word is part of the previous chunk, - custom : a dictionary of (tag, value)-items for user-defined word tags. """ self._do_word(word, lemma, type) # Append Word object. self._do_chunk(chunk, role, relation, iob) # Append Chunk, or add last word to last chunk. self._do_conjunction() self._do_relation() self._do_pnp(pnp, anchor) self._do_anchor(anchor) self._do_custom(custom)
python
def append(self, word, lemma=None, type=None, chunk=None, role=None, relation=None, pnp=None, anchor=None, iob=None, custom={}): """ Appends the next word to the sentence / chunk / preposition. For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1) - word : the current word, - lemma : the canonical form of the word, - type : part-of-speech tag for the word (NN, JJ, ...), - chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...), - role : the chunk's grammatical role (SBJ, OBJ, ...), - relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1), - pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional), - iob : BEGIN if the word marks the start of a new chunk, INSIDE (optional) if the word is part of the previous chunk, - custom : a dictionary of (tag, value)-items for user-defined word tags. """ self._do_word(word, lemma, type) # Append Word object. self._do_chunk(chunk, role, relation, iob) # Append Chunk, or add last word to last chunk. self._do_conjunction() self._do_relation() self._do_pnp(pnp, anchor) self._do_anchor(anchor) self._do_custom(custom)
[ "def", "append", "(", "self", ",", "word", ",", "lemma", "=", "None", ",", "type", "=", "None", ",", "chunk", "=", "None", ",", "role", "=", "None", ",", "relation", "=", "None", ",", "pnp", "=", "None", ",", "anchor", "=", "None", ",", "iob", "=", "None", ",", "custom", "=", "{", "}", ")", ":", "self", ".", "_do_word", "(", "word", ",", "lemma", ",", "type", ")", "# Append Word object.", "self", ".", "_do_chunk", "(", "chunk", ",", "role", ",", "relation", ",", "iob", ")", "# Append Chunk, or add last word to last chunk.", "self", ".", "_do_conjunction", "(", ")", "self", ".", "_do_relation", "(", ")", "self", ".", "_do_pnp", "(", "pnp", ",", "anchor", ")", "self", ".", "_do_anchor", "(", "anchor", ")", "self", ".", "_do_custom", "(", "custom", ")" ]
Appends the next word to the sentence / chunk / preposition. For example: Sentence.append("clawed", "claw", "VB", "VP", role=None, relation=1) - word : the current word, - lemma : the canonical form of the word, - type : part-of-speech tag for the word (NN, JJ, ...), - chunk : part-of-speech tag for the chunk this word is part of (NP, VP, ...), - role : the chunk's grammatical role (SBJ, OBJ, ...), - relation : an id shared by other related chunks (e.g., SBJ-1 <=> VP-1), - pnp : PNP if this word is in a prepositional noun phrase (B- prefix optional), - iob : BEGIN if the word marks the start of a new chunk, INSIDE (optional) if the word is part of the previous chunk, - custom : a dictionary of (tag, value)-items for user-defined word tags.
[ "Appends", "the", "next", "word", "to", "the", "sentence", "/", "chunk", "/", "preposition", ".", "For", "example", ":", "Sentence", ".", "append", "(", "clawed", "claw", "VB", "VP", "role", "=", "None", "relation", "=", "1", ")", "-", "word", ":", "the", "current", "word", "-", "lemma", ":", "the", "canonical", "form", "of", "the", "word", "-", "type", ":", "part", "-", "of", "-", "speech", "tag", "for", "the", "word", "(", "NN", "JJ", "...", ")", "-", "chunk", ":", "part", "-", "of", "-", "speech", "tag", "for", "the", "chunk", "this", "word", "is", "part", "of", "(", "NP", "VP", "...", ")", "-", "role", ":", "the", "chunk", "s", "grammatical", "role", "(", "SBJ", "OBJ", "...", ")", "-", "relation", ":", "an", "id", "shared", "by", "other", "related", "chunks", "(", "e", ".", "g", ".", "SBJ", "-", "1", "<", "=", ">", "VP", "-", "1", ")", "-", "pnp", ":", "PNP", "if", "this", "word", "is", "in", "a", "prepositional", "noun", "phrase", "(", "B", "-", "prefix", "optional", ")", "-", "iob", ":", "BEGIN", "if", "the", "word", "marks", "the", "start", "of", "a", "new", "chunk", "INSIDE", "(", "optional", ")", "if", "the", "word", "is", "part", "of", "the", "previous", "chunk", "-", "custom", ":", "a", "dictionary", "of", "(", "tag", "value", ")", "-", "items", "for", "user", "-", "defined", "word", "tags", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L708-L728
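The append() record above is the incremental builder the rest of the API relies on. Below, the docstring's own "clawed" example is expanded into a subject-verb pair whose shared relation id links the NP to the VP; this is a sketch under the same import and constructor assumptions as the earlier sketches.

from textblob_de.ext._pattern.text.tree import Sentence

s2 = Sentence()
s2.append("the",    lemma="the",  type="DT",  chunk="NP", role="SBJ", relation=1)
s2.append("cats",   lemma="cat",  type="NNS", chunk="NP", role="SBJ", relation=1)
s2.append("clawed", lemma="claw", type="VBD", chunk="VP", relation=1)
# The NP and VP chunks now share relation id 1, so Chunk.related (earlier record) links them.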
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.parse_token
def parse_token(self, token, tags=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token. """ p = { WORD: "", POS: None, IOB: None, CHUNK: None, PNP: None, REL: None, ROLE: None, ANCHOR: None, LEMMA: None } # Split the slash-formatted token into separate tags in the given order. # Decode &slash; characters (usually in words and lemmata). # Assume None for missing tags (except the word itself, which defaults to an empty string). custom = {} for k, v in izip(tags, token.split("/")): if SLASH0 in v: v = v.replace(SLASH, "/") if k not in p: custom[k] = None if v != OUTSIDE or k == WORD or k == LEMMA: # "type O negative" => "O" != OUTSIDE. (p if k not in custom else custom)[k] = v # Split IOB-prefix from the chunk tag: # B- marks the start of a new chunk, # I- marks inside of a chunk. ch = p[CHUNK] if ch is not None and ch.startswith(("B-", "I-")): p[IOB], p[CHUNK] = ch[:1], ch[2:] # B-NP # Split the role from the relation: # NP-SBJ-1 => relation id is 1 and role is SBJ, # VP-1 => relation id is 1 with no role. # Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3). if p[REL] is not None: ch, p[REL], p[ROLE] = self._parse_relation(p[REL]) # Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP). # For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP. if ch == "PP" \ and self._previous \ and self._previous[REL] == p[REL] \ and self._previous[ROLE] == p[ROLE]: ch = "NP" if p[CHUNK] is None and ch != OUTSIDE: p[CHUNK] = ch self._previous = p # Return the tags in the right order for Sentence.append(). return p[WORD], p[LEMMA], p[POS], p[CHUNK], p[ROLE], p[REL], p[PNP], p[ANCHOR], p[IOB], custom
python
def parse_token(self, token, tags=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]): """ Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token. """ p = { WORD: "", POS: None, IOB: None, CHUNK: None, PNP: None, REL: None, ROLE: None, ANCHOR: None, LEMMA: None } # Split the slash-formatted token into separate tags in the given order. # Decode &slash; characters (usually in words and lemmata). # Assume None for missing tags (except the word itself, which defaults to an empty string). custom = {} for k, v in izip(tags, token.split("/")): if SLASH0 in v: v = v.replace(SLASH, "/") if k not in p: custom[k] = None if v != OUTSIDE or k == WORD or k == LEMMA: # "type O negative" => "O" != OUTSIDE. (p if k not in custom else custom)[k] = v # Split IOB-prefix from the chunk tag: # B- marks the start of a new chunk, # I- marks inside of a chunk. ch = p[CHUNK] if ch is not None and ch.startswith(("B-", "I-")): p[IOB], p[CHUNK] = ch[:1], ch[2:] # B-NP # Split the role from the relation: # NP-SBJ-1 => relation id is 1 and role is SBJ, # VP-1 => relation id is 1 with no role. # Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3). if p[REL] is not None: ch, p[REL], p[ROLE] = self._parse_relation(p[REL]) # Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP). # For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP. if ch == "PP" \ and self._previous \ and self._previous[REL] == p[REL] \ and self._previous[ROLE] == p[ROLE]: ch = "NP" if p[CHUNK] is None and ch != OUTSIDE: p[CHUNK] = ch self._previous = p # Return the tags in the right order for Sentence.append(). return p[WORD], p[LEMMA], p[POS], p[CHUNK], p[ROLE], p[REL], p[PNP], p[ANCHOR], p[IOB], custom
[ "def", "parse_token", "(", "self", ",", "token", ",", "tags", "=", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", ":", "p", "=", "{", "WORD", ":", "\"\"", ",", "POS", ":", "None", ",", "IOB", ":", "None", ",", "CHUNK", ":", "None", ",", "PNP", ":", "None", ",", "REL", ":", "None", ",", "ROLE", ":", "None", ",", "ANCHOR", ":", "None", ",", "LEMMA", ":", "None", "}", "# Split the slash-formatted token into separate tags in the given order.", "# Decode &slash; characters (usually in words and lemmata).", "# Assume None for missing tags (except the word itself, which defaults to an empty string).", "custom", "=", "{", "}", "for", "k", ",", "v", "in", "izip", "(", "tags", ",", "token", ".", "split", "(", "\"/\"", ")", ")", ":", "if", "SLASH0", "in", "v", ":", "v", "=", "v", ".", "replace", "(", "SLASH", ",", "\"/\"", ")", "if", "k", "not", "in", "p", ":", "custom", "[", "k", "]", "=", "None", "if", "v", "!=", "OUTSIDE", "or", "k", "==", "WORD", "or", "k", "==", "LEMMA", ":", "# \"type O negative\" => \"O\" != OUTSIDE.", "(", "p", "if", "k", "not", "in", "custom", "else", "custom", ")", "[", "k", "]", "=", "v", "# Split IOB-prefix from the chunk tag:", "# B- marks the start of a new chunk, ", "# I- marks inside of a chunk.", "ch", "=", "p", "[", "CHUNK", "]", "if", "ch", "is", "not", "None", "and", "ch", ".", "startswith", "(", "(", "\"B-\"", ",", "\"I-\"", ")", ")", ":", "p", "[", "IOB", "]", ",", "p", "[", "CHUNK", "]", "=", "ch", "[", ":", "1", "]", ",", "ch", "[", "2", ":", "]", "# B-NP", "# Split the role from the relation:", "# NP-SBJ-1 => relation id is 1 and role is SBJ, ", "# VP-1 => relation id is 1 with no role.", "# Tokens may be tagged with multiple relations (e.g., NP-OBJ-1*NP-OBJ-3).", "if", "p", "[", "REL", "]", "is", "not", "None", ":", "ch", ",", "p", "[", "REL", "]", ",", "p", "[", "ROLE", "]", "=", "self", ".", "_parse_relation", "(", "p", "[", "REL", "]", ")", "# Infer a missing chunk tag from the relation tag (e.g., NP-SBJ-1 => NP).", "# For PP relation tags (e.g., PP-CLR-1), the first chunk is PP, the following chunks NP.", "if", "ch", "==", "\"PP\"", "and", "self", ".", "_previous", "and", "self", ".", "_previous", "[", "REL", "]", "==", "p", "[", "REL", "]", "and", "self", ".", "_previous", "[", "ROLE", "]", "==", "p", "[", "ROLE", "]", ":", "ch", "=", "\"NP\"", "if", "p", "[", "CHUNK", "]", "is", "None", "and", "ch", "!=", "OUTSIDE", ":", "p", "[", "CHUNK", "]", "=", "ch", "self", ".", "_previous", "=", "p", "# Return the tags in the right order for Sentence.append().", "return", "p", "[", "WORD", "]", ",", "p", "[", "LEMMA", "]", ",", "p", "[", "POS", "]", ",", "p", "[", "CHUNK", "]", ",", "p", "[", "ROLE", "]", ",", "p", "[", "REL", "]", ",", "p", "[", "PNP", "]", ",", "p", "[", "ANCHOR", "]", ",", "p", "[", "IOB", "]", ",", "custom" ]
Returns the arguments for Sentence.append() from a tagged token representation. The order in which token tags appear can be specified. The default order is (separated by slashes): - word, - part-of-speech, - (IOB-)chunk, - (IOB-)preposition, - chunk(-relation)(-role), - anchor, - lemma. Examples: The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/. Returns a (word, lemma, type, chunk, role, relation, preposition, anchor, iob, custom)-tuple, which can be passed to Sentence.append(): Sentence.append(*Sentence.parse_token("cats/NNS/NP")) The custom value is a dictionary of (tag, value)-items of unrecognized tags in the token.
[ "Returns", "the", "arguments", "for", "Sentence", ".", "append", "()", "from", "a", "tagged", "token", "representation", ".", "The", "order", "in", "which", "token", "tags", "appear", "can", "be", "specified", ".", "The", "default", "order", "is", "(", "separated", "by", "slashes", ")", ":", "-", "word", "-", "part", "-", "of", "-", "speech", "-", "(", "IOB", "-", ")", "chunk", "-", "(", "IOB", "-", ")", "preposition", "-", "chunk", "(", "-", "relation", ")", "(", "-", "role", ")", "-", "anchor", "-", "lemma", ".", "Examples", ":", "The", "/", "DT", "/", "B", "-", "NP", "/", "O", "/", "NP", "-", "SBJ", "-", "1", "/", "O", "/", "the", "cats", "/", "NNS", "/", "I", "-", "NP", "/", "O", "/", "NP", "-", "SBJ", "-", "1", "/", "O", "/", "cat", "clawed", "/", "VBD", "/", "B", "-", "VP", "/", "O", "/", "VP", "-", "1", "/", "A1", "/", "claw", "at", "/", "IN", "/", "B", "-", "PP", "/", "B", "-", "PNP", "/", "PP", "/", "P1", "/", "at", "the", "/", "DT", "/", "B", "-", "NP", "/", "I", "-", "PNP", "/", "NP", "/", "P1", "/", "the", "sofa", "/", "NN", "/", "I", "-", "NP", "/", "I", "-", "PNP", "/", "NP", "/", "P1", "/", "sofa", ".", "/", ".", "/", "O", "/", "O", "/", "O", "/", "O", "/", ".", "Returns", "a", "(", "word", "lemma", "type", "chunk", "role", "relation", "preposition", "anchor", "iob", "custom", ")", "-", "tuple", "which", "can", "be", "passed", "to", "Sentence", ".", "append", "()", ":", "Sentence", ".", "append", "(", "*", "Sentence", ".", "parse_token", "(", "cats", "/", "NNS", "/", "NP", "))", "The", "custom", "value", "is", "a", "dictionary", "of", "(", "tag", "value", ")", "-", "items", "of", "unrecognized", "tags", "in", "the", "token", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L730-L796
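parse_token() (record above) converts one slash-formatted token into the exact argument tuple append() expects, using the docstring's default tag order WORD/POS/CHUNK/PNP/REL/ANCHOR/LEMMA. A sketch continuing with s2 from the append sketch, using tokens taken from the docstring itself:

args = s2.parse_token("cats/NNS/I-NP/O/NP-SBJ-1/O/cat")
# -> (word, lemma, type, chunk, role, relation, pnp, anchor, iob, custom), ready for append()
s2.append(*args)
# Shorter tokens are allowed; tags missing at the end simply default to None:
s2.append(*s2.parse_token("cats/NNS/NP"))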
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._parse_relation
def _parse_relation(self, tag): """ Parses the chunk tag, role and relation id from the token relation tag. - VP => VP, [], [] - VP-1 => VP, [1], [None] - ADJP-PRD => ADJP, [None], [PRD] - NP-SBJ-1 => NP, [1], [SBJ] - NP-OBJ-1*NP-OBJ-2 => NP, [1,2], [OBJ,OBJ] - NP-SBJ;NP-OBJ-1 => NP, [1,1], [SBJ,OBJ] """ chunk, relation, role = None, [], [] if ";" in tag: # NP-SBJ;NP-OBJ-1 => 1 relates to both SBJ and OBJ. id = tag.split("*")[0][-2:] id = id if id.startswith("-") else "" tag = tag.replace(";", id + "*") if "*" in tag: tag = tag.split("*") else: tag = [tag] for s in tag: s = s.split("-") n = len(s) if n == 1: chunk = s[0] if n == 2: chunk = s[0]; relation.append(s[1]); role.append(None) if n >= 3: chunk = s[0]; relation.append(s[2]); role.append(s[1]) if n > 1: id = relation[-1] if id.isdigit(): relation[-1] = int(id) else: # Correct "ADJP-PRD": # (ADJP, [PRD], [None]) => (ADJP, [None], [PRD]) relation[-1], role[-1] = None, id return chunk, relation, role
python
def _parse_relation(self, tag): """ Parses the chunk tag, role and relation id from the token relation tag. - VP => VP, [], [] - VP-1 => VP, [1], [None] - ADJP-PRD => ADJP, [None], [PRD] - NP-SBJ-1 => NP, [1], [SBJ] - NP-OBJ-1*NP-OBJ-2 => NP, [1,2], [OBJ,OBJ] - NP-SBJ;NP-OBJ-1 => NP, [1,1], [SBJ,OBJ] """ chunk, relation, role = None, [], [] if ";" in tag: # NP-SBJ;NP-OBJ-1 => 1 relates to both SBJ and OBJ. id = tag.split("*")[0][-2:] id = id if id.startswith("-") else "" tag = tag.replace(";", id + "*") if "*" in tag: tag = tag.split("*") else: tag = [tag] for s in tag: s = s.split("-") n = len(s) if n == 1: chunk = s[0] if n == 2: chunk = s[0]; relation.append(s[1]); role.append(None) if n >= 3: chunk = s[0]; relation.append(s[2]); role.append(s[1]) if n > 1: id = relation[-1] if id.isdigit(): relation[-1] = int(id) else: # Correct "ADJP-PRD": # (ADJP, [PRD], [None]) => (ADJP, [None], [PRD]) relation[-1], role[-1] = None, id return chunk, relation, role
[ "def", "_parse_relation", "(", "self", ",", "tag", ")", ":", "chunk", ",", "relation", ",", "role", "=", "None", ",", "[", "]", ",", "[", "]", "if", "\";\"", "in", "tag", ":", "# NP-SBJ;NP-OBJ-1 => 1 relates to both SBJ and OBJ.", "id", "=", "tag", ".", "split", "(", "\"*\"", ")", "[", "0", "]", "[", "-", "2", ":", "]", "id", "=", "id", "if", "id", ".", "startswith", "(", "\"-\"", ")", "else", "\"\"", "tag", "=", "tag", ".", "replace", "(", "\";\"", ",", "id", "+", "\"*\"", ")", "if", "\"*\"", "in", "tag", ":", "tag", "=", "tag", ".", "split", "(", "\"*\"", ")", "else", ":", "tag", "=", "[", "tag", "]", "for", "s", "in", "tag", ":", "s", "=", "s", ".", "split", "(", "\"-\"", ")", "n", "=", "len", "(", "s", ")", "if", "n", "==", "1", ":", "chunk", "=", "s", "[", "0", "]", "if", "n", "==", "2", ":", "chunk", "=", "s", "[", "0", "]", "relation", ".", "append", "(", "s", "[", "1", "]", ")", "role", ".", "append", "(", "None", ")", "if", "n", ">=", "3", ":", "chunk", "=", "s", "[", "0", "]", "relation", ".", "append", "(", "s", "[", "2", "]", ")", "role", ".", "append", "(", "s", "[", "1", "]", ")", "if", "n", ">", "1", ":", "id", "=", "relation", "[", "-", "1", "]", "if", "id", ".", "isdigit", "(", ")", ":", "relation", "[", "-", "1", "]", "=", "int", "(", "id", ")", "else", ":", "# Correct \"ADJP-PRD\":", "# (ADJP, [PRD], [None]) => (ADJP, [None], [PRD])", "relation", "[", "-", "1", "]", ",", "role", "[", "-", "1", "]", "=", "None", ",", "id", "return", "chunk", ",", "relation", ",", "role" ]
Parses the chunk tag, role and relation id from the token relation tag. - VP => VP, [], [] - VP-1 => VP, [1], [None] - ADJP-PRD => ADJP, [None], [PRD] - NP-SBJ-1 => NP, [1], [SBJ] - NP-OBJ-1*NP-OBJ-2 => NP, [1,2], [OBJ,OBJ] - NP-SBJ;NP-OBJ-1 => NP, [1,1], [SBJ,OBJ]
[ "Parses", "the", "chunk", "tag", "role", "and", "relation", "id", "from", "the", "token", "relation", "tag", ".", "-", "VP", "=", ">", "VP", "[]", "[]", "-", "VP", "-", "1", "=", ">", "VP", "[", "1", "]", "[", "None", "]", "-", "ADJP", "-", "PRD", "=", ">", "ADJP", "[", "None", "]", "[", "PRD", "]", "-", "NP", "-", "SBJ", "-", "1", "=", ">", "NP", "[", "1", "]", "[", "SBJ", "]", "-", "NP", "-", "OBJ", "-", "1", "*", "NP", "-", "OBJ", "-", "2", "=", ">", "NP", "[", "1", "2", "]", "[", "OBJ", "OBJ", "]", "-", "NP", "-", "SBJ", ";", "NP", "-", "OBJ", "-", "1", "=", ">", "NP", "[", "1", "1", "]", "[", "SBJ", "OBJ", "]" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L798-L834
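The mapping in the docstring above is easiest to see by calling the helper directly; the sketch below assumes the vendored module is importable under the path shown in the record (textblob_de.ext._pattern.text.tree) and that _parse_relation, although private, can be invoked on an empty Sentence:

    from textblob_de.ext._pattern.text.tree import Sentence

    s = Sentence()  # empty sentence; _parse_relation only inspects the tag string
    print(s._parse_relation("VP-1"))               # ('VP', [1], [None])
    print(s._parse_relation("NP-SBJ-1"))           # ('NP', [1], ['SBJ'])
    print(s._parse_relation("NP-OBJ-1*NP-OBJ-2"))  # ('NP', [1, 2], ['OBJ', 'OBJ'])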
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_word
def _do_word(self, word, lemma=None, type=None): """ Adds a new Word to the sentence. Other Sentence._do_[tag] functions assume a new word has just been appended. """ # Improve 3rd person singular "'s" lemma to "be", e.g., as in "he's fine". if lemma == "'s" and type in ("VB", "VBZ"): lemma = "be" self.words.append(Word(self, word, lemma, type, index=len(self.words)))
python
def _do_word(self, word, lemma=None, type=None): """ Adds a new Word to the sentence. Other Sentence._do_[tag] functions assume a new word has just been appended. """ # Improve 3rd person singular "'s" lemma to "be", e.g., as in "he's fine". if lemma == "'s" and type in ("VB", "VBZ"): lemma = "be" self.words.append(Word(self, word, lemma, type, index=len(self.words)))
[ "def", "_do_word", "(", "self", ",", "word", ",", "lemma", "=", "None", ",", "type", "=", "None", ")", ":", "# Improve 3rd person singular \"'s\" lemma to \"be\", e.g., as in \"he's fine\".", "if", "lemma", "==", "\"'s\"", "and", "type", "in", "(", "\"VB\"", ",", "\"VBZ\"", ")", ":", "lemma", "=", "\"be\"", "self", ".", "words", ".", "append", "(", "Word", "(", "self", ",", "word", ",", "lemma", ",", "type", ",", "index", "=", "len", "(", "self", ".", "words", ")", ")", ")" ]
Adds a new Word to the sentence. Other Sentence._do_[tag] functions assume a new word has just been appended.
[ "Adds", "a", "new", "Word", "to", "the", "sentence", ".", "Other", "Sentence", ".", "_do_", "[", "tag", "]", "functions", "assume", "a", "new", "word", "has", "just", "been", "appended", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L836-L843
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_chunk
def _do_chunk(self, type, role=None, relation=None, iob=None): """ Adds a new Chunk to the sentence, or adds the last word to the previous chunk. The word is attached to the previous chunk if both type and relation match, and if the word's chunk tag does not start with "B-" (i.e., iob != BEGIN). Punctuation marks (or other "O" chunk tags) are not chunked. """ if (type is None or type == OUTSIDE) and \ (role is None or role == OUTSIDE) and (relation is None or relation == OUTSIDE): return if iob != BEGIN \ and self.chunks \ and self.chunks[-1].type == type \ and self._relation == (relation, role) \ and self.words[-2].chunk is not None: # "one, two" => "one" & "two" different chunks. self.chunks[-1].append(self.words[-1]) else: ch = Chunk(self, [self.words[-1]], type, role, relation) self.chunks.append(ch) self._relation = (relation, role)
python
def _do_chunk(self, type, role=None, relation=None, iob=None): """ Adds a new Chunk to the sentence, or adds the last word to the previous chunk. The word is attached to the previous chunk if both type and relation match, and if the word's chunk tag does not start with "B-" (i.e., iob != BEGIN). Punctuation marks (or other "O" chunk tags) are not chunked. """ if (type is None or type == OUTSIDE) and \ (role is None or role == OUTSIDE) and (relation is None or relation == OUTSIDE): return if iob != BEGIN \ and self.chunks \ and self.chunks[-1].type == type \ and self._relation == (relation, role) \ and self.words[-2].chunk is not None: # "one, two" => "one" & "two" different chunks. self.chunks[-1].append(self.words[-1]) else: ch = Chunk(self, [self.words[-1]], type, role, relation) self.chunks.append(ch) self._relation = (relation, role)
[ "def", "_do_chunk", "(", "self", ",", "type", ",", "role", "=", "None", ",", "relation", "=", "None", ",", "iob", "=", "None", ")", ":", "if", "(", "type", "is", "None", "or", "type", "==", "OUTSIDE", ")", "and", "(", "role", "is", "None", "or", "role", "==", "OUTSIDE", ")", "and", "(", "relation", "is", "None", "or", "relation", "==", "OUTSIDE", ")", ":", "return", "if", "iob", "!=", "BEGIN", "and", "self", ".", "chunks", "and", "self", ".", "chunks", "[", "-", "1", "]", ".", "type", "==", "type", "and", "self", ".", "_relation", "==", "(", "relation", ",", "role", ")", "and", "self", ".", "words", "[", "-", "2", "]", ".", "chunk", "is", "not", "None", ":", "# \"one, two\" => \"one\" & \"two\" different chunks.", "self", ".", "chunks", "[", "-", "1", "]", ".", "append", "(", "self", ".", "words", "[", "-", "1", "]", ")", "else", ":", "ch", "=", "Chunk", "(", "self", ",", "[", "self", ".", "words", "[", "-", "1", "]", "]", ",", "type", ",", "role", ",", "relation", ")", "self", ".", "chunks", ".", "append", "(", "ch", ")", "self", ".", "_relation", "=", "(", "relation", ",", "role", ")" ]
Adds a new Chunk to the sentence, or adds the last word to the previous chunk. The word is attached to the previous chunk if both type and relation match, and if the word's chunk tag does not start with "B-" (i.e., iob != BEGIN). Punctuation marks (or other "O" chunk tags) are not chunked.
[ "Adds", "a", "new", "Chunk", "to", "the", "sentence", "or", "adds", "the", "last", "word", "to", "the", "previous", "chunk", ".", "The", "word", "is", "attached", "to", "the", "previous", "chunk", "if", "both", "type", "and", "relation", "match", "and", "if", "the", "word", "s", "chunk", "tag", "does", "not", "start", "with", "B", "-", "(", "i", ".", "e", ".", "iob", "!", "=", "BEGIN", ")", ".", "Punctuation", "marks", "(", "or", "other", "O", "chunk", "tags", ")", "are", "not", "chunked", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L845-L863
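_do_chunk() is normally driven by Sentence.append() while a tagged string is parsed; a hedged way to observe its effect is to build a Sentence from the slash-tagged format documented in the parse_token record and inspect the resulting chunks (the import path is taken from the record's file path and is an assumption):

    from textblob_de.ext._pattern.text.tree import Sentence

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw ././O/O/O/O/.")
    s = Sentence(tagged)
    # "The cats" is grouped into one NP chunk, "clawed" into a VP; the "O"-tagged "." is not chunked.
    print([ch.string for ch in s.chunks])  # ['The cats', 'clawed']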
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_relation
def _do_relation(self): """ Attaches subjects, objects and verbs. If the previous chunk is a subject/object/verb, it is stored in Sentence.relations{}. """ if self.chunks: ch = self.chunks[-1] for relation, role in ch.relations: if role == "SBJ" or role == "OBJ": self.relations[role][relation] = ch if ch.type in ("VP",): self.relations[ch.type][ch.relation] = ch
python
def _do_relation(self): """ Attaches subjects, objects and verbs. If the previous chunk is a subject/object/verb, it is stored in Sentence.relations{}. """ if self.chunks: ch = self.chunks[-1] for relation, role in ch.relations: if role == "SBJ" or role == "OBJ": self.relations[role][relation] = ch if ch.type in ("VP",): self.relations[ch.type][ch.relation] = ch
[ "def", "_do_relation", "(", "self", ")", ":", "if", "self", ".", "chunks", ":", "ch", "=", "self", ".", "chunks", "[", "-", "1", "]", "for", "relation", ",", "role", "in", "ch", ".", "relations", ":", "if", "role", "==", "\"SBJ\"", "or", "role", "==", "\"OBJ\"", ":", "self", ".", "relations", "[", "role", "]", "[", "relation", "]", "=", "ch", "if", "ch", ".", "type", "in", "(", "\"VP\"", ",", ")", ":", "self", ".", "relations", "[", "ch", ".", "type", "]", "[", "ch", ".", "relation", "]", "=", "ch" ]
Attaches subjects, objects and verbs. If the previous chunk is a subject/object/verb, it is stored in Sentence.relations{}.
[ "Attaches", "subjects", "objects", "and", "verbs", ".", "If", "the", "previous", "chunk", "is", "a", "subject", "/", "object", "/", "verb", "it", "is", "stored", "in", "Sentence", ".", "relations", "{}", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L865-L875
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_pnp
def _do_pnp(self, pnp, anchor=None): """ Attaches prepositional noun phrases. Identifies PNP's from either the PNP tag or the P-attachment tag. This does not determine the PP-anchor, it only groups words in a PNP chunk. """ if anchor or pnp and pnp.endswith("PNP"): if anchor is not None: m = find(lambda x: x.startswith("P"), anchor) else: m = None if self.pnp \ and pnp \ and pnp != OUTSIDE \ and pnp.startswith("B-") is False \ and self.words[-2].pnp is not None: self.pnp[-1].append(self.words[-1]) elif m is not None and m == self._attachment: self.pnp[-1].append(self.words[-1]) else: ch = PNPChunk(self, [self.words[-1]], type="PNP") self.pnp.append(ch) self._attachment = m
python
def _do_pnp(self, pnp, anchor=None): """ Attaches prepositional noun phrases. Identifies PNP's from either the PNP tag or the P-attachment tag. This does not determine the PP-anchor, it only groups words in a PNP chunk. """ if anchor or pnp and pnp.endswith("PNP"): if anchor is not None: m = find(lambda x: x.startswith("P"), anchor) else: m = None if self.pnp \ and pnp \ and pnp != OUTSIDE \ and pnp.startswith("B-") is False \ and self.words[-2].pnp is not None: self.pnp[-1].append(self.words[-1]) elif m is not None and m == self._attachment: self.pnp[-1].append(self.words[-1]) else: ch = PNPChunk(self, [self.words[-1]], type="PNP") self.pnp.append(ch) self._attachment = m
[ "def", "_do_pnp", "(", "self", ",", "pnp", ",", "anchor", "=", "None", ")", ":", "if", "anchor", "or", "pnp", "and", "pnp", ".", "endswith", "(", "\"PNP\"", ")", ":", "if", "anchor", "is", "not", "None", ":", "m", "=", "find", "(", "lambda", "x", ":", "x", ".", "startswith", "(", "\"P\"", ")", ",", "anchor", ")", "else", ":", "m", "=", "None", "if", "self", ".", "pnp", "and", "pnp", "and", "pnp", "!=", "OUTSIDE", "and", "pnp", ".", "startswith", "(", "\"B-\"", ")", "is", "False", "and", "self", ".", "words", "[", "-", "2", "]", ".", "pnp", "is", "not", "None", ":", "self", ".", "pnp", "[", "-", "1", "]", ".", "append", "(", "self", ".", "words", "[", "-", "1", "]", ")", "elif", "m", "is", "not", "None", "and", "m", "==", "self", ".", "_attachment", ":", "self", ".", "pnp", "[", "-", "1", "]", ".", "append", "(", "self", ".", "words", "[", "-", "1", "]", ")", "else", ":", "ch", "=", "PNPChunk", "(", "self", ",", "[", "self", ".", "words", "[", "-", "1", "]", "]", ",", "type", "=", "\"PNP\"", ")", "self", ".", "pnp", ".", "append", "(", "ch", ")", "self", ".", "_attachment", "=", "m" ]
Attaches prepositional noun phrases. Identifies PNP's from either the PNP tag or the P-attachment tag. This does not determine the PP-anchor, it only groups words in a PNP chunk.
[ "Attaches", "prepositional", "noun", "phrases", ".", "Identifies", "PNP", "s", "from", "either", "the", "PNP", "tag", "or", "the", "P", "-", "attachment", "tag", ".", "This", "does", "not", "determine", "the", "PP", "-", "anchor", "it", "only", "groups", "words", "in", "a", "PNP", "chunk", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L877-L898
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_anchor
def _do_anchor(self, anchor): """ Collects preposition anchors and attachments in a dictionary. Once the dictionary has an entry for both the anchor and the attachment, they are linked. """ if anchor: for x in anchor.split("-"): A, P = None, None if x.startswith("A") and len(self.chunks) > 0: # anchor A, P = x, x.replace("A","P") self._anchors[A] = self.chunks[-1] if x.startswith("P") and len(self.pnp) > 0: # attachment (PNP) A, P = x.replace("P","A"), x self._anchors[P] = self.pnp[-1] if A in self._anchors and P in self._anchors and not self._anchors[P].anchor: pnp = self._anchors[P] pnp.anchor = self._anchors[A] pnp.anchor.attachments.append(pnp)
python
def _do_anchor(self, anchor): """ Collects preposition anchors and attachments in a dictionary. Once the dictionary has an entry for both the anchor and the attachment, they are linked. """ if anchor: for x in anchor.split("-"): A, P = None, None if x.startswith("A") and len(self.chunks) > 0: # anchor A, P = x, x.replace("A","P") self._anchors[A] = self.chunks[-1] if x.startswith("P") and len(self.pnp) > 0: # attachment (PNP) A, P = x.replace("P","A"), x self._anchors[P] = self.pnp[-1] if A in self._anchors and P in self._anchors and not self._anchors[P].anchor: pnp = self._anchors[P] pnp.anchor = self._anchors[A] pnp.anchor.attachments.append(pnp)
[ "def", "_do_anchor", "(", "self", ",", "anchor", ")", ":", "if", "anchor", ":", "for", "x", "in", "anchor", ".", "split", "(", "\"-\"", ")", ":", "A", ",", "P", "=", "None", ",", "None", "if", "x", ".", "startswith", "(", "\"A\"", ")", "and", "len", "(", "self", ".", "chunks", ")", ">", "0", ":", "# anchor", "A", ",", "P", "=", "x", ",", "x", ".", "replace", "(", "\"A\"", ",", "\"P\"", ")", "self", ".", "_anchors", "[", "A", "]", "=", "self", ".", "chunks", "[", "-", "1", "]", "if", "x", ".", "startswith", "(", "\"P\"", ")", "and", "len", "(", "self", ".", "pnp", ")", ">", "0", ":", "# attachment (PNP)", "A", ",", "P", "=", "x", ".", "replace", "(", "\"P\"", ",", "\"A\"", ")", ",", "x", "self", ".", "_anchors", "[", "P", "]", "=", "self", ".", "pnp", "[", "-", "1", "]", "if", "A", "in", "self", ".", "_anchors", "and", "P", "in", "self", ".", "_anchors", "and", "not", "self", ".", "_anchors", "[", "P", "]", ".", "anchor", ":", "pnp", "=", "self", ".", "_anchors", "[", "P", "]", "pnp", ".", "anchor", "=", "self", ".", "_anchors", "[", "A", "]", "pnp", ".", "anchor", ".", "attachments", ".", "append", "(", "pnp", ")" ]
Collects preposition anchors and attachments in a dictionary. Once the dictionary has an entry for both the anchor and the attachment, they are linked.
[ "Collects", "preposition", "anchors", "and", "attachments", "in", "a", "dictionary", ".", "Once", "the", "dictionary", "has", "an", "entry", "for", "both", "the", "anchor", "and", "the", "attachment", "they", "are", "linked", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L900-L916
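The anchor bookkeeping above links a PNP chunk to the chunk that anchors it (A1 on the verb, P1 on the prepositional phrase in the tagged example from the parse_token record); a small sketch, with the import path again assumed from the record:

    from textblob_de.ext._pattern.text.tree import Sentence

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at "
              "the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/.")
    s = Sentence(tagged)
    pnp = s.pnp[0]                              # PNP chunk "at the sofa"
    print(pnp.string, "->", pnp.anchor.string)  # at the sofa -> clawed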
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence._do_conjunction
def _do_conjunction(self, _and=("and", "e", "en", "et", "und", "y")): """ Attach conjunctions. CC-words like "and" and "or" between two chunks indicate a conjunction. """ w = self.words if len(w) > 2 and w[-2].type == "CC" and w[-2].chunk is None: cc = w[-2].string.lower() in _and and AND or OR ch1 = w[-3].chunk ch2 = w[-1].chunk if ch1 is not None and \ ch2 is not None: ch1.conjunctions.append(ch2, cc) ch2.conjunctions.append(ch1, cc)
python
def _do_conjunction(self, _and=("and", "e", "en", "et", "und", "y")): """ Attach conjunctions. CC-words like "and" and "or" between two chunks indicate a conjunction. """ w = self.words if len(w) > 2 and w[-2].type == "CC" and w[-2].chunk is None: cc = w[-2].string.lower() in _and and AND or OR ch1 = w[-3].chunk ch2 = w[-1].chunk if ch1 is not None and \ ch2 is not None: ch1.conjunctions.append(ch2, cc) ch2.conjunctions.append(ch1, cc)
[ "def", "_do_conjunction", "(", "self", ",", "_and", "=", "(", "\"and\"", ",", "\"e\"", ",", "\"en\"", ",", "\"et\"", ",", "\"und\"", ",", "\"y\"", ")", ")", ":", "w", "=", "self", ".", "words", "if", "len", "(", "w", ")", ">", "2", "and", "w", "[", "-", "2", "]", ".", "type", "==", "\"CC\"", "and", "w", "[", "-", "2", "]", ".", "chunk", "is", "None", ":", "cc", "=", "w", "[", "-", "2", "]", ".", "string", ".", "lower", "(", ")", "in", "_and", "and", "AND", "or", "OR", "ch1", "=", "w", "[", "-", "3", "]", ".", "chunk", "ch2", "=", "w", "[", "-", "1", "]", ".", "chunk", "if", "ch1", "is", "not", "None", "and", "ch2", "is", "not", "None", ":", "ch1", ".", "conjunctions", ".", "append", "(", "ch2", ",", "cc", ")", "ch2", ".", "conjunctions", ".", "append", "(", "ch1", ",", "cc", ")" ]
Attach conjunctions. CC-words like "and" and "or" between two chunks indicate a conjunction.
[ "Attach", "conjunctions", ".", "CC", "-", "words", "like", "and", "and", "or", "between", "two", "chunks", "indicate", "a", "conjunction", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L925-L937
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.get
def get(self, index, tag=LEMMA): """ Returns a tag for the word at the given index. The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. """ if tag == WORD: return self.words[index] if tag == LEMMA: return self.words[index].lemma if tag == POS: return self.words[index].type if tag == CHUNK: return self.words[index].chunk if tag == PNP: return self.words[index].pnp if tag == REL: ch = self.words[index].chunk; return ch and ch.relation if tag == ROLE: ch = self.words[index].chunk; return ch and ch.role if tag == ANCHOR: ch = self.words[index].pnp; return ch and ch.anchor if tag in self.words[index].custom_tags: return self.words[index].custom_tags[tag] return None
python
def get(self, index, tag=LEMMA): """ Returns a tag for the word at the given index. The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. """ if tag == WORD: return self.words[index] if tag == LEMMA: return self.words[index].lemma if tag == POS: return self.words[index].type if tag == CHUNK: return self.words[index].chunk if tag == PNP: return self.words[index].pnp if tag == REL: ch = self.words[index].chunk; return ch and ch.relation if tag == ROLE: ch = self.words[index].chunk; return ch and ch.role if tag == ANCHOR: ch = self.words[index].pnp; return ch and ch.anchor if tag in self.words[index].custom_tags: return self.words[index].custom_tags[tag] return None
[ "def", "get", "(", "self", ",", "index", ",", "tag", "=", "LEMMA", ")", ":", "if", "tag", "==", "WORD", ":", "return", "self", ".", "words", "[", "index", "]", "if", "tag", "==", "LEMMA", ":", "return", "self", ".", "words", "[", "index", "]", ".", "lemma", "if", "tag", "==", "POS", ":", "return", "self", ".", "words", "[", "index", "]", ".", "type", "if", "tag", "==", "CHUNK", ":", "return", "self", ".", "words", "[", "index", "]", ".", "chunk", "if", "tag", "==", "PNP", ":", "return", "self", ".", "words", "[", "index", "]", ".", "pnp", "if", "tag", "==", "REL", ":", "ch", "=", "self", ".", "words", "[", "index", "]", ".", "chunk", "return", "ch", "and", "ch", ".", "relation", "if", "tag", "==", "ROLE", ":", "ch", "=", "self", ".", "words", "[", "index", "]", ".", "chunk", "return", "ch", "and", "ch", ".", "role", "if", "tag", "==", "ANCHOR", ":", "ch", "=", "self", ".", "words", "[", "index", "]", ".", "pnp", "return", "ch", "and", "ch", ".", "anchor", "if", "tag", "in", "self", ".", "words", "[", "index", "]", ".", "custom_tags", ":", "return", "self", ".", "words", "[", "index", "]", ".", "custom_tags", "[", "tag", "]", "return", "None" ]
Returns a tag for the word at the given index. The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag.
[ "Returns", "a", "tag", "for", "the", "word", "at", "the", "given", "index", ".", "The", "tag", "can", "be", "WORD", "LEMMA", "POS", "CHUNK", "PNP", "RELATION", "ROLE", "ANCHOR", "or", "a", "custom", "word", "tag", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L939-L961
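A short usage sketch for Sentence.get(); the tagged input reuses the token format documented in the parse_token record, and the import path (derived from the record's file path) is an assumption:

    from textblob_de.ext._pattern.text.tree import Sentence, WORD, POS, LEMMA, CHUNK

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw ././O/O/O/O/.")
    s = Sentence(tagged)
    print(s.get(1, tag=WORD))   # the Word object for "cats"
    print(s.get(1, tag=POS))    # 'NNS'
    print(s.get(1, tag=LEMMA))  # 'cat'
    print(s.get(1, tag=CHUNK))  # the NP chunk spanning "The cats"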
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.loop
def loop(self, *tags): """ Iterates over the tags in the entire Sentence, For example, Sentence.loop(POS, LEMMA) yields tuples of the part-of-speech tags and lemmata. Possible tags: WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. Any order or combination of tags can be supplied. """ for i in range(len(self.words)): yield tuple([self.get(i, tag=tag) for tag in tags])
python
def loop(self, *tags): """ Iterates over the tags in the entire Sentence, For example, Sentence.loop(POS, LEMMA) yields tuples of the part-of-speech tags and lemmata. Possible tags: WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. Any order or combination of tags can be supplied. """ for i in range(len(self.words)): yield tuple([self.get(i, tag=tag) for tag in tags])
[ "def", "loop", "(", "self", ",", "*", "tags", ")", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "words", ")", ")", ":", "yield", "tuple", "(", "[", "self", ".", "get", "(", "i", ",", "tag", "=", "tag", ")", "for", "tag", "in", "tags", "]", ")" ]
Iterates over the tags in the entire Sentence. For example, Sentence.loop(POS, LEMMA) yields tuples of the part-of-speech tags and lemmata. Possible tags: WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. Any order or combination of tags can be supplied.
[ "Iterates", "over", "the", "tags", "in", "the", "entire", "Sentence", "For", "example", "Sentence", ".", "loop", "(", "POS", "LEMMA", ")", "yields", "tuples", "of", "the", "part", "-", "of", "-", "speech", "tags", "and", "lemmata", ".", "Possible", "tags", ":", "WORD", "LEMMA", "POS", "CHUNK", "PNP", "RELATION", "ROLE", "ANCHOR", "or", "a", "custom", "word", "tag", ".", "Any", "order", "or", "combination", "of", "tags", "can", "be", "supplied", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L963-L970
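Sentence.loop() pairs naturally with the tag constants exported by the same module; a minimal sketch (same import-path assumptions as above):

    from textblob_de.ext._pattern.text.tree import Sentence, WORD, POS

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw ././O/O/O/O/.")
    for word, pos in Sentence(tagged).loop(WORD, POS):
        print(word.string, pos)  # The DT / cats NNS / clawed VBD / . .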
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.indexof
def indexof(self, value, tag=WORD): """ Returns the indices of tokens in the sentence where the given token tag equals the string. The string can contain a wildcard "*" at the end (this way "NN*" will match "NN" and "NNS"). The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. For example: Sentence.indexof("VP", tag=CHUNK) returns the indices of all the words that are part of a VP chunk. """ match = lambda a, b: a.endswith("*") and b.startswith(a[:-1]) or a==b indices = [] for i in range(len(self.words)): if match(value, unicode(self.get(i, tag))): indices.append(i) return indices
python
def indexof(self, value, tag=WORD): """ Returns the indices of tokens in the sentence where the given token tag equals the string. The string can contain a wildcard "*" at the end (this way "NN*" will match "NN" and "NNS"). The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. For example: Sentence.indexof("VP", tag=CHUNK) returns the indices of all the words that are part of a VP chunk. """ match = lambda a, b: a.endswith("*") and b.startswith(a[:-1]) or a==b indices = [] for i in range(len(self.words)): if match(value, unicode(self.get(i, tag))): indices.append(i) return indices
[ "def", "indexof", "(", "self", ",", "value", ",", "tag", "=", "WORD", ")", ":", "match", "=", "lambda", "a", ",", "b", ":", "a", ".", "endswith", "(", "\"*\"", ")", "and", "b", ".", "startswith", "(", "a", "[", ":", "-", "1", "]", ")", "or", "a", "==", "b", "indices", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "self", ".", "words", ")", ")", ":", "if", "match", "(", "value", ",", "unicode", "(", "self", ".", "get", "(", "i", ",", "tag", ")", ")", ")", ":", "indices", ".", "append", "(", "i", ")", "return", "indices" ]
Returns the indices of tokens in the sentence where the given token tag equals the string. The string can contain a wildcard "*" at the end (this way "NN*" will match "NN" and "NNS"). The tag can be WORD, LEMMA, POS, CHUNK, PNP, RELATION, ROLE, ANCHOR or a custom word tag. For example: Sentence.indexof("VP", tag=CHUNK) returns the indices of all the words that are part of a VP chunk.
[ "Returns", "the", "indices", "of", "tokens", "in", "the", "sentence", "where", "the", "given", "token", "tag", "equals", "the", "string", ".", "The", "string", "can", "contain", "a", "wildcard", "*", "at", "the", "end", "(", "this", "way", "NN", "*", "will", "match", "NN", "and", "NNS", ")", ".", "The", "tag", "can", "be", "WORD", "LEMMA", "POS", "CHUNK", "PNP", "RELATION", "ROLE", "ANCHOR", "or", "a", "custom", "word", "tag", ".", "For", "example", ":", "Sentence", ".", "indexof", "(", "VP", "tag", "=", "CHUNK", ")", "returns", "the", "indices", "of", "all", "the", "words", "that", "are", "part", "of", "a", "VP", "chunk", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L972-L984
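The trailing-wildcard matching is the part worth illustrating; a hedged sketch using the full tagged example from the parse_token record:

    from textblob_de.ext._pattern.text.tree import Sentence, POS

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at "
              "the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/.")
    s = Sentence(tagged)
    print(s.indexof("NN*", tag=POS))  # [1, 5]: "cats" (NNS) and "sofa" (NN)
    print(s.indexof("sofa"))          # [5]; tag defaults to WORD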
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.slice
def slice(self, start, stop): """ Returns a portion of the sentence from word start index to word stop index. The returned slice is a subclass of Sentence and a deep copy. """ s = Slice(token=self.token, language=self.language) for i, word in enumerate(self.words[start:stop]): # The easiest way to copy (part of) a sentence # is by unpacking all of the token tags and passing them to Sentence.append(). p0 = word.string # WORD p1 = word.lemma # LEMMA p2 = word.type # POS p3 = word.chunk is not None and word.chunk.type or None # CHUNK p4 = word.pnp is not None and "PNP" or None # PNP p5 = word.chunk is not None and unzip(0, word.chunk.relations) or None # REL p6 = word.chunk is not None and unzip(1, word.chunk.relations) or None # ROLE p7 = word.chunk and word.chunk.anchor_id or None # ANCHOR p8 = word.chunk and word.chunk.start == start+i and BEGIN or None # IOB p9 = word.custom_tags # User-defined tags. # If the given range does not contain the chunk head, remove the chunk tags. if word.chunk is not None and (word.chunk.stop > stop): p3, p4, p5, p6, p7, p8 = None, None, None, None, None, None # If the word starts the preposition, add the IOB B-prefix (i.e., B-PNP). if word.pnp is not None and word.pnp.start == start+i: p4 = BEGIN+"-"+"PNP" # If the given range does not contain the entire PNP, remove the PNP tags. # The range must contain the entire PNP, # since it starts with the PP and ends with the chunk head (and is meaningless without these). if word.pnp is not None and (word.pnp.start < start or word.chunk.stop > stop): p4, p7 = None, None s.append(word=p0, lemma=p1, type=p2, chunk=p3, pnp=p4, relation=p5, role=p6, anchor=p7, iob=p8, custom=p9) s.parent = self s._start = start return s
python
def slice(self, start, stop): """ Returns a portion of the sentence from word start index to word stop index. The returned slice is a subclass of Sentence and a deep copy. """ s = Slice(token=self.token, language=self.language) for i, word in enumerate(self.words[start:stop]): # The easiest way to copy (part of) a sentence # is by unpacking all of the token tags and passing them to Sentence.append(). p0 = word.string # WORD p1 = word.lemma # LEMMA p2 = word.type # POS p3 = word.chunk is not None and word.chunk.type or None # CHUNK p4 = word.pnp is not None and "PNP" or None # PNP p5 = word.chunk is not None and unzip(0, word.chunk.relations) or None # REL p6 = word.chunk is not None and unzip(1, word.chunk.relations) or None # ROLE p7 = word.chunk and word.chunk.anchor_id or None # ANCHOR p8 = word.chunk and word.chunk.start == start+i and BEGIN or None # IOB p9 = word.custom_tags # User-defined tags. # If the given range does not contain the chunk head, remove the chunk tags. if word.chunk is not None and (word.chunk.stop > stop): p3, p4, p5, p6, p7, p8 = None, None, None, None, None, None # If the word starts the preposition, add the IOB B-prefix (i.e., B-PNP). if word.pnp is not None and word.pnp.start == start+i: p4 = BEGIN+"-"+"PNP" # If the given range does not contain the entire PNP, remove the PNP tags. # The range must contain the entire PNP, # since it starts with the PP and ends with the chunk head (and is meaningless without these). if word.pnp is not None and (word.pnp.start < start or word.chunk.stop > stop): p4, p7 = None, None s.append(word=p0, lemma=p1, type=p2, chunk=p3, pnp=p4, relation=p5, role=p6, anchor=p7, iob=p8, custom=p9) s.parent = self s._start = start return s
[ "def", "slice", "(", "self", ",", "start", ",", "stop", ")", ":", "s", "=", "Slice", "(", "token", "=", "self", ".", "token", ",", "language", "=", "self", ".", "language", ")", "for", "i", ",", "word", "in", "enumerate", "(", "self", ".", "words", "[", "start", ":", "stop", "]", ")", ":", "# The easiest way to copy (part of) a sentence", "# is by unpacking all of the token tags and passing them to Sentence.append().", "p0", "=", "word", ".", "string", "# WORD", "p1", "=", "word", ".", "lemma", "# LEMMA", "p2", "=", "word", ".", "type", "# POS", "p3", "=", "word", ".", "chunk", "is", "not", "None", "and", "word", ".", "chunk", ".", "type", "or", "None", "# CHUNK", "p4", "=", "word", ".", "pnp", "is", "not", "None", "and", "\"PNP\"", "or", "None", "# PNP", "p5", "=", "word", ".", "chunk", "is", "not", "None", "and", "unzip", "(", "0", ",", "word", ".", "chunk", ".", "relations", ")", "or", "None", "# REL ", "p6", "=", "word", ".", "chunk", "is", "not", "None", "and", "unzip", "(", "1", ",", "word", ".", "chunk", ".", "relations", ")", "or", "None", "# ROLE", "p7", "=", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "anchor_id", "or", "None", "# ANCHOR", "p8", "=", "word", ".", "chunk", "and", "word", ".", "chunk", ".", "start", "==", "start", "+", "i", "and", "BEGIN", "or", "None", "# IOB", "p9", "=", "word", ".", "custom_tags", "# User-defined tags.", "# If the given range does not contain the chunk head, remove the chunk tags.", "if", "word", ".", "chunk", "is", "not", "None", "and", "(", "word", ".", "chunk", ".", "stop", ">", "stop", ")", ":", "p3", ",", "p4", ",", "p5", ",", "p6", ",", "p7", ",", "p8", "=", "None", ",", "None", ",", "None", ",", "None", ",", "None", ",", "None", "# If the word starts the preposition, add the IOB B-prefix (i.e., B-PNP).", "if", "word", ".", "pnp", "is", "not", "None", "and", "word", ".", "pnp", ".", "start", "==", "start", "+", "i", ":", "p4", "=", "BEGIN", "+", "\"-\"", "+", "\"PNP\"", "# If the given range does not contain the entire PNP, remove the PNP tags.", "# The range must contain the entire PNP, ", "# since it starts with the PP and ends with the chunk head (and is meaningless without these).", "if", "word", ".", "pnp", "is", "not", "None", "and", "(", "word", ".", "pnp", ".", "start", "<", "start", "or", "word", ".", "chunk", ".", "stop", ">", "stop", ")", ":", "p4", ",", "p7", "=", "None", ",", "None", "s", ".", "append", "(", "word", "=", "p0", ",", "lemma", "=", "p1", ",", "type", "=", "p2", ",", "chunk", "=", "p3", ",", "pnp", "=", "p4", ",", "relation", "=", "p5", ",", "role", "=", "p6", ",", "anchor", "=", "p7", ",", "iob", "=", "p8", ",", "custom", "=", "p9", ")", "s", ".", "parent", "=", "self", "s", ".", "_start", "=", "start", "return", "s" ]
Returns a portion of the sentence from word start index to word stop index. The returned slice is a subclass of Sentence and a deep copy.
[ "Returns", "a", "portion", "of", "the", "sentence", "from", "word", "start", "index", "to", "word", "stop", "index", ".", "The", "returned", "slice", "is", "a", "subclass", "of", "Sentence", "and", "a", "deep", "copy", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L986-L1018
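A sketch of slicing out the prepositional part of the example sentence; word indices follow the order of the tagged string (import path assumed from the record):

    from textblob_de.ext._pattern.text.tree import Sentence

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at "
              "the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/.")
    s = Sentence(tagged)
    part = s.slice(3, 6)     # words 3..5 -> "at the sofa"
    print(part.string)       # at the sofa
    print(part.parent is s)  # True; a Slice keeps a reference to its source sentence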
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.constituents
def constituents(self, pnp=False): """ Returns an in-order list of mixed Chunk and Word objects. With pnp=True, also contains PNPChunk objects whenever possible. """ a = [] for word in self.words: if pnp and word.pnp is not None: if len(a) == 0 or a[-1] != word.pnp: a.append(word.pnp) elif word.chunk is not None: if len(a) == 0 or a[-1] != word.chunk: a.append(word.chunk) else: a.append(word) return a
python
def constituents(self, pnp=False): """ Returns an in-order list of mixed Chunk and Word objects. With pnp=True, also contains PNPChunk objects whenever possible. """ a = [] for word in self.words: if pnp and word.pnp is not None: if len(a) == 0 or a[-1] != word.pnp: a.append(word.pnp) elif word.chunk is not None: if len(a) == 0 or a[-1] != word.chunk: a.append(word.chunk) else: a.append(word) return a
[ "def", "constituents", "(", "self", ",", "pnp", "=", "False", ")", ":", "a", "=", "[", "]", "for", "word", "in", "self", ".", "words", ":", "if", "pnp", "and", "word", ".", "pnp", "is", "not", "None", ":", "if", "len", "(", "a", ")", "==", "0", "or", "a", "[", "-", "1", "]", "!=", "word", ".", "pnp", ":", "a", ".", "append", "(", "word", ".", "pnp", ")", "elif", "word", ".", "chunk", "is", "not", "None", ":", "if", "len", "(", "a", ")", "==", "0", "or", "a", "[", "-", "1", "]", "!=", "word", ".", "chunk", ":", "a", ".", "append", "(", "word", ".", "chunk", ")", "else", ":", "a", ".", "append", "(", "word", ")", "return", "a" ]
Returns an in-order list of mixed Chunk and Word objects. With pnp=True, also contains PNPChunk objects whenever possible.
[ "Returns", "an", "in", "-", "order", "list", "of", "mixed", "Chunk", "and", "Word", "objects", ".", "With", "pnp", "=", "True", "also", "contains", "PNPChunk", "objects", "whenever", "possible", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1026-L1040
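constituents(pnp=True) interleaves chunks, PNP chunks and loose words in sentence order; a brief sketch under the same import-path assumption:

    from textblob_de.ext._pattern.text.tree import Sentence

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw at/IN/B-PP/B-PNP/PP/P1/at "
              "the/DT/B-NP/I-PNP/NP/P1/the sofa/NN/I-NP/I-PNP/NP/P1/sofa ././O/O/O/O/.")
    for c in Sentence(tagged).constituents(pnp=True):
        print(type(c).__name__, repr(c.string))
    # Chunk 'The cats' / Chunk 'clawed' / PNPChunk 'at the sofa' / Word '.'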
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Sentence.from_xml
def from_xml(cls, xml): """ Returns a new Text from the given XML string. """ s = parse_string(xml) return Sentence(s.split("\n")[0], token=s.tags, language=s.language)
python
def from_xml(cls, xml): """ Returns a new Text from the given XML string. """ s = parse_string(xml) return Sentence(s.split("\n")[0], token=s.tags, language=s.language)
[ "def", "from_xml", "(", "cls", ",", "xml", ")", ":", "s", "=", "parse_string", "(", "xml", ")", "return", "Sentence", "(", "s", ".", "split", "(", "\"\\n\"", ")", "[", "0", "]", ",", "token", "=", "s", ".", "tags", ",", "language", "=", "s", ".", "language", ")" ]
Returns a new Text from the given XML string.
[ "Returns", "a", "new", "Text", "from", "the", "given", "XML", "string", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1064-L1068
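A possible round trip through the XML serialisation; this assumes the companion XML helpers bundled with the vendored tree module (parse_string and the Sentence.xml property) are available alongside it:

    from textblob_de.ext._pattern.text.tree import Sentence

    tagged = ("The/DT/B-NP/O/NP-SBJ-1/O/the cats/NNS/I-NP/O/NP-SBJ-1/O/cat "
              "clawed/VBD/B-VP/O/VP-1/A1/claw ././O/O/O/O/.")
    s1 = Sentence(tagged)
    s2 = Sentence.from_xml(s1.xml)  # serialise, then rebuild from the XML string
    print(s2.string == s1.string)   # True, if the round trip preserves the tokens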
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
Text.xml
def xml(self): """ Yields the sentence as an XML-formatted string (plain bytestring, UTF-8 encoded). All the sentences in the XML are wrapped in a <text> element. """ xml = [] xml.append('<?xml version="1.0" encoding="%s"?>' % XML_ENCODING.get(self.encoding, self.encoding)) xml.append("<%s>" % XML_TEXT) xml.extend([sentence.xml for sentence in self]) xml.append("</%s>" % XML_TEXT) return "\n".join(xml)
python
def xml(self): """ Yields the sentence as an XML-formatted string (plain bytestring, UTF-8 encoded). All the sentences in the XML are wrapped in a <text> element. """ xml = [] xml.append('<?xml version="1.0" encoding="%s"?>' % XML_ENCODING.get(self.encoding, self.encoding)) xml.append("<%s>" % XML_TEXT) xml.extend([sentence.xml for sentence in self]) xml.append("</%s>" % XML_TEXT) return "\n".join(xml)
[ "def", "xml", "(", "self", ")", ":", "xml", "=", "[", "]", "xml", ".", "append", "(", "'<?xml version=\"1.0\" encoding=\"%s\"?>'", "%", "XML_ENCODING", ".", "get", "(", "self", ".", "encoding", ",", "self", ".", "encoding", ")", ")", "xml", ".", "append", "(", "\"<%s>\"", "%", "XML_TEXT", ")", "xml", ".", "extend", "(", "[", "sentence", ".", "xml", "for", "sentence", "in", "self", "]", ")", "xml", ".", "append", "(", "\"</%s>\"", "%", "XML_TEXT", ")", "return", "\"\\n\"", ".", "join", "(", "xml", ")" ]
Yields the sentence as an XML-formatted string (plain bytestring, UTF-8 encoded). All the sentences in the XML are wrapped in a <text> element.
[ "Yields", "the", "sentence", "as", "an", "XML", "-", "formatted", "string", "(", "plain", "bytestring", "UTF", "-", "8", "encoded", ")", ".", "All", "the", "sentences", "in", "the", "XML", "are", "wrapped", "in", "a", "<text", ">", "element", "." ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1186-L1195
markuskiller/textblob-de
prepare_docs.py
get_credits
def get_credits(): """Extract credits from `AUTHORS.rst`""" credits = read(os.path.join(_HERE, "AUTHORS.rst")).split("\n") from_index = credits.index("Active Contributors") credits = "\n".join(credits[from_index + 2:]) return credits
python
def get_credits(): """Extract credits from `AUTHORS.rst`""" credits = read(os.path.join(_HERE, "AUTHORS.rst")).split("\n") from_index = credits.index("Active Contributors") credits = "\n".join(credits[from_index + 2:]) return credits
[ "def", "get_credits", "(", ")", ":", "credits", "=", "read", "(", "os", ".", "path", ".", "join", "(", "_HERE", ",", "\"AUTHORS.rst\"", ")", ")", ".", "split", "(", "\"\\n\"", ")", "from_index", "=", "credits", ".", "index", "(", "\"Active Contributors\"", ")", "credits", "=", "\"\\n\"", ".", "join", "(", "credits", "[", "from_index", "+", "2", ":", "]", ")", "return", "credits" ]
Extract credits from `AUTHORS.rst`
[ "Extract", "credits", "from", "AUTHORS", ".", "rst" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/prepare_docs.py#L30-L35
markuskiller/textblob-de
prepare_docs.py
rst2markdown_github
def rst2markdown_github(path_to_rst, path_to_md, pandoc="pandoc"): """ Converts ``rst`` to **markdown_github**, using :program:`pandoc` **Input** * ``FILE.rst`` **Output** * ``FILE.md`` """ _proc = subprocess.Popen([pandoc, "-f", "rst", "-t", "markdown_github", #"-o", path_to_md, path_to_rst], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print("Converting README.rst to markdown_github, " "using 'pandoc' ...") _stdout, _stderr = _proc.communicate() with _open(path_to_md, "w", encoding="utf-8") as _md: _skip_newline = False for line in _stdout.decode('utf-8').split(os.linesep): line = re.sub("``` sourceCode", "``` python", line) if line.startswith("[!["): _md.write(line) _md.write("\n") if not line.startswith(("[![LICENSE")): _skip_newline = True elif _skip_newline and line == "": _skip_newline = False continue else: _md.write(line) _md.write("\n") if _stderr: print("pandoc.exe STDERR: ", _stderr) if os.path.isfile(path_to_md) and os.stat(path_to_md).st_size > 0: print("README.rst converted and saved as: {}".format(path_to_md))
python
def rst2markdown_github(path_to_rst, path_to_md, pandoc="pandoc"): """ Converts ``rst`` to **markdown_github**, using :program:`pandoc` **Input** * ``FILE.rst`` **Output** * ``FILE.md`` """ _proc = subprocess.Popen([pandoc, "-f", "rst", "-t", "markdown_github", #"-o", path_to_md, path_to_rst], stdout=subprocess.PIPE, stderr=subprocess.PIPE) print("Converting README.rst to markdown_github, " "using 'pandoc' ...") _stdout, _stderr = _proc.communicate() with _open(path_to_md, "w", encoding="utf-8") as _md: _skip_newline = False for line in _stdout.decode('utf-8').split(os.linesep): line = re.sub("``` sourceCode", "``` python", line) if line.startswith("[!["): _md.write(line) _md.write("\n") if not line.startswith(("[![LICENSE")): _skip_newline = True elif _skip_newline and line == "": _skip_newline = False continue else: _md.write(line) _md.write("\n") if _stderr: print("pandoc.exe STDERR: ", _stderr) if os.path.isfile(path_to_md) and os.stat(path_to_md).st_size > 0: print("README.rst converted and saved as: {}".format(path_to_md))
[ "def", "rst2markdown_github", "(", "path_to_rst", ",", "path_to_md", ",", "pandoc", "=", "\"pandoc\"", ")", ":", "_proc", "=", "subprocess", ".", "Popen", "(", "[", "pandoc", ",", "\"-f\"", ",", "\"rst\"", ",", "\"-t\"", ",", "\"markdown_github\"", ",", "#\"-o\", path_to_md,", "path_to_rst", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "print", "(", "\"Converting README.rst to markdown_github, \"", "\"using 'pandoc' ...\"", ")", "_stdout", ",", "_stderr", "=", "_proc", ".", "communicate", "(", ")", "with", "_open", "(", "path_to_md", ",", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "_md", ":", "_skip_newline", "=", "False", "for", "line", "in", "_stdout", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "os", ".", "linesep", ")", ":", "line", "=", "re", ".", "sub", "(", "\"``` sourceCode\"", ",", "\"``` python\"", ",", "line", ")", "if", "line", ".", "startswith", "(", "\"[![\"", ")", ":", "_md", ".", "write", "(", "line", ")", "_md", ".", "write", "(", "\"\\n\"", ")", "if", "not", "line", ".", "startswith", "(", "(", "\"[![LICENSE\"", ")", ")", ":", "_skip_newline", "=", "True", "elif", "_skip_newline", "and", "line", "==", "\"\"", ":", "_skip_newline", "=", "False", "continue", "else", ":", "_md", ".", "write", "(", "line", ")", "_md", ".", "write", "(", "\"\\n\"", ")", "if", "_stderr", ":", "print", "(", "\"pandoc.exe STDERR: \"", ",", "_stderr", ")", "if", "os", ".", "path", ".", "isfile", "(", "path_to_md", ")", "and", "os", ".", "stat", "(", "path_to_md", ")", ".", "st_size", ">", "0", ":", "print", "(", "\"README.rst converted and saved as: {}\"", ".", "format", "(", "path_to_md", ")", ")" ]
Converts ``rst`` to **markdown_github**, using :program:`pandoc` **Input** * ``FILE.rst`` **Output** * ``FILE.md``
[ "Converts", "rst", "to", "**", "markdown_github", "**", "using", ":", "program", ":", "pandoc" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/prepare_docs.py#L38-L79
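A hedged invocation sketch: it assumes pandoc is installed and on PATH, and that prepare_docs.py is importable from the project root (both are assumptions, not part of the record):

    from prepare_docs import rst2markdown_github

    # Convert the project README to GitHub-flavoured Markdown via pandoc.
    rst2markdown_github("README.rst", "README.md", pandoc="pandoc")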
markuskiller/textblob-de
prepare_docs.py
console_help2rst
def console_help2rst(cwd, help_cmd, path_to_rst, rst_title, format_as_code=False): """ Extract HELP information from ``<program> -h | --help`` message **Input** * ``$ <program> -h | --help`` * ``$ cd <cwd> && make help`` **Output** * ``docs/src/console_help_xy.rst`` """ generated_time_str = """ :: generated: {0} """.format(time.strftime("%d %B %Y - %H:%M")) with _open(path_to_rst, "w", encoding='utf-8') as f: print("File", f) print("cwd", cwd) print("help_cmd", help_cmd) os.chdir(cwd) _proc = subprocess.Popen( help_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True,) help_msg = _proc.stdout.readlines() f.write(get_rst_title( rst_title, "-", overline=True)) f.write(generated_time_str) if "README" in path_to_rst: help_msg = "".join(help_msg[10:]) #help_msg = PACKAGE_DOCSTRING + help_msg for line in help_msg: # exclude directory walk messages of 'make' if line.strip().startswith("make[1]:"): print("skipped line: {}".format(line)) # exclude warning messages elif line.strip().startswith("\x1b[1m"): print("skipped line: {}".format(line)) # exclude warning messages on Windows (without ``colorama``) elif line.strip().startswith("Using fallback version of '"): print("skipped line: {}".format(line)) else: # correctly indent tips in 'make help' if line.strip().startswith("-->"): f.write(3 * "\t") if format_as_code: f.write("\t" + line.strip()) f.write("\n") else: f.write(line) f.write("\n") if "README" in path_to_rst: f.write(get_rst_title("Credits", "^")) f.write(get_credits()) print("\ncmd:{} in dir:{} --> RST generated:\n\t{}\n\n".format( help_cmd, cwd, path_to_rst))
python
def console_help2rst(cwd, help_cmd, path_to_rst, rst_title, format_as_code=False): """ Extract HELP information from ``<program> -h | --help`` message **Input** * ``$ <program> -h | --help`` * ``$ cd <cwd> && make help`` **Output** * ``docs/src/console_help_xy.rst`` """ generated_time_str = """ :: generated: {0} """.format(time.strftime("%d %B %Y - %H:%M")) with _open(path_to_rst, "w", encoding='utf-8') as f: print("File", f) print("cwd", cwd) print("help_cmd", help_cmd) os.chdir(cwd) _proc = subprocess.Popen( help_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True,) help_msg = _proc.stdout.readlines() f.write(get_rst_title( rst_title, "-", overline=True)) f.write(generated_time_str) if "README" in path_to_rst: help_msg = "".join(help_msg[10:]) #help_msg = PACKAGE_DOCSTRING + help_msg for line in help_msg: # exclude directory walk messages of 'make' if line.strip().startswith("make[1]:"): print("skipped line: {}".format(line)) # exclude warning messages elif line.strip().startswith("\x1b[1m"): print("skipped line: {}".format(line)) # exclude warning messages on Windows (without ``colorama``) elif line.strip().startswith("Using fallback version of '"): print("skipped line: {}".format(line)) else: # correctly indent tips in 'make help' if line.strip().startswith("-->"): f.write(3 * "\t") if format_as_code: f.write("\t" + line.strip()) f.write("\n") else: f.write(line) f.write("\n") if "README" in path_to_rst: f.write(get_rst_title("Credits", "^")) f.write(get_credits()) print("\ncmd:{} in dir:{} --> RST generated:\n\t{}\n\n".format( help_cmd, cwd, path_to_rst))
[ "def", "console_help2rst", "(", "cwd", ",", "help_cmd", ",", "path_to_rst", ",", "rst_title", ",", "format_as_code", "=", "False", ")", ":", "generated_time_str", "=", "\"\"\"\n\n ::\n\n generated: {0}\n\n\"\"\"", ".", "format", "(", "time", ".", "strftime", "(", "\"%d %B %Y - %H:%M\"", ")", ")", "with", "_open", "(", "path_to_rst", ",", "\"w\"", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "print", "(", "\"File\"", ",", "f", ")", "print", "(", "\"cwd\"", ",", "cwd", ")", "print", "(", "\"help_cmd\"", ",", "help_cmd", ")", "os", ".", "chdir", "(", "cwd", ")", "_proc", "=", "subprocess", ".", "Popen", "(", "help_cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ",", ")", "help_msg", "=", "_proc", ".", "stdout", ".", "readlines", "(", ")", "f", ".", "write", "(", "get_rst_title", "(", "rst_title", ",", "\"-\"", ",", "overline", "=", "True", ")", ")", "f", ".", "write", "(", "generated_time_str", ")", "if", "\"README\"", "in", "path_to_rst", ":", "help_msg", "=", "\"\"", ".", "join", "(", "help_msg", "[", "10", ":", "]", ")", "#help_msg = PACKAGE_DOCSTRING + help_msg", "for", "line", "in", "help_msg", ":", "# exclude directory walk messages of 'make'", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"make[1]:\"", ")", ":", "print", "(", "\"skipped line: {}\"", ".", "format", "(", "line", ")", ")", "# exclude warning messages", "elif", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"\\x1b[1m\"", ")", ":", "print", "(", "\"skipped line: {}\"", ".", "format", "(", "line", ")", ")", "# exclude warning messages on Windows (without ``colorama``)", "elif", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"Using fallback version of '\"", ")", ":", "print", "(", "\"skipped line: {}\"", ".", "format", "(", "line", ")", ")", "else", ":", "# correctly indent tips in 'make help'", "if", "line", ".", "strip", "(", ")", ".", "startswith", "(", "\"-->\"", ")", ":", "f", ".", "write", "(", "3", "*", "\"\\t\"", ")", "if", "format_as_code", ":", "f", ".", "write", "(", "\"\\t\"", "+", "line", ".", "strip", "(", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "else", ":", "f", ".", "write", "(", "line", ")", "f", ".", "write", "(", "\"\\n\"", ")", "if", "\"README\"", "in", "path_to_rst", ":", "f", ".", "write", "(", "get_rst_title", "(", "\"Credits\"", ",", "\"^\"", ")", ")", "f", ".", "write", "(", "get_credits", "(", ")", ")", "print", "(", "\"\\ncmd:{} in dir:{} --> RST generated:\\n\\t{}\\n\\n\"", ".", "format", "(", "help_cmd", ",", "cwd", ",", "path_to_rst", ")", ")" ]
Extract HELP information from ``<program> -h | --help`` message **Input** * ``$ <program> -h | --help`` * ``$ cd <cwd> && make help`` **Output** * ``docs/src/console_help_xy.rst``
[ "Extract", "HELP", "information", "from", "<program", ">", "-", "h", "|", "--", "help", "message" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/prepare_docs.py#L82-L150
markuskiller/textblob-de
prepare_docs.py
update_docs
def update_docs(readme=True, makefiles=True): """Update documentation (ready for publishing new release) Usually called by ``make docs`` :param bool make_doc: generate DOC page from Makefile help messages """ if readme: _pandoc = get_external_executable("pandoc") rst2markdown_github(os.path.join(_HERE, "README.rst"), os.path.join(_HERE, "README.md"), pandoc=_pandoc) if makefiles: _make = get_external_executable("make") project_makefile_dir = os.path.abspath(_HERE) project_makefile_rst = os.path.join( _HERE, 'docs', 'src', 'project_makefile.rst') docs_makefile_dir = os.path.join(_HERE, 'docs', 'src') docs_makefile_rst = os.path.join( _HERE, 'docs', 'src', 'docs_makefile.rst') #: ``help2rst_queue`` stores tuples of #: ``(cwd, help_cmd, path_to_rst_file, rst_title_of_new_file)`` help2rst_queue = [ (project_makefile_dir, [_make, "help"], project_makefile_rst, "Project ``Makefile``"), (docs_makefile_dir, [_make, "help"], docs_makefile_rst, "Documentation ``Makefile``")] for cwd, help_cmd, outfile, title in help2rst_queue: console_help2rst( cwd, help_cmd, outfile, title, format_as_code=True)
python
def update_docs(readme=True, makefiles=True): """Update documentation (ready for publishing new release) Usually called by ``make docs`` :param bool make_doc: generate DOC page from Makefile help messages """ if readme: _pandoc = get_external_executable("pandoc") rst2markdown_github(os.path.join(_HERE, "README.rst"), os.path.join(_HERE, "README.md"), pandoc=_pandoc) if makefiles: _make = get_external_executable("make") project_makefile_dir = os.path.abspath(_HERE) project_makefile_rst = os.path.join( _HERE, 'docs', 'src', 'project_makefile.rst') docs_makefile_dir = os.path.join(_HERE, 'docs', 'src') docs_makefile_rst = os.path.join( _HERE, 'docs', 'src', 'docs_makefile.rst') #: ``help2rst_queue`` stores tuples of #: ``(cwd, help_cmd, path_to_rst_file, rst_title_of_new_file)`` help2rst_queue = [ (project_makefile_dir, [_make, "help"], project_makefile_rst, "Project ``Makefile``"), (docs_makefile_dir, [_make, "help"], docs_makefile_rst, "Documentation ``Makefile``")] for cwd, help_cmd, outfile, title in help2rst_queue: console_help2rst( cwd, help_cmd, outfile, title, format_as_code=True)
[ "def", "update_docs", "(", "readme", "=", "True", ",", "makefiles", "=", "True", ")", ":", "if", "readme", ":", "_pandoc", "=", "get_external_executable", "(", "\"pandoc\"", ")", "rst2markdown_github", "(", "os", ".", "path", ".", "join", "(", "_HERE", ",", "\"README.rst\"", ")", ",", "os", ".", "path", ".", "join", "(", "_HERE", ",", "\"README.md\"", ")", ",", "pandoc", "=", "_pandoc", ")", "if", "makefiles", ":", "_make", "=", "get_external_executable", "(", "\"make\"", ")", "project_makefile_dir", "=", "os", ".", "path", ".", "abspath", "(", "_HERE", ")", "project_makefile_rst", "=", "os", ".", "path", ".", "join", "(", "_HERE", ",", "'docs'", ",", "'src'", ",", "'project_makefile.rst'", ")", "docs_makefile_dir", "=", "os", ".", "path", ".", "join", "(", "_HERE", ",", "'docs'", ",", "'src'", ")", "docs_makefile_rst", "=", "os", ".", "path", ".", "join", "(", "_HERE", ",", "'docs'", ",", "'src'", ",", "'docs_makefile.rst'", ")", "#: ``help2rst_queue`` stores tuples of", "#: ``(cwd, help_cmd, path_to_rst_file, rst_title_of_new_file)``", "help2rst_queue", "=", "[", "(", "project_makefile_dir", ",", "[", "_make", ",", "\"help\"", "]", ",", "project_makefile_rst", ",", "\"Project ``Makefile``\"", ")", ",", "(", "docs_makefile_dir", ",", "[", "_make", ",", "\"help\"", "]", ",", "docs_makefile_rst", ",", "\"Documentation ``Makefile``\"", ")", "]", "for", "cwd", ",", "help_cmd", ",", "outfile", ",", "title", "in", "help2rst_queue", ":", "console_help2rst", "(", "cwd", ",", "help_cmd", ",", "outfile", ",", "title", ",", "format_as_code", "=", "True", ")" ]
Update documentation (ready for publishing new release) Usually called by ``make docs`` :param bool make_doc: generate DOC page from Makefile help messages
[ "Update", "documentation", "(", "ready", "for", "publishing", "new", "release", ")" ]
train
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/prepare_docs.py#L153-L197
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.rms
def rms(self, x, params=()): """ Returns root mean square value of f(x, params) """ internal_x, internal_params = self.pre_process(np.asarray(x), np.asarray(params)) if internal_params.ndim > 1: raise NotImplementedError("Parameters should be constant.") result = np.empty(internal_x.size//self.nx) for idx in range(internal_x.shape[0]): result[idx] = np.sqrt(np.mean(np.square(self.f_cb( internal_x[idx, :], internal_params)))) return result
python
def rms(self, x, params=()): """ Returns root mean square value of f(x, params) """ internal_x, internal_params = self.pre_process(np.asarray(x), np.asarray(params)) if internal_params.ndim > 1: raise NotImplementedError("Parameters should be constant.") result = np.empty(internal_x.size//self.nx) for idx in range(internal_x.shape[0]): result[idx] = np.sqrt(np.mean(np.square(self.f_cb( internal_x[idx, :], internal_params)))) return result
[ "def", "rms", "(", "self", ",", "x", ",", "params", "=", "(", ")", ")", ":", "internal_x", ",", "internal_params", "=", "self", ".", "pre_process", "(", "np", ".", "asarray", "(", "x", ")", ",", "np", ".", "asarray", "(", "params", ")", ")", "if", "internal_params", ".", "ndim", ">", "1", ":", "raise", "NotImplementedError", "(", "\"Parameters should be constant.\"", ")", "result", "=", "np", ".", "empty", "(", "internal_x", ".", "size", "//", "self", ".", "nx", ")", "for", "idx", "in", "range", "(", "internal_x", ".", "shape", "[", "0", "]", ")", ":", "result", "[", "idx", "]", "=", "np", ".", "sqrt", "(", "np", ".", "mean", "(", "np", ".", "square", "(", "self", ".", "f_cb", "(", "internal_x", "[", "idx", ",", ":", "]", ",", "internal_params", ")", ")", ")", ")", "return", "result" ]
Returns root mean square value of f(x, params)
[ "Returns", "root", "mean", "square", "value", "of", "f", "(", "x", "params", ")" ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L79-L89
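rms() scores one or more candidate solutions against the residual function; a minimal sketch, assuming pyneqsys's NeqSys front-end accepts (nf, nx, f) with f(x, params) returning the residual vector:

    import numpy as np
    from pyneqsys.core import NeqSys

    def f(x, params):
        # Two coupled equations sharing a single exponent parameter.
        return [x[0] + (x[0] - x[1])**params[0] / 2 - 1,
                x[1] + (x[1] - x[0])**params[0] / 2]

    neqsys = NeqSys(2, 2, f)
    guesses = np.array([[1.0, 0.0], [0.8, 0.1]])
    print(neqsys.rms(guesses, [3]))  # one root-mean-square residual per row of guesses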
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.solve_series
def solve_series(self, x0, params, varied_data, varied_idx, internal_x0=None, solver=None, propagate=True, **kwargs): """ Solve system for a set of parameters in which one is varied Parameters ---------- x0 : array_like Guess (subject to ``self.post_processors``) params : array_like Parameter values vaired_data : array_like Numerical values of the varied parameter. varied_idx : int or str Index of the varied parameter (indexing starts at 0). If ``self.par_by_name`` this should be the name (str) of the varied parameter. internal_x0 : array_like (default: None) Guess (*not* subject to ``self.post_processors``). Overrides ``x0`` when given. solver : str or callback See :meth:`solve`. propagate : bool (default: True) Use last successful solution as ``x0`` in consecutive solves. \\*\\*kwargs : Keyword arguments pass along to :meth:`solve`. Returns ------- xout : array Of shape ``(varied_data.size, x0.size)``. info_dicts : list of dictionaries Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc. """ if self.x_by_name and isinstance(x0, dict): x0 = [x0[k] for k in self.names] if self.par_by_name: if isinstance(params, dict): params = [params[k] for k in self.param_names] if isinstance(varied_idx, str): varied_idx = self.param_names.index(varied_idx) new_params = np.atleast_1d(np.array(params, dtype=np.float64)) xout = np.empty((len(varied_data), len(x0))) self.internal_xout = np.empty_like(xout) self.internal_params_out = np.empty((len(varied_data), len(new_params))) info_dicts = [] new_x0 = np.array(x0, dtype=np.float64) # copy conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys for idx, value in enumerate(varied_data): try: new_params[varied_idx] = value except TypeError: new_params = value # e.g. type(new_params) == int if conds is not None: kwargs['initial_conditions'] = conds x, info_dict = self.solve(new_x0, new_params, internal_x0, solver, **kwargs) if propagate: if info_dict['success']: try: # See ChainedNeqSys.solve new_x0 = info_dict['x_vecs'][0] internal_x0 = info_dict['internal_x_vecs'][0] conds = info_dict['intermediate_info'][0].get( 'conditions', None) except: new_x0 = x internal_x0 = None conds = info_dict.get('conditions', None) xout[idx, :] = x self.internal_xout[idx, :] = self.internal_x self.internal_params_out[idx, :] = self.internal_params info_dicts.append(info_dict) return xout, info_dicts
python
def solve_series(self, x0, params, varied_data, varied_idx, internal_x0=None, solver=None, propagate=True, **kwargs): """ Solve system for a set of parameters in which one is varied Parameters ---------- x0 : array_like Guess (subject to ``self.post_processors``) params : array_like Parameter values varied_data : array_like Numerical values of the varied parameter. varied_idx : int or str Index of the varied parameter (indexing starts at 0). If ``self.par_by_name`` this should be the name (str) of the varied parameter. internal_x0 : array_like (default: None) Guess (*not* subject to ``self.post_processors``). Overrides ``x0`` when given. solver : str or callback See :meth:`solve`. propagate : bool (default: True) Use last successful solution as ``x0`` in consecutive solves. \\*\\*kwargs : Keyword arguments passed along to :meth:`solve`. Returns ------- xout : array Of shape ``(varied_data.size, x0.size)``. info_dicts : list of dictionaries Dictionaries each containing keys such as 'success', 'nfev', 'njev' etc. """ if self.x_by_name and isinstance(x0, dict): x0 = [x0[k] for k in self.names] if self.par_by_name: if isinstance(params, dict): params = [params[k] for k in self.param_names] if isinstance(varied_idx, str): varied_idx = self.param_names.index(varied_idx) new_params = np.atleast_1d(np.array(params, dtype=np.float64)) xout = np.empty((len(varied_data), len(x0))) self.internal_xout = np.empty_like(xout) self.internal_params_out = np.empty((len(varied_data), len(new_params))) info_dicts = [] new_x0 = np.array(x0, dtype=np.float64) # copy conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys for idx, value in enumerate(varied_data): try: new_params[varied_idx] = value except TypeError: new_params = value # e.g. type(new_params) == int if conds is not None: kwargs['initial_conditions'] = conds x, info_dict = self.solve(new_x0, new_params, internal_x0, solver, **kwargs) if propagate: if info_dict['success']: try: # See ChainedNeqSys.solve new_x0 = info_dict['x_vecs'][0] internal_x0 = info_dict['internal_x_vecs'][0] conds = info_dict['intermediate_info'][0].get( 'conditions', None) except: new_x0 = x internal_x0 = None conds = info_dict.get('conditions', None) xout[idx, :] = x self.internal_xout[idx, :] = self.internal_x self.internal_params_out[idx, :] = self.internal_params info_dicts.append(info_dict) return xout, info_dicts
[ "def", "solve_series", "(", "self", ",", "x0", ",", "params", ",", "varied_data", ",", "varied_idx", ",", "internal_x0", "=", "None", ",", "solver", "=", "None", ",", "propagate", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "x_by_name", "and", "isinstance", "(", "x0", ",", "dict", ")", ":", "x0", "=", "[", "x0", "[", "k", "]", "for", "k", "in", "self", ".", "names", "]", "if", "self", ".", "par_by_name", ":", "if", "isinstance", "(", "params", ",", "dict", ")", ":", "params", "=", "[", "params", "[", "k", "]", "for", "k", "in", "self", ".", "param_names", "]", "if", "isinstance", "(", "varied_idx", ",", "str", ")", ":", "varied_idx", "=", "self", ".", "param_names", ".", "index", "(", "varied_idx", ")", "new_params", "=", "np", ".", "atleast_1d", "(", "np", ".", "array", "(", "params", ",", "dtype", "=", "np", ".", "float64", ")", ")", "xout", "=", "np", ".", "empty", "(", "(", "len", "(", "varied_data", ")", ",", "len", "(", "x0", ")", ")", ")", "self", ".", "internal_xout", "=", "np", ".", "empty_like", "(", "xout", ")", "self", ".", "internal_params_out", "=", "np", ".", "empty", "(", "(", "len", "(", "varied_data", ")", ",", "len", "(", "new_params", ")", ")", ")", "info_dicts", "=", "[", "]", "new_x0", "=", "np", ".", "array", "(", "x0", ",", "dtype", "=", "np", ".", "float64", ")", "# copy", "conds", "=", "kwargs", ".", "get", "(", "'initial_conditions'", ",", "None", ")", "# see ConditionalNeqSys", "for", "idx", ",", "value", "in", "enumerate", "(", "varied_data", ")", ":", "try", ":", "new_params", "[", "varied_idx", "]", "=", "value", "except", "TypeError", ":", "new_params", "=", "value", "# e.g. type(new_params) == int", "if", "conds", "is", "not", "None", ":", "kwargs", "[", "'initial_conditions'", "]", "=", "conds", "x", ",", "info_dict", "=", "self", ".", "solve", "(", "new_x0", ",", "new_params", ",", "internal_x0", ",", "solver", ",", "*", "*", "kwargs", ")", "if", "propagate", ":", "if", "info_dict", "[", "'success'", "]", ":", "try", ":", "# See ChainedNeqSys.solve", "new_x0", "=", "info_dict", "[", "'x_vecs'", "]", "[", "0", "]", "internal_x0", "=", "info_dict", "[", "'internal_x_vecs'", "]", "[", "0", "]", "conds", "=", "info_dict", "[", "'intermediate_info'", "]", "[", "0", "]", ".", "get", "(", "'conditions'", ",", "None", ")", "except", ":", "new_x0", "=", "x", "internal_x0", "=", "None", "conds", "=", "info_dict", ".", "get", "(", "'conditions'", ",", "None", ")", "xout", "[", "idx", ",", ":", "]", "=", "x", "self", ".", "internal_xout", "[", "idx", ",", ":", "]", "=", "self", ".", "internal_x", "self", ".", "internal_params_out", "[", "idx", ",", ":", "]", "=", "self", ".", "internal_params", "info_dicts", ".", "append", "(", "info_dict", ")", "return", "xout", ",", "info_dicts" ]
Solve system for a set of parameters in which one is varied Parameters ---------- x0 : array_like Guess (subject to ``self.post_processors``) params : array_like Parameter values varied_data : array_like Numerical values of the varied parameter. varied_idx : int or str Index of the varied parameter (indexing starts at 0). If ``self.par_by_name`` this should be the name (str) of the varied parameter. internal_x0 : array_like (default: None) Guess (*not* subject to ``self.post_processors``). Overrides ``x0`` when given. solver : str or callback See :meth:`solve`. propagate : bool (default: True) Use last successful solution as ``x0`` in consecutive solves. \\*\\*kwargs : Keyword arguments passed along to :meth:`solve`. Returns ------- xout : array Of shape ``(varied_data.size, x0.size)``. info_dicts : list of dictionaries Dictionaries each containing keys such as 'success', 'nfev', 'njev' etc.
[ "Solve", "system", "for", "a", "set", "of", "parameters", "in", "which", "one", "is", "varied" ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L91-L166
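A minimal sketch of ``solve_series``, assuming the doctest system from ``NeqSys.solve`` further down and that SciPy is available as the default solver:

import numpy as np
from pyneqsys.core import NeqSys

neqsys = NeqSys(2, 2, lambda x, p: [
    (x[0] - x[1])**p[0]/2 + x[0] - 1,
    (x[1] - x[0])**p[0]/2 + x[1]
])
varied = np.array([3, 4, 5, 6, 7])   # values assigned to params[0] one at a time
xout, info_dicts = neqsys.solve_series([1, 0], [3], varied, varied_idx=0)
print(xout.shape)                     # (5, 2): one row per entry in ``varied``
print([nfo['success'] for nfo in info_dicts])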
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.plot_series
def plot_series(self, xres, varied_data, varied_idx, **kwargs): """ Plots the results from :meth:`solve_series`. Parameters ---------- xres : array Of shape ``(varied_data.size, self.nx)``. varied_data : array See :meth:`solve_series`. varied_idx : int or str See :meth:`solve_series`. \\*\\*kwargs : Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`. """ for attr in 'names latex_names'.split(): if kwargs.get(attr, None) is None: kwargs[attr] = getattr(self, attr) ax = plot_series(xres, varied_data, **kwargs) if self.par_by_name and isinstance(varied_idx, str): varied_idx = self.param_names.index(varied_idx) if self.latex_param_names: ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx]) elif self.param_names: ax.set_xlabel(self.param_names[varied_idx]) return ax
python
def plot_series(self, xres, varied_data, varied_idx, **kwargs): """ Plots the results from :meth:`solve_series`. Parameters ---------- xres : array Of shape ``(varied_data.size, self.nx)``. varied_data : array See :meth:`solve_series`. varied_idx : int or str See :meth:`solve_series`. \\*\\*kwargs : Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`. """ for attr in 'names latex_names'.split(): if kwargs.get(attr, None) is None: kwargs[attr] = getattr(self, attr) ax = plot_series(xres, varied_data, **kwargs) if self.par_by_name and isinstance(varied_idx, str): varied_idx = self.param_names.index(varied_idx) if self.latex_param_names: ax.set_xlabel('$%s$' % self.latex_param_names[varied_idx]) elif self.param_names: ax.set_xlabel(self.param_names[varied_idx]) return ax
[ "def", "plot_series", "(", "self", ",", "xres", ",", "varied_data", ",", "varied_idx", ",", "*", "*", "kwargs", ")", ":", "for", "attr", "in", "'names latex_names'", ".", "split", "(", ")", ":", "if", "kwargs", ".", "get", "(", "attr", ",", "None", ")", "is", "None", ":", "kwargs", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "ax", "=", "plot_series", "(", "xres", ",", "varied_data", ",", "*", "*", "kwargs", ")", "if", "self", ".", "par_by_name", "and", "isinstance", "(", "varied_idx", ",", "str", ")", ":", "varied_idx", "=", "self", ".", "param_names", ".", "index", "(", "varied_idx", ")", "if", "self", ".", "latex_param_names", ":", "ax", ".", "set_xlabel", "(", "'$%s$'", "%", "self", ".", "latex_param_names", "[", "varied_idx", "]", ")", "elif", "self", ".", "param_names", ":", "ax", ".", "set_xlabel", "(", "self", ".", "param_names", "[", "varied_idx", "]", ")", "return", "ax" ]
Plots the results from :meth:`solve_series`. Parameters ---------- xres : array Of shape ``(varied_data.size, self.nx)``. varied_data : array See :meth:`solve_series`. varied_idx : int or str See :meth:`solve_series`. \\*\\*kwargs : Keyword arguments passed to :func:`pyneqsys.plotting.plot_series`.
[ "Plots", "the", "results", "from", ":", "meth", ":", "solve_series", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L168-L193
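A sketch of ``plot_series``, assuming matplotlib is installed and that ``neqsys``, ``xout`` and ``varied`` come from the ``solve_series`` sketch above:

import matplotlib.pyplot as plt

ax = neqsys.plot_series(xout, varied, varied_idx=0)  # one curve per solution component
ax.set_ylabel('x')
plt.show()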
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.plot_series_residuals
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs): """ Analogous to :meth:`plot_series` but will plot residuals. """ nf = len(self.f_cb(*self.pre_process(xres[0], params))) xerr = np.empty((xres.shape[0], nf)) new_params = np.array(params) for idx, row in enumerate(xres): new_params[varied_idx] = varied_data[idx] xerr[idx, :] = self.f_cb(*self.pre_process(row, params)) return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
python
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs): """ Analogous to :meth:`plot_series` but will plot residuals. """ nf = len(self.f_cb(*self.pre_process(xres[0], params))) xerr = np.empty((xres.shape[0], nf)) new_params = np.array(params) for idx, row in enumerate(xres): new_params[varied_idx] = varied_data[idx] xerr[idx, :] = self.f_cb(*self.pre_process(row, params)) return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
[ "def", "plot_series_residuals", "(", "self", ",", "xres", ",", "varied_data", ",", "varied_idx", ",", "params", ",", "*", "*", "kwargs", ")", ":", "nf", "=", "len", "(", "self", ".", "f_cb", "(", "*", "self", ".", "pre_process", "(", "xres", "[", "0", "]", ",", "params", ")", ")", ")", "xerr", "=", "np", ".", "empty", "(", "(", "xres", ".", "shape", "[", "0", "]", ",", "nf", ")", ")", "new_params", "=", "np", ".", "array", "(", "params", ")", "for", "idx", ",", "row", "in", "enumerate", "(", "xres", ")", ":", "new_params", "[", "varied_idx", "]", "=", "varied_data", "[", "idx", "]", "xerr", "[", "idx", ",", ":", "]", "=", "self", ".", "f_cb", "(", "*", "self", ".", "pre_process", "(", "row", ",", "params", ")", ")", "return", "self", ".", "plot_series", "(", "xerr", ",", "varied_data", ",", "varied_idx", ",", "*", "*", "kwargs", ")" ]
Analogous to :meth:`plot_series` but will plot residuals.
[ "Analogous", "to", ":", "meth", ":", "plot_series", "but", "will", "plot", "residuals", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L195-L204
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.plot_series_residuals_internal
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs): """ Analogous to :meth:`plot_series` but for internal residuals from last run. """ nf = len(self.f_cb(*self.pre_process( self.internal_xout[0], self.internal_params_out[0]))) xerr = np.empty((self.internal_xout.shape[0], nf)) for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)): xerr[idx, :] = self.f_cb(res, params) return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
python
def plot_series_residuals_internal(self, varied_data, varied_idx, **kwargs): """ Analogous to :meth:`plot_series` but for internal residuals from last run. """ nf = len(self.f_cb(*self.pre_process( self.internal_xout[0], self.internal_params_out[0]))) xerr = np.empty((self.internal_xout.shape[0], nf)) for idx, (res, params) in enumerate(zip(self.internal_xout, self.internal_params_out)): xerr[idx, :] = self.f_cb(res, params) return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
[ "def", "plot_series_residuals_internal", "(", "self", ",", "varied_data", ",", "varied_idx", ",", "*", "*", "kwargs", ")", ":", "nf", "=", "len", "(", "self", ".", "f_cb", "(", "*", "self", ".", "pre_process", "(", "self", ".", "internal_xout", "[", "0", "]", ",", "self", ".", "internal_params_out", "[", "0", "]", ")", ")", ")", "xerr", "=", "np", ".", "empty", "(", "(", "self", ".", "internal_xout", ".", "shape", "[", "0", "]", ",", "nf", ")", ")", "for", "idx", ",", "(", "res", ",", "params", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "internal_xout", ",", "self", ".", "internal_params_out", ")", ")", ":", "xerr", "[", "idx", ",", ":", "]", "=", "self", ".", "f_cb", "(", "res", ",", "params", ")", "return", "self", ".", "plot_series", "(", "xerr", ",", "varied_data", ",", "varied_idx", ",", "*", "*", "kwargs", ")" ]
Analogous to :meth:`plot_series` but for internal residuals from last run.
[ "Analogous", "to", ":", "meth", ":", "plot_series", "but", "for", "internal", "residuals", "from", "last", "run", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L206-L213
bjodah/pyneqsys
pyneqsys/core.py
_NeqSysBase.solve_and_plot_series
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None, plot_residuals_kwargs=None, **kwargs): """ Solve and plot for a series of a varied parameter. Convenience method, see :meth:`solve_series`, :meth:`plot_series` & :meth:`plot_series_residuals_internal` for more information. """ sol, nfo = self.solve_series( x0, params, varied_data, varied_idx, solver=solver, **kwargs) ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo, **(plot_kwargs or {})) extra = dict(ax_sol=ax_sol, info=nfo) if plot_residuals_kwargs: extra['ax_resid'] = self.plot_series_residuals_internal( varied_data, varied_idx, info=nfo, **(plot_residuals_kwargs or {}) ) return sol, extra
python
def solve_and_plot_series(self, x0, params, varied_data, varied_idx, solver=None, plot_kwargs=None, plot_residuals_kwargs=None, **kwargs): """ Solve and plot for a series of a varied parameter. Convenience method, see :meth:`solve_series`, :meth:`plot_series` & :meth:`plot_series_residuals_internal` for more information. """ sol, nfo = self.solve_series( x0, params, varied_data, varied_idx, solver=solver, **kwargs) ax_sol = self.plot_series(sol, varied_data, varied_idx, info=nfo, **(plot_kwargs or {})) extra = dict(ax_sol=ax_sol, info=nfo) if plot_residuals_kwargs: extra['ax_resid'] = self.plot_series_residuals_internal( varied_data, varied_idx, info=nfo, **(plot_residuals_kwargs or {}) ) return sol, extra
[ "def", "solve_and_plot_series", "(", "self", ",", "x0", ",", "params", ",", "varied_data", ",", "varied_idx", ",", "solver", "=", "None", ",", "plot_kwargs", "=", "None", ",", "plot_residuals_kwargs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "sol", ",", "nfo", "=", "self", ".", "solve_series", "(", "x0", ",", "params", ",", "varied_data", ",", "varied_idx", ",", "solver", "=", "solver", ",", "*", "*", "kwargs", ")", "ax_sol", "=", "self", ".", "plot_series", "(", "sol", ",", "varied_data", ",", "varied_idx", ",", "info", "=", "nfo", ",", "*", "*", "(", "plot_kwargs", "or", "{", "}", ")", ")", "extra", "=", "dict", "(", "ax_sol", "=", "ax_sol", ",", "info", "=", "nfo", ")", "if", "plot_residuals_kwargs", ":", "extra", "[", "'ax_resid'", "]", "=", "self", ".", "plot_series_residuals_internal", "(", "varied_data", ",", "varied_idx", ",", "info", "=", "nfo", ",", "*", "*", "(", "plot_residuals_kwargs", "or", "{", "}", ")", ")", "return", "sol", ",", "extra" ]
Solve and plot for a series of a varied parameter. Convenience method, see :meth:`solve_series`, :meth:`plot_series` & :meth:`plot_series_residuals_internal` for more information.
[ "Solve", "and", "plot", "for", "a", "series", "of", "a", "varied", "parameter", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L215-L233
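A sketch of the convenience wrapper ``solve_and_plot_series``, under the same assumptions as above (SciPy and matplotlib installed, doctest system from ``NeqSys.solve`` below):

import numpy as np
import matplotlib.pyplot as plt
from pyneqsys.core import NeqSys

neqsys = NeqSys(2, 2, lambda x, p: [
    (x[0] - x[1])**p[0]/2 + x[0] - 1,
    (x[1] - x[0])**p[0]/2 + x[1]
])
sol, extra = neqsys.solve_and_plot_series(
    [1, 0], [3], np.array([3, 4, 5, 6, 7]), 0, solver='scipy')
print(all(nfo['success'] for nfo in extra['info']))
plt.show()  # extra['ax_sol'] holds the axes with the solution curves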
bjodah/pyneqsys
pyneqsys/core.py
NeqSys.pre_process
def pre_process(self, x0, params=()): """ Used internally for transformation of variables. """ # Should be used by all methods matching "solve_*" if self.x_by_name and isinstance(x0, dict): x0 = [x0[k] for k in self.names] if self.par_by_name and isinstance(params, dict): params = [params[k] for k in self.param_names] for pre_processor in self.pre_processors: x0, params = pre_processor(x0, params) return x0, np.atleast_1d(params)
python
def pre_process(self, x0, params=()): """ Used internally for transformation of variables. """ # Should be used by all methods matching "solve_*" if self.x_by_name and isinstance(x0, dict): x0 = [x0[k] for k in self.names] if self.par_by_name and isinstance(params, dict): params = [params[k] for k in self.param_names] for pre_processor in self.pre_processors: x0, params = pre_processor(x0, params) return x0, np.atleast_1d(params)
[ "def", "pre_process", "(", "self", ",", "x0", ",", "params", "=", "(", ")", ")", ":", "# Should be used by all methods matching \"solve_*\"", "if", "self", ".", "x_by_name", "and", "isinstance", "(", "x0", ",", "dict", ")", ":", "x0", "=", "[", "x0", "[", "k", "]", "for", "k", "in", "self", ".", "names", "]", "if", "self", ".", "par_by_name", "and", "isinstance", "(", "params", ",", "dict", ")", ":", "params", "=", "[", "params", "[", "k", "]", "for", "k", "in", "self", ".", "param_names", "]", "for", "pre_processor", "in", "self", ".", "pre_processors", ":", "x0", ",", "params", "=", "pre_processor", "(", "x0", ",", "params", ")", "return", "x0", ",", "np", ".", "atleast_1d", "(", "params", ")" ]
Used internally for transformation of variables.
[ "Used", "internally", "for", "transformation", "of", "variables", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L318-L327
bjodah/pyneqsys
pyneqsys/core.py
NeqSys.post_process
def post_process(self, xout, params_out): """ Used internally for transformation of variables. """ # Should be used by all methods matching "solve_*" for post_processor in self.post_processors: xout, params_out = post_processor(xout, params_out) return xout, params_out
python
def post_process(self, xout, params_out): """ Used internally for transformation of variables. """ # Should be used by all methods matching "solve_*" for post_processor in self.post_processors: xout, params_out = post_processor(xout, params_out) return xout, params_out
[ "def", "post_process", "(", "self", ",", "xout", ",", "params_out", ")", ":", "# Should be used by all methods matching \"solve_*\"", "for", "post_processor", "in", "self", ".", "post_processors", ":", "xout", ",", "params_out", "=", "post_processor", "(", "xout", ",", "params_out", ")", "return", "xout", ",", "params_out" ]
Used internally for transformation of variables.
[ "Used", "internally", "for", "transformation", "of", "variables", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L329-L334
bjodah/pyneqsys
pyneqsys/core.py
NeqSys.solve
def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs): """ Solve with user specified ``solver`` choice. Parameters ---------- x0: 1D array of floats Guess (subject to ``self.post_processors``) params: 1D array_like of floats Parameters (subject to ``self.post_processors``) internal_x0: 1D array of floats When given it overrides (processed) ``x0``. ``internal_x0`` is not subject to ``self.post_processors``. solver: str or callable or None or iterable of such if str: uses _solve_``solver``(\*args, \*\*kwargs). if ``None``: chooses from PYNEQSYS_SOLVER environment variable. if iterable: chain solving. attached_solver: callable factory Invokes: solver = attached_solver(self). Returns ------- array: solution vector (post-processed by self.post_processors) dict: info dictionary containing 'success', 'nfev', 'njev' etc. Examples -------- >>> neqsys = NeqSys(2, 2, lambda x, p: [ ... (x[0] - x[1])**p[0]/2 + x[0] - 1, ... (x[1] - x[0])**p[0]/2 + x[1] ... ]) >>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath')) >>> assert sol['success'] >>> print(x) [0.841163901914009663684741869855] [0.158836098085990336315258130144] """ if not isinstance(solver, (tuple, list)): solver = [solver] if not isinstance(attached_solver, (tuple, list)): attached_solver = [attached_solver] + [None]*(len(solver) - 1) _x0, self.internal_params = self.pre_process(x0, params) for solv, attached_solv in zip(solver, attached_solver): if internal_x0 is not None: _x0 = internal_x0 elif self.internal_x0_cb is not None: _x0 = self.internal_x0_cb(x0, params) nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs) _x0 = nfo['x'].copy() self.internal_x = _x0 x0 = self.post_process(self.internal_x, self.internal_params)[0] return x0, nfo
python
def solve(self, x0, params=(), internal_x0=None, solver=None, attached_solver=None, **kwargs): """ Solve with user specified ``solver`` choice. Parameters ---------- x0: 1D array of floats Guess (subject to ``self.post_processors``) params: 1D array_like of floats Parameters (subject to ``self.post_processors``) internal_x0: 1D array of floats When given it overrides (processed) ``x0``. ``internal_x0`` is not subject to ``self.post_processors``. solver: str or callable or None or iterable of such if str: uses _solve_``solver``(\*args, \*\*kwargs). if ``None``: chooses from PYNEQSYS_SOLVER environment variable. if iterable: chain solving. attached_solver: callable factory Invokes: solver = attached_solver(self). Returns ------- array: solution vector (post-processed by self.post_processors) dict: info dictionary containing 'success', 'nfev', 'njev' etc. Examples -------- >>> neqsys = NeqSys(2, 2, lambda x, p: [ ... (x[0] - x[1])**p[0]/2 + x[0] - 1, ... (x[1] - x[0])**p[0]/2 + x[1] ... ]) >>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath')) >>> assert sol['success'] >>> print(x) [0.841163901914009663684741869855] [0.158836098085990336315258130144] """ if not isinstance(solver, (tuple, list)): solver = [solver] if not isinstance(attached_solver, (tuple, list)): attached_solver = [attached_solver] + [None]*(len(solver) - 1) _x0, self.internal_params = self.pre_process(x0, params) for solv, attached_solv in zip(solver, attached_solver): if internal_x0 is not None: _x0 = internal_x0 elif self.internal_x0_cb is not None: _x0 = self.internal_x0_cb(x0, params) nfo = self._get_solver_cb(solv, attached_solv)(_x0, **kwargs) _x0 = nfo['x'].copy() self.internal_x = _x0 x0 = self.post_process(self.internal_x, self.internal_params)[0] return x0, nfo
[ "def", "solve", "(", "self", ",", "x0", ",", "params", "=", "(", ")", ",", "internal_x0", "=", "None", ",", "solver", "=", "None", ",", "attached_solver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "solver", ",", "(", "tuple", ",", "list", ")", ")", ":", "solver", "=", "[", "solver", "]", "if", "not", "isinstance", "(", "attached_solver", ",", "(", "tuple", ",", "list", ")", ")", ":", "attached_solver", "=", "[", "attached_solver", "]", "+", "[", "None", "]", "*", "(", "len", "(", "solver", ")", "-", "1", ")", "_x0", ",", "self", ".", "internal_params", "=", "self", ".", "pre_process", "(", "x0", ",", "params", ")", "for", "solv", ",", "attached_solv", "in", "zip", "(", "solver", ",", "attached_solver", ")", ":", "if", "internal_x0", "is", "not", "None", ":", "_x0", "=", "internal_x0", "elif", "self", ".", "internal_x0_cb", "is", "not", "None", ":", "_x0", "=", "self", ".", "internal_x0_cb", "(", "x0", ",", "params", ")", "nfo", "=", "self", ".", "_get_solver_cb", "(", "solv", ",", "attached_solv", ")", "(", "_x0", ",", "*", "*", "kwargs", ")", "_x0", "=", "nfo", "[", "'x'", "]", ".", "copy", "(", ")", "self", ".", "internal_x", "=", "_x0", "x0", "=", "self", ".", "post_process", "(", "self", ".", "internal_x", ",", "self", ".", "internal_params", ")", "[", "0", "]", "return", "x0", ",", "nfo" ]
Solve with user specified ``solver`` choice. Parameters ---------- x0: 1D array of floats Guess (subject to ``self.post_processors``) params: 1D array_like of floats Parameters (subject to ``self.post_processors``) internal_x0: 1D array of floats When given it overrides (processed) ``x0``. ``internal_x0`` is not subject to ``self.post_processors``. solver: str or callable or None or iterable of such if str: uses _solve_``solver``(\*args, \*\*kwargs). if ``None``: chooses from PYNEQSYS_SOLVER environment variable. if iterable: chain solving. attached_solver: callable factory Invokes: solver = attached_solver(self). Returns ------- array: solution vector (post-processed by self.post_processors) dict: info dictionary containing 'success', 'nfev', 'njev' etc. Examples -------- >>> neqsys = NeqSys(2, 2, lambda x, p: [ ... (x[0] - x[1])**p[0]/2 + x[0] - 1, ... (x[1] - x[0])**p[0]/2 + x[1] ... ]) >>> x, sol = neqsys.solve([1, 0], [3], solver=(None, 'mpmath')) >>> assert sol['success'] >>> print(x) [0.841163901914009663684741869855] [0.158836098085990336315258130144]
[ "Solve", "with", "user", "specified", "solver", "choice", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L336-L390
bjodah/pyneqsys
pyneqsys/core.py
NeqSys._solve_scipy
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs): """ Uses ``scipy.optimize.root`` See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html Parameters ---------- intern_x0: array_like initial guess tol: float Tolerance method: str What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``. """ from scipy.optimize import root if method is None: if self.nf > self.nx: method = 'lm' elif self.nf == self.nx: method = 'hybr' else: raise ValueError('Underdetermined problem') if 'band' in kwargs: raise ValueError("Set 'band' at initialization instead.") if 'args' in kwargs: raise ValueError("Set 'args' as params in initialization instead.") new_kwargs = kwargs.copy() if self.band is not None: warnings.warn("Band argument ignored (see SciPy docs)") new_kwargs['band'] = self.band new_kwargs['args'] = self.internal_params return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
python
def _solve_scipy(self, intern_x0, tol=1e-8, method=None, **kwargs): """ Uses ``scipy.optimize.root`` See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html Parameters ---------- intern_x0: array_like initial guess tol: float Tolerance method: str What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``. """ from scipy.optimize import root if method is None: if self.nf > self.nx: method = 'lm' elif self.nf == self.nx: method = 'hybr' else: raise ValueError('Underdetermined problem') if 'band' in kwargs: raise ValueError("Set 'band' at initialization instead.") if 'args' in kwargs: raise ValueError("Set 'args' as params in initialization instead.") new_kwargs = kwargs.copy() if self.band is not None: warnings.warn("Band argument ignored (see SciPy docs)") new_kwargs['band'] = self.band new_kwargs['args'] = self.internal_params return root(self.f_cb, intern_x0, jac=self.j_cb, method=method, tol=tol, **new_kwargs)
[ "def", "_solve_scipy", "(", "self", ",", "intern_x0", ",", "tol", "=", "1e-8", ",", "method", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "scipy", ".", "optimize", "import", "root", "if", "method", "is", "None", ":", "if", "self", ".", "nf", ">", "self", ".", "nx", ":", "method", "=", "'lm'", "elif", "self", ".", "nf", "==", "self", ".", "nx", ":", "method", "=", "'hybr'", "else", ":", "raise", "ValueError", "(", "'Underdetermined problem'", ")", "if", "'band'", "in", "kwargs", ":", "raise", "ValueError", "(", "\"Set 'band' at initialization instead.\"", ")", "if", "'args'", "in", "kwargs", ":", "raise", "ValueError", "(", "\"Set 'args' as params in initialization instead.\"", ")", "new_kwargs", "=", "kwargs", ".", "copy", "(", ")", "if", "self", ".", "band", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"Band argument ignored (see SciPy docs)\"", ")", "new_kwargs", "[", "'band'", "]", "=", "self", ".", "band", "new_kwargs", "[", "'args'", "]", "=", "self", ".", "internal_params", "return", "root", "(", "self", ".", "f_cb", ",", "intern_x0", ",", "jac", "=", "self", ".", "j_cb", ",", "method", "=", "method", ",", "tol", "=", "tol", ",", "*", "*", "new_kwargs", ")" ]
Uses ``scipy.optimize.root`` See: http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html Parameters ---------- intern_x0: array_like initial guess tol: float Tolerance method: str What method to use. Defaults to ``'lm'`` if ``self.nf > self.nx`` otherwise ``'hybr'``.
[ "Uses", "scipy", ".", "optimize", ".", "root" ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L392-L426
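Since ``solve`` forwards extra keyword arguments to the selected backend, SciPy-specific options such as ``method`` and ``tol`` can be passed straight through; a sketch (assuming SciPy is installed):

from pyneqsys.core import NeqSys

neqsys = NeqSys(2, 2, lambda x, p: [
    (x[0] - x[1])**p[0]/2 + x[0] - 1,
    (x[1] - x[0])**p[0]/2 + x[1]
])
# solver='scipy' selects _solve_scipy; method and tol end up in scipy.optimize.root:
x, info = neqsys.solve([1, 0], [3], solver='scipy', method='hybr', tol=1e-12)
print(x, info['success'])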
bjodah/pyneqsys
pyneqsys/core.py
ConditionalNeqSys.solve
def solve(self, x0, params=(), internal_x0=None, solver=None, conditional_maxiter=20, initial_conditions=None, **kwargs): """ Solve the problem (systems of equations) Parameters ---------- x0 : array Guess. params : array See :meth:`NeqSys.solve`. internal_x0 : array See :meth:`NeqSys.solve`. solver : str or callable or iterable of such. See :meth:`NeqSys.solve`. conditional_maxiter : int Maximum number of switches between conditions. initial_conditions : iterable of bools Corresponding conditions to ``x0`` \\*\\*kwargs : Keyword arguments passed on to :meth:`NeqSys.solve`. """ if initial_conditions is not None: conds = initial_conditions else: conds = self.get_conds(x0, params, initial_conditions) idx, nfev, njev = 0, 0, 0 while idx < conditional_maxiter: neqsys = self.neqsys_factory(conds) x0, info = neqsys.solve(x0, params, internal_x0, solver, **kwargs) if idx == 0: internal_x0 = None nfev += info['nfev'] njev += info.get('njev', 0) new_conds = self.get_conds(x0, params, conds) if new_conds == conds: break else: conds = new_conds idx += 1 if idx == conditional_maxiter: raise Exception("Solving failed, conditional_maxiter reached") self.internal_x = info['x'] self.internal_params = neqsys.internal_params result = { 'x': info['x'], 'success': info['success'], 'conditions': conds, 'nfev': nfev, 'njev': njev, } if 'fun' in info: result['fun'] = info['fun'] return x0, result
python
def solve(self, x0, params=(), internal_x0=None, solver=None, conditional_maxiter=20, initial_conditions=None, **kwargs): """ Solve the problem (systems of equations) Parameters ---------- x0 : array Guess. params : array See :meth:`NeqSys.solve`. internal_x0 : array See :meth:`NeqSys.solve`. solver : str or callable or iterable of such. See :meth:`NeqSys.solve`. conditional_maxiter : int Maximum number of switches between conditions. initial_conditions : iterable of bools Corresponding conditions to ``x0`` \\*\\*kwargs : Keyword arguments passed on to :meth:`NeqSys.solve`. """ if initial_conditions is not None: conds = initial_conditions else: conds = self.get_conds(x0, params, initial_conditions) idx, nfev, njev = 0, 0, 0 while idx < conditional_maxiter: neqsys = self.neqsys_factory(conds) x0, info = neqsys.solve(x0, params, internal_x0, solver, **kwargs) if idx == 0: internal_x0 = None nfev += info['nfev'] njev += info.get('njev', 0) new_conds = self.get_conds(x0, params, conds) if new_conds == conds: break else: conds = new_conds idx += 1 if idx == conditional_maxiter: raise Exception("Solving failed, conditional_maxiter reached") self.internal_x = info['x'] self.internal_params = neqsys.internal_params result = { 'x': info['x'], 'success': info['success'], 'conditions': conds, 'nfev': nfev, 'njev': njev, } if 'fun' in info: result['fun'] = info['fun'] return x0, result
[ "def", "solve", "(", "self", ",", "x0", ",", "params", "=", "(", ")", ",", "internal_x0", "=", "None", ",", "solver", "=", "None", ",", "conditional_maxiter", "=", "20", ",", "initial_conditions", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "initial_conditions", "is", "not", "None", ":", "conds", "=", "initial_conditions", "else", ":", "conds", "=", "self", ".", "get_conds", "(", "x0", ",", "params", ",", "initial_conditions", ")", "idx", ",", "nfev", ",", "njev", "=", "0", ",", "0", ",", "0", "while", "idx", "<", "conditional_maxiter", ":", "neqsys", "=", "self", ".", "neqsys_factory", "(", "conds", ")", "x0", ",", "info", "=", "neqsys", ".", "solve", "(", "x0", ",", "params", ",", "internal_x0", ",", "solver", ",", "*", "*", "kwargs", ")", "if", "idx", "==", "0", ":", "internal_x0", "=", "None", "nfev", "+=", "info", "[", "'nfev'", "]", "njev", "+=", "info", ".", "get", "(", "'njev'", ",", "0", ")", "new_conds", "=", "self", ".", "get_conds", "(", "x0", ",", "params", ",", "conds", ")", "if", "new_conds", "==", "conds", ":", "break", "else", ":", "conds", "=", "new_conds", "idx", "+=", "1", "if", "idx", "==", "conditional_maxiter", ":", "raise", "Exception", "(", "\"Solving failed, conditional_maxiter reached\"", ")", "self", ".", "internal_x", "=", "info", "[", "'x'", "]", "self", ".", "internal_params", "=", "neqsys", ".", "internal_params", "result", "=", "{", "'x'", ":", "info", "[", "'x'", "]", ",", "'success'", ":", "info", "[", "'success'", "]", ",", "'conditions'", ":", "conds", ",", "'nfev'", ":", "nfev", ",", "'njev'", ":", "njev", ",", "}", "if", "'fun'", "in", "info", ":", "result", "[", "'fun'", "]", "=", "info", "[", "'fun'", "]", "return", "x0", ",", "result" ]
Solve the problem (systems of equations) Parameters ---------- x0 : array Guess. params : array See :meth:`NeqSys.solve`. internal_x0 : array See :meth:`NeqSys.solve`. solver : str or callable or iterable of such. See :meth:`NeqSys.solve`. conditional_maxiter : int Maximum number of switches between conditions. initial_conditions : iterable of bools Corresponding conditions to ``x0`` \\*\\*kwargs : Keyword arguments passed on to :meth:`NeqSys.solve`.
[ "Solve", "the", "problem", "(", "systems", "of", "equations", ")" ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L626-L679
bjodah/pyneqsys
examples/bi_dimensional.py
solve
def solve(guess_a, guess_b, power, solver='scipy'): """ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """ # The problem is 2 dimensional so we need 2 symbols x = sp.symbols('x:2', real=True) # There is a user specified parameter ``p`` in this problem: p = sp.Symbol('p', real=True, negative=False, integer=True) # Our system consists of 2-non-linear equations: f = [x[0] + (x[0] - x[1])**p/2 - 1, (x[1] - x[0])**p/2 + x[1]] # We construct our ``SymbolicSys`` instance by passing variables, equations and parameters: neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically) # Finally we solve the system using user-specified ``solver`` choice: return neqsys.solve([guess_a, guess_b], [power], solver=solver)
python
def solve(guess_a, guess_b, power, solver='scipy'): """ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """ # The problem is 2 dimensional so we need 2 symbols x = sp.symbols('x:2', real=True) # There is a user specified parameter ``p`` in this problem: p = sp.Symbol('p', real=True, negative=False, integer=True) # Our system consists of 2-non-linear equations: f = [x[0] + (x[0] - x[1])**p/2 - 1, (x[1] - x[0])**p/2 + x[1]] # We construct our ``SymbolicSys`` instance by passing variables, equations and parameters: neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically) # Finally we solve the system using user-specified ``solver`` choice: return neqsys.solve([guess_a, guess_b], [power], solver=solver)
[ "def", "solve", "(", "guess_a", ",", "guess_b", ",", "power", ",", "solver", "=", "'scipy'", ")", ":", "# The problem is 2 dimensional so we need 2 symbols", "x", "=", "sp", ".", "symbols", "(", "'x:2'", ",", "real", "=", "True", ")", "# There is a user specified parameter ``p`` in this problem:", "p", "=", "sp", ".", "Symbol", "(", "'p'", ",", "real", "=", "True", ",", "negative", "=", "False", ",", "integer", "=", "True", ")", "# Our system consists of 2-non-linear equations:", "f", "=", "[", "x", "[", "0", "]", "+", "(", "x", "[", "0", "]", "-", "x", "[", "1", "]", ")", "**", "p", "/", "2", "-", "1", ",", "(", "x", "[", "1", "]", "-", "x", "[", "0", "]", ")", "**", "p", "/", "2", "+", "x", "[", "1", "]", "]", "# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:", "neqsys", "=", "SymbolicSys", "(", "x", ",", "f", ",", "[", "p", "]", ")", "# (this will derive the Jacobian symbolically)", "# Finally we solve the system using user-specified ``solver`` choice:", "return", "neqsys", ".", "solve", "(", "[", "guess_a", ",", "guess_b", "]", ",", "[", "power", "]", ",", "solver", "=", "solver", ")" ]
Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method.
[ "Constructs", "a", "pyneqsys", ".", "symbolic", ".", "SymbolicSys", "instance", "and", "returns", "from", "its", "solve", "method", "." ]
train
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/examples/bi_dimensional.py#L13-L26
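A hypothetical invocation of the example function above (guess values chosen here for illustration; SymPy and SciPy are assumed to be installed, with ``sp`` and ``SymbolicSys`` imported at the top of the module as its body implies):

x, info = solve(0.5, -0.5, 3)
if info['success']:
    print(x)  # should be close to [0.8411639, 0.1588361], cf. the NeqSys.solve doctest above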