Dataset schema (field name, feature type, observed value statistics):

partition           stringclasses    3 values
func_name           stringlengths    1 to 134
docstring           stringlengths    1 to 46.9k
path                stringlengths    4 to 223
original_string     stringlengths    75 to 104k
code                stringlengths    75 to 104k
docstring_tokens    listlengths      1 to 1.97k
repo                stringlengths    7 to 55
language            stringclasses    1 value
url                 stringlengths    87 to 315
code_tokens         listlengths      19 to 28.4k
sha                 stringlengths    40 to 40

The example records below follow this field order.
partition: test
func_name: get_token
docstring: Parse the next token in the stream. Returns a `LatexToken`. Raises `LatexWalkerEndOfStream` if end of stream reached. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_token()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_token(s, pos, brackets_are_chars=True, environments=True, **parse_flags):
    """
    Parse the next token in the stream. Returns a `LatexToken`. Raises
    `LatexWalkerEndOfStream` if end of stream reached.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_token()` instead.
    """
    return LatexWalker(s, **parse_flags).get_token(pos=pos,
                                                   brackets_are_chars=brackets_are_chars,
                                                   environments=environments)
[ "Parse", "the", "next", "token", "in", "the", "stream", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1255-L1266
[ "def", "get_token", "(", "s", ",", "pos", ",", "brackets_are_chars", "=", "True", ",", "environments", "=", "True", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_token", "(", "pos", "=", "pos", ",", "brackets_are_chars", "=", "brackets_are_chars", ",", "environments", "=", "environments", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
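The record above documents the deprecated module-level `get_token()` wrapper. As an illustration (not part of the dataset), a minimal usage sketch assuming pylatexenc 1.x, where this wrapper still exists:

from pylatexenc import latexwalker

# Tokenize the first token of a short LaTeX string with the deprecated helper.
tok = latexwalker.get_token(r"\textbf{Hi}", pos=0)
print(tok.tok, tok.arg)   # expected: a 'macro' token whose argument is 'textbf'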
partition: test
func_name: get_latex_expression
docstring: Reads a latex expression, e.g. macro argument. This may be a single char, an escape sequence, or a expression placed in braces. Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of the expression, and `len` is its length. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_expression(s, pos, **parse_flags):
    """
    Reads a latex expression, e.g. macro argument. This may be a single char, an
    escape sequence, or a expression placed in braces.

    Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first char of
    the expression, and `len` is its length.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_expression(pos=pos)
[ "Reads", "a", "latex", "expression", "e", ".", "g", ".", "macro", "argument", ".", "This", "may", "be", "a", "single", "char", "an", "escape", "sequence", "or", "a", "expression", "placed", "in", "braces", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1269-L1281
[ "def", "get_latex_expression", "(", "s", ",", "pos", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_expression", "(", "pos", "=", "pos", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
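A hedged usage sketch (not part of the dataset) for the deprecated module-level helper above, assuming pylatexenc 1.x; the input string is made up for the example:

from pylatexenc import latexwalker

# Read one "expression" starting at pos=0; a braced group counts as a single expression.
node, pos, length = latexwalker.get_latex_expression(r"{\bf x} y", pos=0)
print(type(node).__name__, pos, length)   # expected: a group node spanning '{\bf x}'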
partition: test
func_name: get_latex_maybe_optional_arg
docstring: Attempts to parse an optional argument. Returns a tuple `(groupnode, pos, len)` if success, otherwise returns None. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_maybe_optional_arg(s, pos, **parse_flags):
    """
    Attempts to parse an optional argument. Returns a tuple `(groupnode, pos, len)`
    if success, otherwise returns None.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_maybe_optional_arg(pos=pos)
[ "Attempts", "to", "parse", "an", "optional", "argument", ".", "Returns", "a", "tuple", "(", "groupnode", "pos", "len", ")", "if", "success", "otherwise", "returns", "None", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1284-L1293
[ "def", "get_latex_maybe_optional_arg", "(", "s", ",", "pos", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_maybe_optional_arg", "(", "pos", "=", "pos", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
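To illustrate the return convention documented above (a tuple on success, None otherwise), a small sketch assuming pylatexenc 1.x; the input strings are invented for the example:

from pylatexenc import latexwalker

# '[12pt]' parses as an optional argument; a plain braced group does not.
print(latexwalker.get_latex_maybe_optional_arg("[12pt]{article}", pos=0) is not None)  # True
print(latexwalker.get_latex_maybe_optional_arg("{article}", pos=0) is None)            # True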
partition: test
func_name: get_latex_braced_group
docstring: Reads a latex expression enclosed in braces {...}. The first token of `s[pos:]` must be an opening brace. Returns a tuple `(node, pos, len)`. `pos` is the first char of the expression (which has to be an opening brace), and `len` is its length, including the closing brace. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_braced_group()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_braced_group(s, pos, brace_type='{', **parse_flags):
    """
    Reads a latex expression enclosed in braces {...}. The first token of `s[pos:]`
    must be an opening brace.

    Returns a tuple `(node, pos, len)`. `pos` is the first char of the expression
    (which has to be an opening brace), and `len` is its length, including the
    closing brace.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_braced_group()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_braced_group(pos=pos,
                                                                brace_type=brace_type)
[ "Reads", "a", "latex", "expression", "enclosed", "in", "braces", "{", "...", "}", ".", "The", "first", "token", "of", "s", "[", "pos", ":", "]", "must", "be", "an", "opening", "brace", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1296-L1309
[ "def", "get_latex_braced_group", "(", "s", ",", "pos", ",", "brace_type", "=", "'{'", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_braced_group", "(", "pos", "=", "pos", ",", "brace_type", "=", "brace_type", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
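A minimal sketch (not from the dataset) of the deprecated helper above, assuming pylatexenc 1.x; note that the returned length includes the closing brace:

from pylatexenc import latexwalker

node, pos, length = latexwalker.get_latex_braced_group(r"{Hello \world} tail", pos=0)
print(pos, length)   # expected: 0 14, i.e. the span of '{Hello \world}' including '}'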
partition: test
func_name: get_latex_environment
docstring: Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first token in the stream must be the \\begin{environment}. Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_environment(s, pos, environmentname=None, **parse_flags):
    """
    Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}.
    The first token in the stream must be the \\begin{environment}.

    Returns a tuple (node, pos, len) with node being a
    :py:class:`LatexEnvironmentNode`.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_environment(pos=pos,
                                                               environmentname=environmentname)
[ "Reads", "a", "latex", "expression", "enclosed", "in", "a", "\\\\", "begin", "{", "environment", "}", "...", "\\\\", "end", "{", "environment", "}", ".", "The", "first", "token", "in", "the", "stream", "must", "be", "the", "\\\\", "begin", "{", "environment", "}", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1312-L1323
[ "def", "get_latex_environment", "(", "s", ",", "pos", ",", "environmentname", "=", "None", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_environment", "(", "pos", "=", "pos", ",", "environmentname", "=", "environmentname", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
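A hedged usage sketch for the deprecated wrapper above (pylatexenc 1.x assumed); passing `environmentname` makes the parser verify the name of the environment it finds:

from pylatexenc import latexwalker

s = r"\begin{center}Some text\end{center} and more"
node, pos, length = latexwalker.get_latex_environment(s, pos=0, environmentname='center')
print(pos, length)   # expected: 0 and the length up to and including \end{center}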
partition: test
func_name: get_latex_nodes
docstring: Parses latex content `s`. Returns a tuple `(nodelist, pos, len)` where nodelist is a list of `LatexNode` 's. If `stop_upon_closing_brace` is given, then `len` includes the closing brace, but the closing brace is not included in any of the nodes in the `nodelist`. .. deprecated:: 1.0 Please use :py:meth:`LatexWalker.get_latex_nodes()` instead.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_nodes(s, pos=0, stop_upon_closing_brace=None, stop_upon_end_environment=None,
                    stop_upon_closing_mathmode=None, **parse_flags):
    """
    Parses latex content `s`.

    Returns a tuple `(nodelist, pos, len)` where nodelist is a list of
    `LatexNode` 's.

    If `stop_upon_closing_brace` is given, then `len` includes the closing brace,
    but the closing brace is not included in any of the nodes in the `nodelist`.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_nodes()` instead.
    """
    return LatexWalker(s, **parse_flags).get_latex_nodes(
        stop_upon_closing_brace=stop_upon_closing_brace,
        stop_upon_end_environment=stop_upon_end_environment,
        stop_upon_closing_mathmode=stop_upon_closing_mathmode)
[ "Parses", "latex", "content", "s", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L1325-L1341
[ "def", "get_latex_nodes", "(", "s", ",", "pos", "=", "0", ",", "stop_upon_closing_brace", "=", "None", ",", "stop_upon_end_environment", "=", "None", ",", "stop_upon_closing_mathmode", "=", "None", ",", "*", "*", "parse_flags", ")", ":", "return", "LatexWalker", "(", "s", ",", "*", "*", "parse_flags", ")", ".", "get_latex_nodes", "(", "stop_upon_closing_brace", "=", "stop_upon_closing_brace", ",", "stop_upon_end_environment", "=", "stop_upon_end_environment", ",", "stop_upon_closing_mathmode", "=", "stop_upon_closing_mathmode", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
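As an illustration of the node-list return value (not part of the dataset), a quick sketch assuming pylatexenc 1.x:

from pylatexenc import latexwalker

nodes, pos, length = latexwalker.get_latex_nodes(r"Hello \textbf{world}! % comment")
for n in nodes:
    print(type(n).__name__)   # e.g. chars, macro and comment nodes, in document order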
partition: test
func_name: LatexWalker.get_token
docstring: Parses the latex content given to the constructor (and stored in `self.s`), starting at position `pos`, to parse a single "token", as defined by :py:class:`LatexToken`. Parse the token in the stream pointed to at position `pos`. Returns a :py:class:`LatexToken`. Raises :py:exc:`LatexWalkerEndOfStream` if end of stream reached. If `brackets_are_chars=False`, then square bracket characters count as 'brace_open' and 'brace_close' token types (see :py:class:`LatexToken`); otherwise (the default) they are considered just like other normal characters. If `environments=False`, then '\\begin' and '\\end' tokens count as regular 'macro' tokens (see :py:class:`LatexToken`); otherwise (the default) they are considered as the token types 'begin_environment' and 'end_environment'. If `keep_inline_math` is not `None`, then that value overrides that of `self.keep_inline_math` for the duration of this method call.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_token(self, pos, brackets_are_chars=True, environments=True, keep_inline_math=None):
    """
    Parses the latex content given to the constructor (and stored in `self.s`),
    starting at position `pos`, to parse a single "token", as defined by
    :py:class:`LatexToken`.

    Parse the token in the stream pointed to at position `pos`.

    Returns a :py:class:`LatexToken`. Raises :py:exc:`LatexWalkerEndOfStream` if
    end of stream reached.

    If `brackets_are_chars=False`, then square bracket characters count as
    'brace_open' and 'brace_close' token types (see :py:class:`LatexToken`);
    otherwise (the default) they are considered just like other normal characters.

    If `environments=False`, then '\\begin' and '\\end' tokens count as regular
    'macro' tokens (see :py:class:`LatexToken`); otherwise (the default) they are
    considered as the token types 'begin_environment' and 'end_environment'.

    If `keep_inline_math` is not `None`, then that value overrides that of
    `self.keep_inline_math` for the duration of this method call.
    """

    s = self.s # shorthand

    with _PushPropOverride(self, 'keep_inline_math', keep_inline_math):

        space = ''
        while (pos < len(s) and s[pos].isspace()):
            space += s[pos]
            pos += 1
            if (space.endswith('\n\n')):  # two \n's indicate new paragraph.
                # Adding pre-space is overkill here I think.
                return LatexToken(tok='char', arg='\n\n', pos=pos-2, len=2, pre_space='')

        if (pos >= len(s)):
            raise LatexWalkerEndOfStream()

        if (s[pos] == '\\'):
            # escape sequence
            i = 2
            macro = s[pos+1] # next char is necessarily part of macro
            # following chars part of macro only if all are alphabetical
            isalphamacro = False
            if (s[pos+1].isalpha()):
                isalphamacro = True
                while pos+i<len(s) and s[pos+i].isalpha():
                    macro += s[pos+i]
                    i += 1
            # possibly followed by a star
            if (pos+i<len(s) and s[pos+i] == '*'):
                macro += '*'
                i += 1
            # see if we have a begin/end environment
            if (environments and (macro == 'begin' or macro == 'end')):
                # \begin{environment} or \end{environment}
                envmatch = re.match(r'^\s*\{([\w*]+)\}', s[pos+i:])
                if (envmatch is None):
                    raise LatexWalkerParseError(
                        s=s, pos=pos,
                        msg="Bad \\%s macro: expected {environment}" %(macro)
                    )
                return LatexToken(
                    tok=('begin_environment' if macro == 'begin' else 'end_environment'),
                    arg=envmatch.group(1),
                    pos=pos,
                    len=i+envmatch.end(), # !!envmatch.end() counts from pos+i
                    pre_space=space
                )
            # get the following whitespace, and store it in the macro's post_space
            post_space = ''
            if isalphamacro:
                # important, LaTeX does not consume space after non-alpha macros, like \&
                while pos+i<len(s) and s[pos+i].isspace():
                    post_space += s[pos+i]
                    i += 1
            return LatexToken(tok='macro', arg=macro, pos=pos, len=i,
                              pre_space=space, post_space=post_space)

        if (s[pos] == '%'):
            # latex comment
            m = re.search(r'(\n|\r|\n\r)\s*', s[pos:])
            mlen = None
            if m is not None:
                arglen = m.start() # relative to pos already
                mlen = m.end() # relative to pos already
                mspace = m.group()
            else:
                arglen = len(s)-pos # [ ==len(s[pos:]) ]
                mlen = arglen
                mspace = ''
            return LatexToken(tok='comment', arg=s[pos+1:pos+arglen], pos=pos, len=mlen,
                              pre_space=space, post_space=mspace)

        openbracechars = '{'
        closebracechars = '}'
        if not brackets_are_chars:
            openbracechars += '['
            closebracechars += ']'

        if s[pos] in openbracechars:
            return LatexToken(tok='brace_open', arg=s[pos], pos=pos, len=1, pre_space=space)

        if s[pos] in closebracechars:
            return LatexToken(tok='brace_close', arg=s[pos], pos=pos, len=1, pre_space=space)

        # check if it is an inline math char, if we care about inline math.
        if (s[pos] == '$' and self.keep_inline_math):
            # check that we don't have double-$$, which would be a display environment.
            if not (pos+1 < len(s) and s[pos+1] == '$'):
                return LatexToken(tok='mathmode_inline', arg=s[pos], pos=pos, len=1,
                                  pre_space=space)
            # otherwise, proceed to 'char' type.

        return LatexToken(tok='char', arg=s[pos], pos=pos, len=1, pre_space=space)
[ "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "starting", "at", "position", "pos", "to", "parse", "a", "single", "token", "as", "defined", "by", ":", "py", ":", "class", ":", "LatexToken", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L702-L821
[ "def", "get_token", "(", "self", ",", "pos", ",", "brackets_are_chars", "=", "True", ",", "environments", "=", "True", ",", "keep_inline_math", "=", "None", ")", ":", "s", "=", "self", ".", "s", "# shorthand", "with", "_PushPropOverride", "(", "self", ",", "'keep_inline_math'", ",", "keep_inline_math", ")", ":", "space", "=", "''", "while", "(", "pos", "<", "len", "(", "s", ")", "and", "s", "[", "pos", "]", ".", "isspace", "(", ")", ")", ":", "space", "+=", "s", "[", "pos", "]", "pos", "+=", "1", "if", "(", "space", ".", "endswith", "(", "'\\n\\n'", ")", ")", ":", "# two \\n's indicate new paragraph.", "# Adding pre-space is overkill here I think.", "return", "LatexToken", "(", "tok", "=", "'char'", ",", "arg", "=", "'\\n\\n'", ",", "pos", "=", "pos", "-", "2", ",", "len", "=", "2", ",", "pre_space", "=", "''", ")", "if", "(", "pos", ">=", "len", "(", "s", ")", ")", ":", "raise", "LatexWalkerEndOfStream", "(", ")", "if", "(", "s", "[", "pos", "]", "==", "'\\\\'", ")", ":", "# escape sequence", "i", "=", "2", "macro", "=", "s", "[", "pos", "+", "1", "]", "# next char is necessarily part of macro", "# following chars part of macro only if all are alphabetical", "isalphamacro", "=", "False", "if", "(", "s", "[", "pos", "+", "1", "]", ".", "isalpha", "(", ")", ")", ":", "isalphamacro", "=", "True", "while", "pos", "+", "i", "<", "len", "(", "s", ")", "and", "s", "[", "pos", "+", "i", "]", ".", "isalpha", "(", ")", ":", "macro", "+=", "s", "[", "pos", "+", "i", "]", "i", "+=", "1", "# possibly followed by a star", "if", "(", "pos", "+", "i", "<", "len", "(", "s", ")", "and", "s", "[", "pos", "+", "i", "]", "==", "'*'", ")", ":", "macro", "+=", "'*'", "i", "+=", "1", "# see if we have a begin/end environment", "if", "(", "environments", "and", "(", "macro", "==", "'begin'", "or", "macro", "==", "'end'", ")", ")", ":", "# \\begin{environment} or \\end{environment}", "envmatch", "=", "re", ".", "match", "(", "r'^\\s*\\{([\\w*]+)\\}'", ",", "s", "[", "pos", "+", "i", ":", "]", ")", "if", "(", "envmatch", "is", "None", ")", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "s", ",", "pos", "=", "pos", ",", "msg", "=", "\"Bad \\\\%s macro: expected {environment}\"", "%", "(", "macro", ")", ")", "return", "LatexToken", "(", "tok", "=", "(", "'begin_environment'", "if", "macro", "==", "'begin'", "else", "'end_environment'", ")", ",", "arg", "=", "envmatch", ".", "group", "(", "1", ")", ",", "pos", "=", "pos", ",", "len", "=", "i", "+", "envmatch", ".", "end", "(", ")", ",", "# !!envmatch.end() counts from pos+i", "pre_space", "=", "space", ")", "# get the following whitespace, and store it in the macro's post_space", "post_space", "=", "''", "if", "isalphamacro", ":", "# important, LaTeX does not consume space after non-alpha macros, like \\&", "while", "pos", "+", "i", "<", "len", "(", "s", ")", "and", "s", "[", "pos", "+", "i", "]", ".", "isspace", "(", ")", ":", "post_space", "+=", "s", "[", "pos", "+", "i", "]", "i", "+=", "1", "return", "LatexToken", "(", "tok", "=", "'macro'", ",", "arg", "=", "macro", ",", "pos", "=", "pos", ",", "len", "=", "i", ",", "pre_space", "=", "space", ",", "post_space", "=", "post_space", ")", "if", "(", "s", "[", "pos", "]", "==", "'%'", ")", ":", "# latex comment", "m", "=", "re", ".", "search", "(", "r'(\\n|\\r|\\n\\r)\\s*'", ",", "s", "[", "pos", ":", "]", ")", "mlen", "=", "None", "if", "m", "is", "not", "None", ":", "arglen", "=", "m", ".", "start", "(", ")", "# relative to pos already", "mlen", "=", "m", ".", "end", "(", ")", "# relative to pos already", 
"mspace", "=", "m", ".", "group", "(", ")", "else", ":", "arglen", "=", "len", "(", "s", ")", "-", "pos", "# [ ==len(s[pos:]) ]", "mlen", "=", "arglen", "mspace", "=", "''", "return", "LatexToken", "(", "tok", "=", "'comment'", ",", "arg", "=", "s", "[", "pos", "+", "1", ":", "pos", "+", "arglen", "]", ",", "pos", "=", "pos", ",", "len", "=", "mlen", ",", "pre_space", "=", "space", ",", "post_space", "=", "mspace", ")", "openbracechars", "=", "'{'", "closebracechars", "=", "'}'", "if", "not", "brackets_are_chars", ":", "openbracechars", "+=", "'['", "closebracechars", "+=", "']'", "if", "s", "[", "pos", "]", "in", "openbracechars", ":", "return", "LatexToken", "(", "tok", "=", "'brace_open'", ",", "arg", "=", "s", "[", "pos", "]", ",", "pos", "=", "pos", ",", "len", "=", "1", ",", "pre_space", "=", "space", ")", "if", "s", "[", "pos", "]", "in", "closebracechars", ":", "return", "LatexToken", "(", "tok", "=", "'brace_close'", ",", "arg", "=", "s", "[", "pos", "]", ",", "pos", "=", "pos", ",", "len", "=", "1", ",", "pre_space", "=", "space", ")", "# check if it is an inline math char, if we care about inline math.", "if", "(", "s", "[", "pos", "]", "==", "'$'", "and", "self", ".", "keep_inline_math", ")", ":", "# check that we don't have double-$$, which would be a display environment.", "if", "not", "(", "pos", "+", "1", "<", "len", "(", "s", ")", "and", "s", "[", "pos", "+", "1", "]", "==", "'$'", ")", ":", "return", "LatexToken", "(", "tok", "=", "'mathmode_inline'", ",", "arg", "=", "s", "[", "pos", "]", ",", "pos", "=", "pos", ",", "len", "=", "1", ",", "pre_space", "=", "space", ")", "# otherwise, proceed to 'char' type.", "return", "LatexToken", "(", "tok", "=", "'char'", ",", "arg", "=", "s", "[", "pos", "]", ",", "pos", "=", "pos", ",", "len", "=", "1", ",", "pre_space", "=", "space", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
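Not part of the dataset: a small driver loop showing how the tokenizer above might be used to enumerate tokens until the end of the stream, assuming pylatexenc 1.x:

from pylatexenc.latexwalker import LatexWalker, LatexWalkerEndOfStream

w = LatexWalker(r"\begin{itemize} \item Hello \end{itemize}")
pos = 0
while True:
    try:
        tok = w.get_token(pos)
    except LatexWalkerEndOfStream:
        break
    print(tok.tok, repr(tok.arg))
    pos = tok.pos + tok.len   # each token records its own position and length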
partition: test
func_name: LatexWalker.get_latex_expression
docstring: Parses the latex content given to the constructor (and stored in `self.s`), starting at position `pos`, to parse a single LaTeX expression. Reads a latex expression, e.g. macro argument. This may be a single char, an escape sequence, or a expression placed in braces. This is what TeX calls a "token" (and not what we call a token... anyway). Returns a tuple `(node, pos, len)`, where `pos` is the position of the first char of the expression and `len` the length of the expression.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_expression(self, pos, strict_braces=None):
    """
    Parses the latex content given to the constructor (and stored in `self.s`),
    starting at position `pos`, to parse a single LaTeX expression.

    Reads a latex expression, e.g. macro argument. This may be a single char, an
    escape sequence, or a expression placed in braces. This is what TeX calls a
    "token" (and not what we call a token... anyway).

    Returns a tuple `(node, pos, len)`, where `pos` is the position of the first
    char of the expression and `len` the length of the expression.
    """

    with _PushPropOverride(self, 'strict_braces', strict_braces):

        tok = self.get_token(pos, environments=False, keep_inline_math=False)

        if (tok.tok == 'macro'):
            if (tok.arg == 'end'):
                if not self.tolerant_parsing:
                    # error, this should be an \end{environment}, not an argument in itself
                    raise LatexWalkerParseError("Expected expression, got \end", self.s, pos)
                else:
                    return (LatexCharsNode(chars=''), tok.pos, 0)
            return (LatexMacroNode(macroname=tok.arg, nodeoptarg=None, nodeargs=[],
                                   macro_post_space=tok.post_space),
                    tok.pos, tok.len)
        if (tok.tok == 'comment'):
            return self.get_latex_expression(pos+tok.len)
        if (tok.tok == 'brace_open'):
            return self.get_latex_braced_group(tok.pos)
        if (tok.tok == 'brace_close'):
            if (self.strict_braces and not self.tolerant_parsing):
                raise LatexWalkerParseError("Expected expression, got closing brace!",
                                            self.s, pos)
            return (LatexCharsNode(chars=''), tok.pos, 0)
        if (tok.tok == 'char'):
            return (LatexCharsNode(chars=tok.arg), tok.pos, tok.len)

        raise LatexWalkerParseError("Unknown token type: %s" %(tok.tok), self.s, pos)
[ "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "starting", "at", "position", "pos", "to", "parse", "a", "single", "LaTeX", "expression", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L824-L862
[ "def", "get_latex_expression", "(", "self", ",", "pos", ",", "strict_braces", "=", "None", ")", ":", "with", "_PushPropOverride", "(", "self", ",", "'strict_braces'", ",", "strict_braces", ")", ":", "tok", "=", "self", ".", "get_token", "(", "pos", ",", "environments", "=", "False", ",", "keep_inline_math", "=", "False", ")", "if", "(", "tok", ".", "tok", "==", "'macro'", ")", ":", "if", "(", "tok", ".", "arg", "==", "'end'", ")", ":", "if", "not", "self", ".", "tolerant_parsing", ":", "# error, this should be an \\end{environment}, not an argument in itself", "raise", "LatexWalkerParseError", "(", "\"Expected expression, got \\end\"", ",", "self", ".", "s", ",", "pos", ")", "else", ":", "return", "(", "LatexCharsNode", "(", "chars", "=", "''", ")", ",", "tok", ".", "pos", ",", "0", ")", "return", "(", "LatexMacroNode", "(", "macroname", "=", "tok", ".", "arg", ",", "nodeoptarg", "=", "None", ",", "nodeargs", "=", "[", "]", ",", "macro_post_space", "=", "tok", ".", "post_space", ")", ",", "tok", ".", "pos", ",", "tok", ".", "len", ")", "if", "(", "tok", ".", "tok", "==", "'comment'", ")", ":", "return", "self", ".", "get_latex_expression", "(", "pos", "+", "tok", ".", "len", ")", "if", "(", "tok", ".", "tok", "==", "'brace_open'", ")", ":", "return", "self", ".", "get_latex_braced_group", "(", "tok", ".", "pos", ")", "if", "(", "tok", ".", "tok", "==", "'brace_close'", ")", ":", "if", "(", "self", ".", "strict_braces", "and", "not", "self", ".", "tolerant_parsing", ")", ":", "raise", "LatexWalkerParseError", "(", "\"Expected expression, got closing brace!\"", ",", "self", ".", "s", ",", "pos", ")", "return", "(", "LatexCharsNode", "(", "chars", "=", "''", ")", ",", "tok", ".", "pos", ",", "0", ")", "if", "(", "tok", ".", "tok", "==", "'char'", ")", ":", "return", "(", "LatexCharsNode", "(", "chars", "=", "tok", ".", "arg", ")", ",", "tok", ".", "pos", ",", "tok", ".", "len", ")", "raise", "LatexWalkerParseError", "(", "\"Unknown token type: %s\"", "%", "(", "tok", ".", "tok", ")", ",", "self", ".", "s", ",", "pos", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
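A usage sketch (not from the dataset) for the method above, assuming pylatexenc 1.x: reading the `\section` macro as one expression, then the braced argument that follows it as the next one:

from pylatexenc.latexwalker import LatexWalker

w = LatexWalker(r"\section{Introduction}")
macro_node, pos, length = w.get_latex_expression(pos=0)
arg_node, apos, alen = w.get_latex_expression(pos=pos + length)
print(type(macro_node).__name__, type(arg_node).__name__)   # a macro node, then a group node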
partition: test
func_name: LatexWalker.get_latex_maybe_optional_arg
docstring: Parses the latex content given to the constructor (and stored in `self.s`), starting at position `pos`, to attempt to parse an optional argument. Attempts to parse an optional argument. If this is successful, we return a tuple `(node, pos, len)` if success where `node` is a :py:class:`LatexGroupNode`. Otherwise, this method returns None.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_maybe_optional_arg(self, pos):
    """
    Parses the latex content given to the constructor (and stored in `self.s`),
    starting at position `pos`, to attempt to parse an optional argument.

    Attempts to parse an optional argument. If this is successful, we return a
    tuple `(node, pos, len)` if success where `node` is a
    :py:class:`LatexGroupNode`. Otherwise, this method returns None.
    """
    tok = self.get_token(pos, brackets_are_chars=False, environments=False)
    if (tok.tok == 'brace_open' and tok.arg == '['):
        return self.get_latex_braced_group(pos, brace_type='[')

    return None
[ "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "starting", "at", "position", "pos", "to", "attempt", "to", "parse", "an", "optional", "argument", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L865-L879
[ "def", "get_latex_maybe_optional_arg", "(", "self", ",", "pos", ")", ":", "tok", "=", "self", ".", "get_token", "(", "pos", ",", "brackets_are_chars", "=", "False", ",", "environments", "=", "False", ")", "if", "(", "tok", ".", "tok", "==", "'brace_open'", "and", "tok", ".", "arg", "==", "'['", ")", ":", "return", "self", ".", "get_latex_braced_group", "(", "pos", ",", "brace_type", "=", "'['", ")", "return", "None" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
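A small sketch (not part of the record) of calling the method above on a typical preamble line, assuming pylatexenc 1.x; position 11 is the character right after '\usepackage':

from pylatexenc.latexwalker import LatexWalker

w = LatexWalker(r"\usepackage[utf8]{inputenc}")
res = w.get_latex_maybe_optional_arg(pos=11)
print(res is not None)   # True: '[utf8]' is parsed as an optional argument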
partition: test
func_name: LatexWalker.get_latex_braced_group
docstring: Parses the latex content given to the constructor (and stored in `self.s`), starting at position `pos`, to read a latex group delimited by braces. Reads a latex expression enclosed in braces ``{ ... }``. The first token of `s[pos:]` must be an opening brace. Returns a tuple `(node, pos, len)`, where `node` is a :py:class:`LatexGroupNode` instance, `pos` is the position of the first char of the expression (which has to be an opening brace), and `len` is the length of the group, including the closing brace (relative to the starting position).
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_braced_group(self, pos, brace_type='{'):
    """
    Parses the latex content given to the constructor (and stored in `self.s`),
    starting at position `pos`, to read a latex group delimited by braces.

    Reads a latex expression enclosed in braces ``{ ... }``. The first token of
    `s[pos:]` must be an opening brace.

    Returns a tuple `(node, pos, len)`, where `node` is a
    :py:class:`LatexGroupNode` instance, `pos` is the position of the first char
    of the expression (which has to be an opening brace), and `len` is the length
    of the group, including the closing brace (relative to the starting position).
    """
    closing_brace = None
    if (brace_type == '{'):
        closing_brace = '}'
    elif (brace_type == '['):
        closing_brace = ']'
    else:
        raise LatexWalkerParseError(s=self.s, pos=pos,
                                    msg="Uknown brace type: %s" %(brace_type))
    brackets_are_chars = (brace_type != '[')

    firsttok = self.get_token(pos, brackets_are_chars=brackets_are_chars)
    if (firsttok.tok != 'brace_open' or firsttok.arg != brace_type):
        raise LatexWalkerParseError(
            s=self.s,
            pos=pos,
            msg='get_latex_braced_group: not an opening brace/bracket: %s' %(self.s[pos])
        )

    #pos = firsttok.pos + firsttok.len

    (nodelist, npos, nlen) = self.get_latex_nodes(firsttok.pos + firsttok.len,
                                                  stop_upon_closing_brace=closing_brace)

    return (LatexGroupNode(nodelist=nodelist), firsttok.pos, npos + nlen - firsttok.pos)
[ "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "starting", "at", "position", "pos", "to", "read", "a", "latex", "group", "delimited", "by", "braces", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L882-L919
[ "def", "get_latex_braced_group", "(", "self", ",", "pos", ",", "brace_type", "=", "'{'", ")", ":", "closing_brace", "=", "None", "if", "(", "brace_type", "==", "'{'", ")", ":", "closing_brace", "=", "'}'", "elif", "(", "brace_type", "==", "'['", ")", ":", "closing_brace", "=", "']'", "else", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "pos", ",", "msg", "=", "\"Uknown brace type: %s\"", "%", "(", "brace_type", ")", ")", "brackets_are_chars", "=", "(", "brace_type", "!=", "'['", ")", "firsttok", "=", "self", ".", "get_token", "(", "pos", ",", "brackets_are_chars", "=", "brackets_are_chars", ")", "if", "(", "firsttok", ".", "tok", "!=", "'brace_open'", "or", "firsttok", ".", "arg", "!=", "brace_type", ")", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "pos", ",", "msg", "=", "'get_latex_braced_group: not an opening brace/bracket: %s'", "%", "(", "self", ".", "s", "[", "pos", "]", ")", ")", "#pos = firsttok.pos + firsttok.len", "(", "nodelist", ",", "npos", ",", "nlen", ")", "=", "self", ".", "get_latex_nodes", "(", "firsttok", ".", "pos", "+", "firsttok", ".", "len", ",", "stop_upon_closing_brace", "=", "closing_brace", ")", "return", "(", "LatexGroupNode", "(", "nodelist", "=", "nodelist", ")", ",", "firsttok", ".", "pos", ",", "npos", "+", "nlen", "-", "firsttok", ".", "pos", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
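An illustrative call (not from the dataset) of the method above, assuming pylatexenc 1.x; the `nodelist` attribute is assumed to mirror the constructor argument shown in the code:

from pylatexenc.latexwalker import LatexWalker

w = LatexWalker("{one {two} three}")
groupnode, pos, length = w.get_latex_braced_group(pos=0)
print(length == len("{one {two} three}"))   # True: the length includes the closing brace
print(len(groupnode.nodelist))              # children of the outer group (assumed attribute)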
partition: test
func_name: LatexWalker.get_latex_environment
docstring: Parses the latex content given to the constructor (and stored in `self.s`), starting at position `pos`, to read a latex environment. Reads a latex expression enclosed in a ``\begin{environment}...\end{environment}``. The first token in the stream must be the ``\begin{environment}``. If `environmentname` is given and nonempty, then additionally a :py:exc:`LatexWalkerParseError` is raised if the environment in the input stream does not match the provided name. This function will attempt to heuristically parse an optional argument, and possibly a mandatory argument given to the environment. No space is allowed between ``\begin{environment}`` and an opening square bracket or opening brace. Returns a tuple (node, pos, len) with node being a :py:class:`LatexEnvironmentNode`.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_environment(self, pos, environmentname=None):
    r"""
    Parses the latex content given to the constructor (and stored in `self.s`),
    starting at position `pos`, to read a latex environment.

    Reads a latex expression enclosed in a
    ``\begin{environment}...\end{environment}``. The first token in the stream
    must be the ``\begin{environment}``.

    If `environmentname` is given and nonempty, then additionally a
    :py:exc:`LatexWalkerParseError` is raised if the environment in the input
    stream does not match the provided name.

    This function will attempt to heuristically parse an optional argument, and
    possibly a mandatory argument given to the environment. No space is allowed
    between ``\begin{environment}`` and an opening square bracket or opening
    brace.

    Returns a tuple (node, pos, len) with node being a
    :py:class:`LatexEnvironmentNode`.
    """
    startpos = pos

    firsttok = self.get_token(pos)
    if (firsttok.tok != 'begin_environment' or
        (environmentname is not None and firsttok.arg != environmentname)):
        raise LatexWalkerParseError(
            s=self.s, pos=pos,
            msg=r'get_latex_environment: expected \begin{%s}: %s' %(
                environmentname if environmentname is not None else '<environment name>',
                firsttok.arg
            ))
    if (environmentname is None):
        environmentname = firsttok.arg

    pos = firsttok.pos + firsttok.len

    optargs = []
    args = []

    # see if the \begin{environment} is immediately followed by some
    # options. Important: Don't eat the brace of a commutator!! Don't allow
    # any space between the environment and the open bracket.
    optargtuple = None
    if (self.s[pos] == '['):
        optargtuple = self.get_latex_maybe_optional_arg(pos)

    if (optargtuple is not None):
        optargs.append(optargtuple[0])
        pos = optargtuple[1]+optargtuple[2]
    else:
        # Try to see if we have a mandatory argument. Don't use get_token
        # as we don't want to skip any space.
        if self.s[pos] == '{':
            (argnode, apos, alen) = self.get_latex_braced_group(pos)
            args.append(argnode)
            pos = apos+alen

    (nodelist, npos, nlen) = self.get_latex_nodes(
        pos, stop_upon_end_environment=environmentname)

    return (LatexEnvironmentNode(envname=environmentname,
                                 nodelist=nodelist,
                                 optargs=optargs,
                                 args=args),
            startpos, npos+nlen-startpos)
[ "r", "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "starting", "at", "position", "pos", "to", "read", "a", "latex", "environment", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L922-L987
[ "def", "get_latex_environment", "(", "self", ",", "pos", ",", "environmentname", "=", "None", ")", ":", "startpos", "=", "pos", "firsttok", "=", "self", ".", "get_token", "(", "pos", ")", "if", "(", "firsttok", ".", "tok", "!=", "'begin_environment'", "or", "(", "environmentname", "is", "not", "None", "and", "firsttok", ".", "arg", "!=", "environmentname", ")", ")", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "pos", ",", "msg", "=", "r'get_latex_environment: expected \\begin{%s}: %s'", "%", "(", "environmentname", "if", "environmentname", "is", "not", "None", "else", "'<environment name>'", ",", "firsttok", ".", "arg", ")", ")", "if", "(", "environmentname", "is", "None", ")", ":", "environmentname", "=", "firsttok", ".", "arg", "pos", "=", "firsttok", ".", "pos", "+", "firsttok", ".", "len", "optargs", "=", "[", "]", "args", "=", "[", "]", "# see if the \\begin{environment} is immediately followed by some", "# options. Important: Don't eat the brace of a commutator!! Don't allow", "# any space between the environment and the open bracket.", "optargtuple", "=", "None", "if", "(", "self", ".", "s", "[", "pos", "]", "==", "'['", ")", ":", "optargtuple", "=", "self", ".", "get_latex_maybe_optional_arg", "(", "pos", ")", "if", "(", "optargtuple", "is", "not", "None", ")", ":", "optargs", ".", "append", "(", "optargtuple", "[", "0", "]", ")", "pos", "=", "optargtuple", "[", "1", "]", "+", "optargtuple", "[", "2", "]", "else", ":", "# Try to see if we have a mandatory argument. Don't use get_token", "# as we don't want to skip any space.", "if", "self", ".", "s", "[", "pos", "]", "==", "'{'", ":", "(", "argnode", ",", "apos", ",", "alen", ")", "=", "self", ".", "get_latex_braced_group", "(", "pos", ")", "args", ".", "append", "(", "argnode", ")", "pos", "=", "apos", "+", "alen", "(", "nodelist", ",", "npos", ",", "nlen", ")", "=", "self", ".", "get_latex_nodes", "(", "pos", ",", "stop_upon_end_environment", "=", "environmentname", ")", "return", "(", "LatexEnvironmentNode", "(", "envname", "=", "environmentname", ",", "nodelist", "=", "nodelist", ",", "optargs", "=", "optargs", ",", "args", "=", "args", ")", ",", "startpos", ",", "npos", "+", "nlen", "-", "startpos", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
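A hedged sketch (not part of the record) for the method above, assuming pylatexenc 1.x; the `envname` and `args` attributes are assumed to mirror the constructor arguments shown in the code:

from pylatexenc.latexwalker import LatexWalker

s = r"\begin{tabular}{ll} a & b \\ c & d \end{tabular}"
envnode, pos, length = LatexWalker(s).get_latex_environment(pos=0)
print(envnode.envname)    # 'tabular' (assumed attribute name)
print(len(envnode.args))  # 1: the '{ll}' group grabbed by the mandatory-argument heuristic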
partition: test
func_name: LatexWalker.get_latex_nodes
docstring: Parses the latex content given to the constructor (and stored in `self.s`) into a list of nodes. Returns a tuple `(nodelist, pos, len)` where nodelist is a list of :py:class:`LatexNode`\ 's. If `stop_upon_closing_brace` is given and set to a character, then parsing stops once the given closing brace is encountered (but not inside a subgroup). The brace is given as a character, ']' or '}'. The returned `len` includes the closing brace, but the closing brace is not included in any of the nodes in the `nodelist`. If `stop_upon_end_environment` is provided, then parsing stops once the given environment was closed. If there is an environment mismatch, then a `LatexWalkerParseError` is raised except in tolerant parsing mode (see py:meth:`parse_flags()`). Again, the closing environment is included in the length count but not the nodes. If `stop_upon_closing_mathmode` is specified, then the parsing stops once the corresponding math mode (assumed already open) is closed. Currently, only inline math modes delimited by ``$`` are supported. I.e., currently, if set, only the value ``stop_upon_closing_mathmode='$'`` is valid.
path: pylatexenc/latexwalker.py
code (= original_string):

def get_latex_nodes(self, pos=0, stop_upon_closing_brace=None,
                    stop_upon_end_environment=None, stop_upon_closing_mathmode=None):
    """
    Parses the latex content given to the constructor (and stored in `self.s`)
    into a list of nodes.

    Returns a tuple `(nodelist, pos, len)` where nodelist is a list of
    :py:class:`LatexNode`\ 's.

    If `stop_upon_closing_brace` is given and set to a character, then parsing
    stops once the given closing brace is encountered (but not inside a
    subgroup). The brace is given as a character, ']' or '}'. The returned `len`
    includes the closing brace, but the closing brace is not included in any of
    the nodes in the `nodelist`.

    If `stop_upon_end_environment` is provided, then parsing stops once the given
    environment was closed. If there is an environment mismatch, then a
    `LatexWalkerParseError` is raised except in tolerant parsing mode (see
    py:meth:`parse_flags()`). Again, the closing environment is included in the
    length count but not the nodes.

    If `stop_upon_closing_mathmode` is specified, then the parsing stops once the
    corresponding math mode (assumed already open) is closed. Currently, only
    inline math modes delimited by ``$`` are supported. I.e., currently, if set,
    only the value ``stop_upon_closing_mathmode='$'`` is valid.
    """

    nodelist = []

    brackets_are_chars = True
    if (stop_upon_closing_brace == ']'):
        brackets_are_chars = False

    origpos = pos

    class PosPointer:
        def __init__(self, pos=0, lastchars=''):
            self.pos = pos
            self.lastchars = lastchars

    p = PosPointer(pos)

    def do_read(nodelist, p):
        """
        Read a single token and process it, recursing into brace blocks and
        environments etc if needed, and appending stuff to nodelist.

        Return True whenever we should stop trying to read more. (e.g. upon
        reaching the a matched stop_upon_end_environment etc.)
        """
        try:
            tok = self.get_token(p.pos, brackets_are_chars=brackets_are_chars)
        except LatexWalkerEndOfStream:
            if self.tolerant_parsing:
                return True
            raise # re-raise

        p.pos = tok.pos + tok.len

        # if it's a char, just append it to the stream of last characters.
        if (tok.tok == 'char'):
            p.lastchars += tok.pre_space + tok.arg
            return False

        # if it's not a char, push the last `p.lastchars` into the node list before anything else
        if len(p.lastchars):
            strnode = LatexCharsNode(chars=p.lastchars+tok.pre_space)
            nodelist.append(strnode)
            p.lastchars = ''
        elif len(tok.pre_space):
            # If we have pre_space, add a separate chars node that contains
            # the spaces. We do this seperately, so that latex2text can
            # ignore these groups by default to avoid too much space on the
            # output. This allows latex2text to implement the
            # `strict_latex_spaces=True` flag correctly.
            spacestrnode = LatexCharsNode(chars=tok.pre_space)
            nodelist.append(spacestrnode)

        # and see what the token is.

        if (tok.tok == 'brace_close'):
            # we've reached the end of the group. stop the parsing.
            if (tok.arg != stop_upon_closing_brace):
                if (not self.tolerant_parsing):
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg='Unexpected mismatching closing brace: `%s\'' %(tok.arg)
                    )
                return False
            return True

        if (tok.tok == 'end_environment'):
            # we've reached the end of an environment.
            if (tok.arg != stop_upon_end_environment):
                if (not self.tolerant_parsing):
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg=('Unexpected mismatching closing environment: `%s\', '
                             'expecting `%s\'' %(tok.arg, stop_upon_end_environment))
                    )
                return False
            return True

        if (tok.tok == 'mathmode_inline'):
            # if we care about keeping math mode inlines verbatim, gulp all of the expression.
            if stop_upon_closing_mathmode is not None:
                if stop_upon_closing_mathmode != '$':
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg='Unexpected mismatching closing math mode: `$\''
                    )
                return True

            # we have encountered a new math inline, so gulp all of the math expression
            (mathinline_nodelist, mpos, mlen) = self.get_latex_nodes(
                p.pos, stop_upon_closing_mathmode='$')
            p.pos = mpos + mlen
            nodelist.append(LatexMathNode(displaytype='inline',
                                          nodelist=mathinline_nodelist))
            return

        if (tok.tok == 'comment'):
            commentnode = LatexCommentNode(comment=tok.arg,
                                           comment_post_space=tok.post_space)
            nodelist.append(commentnode)
            return

        if (tok.tok == 'brace_open'):
            # another braced group to read.
            (groupnode, bpos, blen) = self.get_latex_braced_group(tok.pos)
            p.pos = bpos + blen
            nodelist.append(groupnode)
            return

        if (tok.tok == 'begin_environment'):
            # an environment to read.
            (envnode, epos, elen) = self.get_latex_environment(
                tok.pos, environmentname=tok.arg)
            p.pos = epos + elen
            # add node and continue.
            nodelist.append(envnode)
            return

        if (tok.tok == 'macro'):
            # read a macro. see if it has arguments.
            nodeoptarg = None
            nodeargs = []
            macname = tok.arg.rstrip('*') # for lookup in macro_dict
            if macname in self.macro_dict:
                mac = self.macro_dict[macname]

                def getoptarg(pos):
                    """
                    Gets a possibly optional argument. returns (argnode, new-pos)
                    where argnode might be `None` if the argument was not specified.
                    """
                    optarginfotuple = self.get_latex_maybe_optional_arg(pos)
                    if optarginfotuple is not None:
                        (nodeoptarg, optargpos, optarglen) = optarginfotuple
                        return (nodeoptarg, optargpos+optarglen)
                    return (None, pos)

                def getarg(pos):
                    """
                    Gets a mandatory argument. returns (argnode, new-pos)
                    """
                    (nodearg, npos, nlen) = self.get_latex_expression(pos, strict_braces=False)
                    return (nodearg, npos + nlen)

                if mac.optarg:
                    (nodeoptarg, p.pos) = getoptarg(p.pos)

                if isinstance(mac.numargs, _basestring):
                    # specific argument specification
                    for arg in mac.numargs:
                        if arg == '{':
                            (node, p.pos) = getarg(p.pos)
                            nodeargs.append(node)
                        elif arg == '[':
                            (node, p.pos) = getoptarg(p.pos)
                            nodeargs.append(node)
                        else:
                            raise LatexWalkerError(
                                "Unknown macro argument kind for macro %s: %s"
                                % (mac.macroname, arg)
                            )
                else:
                    for n in range(mac.numargs):
                        (nodearg, p.pos) = getarg(p.pos)
                        nodeargs.append(nodearg)

            nodelist.append(LatexMacroNode(macroname=tok.arg,
                                           nodeoptarg=nodeoptarg,
                                           nodeargs=nodeargs,
                                           macro_post_space=tok.post_space))
            return None

        raise LatexWalkerParseError(s=self.s, pos=p.pos, msg="Unknown token: %r" %(tok))

    while True:
        try:
            r_endnow = do_read(nodelist, p)
        except LatexWalkerEndOfStream:
            if stop_upon_closing_brace or stop_upon_end_environment:
                # unexpected eof
                if (not self.tolerant_parsing):
                    raise LatexWalkerError("Unexpected end of stream!")
                else:
                    r_endnow = False
            else:
                r_endnow = True

        if (r_endnow):
            # add last chars
            if (p.lastchars):
                strnode = LatexCharsNode(chars=p.lastchars)
                nodelist.append(strnode)
            return (nodelist, origpos, p.pos - origpos)

    raise LatexWalkerError( # lgtm [py/unreachable-statement]
        "CONGRATULATIONS !! "
        "You are the first human to telepathically break an infinite loop !!!!!!!"
    )
[ "Parses", "the", "latex", "content", "given", "to", "the", "constructor", "(", "and", "stored", "in", "self", ".", "s", ")", "into", "a", "list", "of", "nodes", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexwalker.py#L992-L1218
[ "def", "get_latex_nodes", "(", "self", ",", "pos", "=", "0", ",", "stop_upon_closing_brace", "=", "None", ",", "stop_upon_end_environment", "=", "None", ",", "stop_upon_closing_mathmode", "=", "None", ")", ":", "nodelist", "=", "[", "]", "brackets_are_chars", "=", "True", "if", "(", "stop_upon_closing_brace", "==", "']'", ")", ":", "brackets_are_chars", "=", "False", "origpos", "=", "pos", "class", "PosPointer", ":", "def", "__init__", "(", "self", ",", "pos", "=", "0", ",", "lastchars", "=", "''", ")", ":", "self", ".", "pos", "=", "pos", "self", ".", "lastchars", "=", "lastchars", "p", "=", "PosPointer", "(", "pos", ")", "def", "do_read", "(", "nodelist", ",", "p", ")", ":", "\"\"\"\n Read a single token and process it, recursing into brace blocks and environments etc if\n needed, and appending stuff to nodelist.\n\n Return True whenever we should stop trying to read more. (e.g. upon reaching the a matched\n stop_upon_end_environment etc.)\n \"\"\"", "try", ":", "tok", "=", "self", ".", "get_token", "(", "p", ".", "pos", ",", "brackets_are_chars", "=", "brackets_are_chars", ")", "except", "LatexWalkerEndOfStream", ":", "if", "self", ".", "tolerant_parsing", ":", "return", "True", "raise", "# re-raise", "p", ".", "pos", "=", "tok", ".", "pos", "+", "tok", ".", "len", "# if it's a char, just append it to the stream of last characters.", "if", "(", "tok", ".", "tok", "==", "'char'", ")", ":", "p", ".", "lastchars", "+=", "tok", ".", "pre_space", "+", "tok", ".", "arg", "return", "False", "# if it's not a char, push the last `p.lastchars` into the node list before anything else", "if", "len", "(", "p", ".", "lastchars", ")", ":", "strnode", "=", "LatexCharsNode", "(", "chars", "=", "p", ".", "lastchars", "+", "tok", ".", "pre_space", ")", "nodelist", ".", "append", "(", "strnode", ")", "p", ".", "lastchars", "=", "''", "elif", "len", "(", "tok", ".", "pre_space", ")", ":", "# If we have pre_space, add a separate chars node that contains", "# the spaces. We do this seperately, so that latex2text can", "# ignore these groups by default to avoid too much space on the", "# output. This allows latex2text to implement the", "# `strict_latex_spaces=True` flag correctly.", "spacestrnode", "=", "LatexCharsNode", "(", "chars", "=", "tok", ".", "pre_space", ")", "nodelist", ".", "append", "(", "spacestrnode", ")", "# and see what the token is.", "if", "(", "tok", ".", "tok", "==", "'brace_close'", ")", ":", "# we've reached the end of the group. 
stop the parsing.", "if", "(", "tok", ".", "arg", "!=", "stop_upon_closing_brace", ")", ":", "if", "(", "not", "self", ".", "tolerant_parsing", ")", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "tok", ".", "pos", ",", "msg", "=", "'Unexpected mismatching closing brace: `%s\\''", "%", "(", "tok", ".", "arg", ")", ")", "return", "False", "return", "True", "if", "(", "tok", ".", "tok", "==", "'end_environment'", ")", ":", "# we've reached the end of an environment.", "if", "(", "tok", ".", "arg", "!=", "stop_upon_end_environment", ")", ":", "if", "(", "not", "self", ".", "tolerant_parsing", ")", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "tok", ".", "pos", ",", "msg", "=", "(", "'Unexpected mismatching closing environment: `%s\\', '", "'expecting `%s\\''", "%", "(", "tok", ".", "arg", ",", "stop_upon_end_environment", ")", ")", ")", "return", "False", "return", "True", "if", "(", "tok", ".", "tok", "==", "'mathmode_inline'", ")", ":", "# if we care about keeping math mode inlines verbatim, gulp all of the expression.", "if", "stop_upon_closing_mathmode", "is", "not", "None", ":", "if", "stop_upon_closing_mathmode", "!=", "'$'", ":", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "tok", ".", "pos", ",", "msg", "=", "'Unexpected mismatching closing math mode: `$\\''", ")", "return", "True", "# we have encountered a new math inline, so gulp all of the math expression", "(", "mathinline_nodelist", ",", "mpos", ",", "mlen", ")", "=", "self", ".", "get_latex_nodes", "(", "p", ".", "pos", ",", "stop_upon_closing_mathmode", "=", "'$'", ")", "p", ".", "pos", "=", "mpos", "+", "mlen", "nodelist", ".", "append", "(", "LatexMathNode", "(", "displaytype", "=", "'inline'", ",", "nodelist", "=", "mathinline_nodelist", ")", ")", "return", "if", "(", "tok", ".", "tok", "==", "'comment'", ")", ":", "commentnode", "=", "LatexCommentNode", "(", "comment", "=", "tok", ".", "arg", ",", "comment_post_space", "=", "tok", ".", "post_space", ")", "nodelist", ".", "append", "(", "commentnode", ")", "return", "if", "(", "tok", ".", "tok", "==", "'brace_open'", ")", ":", "# another braced group to read.", "(", "groupnode", ",", "bpos", ",", "blen", ")", "=", "self", ".", "get_latex_braced_group", "(", "tok", ".", "pos", ")", "p", ".", "pos", "=", "bpos", "+", "blen", "nodelist", ".", "append", "(", "groupnode", ")", "return", "if", "(", "tok", ".", "tok", "==", "'begin_environment'", ")", ":", "# an environment to read.", "(", "envnode", ",", "epos", ",", "elen", ")", "=", "self", ".", "get_latex_environment", "(", "tok", ".", "pos", ",", "environmentname", "=", "tok", ".", "arg", ")", "p", ".", "pos", "=", "epos", "+", "elen", "# add node and continue.", "nodelist", ".", "append", "(", "envnode", ")", "return", "if", "(", "tok", ".", "tok", "==", "'macro'", ")", ":", "# read a macro. see if it has arguments.", "nodeoptarg", "=", "None", "nodeargs", "=", "[", "]", "macname", "=", "tok", ".", "arg", ".", "rstrip", "(", "'*'", ")", "# for lookup in macro_dict", "if", "macname", "in", "self", ".", "macro_dict", ":", "mac", "=", "self", ".", "macro_dict", "[", "macname", "]", "def", "getoptarg", "(", "pos", ")", ":", "\"\"\"\n Gets a possibly optional argument. 
returns (argnode, new-pos) where argnode\n might be `None` if the argument was not specified.\n \"\"\"", "optarginfotuple", "=", "self", ".", "get_latex_maybe_optional_arg", "(", "pos", ")", "if", "optarginfotuple", "is", "not", "None", ":", "(", "nodeoptarg", ",", "optargpos", ",", "optarglen", ")", "=", "optarginfotuple", "return", "(", "nodeoptarg", ",", "optargpos", "+", "optarglen", ")", "return", "(", "None", ",", "pos", ")", "def", "getarg", "(", "pos", ")", ":", "\"\"\"\n Gets a mandatory argument. returns (argnode, new-pos)\n \"\"\"", "(", "nodearg", ",", "npos", ",", "nlen", ")", "=", "self", ".", "get_latex_expression", "(", "pos", ",", "strict_braces", "=", "False", ")", "return", "(", "nodearg", ",", "npos", "+", "nlen", ")", "if", "mac", ".", "optarg", ":", "(", "nodeoptarg", ",", "p", ".", "pos", ")", "=", "getoptarg", "(", "p", ".", "pos", ")", "if", "isinstance", "(", "mac", ".", "numargs", ",", "_basestring", ")", ":", "# specific argument specification", "for", "arg", "in", "mac", ".", "numargs", ":", "if", "arg", "==", "'{'", ":", "(", "node", ",", "p", ".", "pos", ")", "=", "getarg", "(", "p", ".", "pos", ")", "nodeargs", ".", "append", "(", "node", ")", "elif", "arg", "==", "'['", ":", "(", "node", ",", "p", ".", "pos", ")", "=", "getoptarg", "(", "p", ".", "pos", ")", "nodeargs", ".", "append", "(", "node", ")", "else", ":", "raise", "LatexWalkerError", "(", "\"Unknown macro argument kind for macro %s: %s\"", "%", "(", "mac", ".", "macroname", ",", "arg", ")", ")", "else", ":", "for", "n", "in", "range", "(", "mac", ".", "numargs", ")", ":", "(", "nodearg", ",", "p", ".", "pos", ")", "=", "getarg", "(", "p", ".", "pos", ")", "nodeargs", ".", "append", "(", "nodearg", ")", "nodelist", ".", "append", "(", "LatexMacroNode", "(", "macroname", "=", "tok", ".", "arg", ",", "nodeoptarg", "=", "nodeoptarg", ",", "nodeargs", "=", "nodeargs", ",", "macro_post_space", "=", "tok", ".", "post_space", ")", ")", "return", "None", "raise", "LatexWalkerParseError", "(", "s", "=", "self", ".", "s", ",", "pos", "=", "p", ".", "pos", ",", "msg", "=", "\"Unknown token: %r\"", "%", "(", "tok", ")", ")", "while", "True", ":", "try", ":", "r_endnow", "=", "do_read", "(", "nodelist", ",", "p", ")", "except", "LatexWalkerEndOfStream", ":", "if", "stop_upon_closing_brace", "or", "stop_upon_end_environment", ":", "# unexpected eof", "if", "(", "not", "self", ".", "tolerant_parsing", ")", ":", "raise", "LatexWalkerError", "(", "\"Unexpected end of stream!\"", ")", "else", ":", "r_endnow", "=", "False", "else", ":", "r_endnow", "=", "True", "if", "(", "r_endnow", ")", ":", "# add last chars", "if", "(", "p", ".", "lastchars", ")", ":", "strnode", "=", "LatexCharsNode", "(", "chars", "=", "p", ".", "lastchars", ")", "nodelist", ".", "append", "(", "strnode", ")", "return", "(", "nodelist", ",", "origpos", ",", "p", ".", "pos", "-", "origpos", ")", "raise", "LatexWalkerError", "(", "# lgtm [py/unreachable-statement]", "\"CONGRATULATIONS !! \"", "\"You are the first human to telepathically break an infinite loop !!!!!!!\"", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
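As a quick orientation for the record above, here is a minimal, illustrative sketch (not part of the dataset; the sample LaTeX string is made up) of how `get_latex_nodes()` is typically driven through the `LatexWalker` class:

from pylatexenc.latexwalker import LatexWalker

# get_latex_nodes() returns a (nodelist, pos, len) tuple.
walker = LatexWalker(r"Hello \textbf{world}.")
nodelist, pos, length = walker.get_latex_nodes(pos=0)

for node in nodelist:
    # Each entry is a LatexNode subclass (chars, macro, group, environment, ...).
    print(type(node).__name__)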
test
latex2text
Extracts text from `content` meant for database indexing. `content` is some LaTeX code. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead.
pylatexenc/latex2text.py
def latex2text(content, tolerant_parsing=False, keep_inline_math=False, keep_comments=False): """ Extracts text from `content` meant for database indexing. `content` is some LaTeX code. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead. """ (nodelist, tpos, tlen) = latexwalker.get_latex_nodes(content, keep_inline_math=keep_inline_math, tolerant_parsing=tolerant_parsing) return latexnodes2text(nodelist, keep_inline_math=keep_inline_math, keep_comments=keep_comments)
def latex2text(content, tolerant_parsing=False, keep_inline_math=False, keep_comments=False): """ Extracts text from `content` meant for database indexing. `content` is some LaTeX code. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead. """ (nodelist, tpos, tlen) = latexwalker.get_latex_nodes(content, keep_inline_math=keep_inline_math, tolerant_parsing=tolerant_parsing) return latexnodes2text(nodelist, keep_inline_math=keep_inline_math, keep_comments=keep_comments)
[ "Extracts", "text", "from", "content", "meant", "for", "database", "indexing", ".", "content", "is", "some", "LaTeX", "code", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L961-L973
[ "def", "latex2text", "(", "content", ",", "tolerant_parsing", "=", "False", ",", "keep_inline_math", "=", "False", ",", "keep_comments", "=", "False", ")", ":", "(", "nodelist", ",", "tpos", ",", "tlen", ")", "=", "latexwalker", ".", "get_latex_nodes", "(", "content", ",", "keep_inline_math", "=", "keep_inline_math", ",", "tolerant_parsing", "=", "tolerant_parsing", ")", "return", "latexnodes2text", "(", "nodelist", ",", "keep_inline_math", "=", "keep_inline_math", ",", "keep_comments", "=", "keep_comments", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
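A hedged usage sketch for this deprecated helper (the input string is invented for the example); it simply wraps the `LatexNodes2Text` class, so the equivalent recommended call is shown alongside:

from pylatexenc.latex2text import latex2text, LatexNodes2Text

tex = r"Some \emph{emphasized} text."

# Deprecated one-shot helper, kept for backwards compatibility:
print(latex2text(tex, tolerant_parsing=True))

# Equivalent, recommended form; parse flags are forwarded to LatexWalker:
print(LatexNodes2Text().latex_to_text(tex, tolerant_parsing=True))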
test
latexnodes2text
Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:func:`pylatexenc.latexwalker.get_latex_nodes()`. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead.
pylatexenc/latex2text.py
def latexnodes2text(nodelist, keep_inline_math=False, keep_comments=False): """ Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:func:`pylatexenc.latexwalker.get_latex_nodes()`. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead. """ return LatexNodes2Text( keep_inline_math=keep_inline_math, keep_comments=keep_comments ).nodelist_to_text(nodelist)
def latexnodes2text(nodelist, keep_inline_math=False, keep_comments=False): """ Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:func:`pylatexenc.latexwalker.get_latex_nodes()`. .. deprecated:: 1.0 Please use :py:class:`LatexNodes2Text` instead. """ return LatexNodes2Text( keep_inline_math=keep_inline_math, keep_comments=keep_comments ).nodelist_to_text(nodelist)
[ "Extracts", "text", "from", "a", "node", "list", ".", "nodelist", "is", "a", "list", "of", "nodes", "as", "returned", "by", ":", "py", ":", "func", ":", "pylatexenc", ".", "latexwalker", ".", "get_latex_nodes", "()", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L976-L988
[ "def", "latexnodes2text", "(", "nodelist", ",", "keep_inline_math", "=", "False", ",", "keep_comments", "=", "False", ")", ":", "return", "LatexNodes2Text", "(", "keep_inline_math", "=", "keep_inline_math", ",", "keep_comments", "=", "keep_comments", ")", ".", "nodelist_to_text", "(", "nodelist", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
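An illustrative sketch (not part of the dataset record) of the deprecated module-level path: build a node list with the walker first, then hand it to `latexnodes2text()`:

from pylatexenc import latexwalker
from pylatexenc.latex2text import latexnodes2text

# Parse first, then convert the resulting node list.
nodelist, pos, length = latexwalker.LatexWalker(r"caf\'e au lait").get_latex_nodes()
print(latexnodes2text(nodelist, keep_comments=False))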
test
LatexNodes2Text.set_tex_input_directory
Set where to look for input files when encountering the ``\\input`` or ``\\include`` macro. Alternatively, you may also override :py:meth:`read_input_file()` to implement a custom file lookup mechanism. The argument `tex_input_directory` is the directory relative to which to search for input files. If `strict_input` is set to `True`, then we always check that the referenced file lies within the subtree of `tex_input_directory`, prohibiting for instance hacks with '..' in filenames or using symbolic links to refer to files out of the directory tree. The argument `latex_walker_init_args` allows you to specify the parse flags passed to the constructor of :py:class:`pylatexenc.latexwalker.LatexWalker` when parsing the input file.
pylatexenc/latex2text.py
def set_tex_input_directory(self, tex_input_directory, latex_walker_init_args=None, strict_input=True): """ Set where to look for input files when encountering the ``\\input`` or ``\\include`` macro. Alternatively, you may also override :py:meth:`read_input_file()` to implement a custom file lookup mechanism. The argument `tex_input_directory` is the directory relative to which to search for input files. If `strict_input` is set to `True`, then we always check that the referenced file lies within the subtree of `tex_input_directory`, prohibiting for instance hacks with '..' in filenames or using symbolic links to refer to files out of the directory tree. The argument `latex_walker_init_args` allows you to specify the parse flags passed to the constructor of :py:class:`pylatexenc.latexwalker.LatexWalker` when parsing the input file. """ self.tex_input_directory = tex_input_directory self.latex_walker_init_args = latex_walker_init_args if latex_walker_init_args else {} self.strict_input = strict_input if tex_input_directory: self.macro_dict['input'] = MacroDef('input', lambda n: self._callback_input(n)) self.macro_dict['include'] = MacroDef('include', lambda n: self._callback_input(n)) else: self.macro_dict['input'] = MacroDef('input', discard=True) self.macro_dict['include'] = MacroDef('include', discard=True)
def set_tex_input_directory(self, tex_input_directory, latex_walker_init_args=None, strict_input=True): """ Set where to look for input files when encountering the ``\\input`` or ``\\include`` macro. Alternatively, you may also override :py:meth:`read_input_file()` to implement a custom file lookup mechanism. The argument `tex_input_directory` is the directory relative to which to search for input files. If `strict_input` is set to `True`, then we always check that the referenced file lies within the subtree of `tex_input_directory`, prohibiting for instance hacks with '..' in filenames or using symbolic links to refer to files out of the directory tree. The argument `latex_walker_init_args` allows you to specify the parse flags passed to the constructor of :py:class:`pylatexenc.latexwalker.LatexWalker` when parsing the input file. """ self.tex_input_directory = tex_input_directory self.latex_walker_init_args = latex_walker_init_args if latex_walker_init_args else {} self.strict_input = strict_input if tex_input_directory: self.macro_dict['input'] = MacroDef('input', lambda n: self._callback_input(n)) self.macro_dict['include'] = MacroDef('include', lambda n: self._callback_input(n)) else: self.macro_dict['input'] = MacroDef('input', discard=True) self.macro_dict['include'] = MacroDef('include', discard=True)
[ "Set", "where", "to", "look", "for", "input", "files", "when", "encountering", "the", "\\\\", "input", "or", "\\\\", "include", "macro", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L656-L686
[ "def", "set_tex_input_directory", "(", "self", ",", "tex_input_directory", ",", "latex_walker_init_args", "=", "None", ",", "strict_input", "=", "True", ")", ":", "self", ".", "tex_input_directory", "=", "tex_input_directory", "self", ".", "latex_walker_init_args", "=", "latex_walker_init_args", "if", "latex_walker_init_args", "else", "{", "}", "self", ".", "strict_input", "=", "strict_input", "if", "tex_input_directory", ":", "self", ".", "macro_dict", "[", "'input'", "]", "=", "MacroDef", "(", "'input'", ",", "lambda", "n", ":", "self", ".", "_callback_input", "(", "n", ")", ")", "self", ".", "macro_dict", "[", "'include'", "]", "=", "MacroDef", "(", "'include'", ",", "lambda", "n", ":", "self", ".", "_callback_input", "(", "n", ")", ")", "else", ":", "self", ".", "macro_dict", "[", "'input'", "]", "=", "MacroDef", "(", "'input'", ",", "discard", "=", "True", ")", "self", ".", "macro_dict", "[", "'include'", "]", "=", "MacroDef", "(", "'include'", ",", "discard", "=", "True", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
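A minimal sketch of how this method is typically used (the `./chapters` directory and `intro.tex` file are hypothetical, invented for the example):

from pylatexenc.latex2text import LatexNodes2Text

l2t = LatexNodes2Text()
# Resolve \input{...} / \include{...} relative to ./chapters, refusing anything
# that escapes that directory (strict_input=True is the default).
l2t.set_tex_input_directory("./chapters")
text = l2t.latex_to_text(r"\input{intro}")   # reads ./chapters/intro.tex if present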
test
LatexNodes2Text.read_input_file
This method may be overridden to implement a custom lookup mechanism when encountering ``\\input`` or ``\\include`` directives. The default implementation looks for a file of the given name relative to the directory set by :py:meth:`set_tex_input_directory()`. If `strict_input=True` was set, we ensure strictly that the file resides in a subtree of the reference input directory (after canonicalizing the paths and resolving all symlinks). You may override this method to obtain the input data in however way you see fit. (In that case, a call to `set_tex_input_directory()` may not be needed as that function simply sets properties which are used by the default implementation of `read_input_file()`.) This function accepts the referred filename as argument (the argument to the ``\\input`` macro), and should return a string with the file contents (or generate a warning or raise an error).
pylatexenc/latex2text.py
def read_input_file(self, fn): """ This method may be overridden to implement a custom lookup mechanism when encountering ``\\input`` or ``\\include`` directives. The default implementation looks for a file of the given name relative to the directory set by :py:meth:`set_tex_input_directory()`. If `strict_input=True` was set, we ensure strictly that the file resides in a subtree of the reference input directory (after canonicalizing the paths and resolving all symlinks). You may override this method to obtain the input data in however way you see fit. (In that case, a call to `set_tex_input_directory()` may not be needed as that function simply sets properties which are used by the default implementation of `read_input_file()`.) This function accepts the referred filename as argument (the argument to the ``\\input`` macro), and should return a string with the file contents (or generate a warning or raise an error). """ fnfull = os.path.realpath(os.path.join(self.tex_input_directory, fn)) if self.strict_input: # make sure that the input file is strictly within dirfull, and didn't escape with # '../..' tricks or via symlinks. dirfull = os.path.realpath(self.tex_input_directory) if not fnfull.startswith(dirfull): logger.warning( "Can't access path '%s' leading outside of mandated directory [strict input mode]", fn ) return '' if not os.path.exists(fnfull) and os.path.exists(fnfull + '.tex'): fnfull = fnfull + '.tex' if not os.path.exists(fnfull) and os.path.exists(fnfull + '.latex'): fnfull = fnfull + '.latex' if not os.path.isfile(fnfull): logger.warning(u"Error, file doesn't exist: '%s'", fn) return '' logger.debug("Reading input file %r", fnfull) try: with open(fnfull) as f: return f.read() except IOError as e: logger.warning(u"Error, can't access '%s': %s", fn, e) return ''
def read_input_file(self, fn): """ This method may be overridden to implement a custom lookup mechanism when encountering ``\\input`` or ``\\include`` directives. The default implementation looks for a file of the given name relative to the directory set by :py:meth:`set_tex_input_directory()`. If `strict_input=True` was set, we ensure strictly that the file resides in a subtree of the reference input directory (after canonicalizing the paths and resolving all symlinks). You may override this method to obtain the input data in however way you see fit. (In that case, a call to `set_tex_input_directory()` may not be needed as that function simply sets properties which are used by the default implementation of `read_input_file()`.) This function accepts the referred filename as argument (the argument to the ``\\input`` macro), and should return a string with the file contents (or generate a warning or raise an error). """ fnfull = os.path.realpath(os.path.join(self.tex_input_directory, fn)) if self.strict_input: # make sure that the input file is strictly within dirfull, and didn't escape with # '../..' tricks or via symlinks. dirfull = os.path.realpath(self.tex_input_directory) if not fnfull.startswith(dirfull): logger.warning( "Can't access path '%s' leading outside of mandated directory [strict input mode]", fn ) return '' if not os.path.exists(fnfull) and os.path.exists(fnfull + '.tex'): fnfull = fnfull + '.tex' if not os.path.exists(fnfull) and os.path.exists(fnfull + '.latex'): fnfull = fnfull + '.latex' if not os.path.isfile(fnfull): logger.warning(u"Error, file doesn't exist: '%s'", fn) return '' logger.debug("Reading input file %r", fnfull) try: with open(fnfull) as f: return f.read() except IOError as e: logger.warning(u"Error, can't access '%s': %s", fn, e) return ''
[ "This", "method", "may", "be", "overridden", "to", "implement", "a", "custom", "lookup", "mechanism", "when", "encountering", "\\\\", "input", "or", "\\\\", "include", "directives", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L689-L736
[ "def", "read_input_file", "(", "self", ",", "fn", ")", ":", "fnfull", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "tex_input_directory", ",", "fn", ")", ")", "if", "self", ".", "strict_input", ":", "# make sure that the input file is strictly within dirfull, and didn't escape with", "# '../..' tricks or via symlinks.", "dirfull", "=", "os", ".", "path", ".", "realpath", "(", "self", ".", "tex_input_directory", ")", "if", "not", "fnfull", ".", "startswith", "(", "dirfull", ")", ":", "logger", ".", "warning", "(", "\"Can't access path '%s' leading outside of mandated directory [strict input mode]\"", ",", "fn", ")", "return", "''", "if", "not", "os", ".", "path", ".", "exists", "(", "fnfull", ")", "and", "os", ".", "path", ".", "exists", "(", "fnfull", "+", "'.tex'", ")", ":", "fnfull", "=", "fnfull", "+", "'.tex'", "if", "not", "os", ".", "path", ".", "exists", "(", "fnfull", ")", "and", "os", ".", "path", ".", "exists", "(", "fnfull", "+", "'.latex'", ")", ":", "fnfull", "=", "fnfull", "+", "'.latex'", "if", "not", "os", ".", "path", ".", "isfile", "(", "fnfull", ")", ":", "logger", ".", "warning", "(", "u\"Error, file doesn't exist: '%s'\"", ",", "fn", ")", "return", "''", "logger", ".", "debug", "(", "\"Reading input file %r\"", ",", "fnfull", ")", "try", ":", "with", "open", "(", "fnfull", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "IOError", "as", "e", ":", "logger", ".", "warning", "(", "u\"Error, can't access '%s': %s\"", ",", "fn", ",", "e", ")", "return", "''" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
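A hedged sketch of the override mechanism described above; `DictBackedL2T` and its file dictionary are made up for illustration, and `set_tex_input_directory()` is still called here only so that the ``\input``/``\include`` callbacks get installed:

from pylatexenc.latex2text import LatexNodes2Text

class DictBackedL2T(LatexNodes2Text):
    """Hypothetical subclass that serves \\input'ed files from an in-memory dict."""
    def __init__(self, files, **kwargs):
        super(DictBackedL2T, self).__init__(**kwargs)
        self.files = files
        # The directory itself is ignored by our override below.
        self.set_tex_input_directory(".")

    def read_input_file(self, fn):
        # Return the file contents, or an empty string if unknown.
        return self.files.get(fn, "")

l2t = DictBackedL2T({"intro.tex": r"Hello \emph{world}."})
print(l2t.latex_to_text(r"\input{intro.tex}"))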
test
LatexNodes2Text.latex_to_text
Parses the given `latex` code and returns its textual representation. The `parse_flags` are the flags to give on to the :py:class:`pylatexenc.latexwalker.LatexWalker` constructor.
pylatexenc/latex2text.py
def latex_to_text(self, latex, **parse_flags): """ Parses the given `latex` code and returns its textual representation. The `parse_flags` are the flags to give on to the :py:class:`pylatexenc.latexwalker.LatexWalker` constructor. """ return self.nodelist_to_text(latexwalker.LatexWalker(latex, **parse_flags).get_latex_nodes()[0])
def latex_to_text(self, latex, **parse_flags): """ Parses the given `latex` code and returns its textual representation. The `parse_flags` are the flags to give on to the :py:class:`pylatexenc.latexwalker.LatexWalker` constructor. """ return self.nodelist_to_text(latexwalker.LatexWalker(latex, **parse_flags).get_latex_nodes()[0])
[ "Parses", "the", "given", "latex", "code", "and", "returns", "its", "textual", "representation", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L755-L762
[ "def", "latex_to_text", "(", "self", ",", "latex", ",", "*", "*", "parse_flags", ")", ":", "return", "self", ".", "nodelist_to_text", "(", "latexwalker", ".", "LatexWalker", "(", "latex", ",", "*", "*", "parse_flags", ")", ".", "get_latex_nodes", "(", ")", "[", "0", "]", ")" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
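An illustrative one-liner (the input string is invented); keyword arguments given to `latex_to_text()` are passed on to the `LatexWalker` constructor as parse flags:

from pylatexenc.latex2text import LatexNodes2Text

print(LatexNodes2Text().latex_to_text(r"\textbf{Bold} and \emph{emphasized} text",
                                       tolerant_parsing=True))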
test
LatexNodes2Text.nodelist_to_text
Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:meth:`pylatexenc.latexwalker.LatexWalker.get_latex_nodes()`. In addition to converting each node in the list to text using `node_to_text()`, we apply some global replacements and fine-tuning to the resulting text to account for `text_replacements` (e.g., to fix quotes, tab alignment ``&`` chars, etc.)
pylatexenc/latex2text.py
def nodelist_to_text(self, nodelist): """ Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:meth:`pylatexenc.latexwalker.LatexWalker.get_latex_nodes()`. In addition to converting each node in the list to text using `node_to_text()`, we apply some global replacements and fine-tuning to the resulting text to account for `text_replacements` (e.g., to fix quotes, tab alignment ``&`` chars, etc.) """ s = self._nodelistcontents_to_text(nodelist) # now, perform suitable replacements for pattern, replacement in self.text_replacements: if (hasattr(pattern, 'sub')): s = pattern.sub(replacement, s) else: s = s.replace(pattern, replacement) if not self.keep_inline_math: s = s.replace('$', ''); # removing math mode inline signs, just keep their Unicode counterparts.. return s
def nodelist_to_text(self, nodelist): """ Extracts text from a node list. `nodelist` is a list of nodes as returned by :py:meth:`pylatexenc.latexwalker.LatexWalker.get_latex_nodes()`. In addition to converting each node in the list to text using `node_to_text()`, we apply some global replacements and fine-tuning to the resulting text to account for `text_replacements` (e.g., to fix quotes, tab alignment ``&`` chars, etc.) """ s = self._nodelistcontents_to_text(nodelist) # now, perform suitable replacements for pattern, replacement in self.text_replacements: if (hasattr(pattern, 'sub')): s = pattern.sub(replacement, s) else: s = s.replace(pattern, replacement) if not self.keep_inline_math: s = s.replace('$', ''); # removing math mode inline signs, just keep their Unicode counterparts.. return s
[ "Extracts", "text", "from", "a", "node", "list", ".", "nodelist", "is", "a", "list", "of", "nodes", "as", "returned", "by", ":", "py", ":", "meth", ":", "pylatexenc", ".", "latexwalker", ".", "LatexWalker", ".", "get_latex_nodes", "()", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L765-L788
[ "def", "nodelist_to_text", "(", "self", ",", "nodelist", ")", ":", "s", "=", "self", ".", "_nodelistcontents_to_text", "(", "nodelist", ")", "# now, perform suitable replacements", "for", "pattern", ",", "replacement", "in", "self", ".", "text_replacements", ":", "if", "(", "hasattr", "(", "pattern", ",", "'sub'", ")", ")", ":", "s", "=", "pattern", ".", "sub", "(", "replacement", ",", "s", ")", "else", ":", "s", "=", "s", ".", "replace", "(", "pattern", ",", "replacement", ")", "if", "not", "self", ".", "keep_inline_math", ":", "s", "=", "s", ".", "replace", "(", "'$'", ",", "''", ")", "# removing math mode inline signs, just keep their Unicode counterparts..", "return", "s" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
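A small, hedged sketch (sample input invented) showing the two-step flow this method sits in: parse to a node list, then let `nodelist_to_text()` convert the nodes and apply the configured text replacements:

from pylatexenc import latexwalker
from pylatexenc.latex2text import LatexNodes2Text

walker = latexwalker.LatexWalker(r"``quotes'' and \& more")
nodelist = walker.get_latex_nodes()[0]

# Runs node_to_text() on every node, then applies text_replacements
# (quote characters, tab-alignment '&', etc.).
print(LatexNodes2Text().nodelist_to_text(nodelist))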
test
LatexNodes2Text._nodelistcontents_to_text
Turn the node list to text representations of each node. Basically apply `node_to_text()` to each node. (But not quite actually, since we take some care as to where we add whitespace.)
pylatexenc/latex2text.py
def _nodelistcontents_to_text(self, nodelist): """ Turn the node list to text representations of each node. Basically apply `node_to_text()` to each node. (But not quite actually, since we take some care as to where we add whitespace.) """ s = '' prev_node = None for node in nodelist: if self._is_bare_macro_node(prev_node) and node.isNodeType(latexwalker.LatexCharsNode): if not self.strict_latex_spaces['between-macro-and-chars']: # after a macro with absolutely no arguments, include post_space # in output by default if there are other chars that follow. # This is for more breathing space (especially in equations(?)), # and for compatibility with earlier versions of pylatexenc (<= # 1.3). This is NOT LaTeX' default behavior (see issue #11), so # only do this if `strict_latex_spaces=False`. s += prev_node.macro_post_space s += self.node_to_text(node) prev_node = node return s
def _nodelistcontents_to_text(self, nodelist): """ Turn the node list to text representations of each node. Basically apply `node_to_text()` to each node. (But not quite actually, since we take some care as to where we add whitespace.) """ s = '' prev_node = None for node in nodelist: if self._is_bare_macro_node(prev_node) and node.isNodeType(latexwalker.LatexCharsNode): if not self.strict_latex_spaces['between-macro-and-chars']: # after a macro with absolutely no arguments, include post_space # in output by default if there are other chars that follow. # This is for more breathing space (especially in equations(?)), # and for compatibility with earlier versions of pylatexenc (<= # 1.3). This is NOT LaTeX' default behavior (see issue #11), so # only do this if `strict_latex_spaces=False`. s += prev_node.macro_post_space s += self.node_to_text(node) prev_node = node return s
[ "Turn", "the", "node", "list", "to", "text", "representations", "of", "each", "node", ".", "Basically", "apply", "node_to_text", "()", "to", "each", "node", ".", "(", "But", "not", "quite", "actually", "since", "we", "take", "some", "care", "as", "to", "where", "we", "add", "whitespace", ".", ")" ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L791-L811
[ "def", "_nodelistcontents_to_text", "(", "self", ",", "nodelist", ")", ":", "s", "=", "''", "prev_node", "=", "None", "for", "node", "in", "nodelist", ":", "if", "self", ".", "_is_bare_macro_node", "(", "prev_node", ")", "and", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexCharsNode", ")", ":", "if", "not", "self", ".", "strict_latex_spaces", "[", "'between-macro-and-chars'", "]", ":", "# after a macro with absolutely no arguments, include post_space", "# in output by default if there are other chars that follow.", "# This is for more breathing space (especially in equations(?)),", "# and for compatibility with earlier versions of pylatexenc (<=", "# 1.3). This is NOT LaTeX' default behavior (see issue #11), so", "# only do this if `strict_latex_spaces=False`.", "s", "+=", "prev_node", ".", "macro_post_space", "s", "+=", "self", ".", "node_to_text", "(", "node", ")", "prev_node", "=", "node", "return", "s" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
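An illustrative sketch of the spacing behavior this internal helper implements, assuming the `strict_latex_spaces` constructor flag mentioned in the comments above is available in the installed version; exact output may vary between releases:

from pylatexenc.latex2text import LatexNodes2Text

src = r"\LaTeX is great"
# Default (non-strict) spacing: the space after the bare macro is kept in the output.
print(LatexNodes2Text().latex_to_text(src))
# Strict LaTeX spacing: the macro swallows its trailing space, as real LaTeX does.
print(LatexNodes2Text(strict_latex_spaces=True).latex_to_text(src))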
test
LatexNodes2Text.node_to_text
Return the textual representation of the given `node`. If `prev_node_hint` is specified, then the current node is formatted suitably as following the node given in `prev_node_hint`. This might affect how much space we keep/discard, etc.
pylatexenc/latex2text.py
def node_to_text(self, node, prev_node_hint=None): """ Return the textual representation of the given `node`. If `prev_node_hint` is specified, then the current node is formatted suitably as following the node given in `prev_node_hint`. This might affect how much space we keep/discard, etc. """ if node is None: return "" if node.isNodeType(latexwalker.LatexCharsNode): # Unless in strict latex spaces mode, ignore nodes consisting only # of empty chars, as this tends to produce too much space... These # have been inserted by LatexWalker() in some occasions to keep # track of all relevant pre_space of tokens, such as between two # braced groups ("{one} {two}") or other such situations. if not self.strict_latex_spaces['between-latex-constructs'] and len(node.chars.strip()) == 0: return "" return node.chars if node.isNodeType(latexwalker.LatexCommentNode): if self.keep_comments: if self.strict_latex_spaces['after-comment']: return '%' + node.comment + '\n' else: # default spaces, i.e., keep what spaces were already there after the comment return '%' + node.comment + node.comment_post_space else: if self.strict_latex_spaces['after-comment']: return "" else: # default spaces, i.e., keep what spaces were already there after the comment # This can be useful to preserve e.g. indentation of the next line return node.comment_post_space if node.isNodeType(latexwalker.LatexGroupNode): contents = self._groupnodecontents_to_text(node) if self.keep_braced_groups and len(contents) >= self.keep_braced_groups_minlen: return "{" + contents + "}" return contents def apply_simplify_repl(node, simplify_repl, nodelistargs, what): if callable(simplify_repl): if 'l2tobj' in getfullargspec(simplify_repl)[0]: # callable accepts an argument named 'l2tobj', provide pointer to self return simplify_repl(node, l2tobj=self) return simplify_repl(node) if '%' in simplify_repl: try: return simplify_repl % tuple([self._groupnodecontents_to_text(nn) for nn in nodelistargs]) except (TypeError, ValueError): logger.warning( "WARNING: Error in configuration: {} failed its substitution!".format(what) ) return simplify_repl # too bad, keep the percent signs as they are... return simplify_repl if node.isNodeType(latexwalker.LatexMacroNode): # get macro behavior definition. macroname = node.macroname.rstrip('*') if macroname in self.macro_dict: mac = self.macro_dict[macroname] else: # no predefined behavior, use default: mac = self.macro_dict[''] def get_macro_str_repl(node, macroname, mac): if mac.simplify_repl: return apply_simplify_repl(node, mac.simplify_repl, node.nodeargs, what="macro '%s'"%(macroname)) if mac.discard: return "" a = node.nodeargs if (node.nodeoptarg): a.prepend(node.nodeoptarg) return "".join([self._groupnodecontents_to_text(n) for n in a]) macrostr = get_macro_str_repl(node, macroname, mac) return macrostr if node.isNodeType(latexwalker.LatexEnvironmentNode): # get environment behavior definition. 
envname = node.envname.rstrip('*') if (envname in self.env_dict): envdef = self.env_dict[envname] else: # no predefined behavior, use default: envdef = self.env_dict[''] if envdef.simplify_repl: return apply_simplify_repl(node, envdef.simplify_repl, node.nodelist, what="environment '%s'"%(envname)) if envdef.discard: return "" return self._nodelistcontents_to_text(node.nodelist) if node.isNodeType(latexwalker.LatexMathNode): if self.keep_inline_math: # we care about math modes and we should keep this verbatim return latexwalker.math_node_to_latex(node) else: # note, this here only happens if the latexwalker had keep_inline_math=True with _PushEquationContext(self): return self._nodelistcontents_to_text(node.nodelist) logger.warning("LatexNodes2Text.node_to_text(): Unknown node: %r", node) # discard anything else. return ""
def node_to_text(self, node, prev_node_hint=None): """ Return the textual representation of the given `node`. If `prev_node_hint` is specified, then the current node is formatted suitably as following the node given in `prev_node_hint`. This might affect how much space we keep/discard, etc. """ if node is None: return "" if node.isNodeType(latexwalker.LatexCharsNode): # Unless in strict latex spaces mode, ignore nodes consisting only # of empty chars, as this tends to produce too much space... These # have been inserted by LatexWalker() in some occasions to keep # track of all relevant pre_space of tokens, such as between two # braced groups ("{one} {two}") or other such situations. if not self.strict_latex_spaces['between-latex-constructs'] and len(node.chars.strip()) == 0: return "" return node.chars if node.isNodeType(latexwalker.LatexCommentNode): if self.keep_comments: if self.strict_latex_spaces['after-comment']: return '%' + node.comment + '\n' else: # default spaces, i.e., keep what spaces were already there after the comment return '%' + node.comment + node.comment_post_space else: if self.strict_latex_spaces['after-comment']: return "" else: # default spaces, i.e., keep what spaces were already there after the comment # This can be useful to preserve e.g. indentation of the next line return node.comment_post_space if node.isNodeType(latexwalker.LatexGroupNode): contents = self._groupnodecontents_to_text(node) if self.keep_braced_groups and len(contents) >= self.keep_braced_groups_minlen: return "{" + contents + "}" return contents def apply_simplify_repl(node, simplify_repl, nodelistargs, what): if callable(simplify_repl): if 'l2tobj' in getfullargspec(simplify_repl)[0]: # callable accepts an argument named 'l2tobj', provide pointer to self return simplify_repl(node, l2tobj=self) return simplify_repl(node) if '%' in simplify_repl: try: return simplify_repl % tuple([self._groupnodecontents_to_text(nn) for nn in nodelistargs]) except (TypeError, ValueError): logger.warning( "WARNING: Error in configuration: {} failed its substitution!".format(what) ) return simplify_repl # too bad, keep the percent signs as they are... return simplify_repl if node.isNodeType(latexwalker.LatexMacroNode): # get macro behavior definition. macroname = node.macroname.rstrip('*') if macroname in self.macro_dict: mac = self.macro_dict[macroname] else: # no predefined behavior, use default: mac = self.macro_dict[''] def get_macro_str_repl(node, macroname, mac): if mac.simplify_repl: return apply_simplify_repl(node, mac.simplify_repl, node.nodeargs, what="macro '%s'"%(macroname)) if mac.discard: return "" a = node.nodeargs if (node.nodeoptarg): a.prepend(node.nodeoptarg) return "".join([self._groupnodecontents_to_text(n) for n in a]) macrostr = get_macro_str_repl(node, macroname, mac) return macrostr if node.isNodeType(latexwalker.LatexEnvironmentNode): # get environment behavior definition. 
envname = node.envname.rstrip('*') if (envname in self.env_dict): envdef = self.env_dict[envname] else: # no predefined behavior, use default: envdef = self.env_dict[''] if envdef.simplify_repl: return apply_simplify_repl(node, envdef.simplify_repl, node.nodelist, what="environment '%s'"%(envname)) if envdef.discard: return "" return self._nodelistcontents_to_text(node.nodelist) if node.isNodeType(latexwalker.LatexMathNode): if self.keep_inline_math: # we care about math modes and we should keep this verbatim return latexwalker.math_node_to_latex(node) else: # note, this here only happens if the latexwalker had keep_inline_math=True with _PushEquationContext(self): return self._nodelistcontents_to_text(node.nodelist) logger.warning("LatexNodes2Text.node_to_text(): Unknown node: %r", node) # discard anything else. return ""
[ "Return", "the", "textual", "representation", "of", "the", "given", "node", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latex2text.py#L813-L924
[ "def", "node_to_text", "(", "self", ",", "node", ",", "prev_node_hint", "=", "None", ")", ":", "if", "node", "is", "None", ":", "return", "\"\"", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexCharsNode", ")", ":", "# Unless in strict latex spaces mode, ignore nodes consisting only", "# of empty chars, as this tends to produce too much space... These", "# have been inserted by LatexWalker() in some occasions to keep", "# track of all relevant pre_space of tokens, such as between two", "# braced groups (\"{one} {two}\") or other such situations.", "if", "not", "self", ".", "strict_latex_spaces", "[", "'between-latex-constructs'", "]", "and", "len", "(", "node", ".", "chars", ".", "strip", "(", ")", ")", "==", "0", ":", "return", "\"\"", "return", "node", ".", "chars", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexCommentNode", ")", ":", "if", "self", ".", "keep_comments", ":", "if", "self", ".", "strict_latex_spaces", "[", "'after-comment'", "]", ":", "return", "'%'", "+", "node", ".", "comment", "+", "'\\n'", "else", ":", "# default spaces, i.e., keep what spaces were already there after the comment", "return", "'%'", "+", "node", ".", "comment", "+", "node", ".", "comment_post_space", "else", ":", "if", "self", ".", "strict_latex_spaces", "[", "'after-comment'", "]", ":", "return", "\"\"", "else", ":", "# default spaces, i.e., keep what spaces were already there after the comment", "# This can be useful to preserve e.g. indentation of the next line", "return", "node", ".", "comment_post_space", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexGroupNode", ")", ":", "contents", "=", "self", ".", "_groupnodecontents_to_text", "(", "node", ")", "if", "self", ".", "keep_braced_groups", "and", "len", "(", "contents", ")", ">=", "self", ".", "keep_braced_groups_minlen", ":", "return", "\"{\"", "+", "contents", "+", "\"}\"", "return", "contents", "def", "apply_simplify_repl", "(", "node", ",", "simplify_repl", ",", "nodelistargs", ",", "what", ")", ":", "if", "callable", "(", "simplify_repl", ")", ":", "if", "'l2tobj'", "in", "getfullargspec", "(", "simplify_repl", ")", "[", "0", "]", ":", "# callable accepts an argument named 'l2tobj', provide pointer to self", "return", "simplify_repl", "(", "node", ",", "l2tobj", "=", "self", ")", "return", "simplify_repl", "(", "node", ")", "if", "'%'", "in", "simplify_repl", ":", "try", ":", "return", "simplify_repl", "%", "tuple", "(", "[", "self", ".", "_groupnodecontents_to_text", "(", "nn", ")", "for", "nn", "in", "nodelistargs", "]", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "logger", ".", "warning", "(", "\"WARNING: Error in configuration: {} failed its substitution!\"", ".", "format", "(", "what", ")", ")", "return", "simplify_repl", "# too bad, keep the percent signs as they are...", "return", "simplify_repl", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexMacroNode", ")", ":", "# get macro behavior definition.", "macroname", "=", "node", ".", "macroname", ".", "rstrip", "(", "'*'", ")", "if", "macroname", "in", "self", ".", "macro_dict", ":", "mac", "=", "self", ".", "macro_dict", "[", "macroname", "]", "else", ":", "# no predefined behavior, use default:", "mac", "=", "self", ".", "macro_dict", "[", "''", "]", "def", "get_macro_str_repl", "(", "node", ",", "macroname", ",", "mac", ")", ":", "if", "mac", ".", "simplify_repl", ":", "return", "apply_simplify_repl", "(", "node", ",", "mac", ".", "simplify_repl", ",", "node", ".", "nodeargs", ",", "what", "=", 
"\"macro '%s'\"", "%", "(", "macroname", ")", ")", "if", "mac", ".", "discard", ":", "return", "\"\"", "a", "=", "node", ".", "nodeargs", "if", "(", "node", ".", "nodeoptarg", ")", ":", "a", ".", "prepend", "(", "node", ".", "nodeoptarg", ")", "return", "\"\"", ".", "join", "(", "[", "self", ".", "_groupnodecontents_to_text", "(", "n", ")", "for", "n", "in", "a", "]", ")", "macrostr", "=", "get_macro_str_repl", "(", "node", ",", "macroname", ",", "mac", ")", "return", "macrostr", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexEnvironmentNode", ")", ":", "# get environment behavior definition.", "envname", "=", "node", ".", "envname", ".", "rstrip", "(", "'*'", ")", "if", "(", "envname", "in", "self", ".", "env_dict", ")", ":", "envdef", "=", "self", ".", "env_dict", "[", "envname", "]", "else", ":", "# no predefined behavior, use default:", "envdef", "=", "self", ".", "env_dict", "[", "''", "]", "if", "envdef", ".", "simplify_repl", ":", "return", "apply_simplify_repl", "(", "node", ",", "envdef", ".", "simplify_repl", ",", "node", ".", "nodelist", ",", "what", "=", "\"environment '%s'\"", "%", "(", "envname", ")", ")", "if", "envdef", ".", "discard", ":", "return", "\"\"", "return", "self", ".", "_nodelistcontents_to_text", "(", "node", ".", "nodelist", ")", "if", "node", ".", "isNodeType", "(", "latexwalker", ".", "LatexMathNode", ")", ":", "if", "self", ".", "keep_inline_math", ":", "# we care about math modes and we should keep this verbatim", "return", "latexwalker", ".", "math_node_to_latex", "(", "node", ")", "else", ":", "# note, this here only happens if the latexwalker had keep_inline_math=True", "with", "_PushEquationContext", "(", "self", ")", ":", "return", "self", ".", "_nodelistcontents_to_text", "(", "node", ".", "nodelist", ")", "logger", ".", "warning", "(", "\"LatexNodes2Text.node_to_text(): Unknown node: %r\"", ",", "node", ")", "# discard anything else.", "return", "\"\"" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
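A hedged sketch (sample input invented) of converting nodes one at a time with `node_to_text()`; the comment node survives here only because `keep_comments=True` is set:

from pylatexenc import latexwalker
from pylatexenc.latex2text import LatexNodes2Text

l2t = LatexNodes2Text(keep_comments=True)
nodelist = latexwalker.LatexWalker(r"\textit{hello} % a comment").get_latex_nodes()[0]

# Convert each node individually and show the raw string each one yields.
for node in nodelist:
    print(repr(l2t.node_to_text(node)))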
test
utf8tolatex
u""" Encode a UTF-8 string to a LaTeX snippet. If `non_ascii_only` is set to `True`, then usual (ascii) characters such as ``#``, ``{``, ``}`` etc. will not be escaped. If set to `False` (the default), they are escaped to their respective LaTeX escape sequences. If `brackets` is set to `True` (the default), then LaTeX macros are enclosed in brackets. For example, ``sant\N{LATIN SMALL LETTER E WITH ACUTE}`` is replaced by ``sant{\\'e}`` if `brackets=True` and by ``sant\\'e`` if `brackets=False`. .. warning:: Using `brackets=False` might give you an invalid LaTeX string, so avoid it! (for instance, ``ma\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}tre`` will be replaced incorrectly by ``ma\\^\\itre`` resulting in an unknown macro ``\\itre``). If `substitute_bad_chars=True`, then any non-ascii character for which no LaTeX escape sequence is known is replaced by a question mark in boldface. Otherwise (by default), the character is left as it is. If `fail_bad_chars=True`, then a `ValueError` is raised if we cannot find a character substitution for any non-ascii character. .. versionchanged:: 1.3 Added `fail_bad_chars` switch
pylatexenc/latexencode.py
def utf8tolatex(s, non_ascii_only=False, brackets=True, substitute_bad_chars=False, fail_bad_chars=False): u""" Encode a UTF-8 string to a LaTeX snippet. If `non_ascii_only` is set to `True`, then usual (ascii) characters such as ``#``, ``{``, ``}`` etc. will not be escaped. If set to `False` (the default), they are escaped to their respective LaTeX escape sequences. If `brackets` is set to `True` (the default), then LaTeX macros are enclosed in brackets. For example, ``sant\N{LATIN SMALL LETTER E WITH ACUTE}`` is replaced by ``sant{\\'e}`` if `brackets=True` and by ``sant\\'e`` if `brackets=False`. .. warning:: Using `brackets=False` might give you an invalid LaTeX string, so avoid it! (for instance, ``ma\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}tre`` will be replaced incorrectly by ``ma\\^\\itre`` resulting in an unknown macro ``\\itre``). If `substitute_bad_chars=True`, then any non-ascii character for which no LaTeX escape sequence is known is replaced by a question mark in boldface. Otherwise (by default), the character is left as it is. If `fail_bad_chars=True`, then a `ValueError` is raised if we cannot find a character substitution for any non-ascii character. .. versionchanged:: 1.3 Added `fail_bad_chars` switch """ s = unicode(s) # make sure s is unicode s = unicodedata.normalize('NFC', s) if not s: return "" result = u"" for ch in s: #log.longdebug("Encoding char %r", ch) if (non_ascii_only and ord(ch) < 127): result += ch else: lch = utf82latex.get(ord(ch), None) if (lch is not None): # add brackets if needed, i.e. if we have a substituting macro. # note: in condition, beware, that lch might be of zero length. result += ( '{'+lch+'}' if brackets and lch[0:1] == '\\' else lch ) elif ((ord(ch) >= 32 and ord(ch) <= 127) or (ch in "\n\r\t")): # ordinary printable ascii char, just add it result += ch else: # non-ascii char msg = u"Character cannot be encoded into LaTeX: U+%04X - `%s'" % (ord(ch), ch) if fail_bad_chars: raise ValueError(msg) log.warning(msg) if substitute_bad_chars: result += r'{\bfseries ?}' else: # keep unescaped char result += ch return result
def utf8tolatex(s, non_ascii_only=False, brackets=True, substitute_bad_chars=False, fail_bad_chars=False): u""" Encode a UTF-8 string to a LaTeX snippet. If `non_ascii_only` is set to `True`, then usual (ascii) characters such as ``#``, ``{``, ``}`` etc. will not be escaped. If set to `False` (the default), they are escaped to their respective LaTeX escape sequences. If `brackets` is set to `True` (the default), then LaTeX macros are enclosed in brackets. For example, ``sant\N{LATIN SMALL LETTER E WITH ACUTE}`` is replaced by ``sant{\\'e}`` if `brackets=True` and by ``sant\\'e`` if `brackets=False`. .. warning:: Using `brackets=False` might give you an invalid LaTeX string, so avoid it! (for instance, ``ma\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}tre`` will be replaced incorrectly by ``ma\\^\\itre`` resulting in an unknown macro ``\\itre``). If `substitute_bad_chars=True`, then any non-ascii character for which no LaTeX escape sequence is known is replaced by a question mark in boldface. Otherwise (by default), the character is left as it is. If `fail_bad_chars=True`, then a `ValueError` is raised if we cannot find a character substitution for any non-ascii character. .. versionchanged:: 1.3 Added `fail_bad_chars` switch """ s = unicode(s) # make sure s is unicode s = unicodedata.normalize('NFC', s) if not s: return "" result = u"" for ch in s: #log.longdebug("Encoding char %r", ch) if (non_ascii_only and ord(ch) < 127): result += ch else: lch = utf82latex.get(ord(ch), None) if (lch is not None): # add brackets if needed, i.e. if we have a substituting macro. # note: in condition, beware, that lch might be of zero length. result += ( '{'+lch+'}' if brackets and lch[0:1] == '\\' else lch ) elif ((ord(ch) >= 32 and ord(ch) <= 127) or (ch in "\n\r\t")): # ordinary printable ascii char, just add it result += ch else: # non-ascii char msg = u"Character cannot be encoded into LaTeX: U+%04X - `%s'" % (ord(ch), ch) if fail_bad_chars: raise ValueError(msg) log.warning(msg) if substitute_bad_chars: result += r'{\bfseries ?}' else: # keep unescaped char result += ch return result
[ "u", "Encode", "a", "UTF", "-", "8", "string", "to", "a", "LaTeX", "snippet", "." ]
phfaist/pylatexenc
python
https://github.com/phfaist/pylatexenc/blob/0c1788d1349e749501e67a6fba54d79e6e0d54f6/pylatexenc/latexencode.py#L53-L117
[ "def", "utf8tolatex", "(", "s", ",", "non_ascii_only", "=", "False", ",", "brackets", "=", "True", ",", "substitute_bad_chars", "=", "False", ",", "fail_bad_chars", "=", "False", ")", ":", "s", "=", "unicode", "(", "s", ")", "# make sure s is unicode", "s", "=", "unicodedata", ".", "normalize", "(", "'NFC'", ",", "s", ")", "if", "not", "s", ":", "return", "\"\"", "result", "=", "u\"\"", "for", "ch", "in", "s", ":", "#log.longdebug(\"Encoding char %r\", ch)", "if", "(", "non_ascii_only", "and", "ord", "(", "ch", ")", "<", "127", ")", ":", "result", "+=", "ch", "else", ":", "lch", "=", "utf82latex", ".", "get", "(", "ord", "(", "ch", ")", ",", "None", ")", "if", "(", "lch", "is", "not", "None", ")", ":", "# add brackets if needed, i.e. if we have a substituting macro.", "# note: in condition, beware, that lch might be of zero length.", "result", "+=", "(", "'{'", "+", "lch", "+", "'}'", "if", "brackets", "and", "lch", "[", "0", ":", "1", "]", "==", "'\\\\'", "else", "lch", ")", "elif", "(", "(", "ord", "(", "ch", ")", ">=", "32", "and", "ord", "(", "ch", ")", "<=", "127", ")", "or", "(", "ch", "in", "\"\\n\\r\\t\"", ")", ")", ":", "# ordinary printable ascii char, just add it", "result", "+=", "ch", "else", ":", "# non-ascii char", "msg", "=", "u\"Character cannot be encoded into LaTeX: U+%04X - `%s'\"", "%", "(", "ord", "(", "ch", ")", ",", "ch", ")", "if", "fail_bad_chars", ":", "raise", "ValueError", "(", "msg", ")", "log", ".", "warning", "(", "msg", ")", "if", "substitute_bad_chars", ":", "result", "+=", "r'{\\bfseries ?}'", "else", ":", "# keep unescaped char", "result", "+=", "ch", "return", "result" ]
0c1788d1349e749501e67a6fba54d79e6e0d54f6
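An illustrative sketch of the three main switches documented above; the printed outputs in the comments are approximate ("e.g.") rather than guaranteed, since the exact escape chosen can vary:

# -*- coding: utf-8 -*-
from pylatexenc.latexencode import utf8tolatex

print(utf8tolatex(u"Café crème"))                  # e.g. "Caf{\'e} cr{\`e}me"
print(utf8tolatex(u"Café crème", brackets=False))  # e.g. "Caf\'e cr\`eme" (see the warning above)
print(utf8tolatex(u"# Café", non_ascii_only=True)) # leaves the ASCII '#' unescaped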
test
_unascii
Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8 This method takes the output of the JSONEncoder and expands any \\uNNNN escapes it finds (except for \\u0000 to \\u001F, which are converted to \\xNN escapes). For performance, it assumes that the input is valid JSON, and performs few sanity checks.
canonicaljson.py
def _unascii(s): """Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8 This method takes the output of the JSONEncoder and expands any \\uNNNN escapes it finds (except for \\u0000 to \\u001F, which are converted to \\xNN escapes). For performance, it assumes that the input is valid JSON, and performs few sanity checks. """ # make the fast path fast: if there are no matches in the string, the # whole thing is ascii. On python 2, that means we're done. On python 3, # we have to turn it into a bytes, which is quickest with encode('utf-8') m = _U_ESCAPE.search(s) if not m: return s if PY2 else s.encode('utf-8') # appending to a string (or a bytes) is slooow, so we accumulate sections # of string result in 'chunks', and join them all together later. # (It doesn't seem to make much difference whether we accumulate # utf8-encoded bytes, or strings which we utf-8 encode after rejoining) # chunks = [] # 'pos' tracks the index in 's' that we have processed into 'chunks' so # far. pos = 0 while m: start = m.start() end = m.end() g = m.group(1) if g is None: # escaped backslash: pass it through along with anything before the # match chunks.append(s[pos:end]) else: # \uNNNN, but we have to watch out for surrogate pairs. # # On python 2, str.encode("utf-8") will decode utf-16 surrogates # before re-encoding, so it's fine for us to pass the surrogates # through. (Indeed we must, to deal with UCS-2 python builds, per # https://github.com/matrix-org/python-canonicaljson/issues/12). # # On python 3, str.encode("utf-8") complains about surrogates, so # we have to unpack them. c = int(g, 16) if c < 0x20: # leave as a \uNNNN escape chunks.append(s[pos:end]) else: if PY3: # pragma nocover if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u': esc2 = s[end + 2:end + 6] c2 = int(esc2, 16) if c2 & 0xfc00 == 0xdc00: c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00)) end += 6 chunks.append(s[pos:start]) chunks.append(unichr(c)) pos = end m = _U_ESCAPE.search(s, pos) # pass through anything after the last match chunks.append(s[pos:]) return (''.join(chunks)).encode("utf-8")
def _unascii(s): """Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8 This method takes the output of the JSONEncoder and expands any \\uNNNN escapes it finds (except for \\u0000 to \\u001F, which are converted to \\xNN escapes). For performance, it assumes that the input is valid JSON, and performs few sanity checks. """ # make the fast path fast: if there are no matches in the string, the # whole thing is ascii. On python 2, that means we're done. On python 3, # we have to turn it into a bytes, which is quickest with encode('utf-8') m = _U_ESCAPE.search(s) if not m: return s if PY2 else s.encode('utf-8') # appending to a string (or a bytes) is slooow, so we accumulate sections # of string result in 'chunks', and join them all together later. # (It doesn't seem to make much difference whether we accumulate # utf8-encoded bytes, or strings which we utf-8 encode after rejoining) # chunks = [] # 'pos' tracks the index in 's' that we have processed into 'chunks' so # far. pos = 0 while m: start = m.start() end = m.end() g = m.group(1) if g is None: # escaped backslash: pass it through along with anything before the # match chunks.append(s[pos:end]) else: # \uNNNN, but we have to watch out for surrogate pairs. # # On python 2, str.encode("utf-8") will decode utf-16 surrogates # before re-encoding, so it's fine for us to pass the surrogates # through. (Indeed we must, to deal with UCS-2 python builds, per # https://github.com/matrix-org/python-canonicaljson/issues/12). # # On python 3, str.encode("utf-8") complains about surrogates, so # we have to unpack them. c = int(g, 16) if c < 0x20: # leave as a \uNNNN escape chunks.append(s[pos:end]) else: if PY3: # pragma nocover if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u': esc2 = s[end + 2:end + 6] c2 = int(esc2, 16) if c2 & 0xfc00 == 0xdc00: c = 0x10000 + (((c - 0xd800) << 10) | (c2 - 0xdc00)) end += 6 chunks.append(s[pos:start]) chunks.append(unichr(c)) pos = end m = _U_ESCAPE.search(s, pos) # pass through anything after the last match chunks.append(s[pos:]) return (''.join(chunks)).encode("utf-8")
[ "Unpack", "\\\\", "uNNNN", "escapes", "in", "s", "and", "encode", "the", "result", "as", "UTF", "-", "8" ]
matrix-org/python-canonicaljson
python
https://github.com/matrix-org/python-canonicaljson/blob/c508635e867ff11026b610c00ca906002d8fb9af/canonicaljson.py#L74-L147
[ "def", "_unascii", "(", "s", ")", ":", "# make the fast path fast: if there are no matches in the string, the", "# whole thing is ascii. On python 2, that means we're done. On python 3,", "# we have to turn it into a bytes, which is quickest with encode('utf-8')", "m", "=", "_U_ESCAPE", ".", "search", "(", "s", ")", "if", "not", "m", ":", "return", "s", "if", "PY2", "else", "s", ".", "encode", "(", "'utf-8'", ")", "# appending to a string (or a bytes) is slooow, so we accumulate sections", "# of string result in 'chunks', and join them all together later.", "# (It doesn't seem to make much difference whether we accumulate", "# utf8-encoded bytes, or strings which we utf-8 encode after rejoining)", "#", "chunks", "=", "[", "]", "# 'pos' tracks the index in 's' that we have processed into 'chunks' so", "# far.", "pos", "=", "0", "while", "m", ":", "start", "=", "m", ".", "start", "(", ")", "end", "=", "m", ".", "end", "(", ")", "g", "=", "m", ".", "group", "(", "1", ")", "if", "g", "is", "None", ":", "# escaped backslash: pass it through along with anything before the", "# match", "chunks", ".", "append", "(", "s", "[", "pos", ":", "end", "]", ")", "else", ":", "# \\uNNNN, but we have to watch out for surrogate pairs.", "#", "# On python 2, str.encode(\"utf-8\") will decode utf-16 surrogates", "# before re-encoding, so it's fine for us to pass the surrogates", "# through. (Indeed we must, to deal with UCS-2 python builds, per", "# https://github.com/matrix-org/python-canonicaljson/issues/12).", "#", "# On python 3, str.encode(\"utf-8\") complains about surrogates, so", "# we have to unpack them.", "c", "=", "int", "(", "g", ",", "16", ")", "if", "c", "<", "0x20", ":", "# leave as a \\uNNNN escape", "chunks", ".", "append", "(", "s", "[", "pos", ":", "end", "]", ")", "else", ":", "if", "PY3", ":", "# pragma nocover", "if", "c", "&", "0xfc00", "==", "0xd800", "and", "s", "[", "end", ":", "end", "+", "2", "]", "==", "'\\\\u'", ":", "esc2", "=", "s", "[", "end", "+", "2", ":", "end", "+", "6", "]", "c2", "=", "int", "(", "esc2", ",", "16", ")", "if", "c2", "&", "0xfc00", "==", "0xdc00", ":", "c", "=", "0x10000", "+", "(", "(", "(", "c", "-", "0xd800", ")", "<<", "10", ")", "|", "(", "c2", "-", "0xdc00", ")", ")", "end", "+=", "6", "chunks", ".", "append", "(", "s", "[", "pos", ":", "start", "]", ")", "chunks", ".", "append", "(", "unichr", "(", "c", ")", ")", "pos", "=", "end", "m", "=", "_U_ESCAPE", ".", "search", "(", "s", ",", "pos", ")", "# pass through anything after the last match", "chunks", ".", "append", "(", "s", "[", "pos", ":", "]", ")", "return", "(", "''", ".", "join", "(", "chunks", ")", ")", ".", "encode", "(", "\"utf-8\"", ")" ]
c508635e867ff11026b610c00ca906002d8fb9af
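The surrogate-pair handling in `_unascii` above is easiest to see with a concrete pair. The following self-contained sketch (not part of canonicaljson; written here purely for illustration) applies the same folding formula to the JSON escape sequence \ud83d\ude00 and checks the result against Python's own JSON decoder:

import json

def combine_surrogates(high, low):
    # Same arithmetic as in _unascii: fold a UTF-16 high/low surrogate
    # pair back into a single Unicode code point.
    assert high & 0xfc00 == 0xd800 and low & 0xfc00 == 0xdc00
    return 0x10000 + (((high - 0xd800) << 10) | (low - 0xdc00))

# '\ud83d\ude00' is the escaped form of U+1F600 (grinning face emoji).
code_point = combine_surrogates(0xd83d, 0xde00)
assert code_point == 0x1F600
assert chr(code_point) == json.loads('"\\ud83d\\ude00"')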
test
Organisation.get_organisation_information
Get information for this organisation. Returns a dictionary of values.
trolly/organisation.py
def get_organisation_information(self, query_params=None): ''' Get information fot this organisation. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_organisation_information(self, query_params=None): ''' Get information fot this organisation. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "information", "fot", "this", "organisation", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L14-L21
[ "def", "get_organisation_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Organisation.get_boards
Get all the boards for this organisation. Returns a list of Board objects. Returns: list(Board): The boards attached to this organisation
trolly/organisation.py
def get_boards(self, **query_params): ''' Get all the boards for this organisation. Returns a list of Board s. Returns: list(Board): The boards attached to this organisation ''' boards = self.get_boards_json(self.base_uri, query_params=query_params) boards_list = [] for board_json in boards: boards_list.append(self.create_board(board_json)) return boards_list
def get_boards(self, **query_params): ''' Get all the boards for this organisation. Returns a list of Board s. Returns: list(Board): The boards attached to this organisation ''' boards = self.get_boards_json(self.base_uri, query_params=query_params) boards_list = [] for board_json in boards: boards_list.append(self.create_board(board_json)) return boards_list
[ "Get", "all", "the", "boards", "for", "this", "organisation", ".", "Returns", "a", "list", "of", "Board", "s", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L23-L36
[ "def", "get_boards", "(", "self", ",", "*", "*", "query_params", ")", ":", "boards", "=", "self", ".", "get_boards_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "boards_list", "=", "[", "]", "for", "board_json", "in", "boards", ":", "boards_list", ".", "append", "(", "self", ".", "create_board", "(", "board_json", ")", ")", "return", "boards_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
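A minimal usage sketch for the organisation accessors documented above. The method calls (`get_organisation_information`, `get_boards`) are the ones shown in these records; the client import path and both constructor signatures are assumptions and may differ in the actual library:

from trolly.client import Client                  # assumed import path
from trolly.organisation import Organisation

# Assumption: Client takes a Trello API key and a user auth token.
client = Client('your-api-key', 'your-user-auth-token')

# Assumption: Organisation takes the client and an organisation id.
organisation = Organisation(client, 'your-organisation-id')

print(organisation.get_organisation_information())
for board in organisation.get_boards():
    print(board)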
test
Organisation.get_members
Get all members attached to this organisation. Returns a list of Member objects Returns: list(Member): The members attached to this organisation
trolly/organisation.py
def get_members(self, **query_params): ''' Get all members attached to this organisation. Returns a list of Member objects Returns: list(Member): The members attached to this organisation ''' members = self.get_members_json(self.base_uri, query_params=query_params) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
def get_members(self, **query_params): ''' Get all members attached to this organisation. Returns a list of Member objects Returns: list(Member): The members attached to this organisation ''' members = self.get_members_json(self.base_uri, query_params=query_params) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
[ "Get", "all", "members", "attached", "to", "this", "organisation", ".", "Returns", "a", "list", "of", "Member", "objects" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L38-L53
[ "def", "get_members", "(", "self", ",", "*", "*", "query_params", ")", ":", "members", "=", "self", ".", "get_members_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "members_list", "=", "[", "]", "for", "member_json", "in", "members", ":", "members_list", ".", "append", "(", "self", ".", "create_member", "(", "member_json", ")", ")", "return", "members_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Organisation.update_organisation
Update this organisation's information. Returns a new organisation object.
trolly/organisation.py
def update_organisation(self, query_params=None): ''' Update this organisations information. Returns a new organisation object. ''' organisation_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_organisation(organisation_json)
def update_organisation(self, query_params=None): ''' Update this organisations information. Returns a new organisation object. ''' organisation_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_organisation(organisation_json)
[ "Update", "this", "organisations", "information", ".", "Returns", "a", "new", "organisation", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L55-L66
[ "def", "update_organisation", "(", "self", ",", "query_params", "=", "None", ")", ":", "organisation_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_organisation", "(", "organisation_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Organisation.remove_member
Remove a member from the organisation. Returns JSON of all members if successful or raises an Unauthorised exception if not.
trolly/organisation.py
def remove_member(self, member_id): ''' Remove a member from the organisation.Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='DELETE' )
def remove_member(self, member_id): ''' Remove a member from the organisation.Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='DELETE' )
[ "Remove", "a", "member", "from", "the", "organisation", ".", "Returns", "JSON", "of", "all", "members", "if", "successful", "or", "raises", "an", "Unauthorised", "exception", "if", "not", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L68-L76
[ "def", "remove_member", "(", "self", ",", "member_id", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/members/%s'", "%", "member_id", ",", "http_method", "=", "'DELETE'", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Organisation.add_member_by_id
Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not.
trolly/organisation.py
def add_member_by_id(self, member_id, membership_type='normal'): ''' Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='PUT', query_params={ 'type': membership_type } )
def add_member_by_id(self, member_id, membership_type='normal'): ''' Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='PUT', query_params={ 'type': membership_type } )
[ "Add", "a", "member", "to", "the", "board", "using", "the", "id", ".", "Membership", "type", "can", "be", "normal", "or", "admin", ".", "Returns", "JSON", "of", "all", "members", "if", "successful", "or", "raises", "an", "Unauthorised", "exception", "if", "not", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L78-L90
[ "def", "add_member_by_id", "(", "self", ",", "member_id", ",", "membership_type", "=", "'normal'", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/members/%s'", "%", "member_id", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'type'", ":", "membership_type", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Organisation.add_member
Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not.
trolly/organisation.py
def add_member(self, email, fullname, membership_type='normal'): ''' Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members', http_method='PUT', query_params={ 'email': email, 'fullName': fullname, 'type': membership_type } )
def add_member(self, email, fullname, membership_type='normal'): ''' Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members', http_method='PUT', query_params={ 'email': email, 'fullName': fullname, 'type': membership_type } )
[ "Add", "a", "member", "to", "the", "board", ".", "Membership", "type", "can", "be", "normal", "or", "admin", ".", "Returns", "JSON", "of", "all", "members", "if", "successful", "or", "raises", "an", "Unauthorised", "exception", "if", "not", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/organisation.py#L92-L106
[ "def", "add_member", "(", "self", ",", "email", ",", "fullname", ",", "membership_type", "=", "'normal'", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/members'", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'email'", ":", "email", ",", "'fullName'", ":", "fullname", ",", "'type'", ":", "membership_type", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
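The membership-management calls above take plain values rather than a query_params dict. A hedged sketch, reusing the assumed client/organisation construction from the earlier example (constructor signatures remain assumptions, the method names are the ones documented in these records):

from trolly.client import Client                              # assumed import path
from trolly.organisation import Organisation

client = Client('your-api-key', 'your-user-auth-token')       # assumed signature
organisation = Organisation(client, 'your-organisation-id')   # assumed signature

# Invite a new member by e-mail address and full name ('normal' or 'admin').
organisation.add_member('ada@example.com', 'Ada Lovelace', membership_type='normal')

# Promote an existing member by id, then remove another member entirely.
organisation.add_member_by_id('member-id', membership_type='admin')
organisation.remove_member('other-member-id')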
test
List.get_list_information
Get information for this list. Returns a dictionary of values.
trolly/list.py
def get_list_information(self, query_params=None): ''' Get information for this list. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_list_information(self, query_params=None): ''' Get information for this list. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "information", "for", "this", "list", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/list.py#L18-L25
[ "def", "get_list_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
List.add_card
Create a card for this list. Returns a Card object.
trolly/list.py
def add_card(self, query_params=None): ''' Create a card for this list. Returns a Card object. ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards', http_method='POST', query_params=query_params or {} ) return self.create_card(card_json)
def add_card(self, query_params=None): ''' Create a card for this list. Returns a Card object. ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards', http_method='POST', query_params=query_params or {} ) return self.create_card(card_json)
[ "Create", "a", "card", "for", "this", "list", ".", "Returns", "a", "Card", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/list.py#L64-L74
[ "def", "add_card", "(", "self", ",", "query_params", "=", "None", ")", ":", "card_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/cards'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_card", "(", "card_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
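The `query_params` dict passed to `add_card` above is forwarded to the Trello API, so it uses Trello's own field names. A hedged sketch (the client import path and constructor signatures are assumptions; 'name' and 'desc' are standard Trello card fields):

from trolly.client import Client                          # assumed import path
from trolly.list import List

client = Client('your-api-key', 'your-user-auth-token')   # assumed signature
todo_list = List(client, 'your-list-id')                  # assumed signature

card = todo_list.add_card({'name': 'Write release notes',
                           'desc': 'Cover the 1.0 changes'})
print(todo_list.get_list_information())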
test
Label.get_label_information
Get all information for this Label. Returns a dictionary of values.
trolly/label.py
def get_label_information(self, query_params=None): ''' Get all information for this Label. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_label_information(self, query_params=None): ''' Get all information for this Label. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "all", "information", "for", "this", "Label", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/label.py#L20-L27
[ "def", "get_label_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Label.get_items
Get all the items for this label. Returns a list of dictionaries. Each dictionary has the values for an item.
trolly/label.py
def get_items(self, query_params=None): ''' Get all the items for this label. Returns a list of dictionaries. Each dictionary has the values for an item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', query_params=query_params or {} )
def get_items(self, query_params=None): ''' Get all the items for this label. Returns a list of dictionaries. Each dictionary has the values for an item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', query_params=query_params or {} )
[ "Get", "all", "the", "items", "for", "this", "label", ".", "Returns", "a", "list", "of", "dictionaries", ".", "Each", "dictionary", "has", "the", "values", "for", "an", "item", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/label.py#L29-L37
[ "def", "get_items", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/checkItems'", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Label._update_label_name
Update the current label's name. Returns a new Label object.
trolly/label.py
def _update_label_name(self, name): ''' Update the current label's name. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_label(label_json)
def _update_label_name(self, name): ''' Update the current label's name. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_label(label_json)
[ "Update", "the", "current", "label", "s", "name", ".", "Returns", "a", "new", "Label", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/label.py#L44-L54
[ "def", "_update_label_name", "(", "self", ",", "name", ")", ":", "label_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'name'", ":", "name", "}", ")", "return", "self", ".", "create_label", "(", "label_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Label._update_label_dict
Update the current label. Returns a new Label object.
trolly/label.py
def _update_label_dict(self, query_params={}): ''' Update the current label. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params ) return self.create_label(label_json)
def _update_label_dict(self, query_params={}): ''' Update the current label. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params ) return self.create_label(label_json)
[ "Update", "the", "current", "label", ".", "Returns", "a", "new", "Label", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/label.py#L57-L67
[ "def", "_update_label_dict", "(", "self", ",", "query_params", "=", "{", "}", ")", ":", "label_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "query_params", ")", "return", "self", ".", "create_label", "(", "label_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Authorise.get_authorisation_url
Returns a URL that needs to be opened in a browser to retrieve an access token.
trolly/authorise.py
def get_authorisation_url(self, application_name, token_expire='1day'): ''' Returns a URL that needs to be opened in a browser to retrieve an access token. ''' query_params = { 'name': application_name, 'expiration': token_expire, 'response_type': 'token', 'scope': 'read,write' } authorisation_url = self.build_uri( path='/authorize', query_params=self.add_authorisation(query_params) ) print('Please go to the following URL and get the user authorisation ' 'token:\n', authorisation_url) return authorisation_url
def get_authorisation_url(self, application_name, token_expire='1day'): ''' Returns a URL that needs to be opened in a browser to retrieve an access token. ''' query_params = { 'name': application_name, 'expiration': token_expire, 'response_type': 'token', 'scope': 'read,write' } authorisation_url = self.build_uri( path='/authorize', query_params=self.add_authorisation(query_params) ) print('Please go to the following URL and get the user authorisation ' 'token:\n', authorisation_url) return authorisation_url
[ "Returns", "a", "URL", "that", "needs", "to", "be", "opened", "in", "a", "browser", "to", "retrieve", "an", "access", "token", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/authorise.py#L14-L33
[ "def", "get_authorisation_url", "(", "self", ",", "application_name", ",", "token_expire", "=", "'1day'", ")", ":", "query_params", "=", "{", "'name'", ":", "application_name", ",", "'expiration'", ":", "token_expire", ",", "'response_type'", ":", "'token'", ",", "'scope'", ":", "'read,write'", "}", "authorisation_url", "=", "self", ".", "build_uri", "(", "path", "=", "'/authorize'", ",", "query_params", "=", "self", ".", "add_authorisation", "(", "query_params", ")", ")", "print", "(", "'Please go to the following URL and get the user authorisation '", "'token:\\n'", ",", "authorisation_url", ")", "return", "authorisation_url" ]
483dc94c352df40dc05ead31820b059b2545cf82
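Obtaining a user auth token is the first step before any of the other calls can be made. A hedged sketch of the flow described above (the Authorise constructor signature is an assumption; the method itself both prints and returns the URL, and the user pastes the resulting token back into the application):

from trolly.authorise import Authorise

# Assumption: Authorise is constructed with the application's API key.
auth = Authorise('your-api-key')

url = auth.get_authorisation_url('My Trello App', token_expire='30days')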
test
Card.get_card_information
Get information for this card. Returns a dictionary of values.
trolly/card.py
def get_card_information(self, query_params=None): ''' Get information for this card. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_card_information(self, query_params=None): ''' Get information for this card. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "information", "for", "this", "card", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L22-L29
[ "def", "get_card_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.get_board
Get board information for this card. Returns a Board object. Returns: Board: The board this card is attached to
trolly/card.py
def get_board(self, **query_params): ''' Get board information for this card. Returns a Board object. Returns: Board: The board this card is attached to ''' board_json = self.get_board_json(self.base_uri, query_params=query_params) return self.create_board(board_json)
def get_board(self, **query_params): ''' Get board information for this card. Returns a Board object. Returns: Board: The board this card is attached to ''' board_json = self.get_board_json(self.base_uri, query_params=query_params) return self.create_board(board_json)
[ "Get", "board", "information", "for", "this", "card", ".", "Returns", "a", "Board", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L31-L40
[ "def", "get_board", "(", "self", ",", "*", "*", "query_params", ")", ":", "board_json", "=", "self", ".", "get_board_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "return", "self", ".", "create_board", "(", "board_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.get_list
Get list information for this card. Returns a List object. Returns: List: The list this card is attached to
trolly/card.py
def get_list(self, **query_params): ''' Get list information for this card. Returns a List object. Returns: List: The list this card is attached to ''' list_json = self.get_list_json(self.base_uri, query_params=query_params) return self.create_list(list_json)
def get_list(self, **query_params): ''' Get list information for this card. Returns a List object. Returns: List: The list this card is attached to ''' list_json = self.get_list_json(self.base_uri, query_params=query_params) return self.create_list(list_json)
[ "Get", "list", "information", "for", "this", "card", ".", "Returns", "a", "List", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L42-L51
[ "def", "get_list", "(", "self", ",", "*", "*", "query_params", ")", ":", "list_json", "=", "self", ".", "get_list_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "return", "self", ".", "create_list", "(", "list_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.get_checklists
Get the checklists for this card. Returns a list of Checklist objects. Returns: list(Checklist): The checklists attached to this card
trolly/card.py
def get_checklists(self, **query_params): ''' Get the checklists for this card. Returns a list of Checklist objects. Returns: list(Checklist): The checklists attached to this card ''' checklists = self.get_checklist_json(self.base_uri, query_params=query_params) checklists_list = [] for checklist_json in checklists: checklists_list.append(self.create_checklist(checklist_json)) return checklists_list
def get_checklists(self, **query_params): ''' Get the checklists for this card. Returns a list of Checklist objects. Returns: list(Checklist): The checklists attached to this card ''' checklists = self.get_checklist_json(self.base_uri, query_params=query_params) checklists_list = [] for checklist_json in checklists: checklists_list.append(self.create_checklist(checklist_json)) return checklists_list
[ "Get", "the", "checklists", "for", "this", "card", ".", "Returns", "a", "list", "of", "Checklist", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L53-L67
[ "def", "get_checklists", "(", "self", ",", "*", "*", "query_params", ")", ":", "checklists", "=", "self", ".", "get_checklist_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "checklists_list", "=", "[", "]", "for", "checklist_json", "in", "checklists", ":", "checklists_list", ".", "append", "(", "self", ".", "create_checklist", "(", "checklist_json", ")", ")", "return", "checklists_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.add_comment
Adds a comment to this card by the current user.
trolly/card.py
def add_comment(self, comment_text): ''' Adds a comment to this card by the current user. ''' return self.fetch_json( uri_path=self.base_uri + '/actions/comments', http_method='POST', query_params={'text': comment_text} )
def add_comment(self, comment_text): ''' Adds a comment to this card by the current user. ''' return self.fetch_json( uri_path=self.base_uri + '/actions/comments', http_method='POST', query_params={'text': comment_text} )
[ "Adds", "a", "comment", "to", "this", "card", "by", "the", "current", "user", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L98-L106
[ "def", "add_comment", "(", "self", ",", "comment_text", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/actions/comments'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "{", "'text'", ":", "comment_text", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.add_attachment
Adds an attachment to this card.
trolly/card.py
def add_attachment(self, filename, open_file): ''' Adds an attachment to this card. ''' fields = { 'api_key': self.client.api_key, 'token': self.client.user_auth_token } content_type, body = self.encode_multipart_formdata( fields=fields, filename=filename, file_values=open_file ) return self.fetch_json( uri_path=self.base_uri + '/attachments', http_method='POST', body=body, headers={'Content-Type': content_type}, )
def add_attachment(self, filename, open_file): ''' Adds an attachment to this card. ''' fields = { 'api_key': self.client.api_key, 'token': self.client.user_auth_token } content_type, body = self.encode_multipart_formdata( fields=fields, filename=filename, file_values=open_file ) return self.fetch_json( uri_path=self.base_uri + '/attachments', http_method='POST', body=body, headers={'Content-Type': content_type}, )
[ "Adds", "an", "attachment", "to", "this", "card", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L108-L128
[ "def", "add_attachment", "(", "self", ",", "filename", ",", "open_file", ")", ":", "fields", "=", "{", "'api_key'", ":", "self", ".", "client", ".", "api_key", ",", "'token'", ":", "self", ".", "client", ".", "user_auth_token", "}", "content_type", ",", "body", "=", "self", ".", "encode_multipart_formdata", "(", "fields", "=", "fields", ",", "filename", "=", "filename", ",", "file_values", "=", "open_file", ")", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/attachments'", ",", "http_method", "=", "'POST'", ",", "body", "=", "body", ",", "headers", "=", "{", "'Content-Type'", ":", "content_type", "}", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
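Per the record above, `add_attachment` expects the file name plus the already-read file contents (the docstring's open(file).read() string), while `add_comment` just takes text. A hedged sketch (the client import path and constructor signatures are assumptions):

from trolly.client import Client                          # assumed import path
from trolly.card import Card

client = Client('your-api-key', 'your-user-auth-token')   # assumed signature
card = Card(client, 'your-card-id')                       # assumed signature

card.add_comment('Uploading the latest build log.')

with open('build.log') as log_file:
    card.add_attachment('build.log', log_file.read())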
test
Card.add_checklist
Add a checklist to this card. Returns a Checklist object.
trolly/card.py
def add_checklist(self, query_params=None): ''' Add a checklist to this card. Returns a Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri + '/checklists', http_method='POST', query_params=query_params or {} ) return self.create_checklist(checklist_json)
def add_checklist(self, query_params=None): ''' Add a checklist to this card. Returns a Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri + '/checklists', http_method='POST', query_params=query_params or {} ) return self.create_checklist(checklist_json)
[ "Add", "a", "checklist", "to", "this", "card", ".", "Returns", "a", "Checklist", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L130-L140
[ "def", "add_checklist", "(", "self", ",", "query_params", "=", "None", ")", ":", "checklist_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/checklists'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_checklist", "(", "checklist_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card._add_label_from_dict
Add a label to this card, from a dictionary.
trolly/card.py
def _add_label_from_dict(self, query_params=None): ''' Add a label to this card, from a dictionary. ''' return self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} )
def _add_label_from_dict(self, query_params=None): ''' Add a label to this card, from a dictionary. ''' return self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} )
[ "Add", "a", "label", "to", "this", "card", "from", "a", "dictionary", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L147-L155
[ "def", "_add_label_from_dict", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/labels'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card._add_label_from_class
Add an existing label to this card.
trolly/card.py
def _add_label_from_class(self, label=None): ''' Add an existing label to this card. ''' return self.fetch_json( uri_path=self.base_uri + '/idLabels', http_method='POST', query_params={'value': label.id} )
def _add_label_from_class(self, label=None): ''' Add an existing label to this card. ''' return self.fetch_json( uri_path=self.base_uri + '/idLabels', http_method='POST', query_params={'value': label.id} )
[ "Add", "an", "existing", "label", "to", "this", "card", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L158-L166
[ "def", "_add_label_from_class", "(", "self", ",", "label", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/idLabels'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "{", "'value'", ":", "label", ".", "id", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.add_member
Add a member to this card. Returns a list of Member objects.
trolly/card.py
def add_member(self, member_id): ''' Add a member to this card. Returns a list of Member objects. ''' members = self.fetch_json( uri_path=self.base_uri + '/idMembers', http_method='POST', query_params={'value': member_id} ) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
def add_member(self, member_id): ''' Add a member to this card. Returns a list of Member objects. ''' members = self.fetch_json( uri_path=self.base_uri + '/idMembers', http_method='POST', query_params={'value': member_id} ) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
[ "Add", "a", "member", "to", "this", "card", ".", "Returns", "a", "list", "of", "Member", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L168-L182
[ "def", "add_member", "(", "self", ",", "member_id", ")", ":", "members", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/idMembers'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "{", "'value'", ":", "member_id", "}", ")", "members_list", "=", "[", "]", "for", "member_json", "in", "members", ":", "members_list", ".", "append", "(", "self", ".", "create_member", "(", "member_json", ")", ")", "return", "members_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Card.encode_multipart_formdata
Encodes data to upload a file to Trello. Fields is a dictionary of api_key and token. Filename is the name of the file and file_values is the open(file).read() string.
trolly/card.py
def encode_multipart_formdata(self, fields, filename, file_values): ''' Encodes data to updload a file to Trello. Fields is a dictionary of api_key and token. Filename is the name of the file and file_values is the open(file).read() string. ''' boundary = '----------Trello_Boundary_$' crlf = '\r\n' data = [] for key in fields: data.append('--' + boundary) data.append('Content-Disposition: form-data; name="%s"' % key) data.append('') data.append(fields[key]) data.append('--' + boundary) data.append( 'Content-Disposition: form-data; name="file"; filename="%s"' % filename) data.append('Content-Type: %s' % self.get_content_type(filename)) data.append('') data.append(file_values) data.append('--' + boundary + '--') data.append('') # Try and avoid the damn unicode errors data = [str(segment) for segment in data] body = crlf.join(data) content_type = 'multipart/form-data; boundary=%s' % boundary return content_type, body
def encode_multipart_formdata(self, fields, filename, file_values): ''' Encodes data to updload a file to Trello. Fields is a dictionary of api_key and token. Filename is the name of the file and file_values is the open(file).read() string. ''' boundary = '----------Trello_Boundary_$' crlf = '\r\n' data = [] for key in fields: data.append('--' + boundary) data.append('Content-Disposition: form-data; name="%s"' % key) data.append('') data.append(fields[key]) data.append('--' + boundary) data.append( 'Content-Disposition: form-data; name="file"; filename="%s"' % filename) data.append('Content-Type: %s' % self.get_content_type(filename)) data.append('') data.append(file_values) data.append('--' + boundary + '--') data.append('') # Try and avoid the damn unicode errors data = [str(segment) for segment in data] body = crlf.join(data) content_type = 'multipart/form-data; boundary=%s' % boundary return content_type, body
[ "Encodes", "data", "to", "updload", "a", "file", "to", "Trello", ".", "Fields", "is", "a", "dictionary", "of", "api_key", "and", "token", ".", "Filename", "is", "the", "name", "of", "the", "file", "and", "file_values", "is", "the", "open", "(", "file", ")", ".", "read", "()", "string", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/card.py#L214-L248
[ "def", "encode_multipart_formdata", "(", "self", ",", "fields", ",", "filename", ",", "file_values", ")", ":", "boundary", "=", "'----------Trello_Boundary_$'", "crlf", "=", "'\\r\\n'", "data", "=", "[", "]", "for", "key", "in", "fields", ":", "data", ".", "append", "(", "'--'", "+", "boundary", ")", "data", ".", "append", "(", "'Content-Disposition: form-data; name=\"%s\"'", "%", "key", ")", "data", ".", "append", "(", "''", ")", "data", ".", "append", "(", "fields", "[", "key", "]", ")", "data", ".", "append", "(", "'--'", "+", "boundary", ")", "data", ".", "append", "(", "'Content-Disposition: form-data; name=\"file\"; filename=\"%s\"'", "%", "filename", ")", "data", ".", "append", "(", "'Content-Type: %s'", "%", "self", ".", "get_content_type", "(", "filename", ")", ")", "data", ".", "append", "(", "''", ")", "data", ".", "append", "(", "file_values", ")", "data", ".", "append", "(", "'--'", "+", "boundary", "+", "'--'", ")", "data", ".", "append", "(", "''", ")", "# Try and avoid the damn unicode errors", "data", "=", "[", "str", "(", "segment", ")", "for", "segment", "in", "data", "]", "body", "=", "crlf", ".", "join", "(", "data", ")", "content_type", "=", "'multipart/form-data; boundary=%s'", "%", "boundary", "return", "content_type", ",", "body" ]
483dc94c352df40dc05ead31820b059b2545cf82
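The framing produced by `encode_multipart_formdata` above is ordinary multipart/form-data: each field is wrapped between boundary markers and the parts are separated by CRLF. This standalone sketch (independent of Trolly, using a hypothetical boundary string) builds the same shape of body for the api_key/token fields plus one file, which is roughly what the method emits:

CRLF = '\r\n'
BOUNDARY = '----------Example_Boundary_$'

def multipart_body(fields, filename, file_contents):
    # Mirror the framing used above: each form field, then the file part,
    # then the closing boundary marker.
    parts = []
    for name, value in fields.items():
        parts.append('--' + BOUNDARY)
        parts.append('Content-Disposition: form-data; name="%s"' % name)
        parts.append('')
        parts.append(value)
    parts.append('--' + BOUNDARY)
    parts.append('Content-Disposition: form-data; name="file"; filename="%s"' % filename)
    parts.append('Content-Type: text/plain')
    parts.append('')
    parts.append(file_contents)
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    return CRLF.join(parts)

body = multipart_body({'api_key': 'key', 'token': 'token'}, 'notes.txt', 'hello')
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
print(content_type)
print(body)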
test
Member.get_member_information
Get Information for a member. Returns a dictionary of values. Returns: dict
trolly/member.py
def get_member_information(self, query_params=None): ''' Get Information for a member. Returns a dictionary of values. Returns: dict ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_member_information(self, query_params=None): ''' Get Information for a member. Returns a dictionary of values. Returns: dict ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "Information", "for", "a", "member", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/member.py#L17-L27
[ "def", "get_member_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Member.get_cards
Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to
trolly/member.py
def get_cards(self, **query_params): ''' Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to ''' cards = self.get_cards_json(self.base_uri, query_params=query_params) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list
def get_cards(self, **query_params): ''' Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to ''' cards = self.get_cards_json(self.base_uri, query_params=query_params) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list
[ "Get", "all", "cards", "this", "member", "is", "attached", "to", ".", "Return", "a", "list", "of", "Card", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/member.py#L45-L59
[ "def", "get_cards", "(", "self", ",", "*", "*", "query_params", ")", ":", "cards", "=", "self", ".", "get_cards_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "cards_list", "=", "[", "]", "for", "card_json", "in", "cards", ":", "cards_list", ".", "append", "(", "self", ".", "create_card", "(", "card_json", ")", ")", "return", "cards_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Member.get_organisations
Get all organisations this member is attached to. Return a list of Organisation objects. Returns: list(Organisation): Return all organisations this member is attached to
trolly/member.py
def get_organisations(self, **query_params): ''' Get all organisations this member is attached to. Return a list of Organisation objects. Returns: list(Organisation): Return all organisations this member is attached to ''' organisations = self.get_organisations_json(self.base_uri, query_params=query_params) organisations_list = [] for organisation_json in organisations: organisations_list.append( self.create_organisation(organisation_json)) return organisations_list
def get_organisations(self, **query_params): ''' Get all organisations this member is attached to. Return a list of Organisation objects. Returns: list(Organisation): Return all organisations this member is attached to ''' organisations = self.get_organisations_json(self.base_uri, query_params=query_params) organisations_list = [] for organisation_json in organisations: organisations_list.append( self.create_organisation(organisation_json)) return organisations_list
[ "Get", "all", "organisations", "this", "member", "is", "attached", "to", ".", "Return", "a", "list", "of", "Organisation", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/member.py#L61-L78
[ "def", "get_organisations", "(", "self", ",", "*", "*", "query_params", ")", ":", "organisations", "=", "self", ".", "get_organisations_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "organisations_list", "=", "[", "]", "for", "organisation_json", "in", "organisations", ":", "organisations_list", ".", "append", "(", "self", ".", "create_organisation", "(", "organisation_json", ")", ")", "return", "organisations_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Member.create_new_board
Create a new board. name is required in query_params. Returns a Board object. Returns: Board: Returns the created board
trolly/member.py
def create_new_board(self, query_params=None): ''' Create a new board. name is required in query_params. Returns a Board object. Returns: Board: Returns the created board ''' board_json = self.fetch_json( uri_path='/boards', http_method='POST', query_params=query_params or {} ) return self.create_board(board_json)
def create_new_board(self, query_params=None): ''' Create a new board. name is required in query_params. Returns a Board object. Returns: Board: Returns the created board ''' board_json = self.fetch_json( uri_path='/boards', http_method='POST', query_params=query_params or {} ) return self.create_board(board_json)
[ "Create", "a", "new", "board", ".", "name", "is", "required", "in", "query_params", ".", "Returns", "a", "Board", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/member.py#L80-L93
[ "def", "create_new_board", "(", "self", ",", "query_params", "=", "None", ")", ":", "board_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "'/boards'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_board", "(", "board_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
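Chaining the calls documented in these records gives an end-to-end "new project" flow: create a board from a member, add a list to it, then add a card to that list. All three methods appear in these records; the client import path and constructor signatures are assumptions:

from trolly.client import Client                          # assumed import path
from trolly.member import Member

client = Client('your-api-key', 'your-user-auth-token')   # assumed signature
me = Member(client, 'me')                                 # assumed signature

board = me.create_new_board({'name': 'Release 1.0'})
backlog = board.add_list({'name': 'Backlog'})
backlog.add_card({'name': 'Draft announcement'})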
test
singledispatchmethod
Enable singledispatch for class methods. See http://stackoverflow.com/a/24602374/274318
trolly/lib.py
def singledispatchmethod(method): ''' Enable singledispatch for class methods. See http://stackoverflow.com/a/24602374/274318 ''' dispatcher = singledispatch(method) def wrapper(*args, **kw): return dispatcher.dispatch(args[1].__class__)(*args, **kw) wrapper.register = dispatcher.register update_wrapper(wrapper, dispatcher) return wrapper
def singledispatchmethod(method): ''' Enable singledispatch for class methods. See http://stackoverflow.com/a/24602374/274318 ''' dispatcher = singledispatch(method) def wrapper(*args, **kw): return dispatcher.dispatch(args[1].__class__)(*args, **kw) wrapper.register = dispatcher.register update_wrapper(wrapper, dispatcher) return wrapper
[ "Enable", "singledispatch", "for", "class", "methods", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/lib.py#L5-L16
[ "def", "singledispatchmethod", "(", "method", ")", ":", "dispatcher", "=", "singledispatch", "(", "method", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "return", "dispatcher", ".", "dispatch", "(", "args", "[", "1", "]", ".", "__class__", ")", "(", "*", "args", ",", "*", "*", "kw", ")", "wrapper", ".", "register", "=", "dispatcher", ".", "register", "update_wrapper", "(", "wrapper", ",", "dispatcher", ")", "return", "wrapper" ]
483dc94c352df40dc05ead31820b059b2545cf82
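The `singledispatchmethod` helper above exists because functools.singledispatch dispatches on the first positional argument, which for a bound method is `self`; the wrapper dispatches on `args[1]` (the first real argument) instead. A small usage sketch, importing the helper from the module shown in the record and dispatching on the argument type:

from trolly.lib import singledispatchmethod

class Formatter(object):
    @singledispatchmethod
    def render(self, value):
        # Fallback for unregistered types.
        return 'other: %r' % (value,)

    @render.register(int)
    def _(self, value):
        return 'int: %d' % value

    @render.register(str)
    def _(self, value):
        return 'str: %s' % value

f = Formatter()
assert f.render(3) == 'int: 3'
assert f.render('hi') == 'str: hi'
assert f.render(1.5) == 'other: 1.5'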
test
TrelloObject.create_checklist_item
Create a ChecklistItem object from JSON object
trolly/trelloobject.py
def create_checklist_item(self, card_id, checklist_id, checklistitem_json, **kwargs): ''' Create a ChecklistItem object from JSON object ''' return self.client.create_checklist_item(card_id, checklist_id, checklistitem_json, **kwargs)
def create_checklist_item(self, card_id, checklist_id, checklistitem_json, **kwargs): ''' Create a ChecklistItem object from JSON object ''' return self.client.create_checklist_item(card_id, checklist_id, checklistitem_json, **kwargs)
[ "Create", "a", "ChecklistItem", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/trelloobject.py#L130-L134
[ "def", "create_checklist_item", "(", "self", ",", "card_id", ",", "checklist_id", ",", "checklistitem_json", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "client", ".", "create_checklist_item", "(", "card_id", ",", "checklist_id", ",", "checklistitem_json", ",", "*", "*", "kwargs", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_board_information
Get all information for this board. Returns a dictionary of values.
trolly/board.py
def get_board_information(self, query_params=None): ''' Get all information for this board. Returns a dictionary of values. ''' return self.fetch_json( uri_path='/boards/' + self.id, query_params=query_params or {} )
def get_board_information(self, query_params=None): ''' Get all information for this board. Returns a dictionary of values. ''' return self.fetch_json( uri_path='/boards/' + self.id, query_params=query_params or {} )
[ "Get", "all", "information", "for", "this", "board", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L18-L25
[ "def", "get_board_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "'/boards/'", "+", "self", ".", "id", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_lists
Get the lists attached to this board. Returns a list of List objects. Returns: list(List): The lists attached to this board
trolly/board.py
def get_lists(self, **query_params): ''' Get the lists attached to this board. Returns a list of List objects. Returns: list(List): The lists attached to this board ''' lists = self.get_lists_json(self.base_uri, query_params=query_params) lists_list = [] for list_json in lists: lists_list.append(self.create_list(list_json)) return lists_list
def get_lists(self, **query_params): ''' Get the lists attached to this board. Returns a list of List objects. Returns: list(List): The lists attached to this board ''' lists = self.get_lists_json(self.base_uri, query_params=query_params) lists_list = [] for list_json in lists: lists_list.append(self.create_list(list_json)) return lists_list
[ "Get", "the", "lists", "attached", "to", "this", "board", ".", "Returns", "a", "list", "of", "List", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L27-L40
[ "def", "get_lists", "(", "self", ",", "*", "*", "query_params", ")", ":", "lists", "=", "self", ".", "get_lists_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "lists_list", "=", "[", "]", "for", "list_json", "in", "lists", ":", "lists_list", ".", "append", "(", "self", ".", "create_list", "(", "list_json", ")", ")", "return", "lists_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_labels
Get the labels attached to this board. Returns a list of Label objects. Returns: list(Label): The labels attached to this board
trolly/board.py
def get_labels(self, **query_params): ''' Get the labels attached to this board. Returns a label of Label objects. Returns: list(Label): The labels attached to this board ''' labels = self.get_labels_json(self.base_uri, query_params=query_params) labels_list = [] for label_json in labels: labels_list.append(self.create_label(label_json)) return labels_list
def get_labels(self, **query_params): ''' Get the labels attached to this board. Returns a label of Label objects. Returns: list(Label): The labels attached to this board ''' labels = self.get_labels_json(self.base_uri, query_params=query_params) labels_list = [] for label_json in labels: labels_list.append(self.create_label(label_json)) return labels_list
[ "Get", "the", "labels", "attached", "to", "this", "board", ".", "Returns", "a", "label", "of", "Label", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L42-L56
[ "def", "get_labels", "(", "self", ",", "*", "*", "query_params", ")", ":", "labels", "=", "self", ".", "get_labels_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "labels_list", "=", "[", "]", "for", "label_json", "in", "labels", ":", "labels_list", ".", "append", "(", "self", ".", "create_label", "(", "label_json", ")", ")", "return", "labels_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_card
Get a Card for a given card id. Returns a Card object. Returns: Card: The card with the given card_id
trolly/board.py
def get_card(self, card_id, **query_params): ''' Get a Card for a given card id. Returns a Card object. Returns: Card: The card with the given card_id ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards/' + card_id ) return self.create_card(card_json)
def get_card(self, card_id, **query_params): ''' Get a Card for a given card id. Returns a Card object. Returns: Card: The card with the given card_id ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards/' + card_id ) return self.create_card(card_json)
[ "Get", "a", "Card", "for", "a", "given", "card", "id", ".", "Returns", "a", "Card", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L73-L84
[ "def", "get_card", "(", "self", ",", "card_id", ",", "*", "*", "query_params", ")", ":", "card_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/cards/'", "+", "card_id", ")", "return", "self", ".", "create_card", "(", "card_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_checklists
Get the checklists for this board. Returns a list of Checklist objects.
trolly/board.py
def get_checklists( self ): """ Get the checklists for this board. Returns a list of Checklist objects. """ checklists = self.getChecklistsJson( self.base_uri ) checklists_list = [] for checklist_json in checklists: checklists_list.append( self.createChecklist( checklist_json ) ) return checklists_list
def get_checklists( self ): """ Get the checklists for this board. Returns a list of Checklist objects. """ checklists = self.getChecklistsJson( self.base_uri ) checklists_list = [] for checklist_json in checklists: checklists_list.append( self.createChecklist( checklist_json ) ) return checklists_list
[ "Get", "the", "checklists", "for", "this", "board", ".", "Returns", "a", "list", "of", "Checklist", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L86-L96
[ "def", "get_checklists", "(", "self", ")", ":", "checklists", "=", "self", ".", "getChecklistsJson", "(", "self", ".", "base_uri", ")", "checklists_list", "=", "[", "]", "for", "checklist_json", "in", "checklists", ":", "checklists_list", ".", "append", "(", "self", ".", "createChecklist", "(", "checklist_json", ")", ")", "return", "checklists_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.get_organisation
Get the Organisation for this board. Returns Organisation object. Returns: list(Organisation): The organisation attached to this board
trolly/board.py
def get_organisation(self, **query_params): ''' Get the Organisation for this board. Returns Organisation object. Returns: list(Organisation): The organisation attached to this board ''' organisation_json = self.get_organisations_json( self.base_uri, query_params=query_params) return self.create_organisation(organisation_json)
def get_organisation(self, **query_params): ''' Get the Organisation for this board. Returns Organisation object. Returns: list(Organisation): The organisation attached to this board ''' organisation_json = self.get_organisations_json( self.base_uri, query_params=query_params) return self.create_organisation(organisation_json)
[ "Get", "the", "Organisation", "for", "this", "board", ".", "Returns", "Organisation", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L115-L125
[ "def", "get_organisation", "(", "self", ",", "*", "*", "query_params", ")", ":", "organisation_json", "=", "self", ".", "get_organisations_json", "(", "self", ".", "base_uri", ",", "query_params", "=", "query_params", ")", "return", "self", ".", "create_organisation", "(", "organisation_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.update_board
Update this board's information. Returns a new board.
trolly/board.py
def update_board(self, query_params=None): ''' Update this board's information. Returns a new board. ''' board_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_board(board_json)
def update_board(self, query_params=None): ''' Update this board's information. Returns a new board. ''' board_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_board(board_json)
[ "Update", "this", "board", "s", "information", ".", "Returns", "a", "new", "board", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L127-L137
[ "def", "update_board", "(", "self", ",", "query_params", "=", "None", ")", ":", "board_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_board", "(", "board_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.add_list
Create a list for a board. Returns a new List object.
trolly/board.py
def add_list(self, query_params=None): ''' Create a list for a board. Returns a new List object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/lists', http_method='POST', query_params=query_params or {} ) return self.create_list(list_json)
def add_list(self, query_params=None): ''' Create a list for a board. Returns a new List object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/lists', http_method='POST', query_params=query_params or {} ) return self.create_list(list_json)
[ "Create", "a", "list", "for", "a", "board", ".", "Returns", "a", "new", "List", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L139-L149
[ "def", "add_list", "(", "self", ",", "query_params", "=", "None", ")", ":", "list_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/lists'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_list", "(", "list_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Board.add_label
Create a label for a board. Returns a new Label object.
trolly/board.py
def add_label(self, query_params=None): ''' Create a label for a board. Returns a new Label object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} ) return self.create_label(list_json)
def add_label(self, query_params=None): ''' Create a label for a board. Returns a new Label object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} ) return self.create_label(list_json)
[ "Create", "a", "label", "for", "a", "board", ".", "Returns", "a", "new", "Label", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/board.py#L151-L161
[ "def", "add_label", "(", "self", ",", "query_params", "=", "None", ")", ":", "list_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/labels'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")", "return", "self", ".", "create_label", "(", "list_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
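The Board methods above (get_card, get_labels, update_board, add_list, add_label) are thin wrappers that append a path segment to base_uri and hand the JSON response to a create_* factory. The sketch below only illustrates how they might be chained; the Client constructor arguments, the placeholder ids and the 'name'/'color' query parameters are assumptions, not values taken from these records.

```python
# Hypothetical usage sketch for the Board wrappers above.
# The Client constructor signature, the ids and the query parameters
# are placeholders/assumptions, not facts confirmed by this data.
import trolly.client

client = trolly.client.Client('MY_API_KEY', 'MY_AUTH_TOKEN')   # assumed signature
board = client.get_board('BOARD_ID', name='My board')

todo_list = board.add_list(query_params={'name': 'Todo'})       # POST /lists
bug_label = board.add_label(query_params={'name': 'bug',        # POST /labels
                                           'color': 'red'})
card = board.get_card('CARD_ID')                                 # GET /cards/{id}
```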
test
Checklist.get_checklist_information
Get all information for this Checklist. Returns a dictionary of values.
trolly/checklist.py
def get_checklist_information(self, query_params=None): ''' Get all information for this Checklist. Returns a dictionary of values. ''' # We don't use trelloobject.TrelloObject.get_checklist_json, because # that is meant to return lists of checklists. return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
def get_checklist_information(self, query_params=None): ''' Get all information for this Checklist. Returns a dictionary of values. ''' # We don't use trelloobject.TrelloObject.get_checklist_json, because # that is meant to return lists of checklists. return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
[ "Get", "all", "information", "for", "this", "Checklist", ".", "Returns", "a", "dictionary", "of", "values", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L18-L27
[ "def", "get_checklist_information", "(", "self", ",", "query_params", "=", "None", ")", ":", "# We don't use trelloobject.TrelloObject.get_checklist_json, because", "# that is meant to return lists of checklists.", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Checklist.get_card
Get card this checklist is on.
trolly/checklist.py
def get_card(self): ''' Get card this checklist is on. ''' card_id = self.get_checklist_information().get('idCard', None) if card_id: return self.client.get_card(card_id)
def get_card(self): ''' Get card this checklist is on. ''' card_id = self.get_checklist_information().get('idCard', None) if card_id: return self.client.get_card(card_id)
[ "Get", "card", "this", "checklist", "is", "on", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L29-L35
[ "def", "get_card", "(", "self", ")", ":", "card_id", "=", "self", ".", "get_checklist_information", "(", ")", ".", "get", "(", "'idCard'", ",", "None", ")", "if", "card_id", ":", "return", "self", ".", "client", ".", "get_card", "(", "card_id", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Checklist.get_item_objects
Get the items for this checklist. Returns a list of ChecklistItem objects.
trolly/checklist.py
def get_item_objects(self, query_params=None): """ Get the items for this checklist. Returns a list of ChecklistItem objects. """ card = self.get_card() checklistitems_list = [] for checklistitem_json in self.get_items(query_params): checklistitems_list.append(self.create_checklist_item(card.id, self.id, checklistitem_json)) return checklistitems_list
def get_item_objects(self, query_params=None): """ Get the items for this checklist. Returns a list of ChecklistItem objects. """ card = self.get_card() checklistitems_list = [] for checklistitem_json in self.get_items(query_params): checklistitems_list.append(self.create_checklist_item(card.id, self.id, checklistitem_json)) return checklistitems_list
[ "Get", "the", "items", "for", "this", "checklist", ".", "Returns", "a", "list", "of", "ChecklistItem", "objects", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L47-L56
[ "def", "get_item_objects", "(", "self", ",", "query_params", "=", "None", ")", ":", "card", "=", "self", ".", "get_card", "(", ")", "checklistitems_list", "=", "[", "]", "for", "checklistitem_json", "in", "self", ".", "get_items", "(", "query_params", ")", ":", "checklistitems_list", ".", "append", "(", "self", ".", "create_checklist_item", "(", "card", ".", "id", ",", "self", ".", "id", ",", "checklistitem_json", ")", ")", "return", "checklistitems_list" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Checklist.update_checklist
Update the current checklist. Returns a new Checklist object.
trolly/checklist.py
def update_checklist(self, name): ''' Update the current checklist. Returns a new Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_checklist(checklist_json)
def update_checklist(self, name): ''' Update the current checklist. Returns a new Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_checklist(checklist_json)
[ "Update", "the", "current", "checklist", ".", "Returns", "a", "new", "Checklist", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L59-L69
[ "def", "update_checklist", "(", "self", ",", "name", ")", ":", "checklist_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'name'", ":", "name", "}", ")", "return", "self", ".", "create_checklist", "(", "checklist_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Checklist.add_item
Add an item to this checklist. Returns a dictionary of values of new item.
trolly/checklist.py
def add_item(self, query_params=None): ''' Add an item to this checklist. Returns a dictionary of values of new item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', http_method='POST', query_params=query_params or {} )
def add_item(self, query_params=None): ''' Add an item to this checklist. Returns a dictionary of values of new item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', http_method='POST', query_params=query_params or {} )
[ "Add", "an", "item", "to", "this", "checklist", ".", "Returns", "a", "dictionary", "of", "values", "of", "new", "item", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L71-L80
[ "def", "add_item", "(", "self", ",", "query_params", "=", "None", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/checkItems'", ",", "http_method", "=", "'POST'", ",", "query_params", "=", "query_params", "or", "{", "}", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Checklist.remove_item
Deletes an item from this checklist.
trolly/checklist.py
def remove_item(self, item_id): ''' Deletes an item from this checklist. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems/' + item_id, http_method='DELETE' )
def remove_item(self, item_id): ''' Deletes an item from this checklist. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems/' + item_id, http_method='DELETE' )
[ "Deletes", "an", "item", "from", "this", "checklist", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L82-L89
[ "def", "remove_item", "(", "self", ",", "item_id", ")", ":", "return", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/checkItems/'", "+", "item_id", ",", "http_method", "=", "'DELETE'", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
ChecklistItem.update_name
Rename the current checklist item. Returns a new ChecklistItem object.
trolly/checklist.py
def update_name( self, name ): """ Rename the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/name', http_method = 'PUT', query_params = {'value': name} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
def update_name( self, name ): """ Rename the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/name', http_method = 'PUT', query_params = {'value': name} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
[ "Rename", "the", "current", "checklist", "item", ".", "Returns", "a", "new", "ChecklistItem", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L108-L118
[ "def", "update_name", "(", "self", ",", "name", ")", ":", "checklistitem_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/name'", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'value'", ":", "name", "}", ")", "return", "self", ".", "create_checklist_item", "(", "self", ".", "idCard", ",", "self", ".", "idChecklist", ",", "checklistitem_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
ChecklistItem.update_state
Set the state of the current checklist item. Returns a new ChecklistItem object.
trolly/checklist.py
def update_state(self, state): """ Set the state of the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/state', http_method = 'PUT', query_params = {'value': 'complete' if state else 'incomplete'} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
def update_state(self, state): """ Set the state of the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/state', http_method = 'PUT', query_params = {'value': 'complete' if state else 'incomplete'} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
[ "Set", "the", "state", "of", "the", "current", "checklist", "item", ".", "Returns", "a", "new", "ChecklistItem", "object", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/checklist.py#L121-L131
[ "def", "update_state", "(", "self", ",", "state", ")", ":", "checklistitem_json", "=", "self", ".", "fetch_json", "(", "uri_path", "=", "self", ".", "base_uri", "+", "'/state'", ",", "http_method", "=", "'PUT'", ",", "query_params", "=", "{", "'value'", ":", "'complete'", "if", "state", "else", "'incomplete'", "}", ")", "return", "self", ".", "create_checklist_item", "(", "self", ".", "idCard", ",", "self", ".", "idChecklist", ",", "checklistitem_json", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.add_authorisation
Adds the API key and user auth token to the query parameters
trolly/client.py
def add_authorisation(self, query_params): ''' Adds the API key and user auth token to the query parameters ''' query_params['key'] = self.api_key if self.user_auth_token: query_params['token'] = self.user_auth_token return query_params
def add_authorisation(self, query_params): ''' Adds the API key and user auth token to the query parameters ''' query_params['key'] = self.api_key if self.user_auth_token: query_params['token'] = self.user_auth_token return query_params
[ "Adds", "the", "API", "key", "and", "user", "auth", "token", "to", "the", "query", "parameters" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L29-L38
[ "def", "add_authorisation", "(", "self", ",", "query_params", ")", ":", "query_params", "[", "'key'", "]", "=", "self", ".", "api_key", "if", "self", ".", "user_auth_token", ":", "query_params", "[", "'token'", "]", "=", "self", ".", "user_auth_token", "return", "query_params" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.check_errors
Check HTTP response for known errors
trolly/client.py
def check_errors(self, uri, response):
        '''
        Check HTTP response for known errors
        '''
        if response.status == 401:
            raise trolly.Unauthorised(uri, response)

        if response.status != 200:
            raise trolly.ResourceUnavailable(uri, response)
def check_errors(self, uri, response):
        '''
        Check HTTP response for known errors
        '''
        if response.status == 401:
            raise trolly.Unauthorised(uri, response)

        if response.status != 200:
            raise trolly.ResourceUnavailable(uri, response)
[ "Check", "HTTP", "reponse", "for", "known", "errors" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L48-L56
[ "def", "check_errors", "(", "self", ",", "uri", ",", "response", ")", ":", "if", "response", ".", "status", "==", "401", ":", "raise", "trolly", ".", "Unauthorised", "(", "uri", ",", "response", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "trolly", ".", "ResourceUnavailable", "(", "uri", ",", "response", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.build_uri
Build the URI for the API call.
trolly/client.py
def build_uri(self, path, query_params): ''' Build the URI for the API call. ''' url = 'https://api.trello.com/1' + self.clean_path(path) url += '?' + urlencode(query_params) return url
def build_uri(self, path, query_params): ''' Build the URI for the API call. ''' url = 'https://api.trello.com/1' + self.clean_path(path) url += '?' + urlencode(query_params) return url
[ "Build", "the", "URI", "for", "the", "API", "call", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L58-L65
[ "def", "build_uri", "(", "self", ",", "path", ",", "query_params", ")", ":", "url", "=", "'https://api.trello.com/1'", "+", "self", ".", "clean_path", "(", "path", ")", "url", "+=", "'?'", "+", "urlencode", "(", "query_params", ")", "return", "url" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.fetch_json
Make a call to Trello API and capture JSON response. Raises an error when it fails. Returns: dict: Dictionary with the JSON data
trolly/client.py
def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None): ''' Make a call to Trello API and capture JSON response. Raises an error when it fails. Returns: dict: Dictionary with the JSON data ''' query_params = query_params or {} headers = headers or {} query_params = self.add_authorisation(query_params) uri = self.build_uri(uri_path, query_params) allowed_methods = ("POST", "PUT", "DELETE") if http_method in allowed_methods and 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' response, content = self.client.request( uri=uri, method=http_method, body=body, headers=headers ) self.check_errors(uri, response) return json.loads(content.decode('utf-8'))
def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None): ''' Make a call to Trello API and capture JSON response. Raises an error when it fails. Returns: dict: Dictionary with the JSON data ''' query_params = query_params or {} headers = headers or {} query_params = self.add_authorisation(query_params) uri = self.build_uri(uri_path, query_params) allowed_methods = ("POST", "PUT", "DELETE") if http_method in allowed_methods and 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' response, content = self.client.request( uri=uri, method=http_method, body=body, headers=headers ) self.check_errors(uri, response) return json.loads(content.decode('utf-8'))
[ "Make", "a", "call", "to", "Trello", "API", "and", "capture", "JSON", "response", ".", "Raises", "an", "error", "when", "it", "fails", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L67-L96
[ "def", "fetch_json", "(", "self", ",", "uri_path", ",", "http_method", "=", "'GET'", ",", "query_params", "=", "None", ",", "body", "=", "None", ",", "headers", "=", "None", ")", ":", "query_params", "=", "query_params", "or", "{", "}", "headers", "=", "headers", "or", "{", "}", "query_params", "=", "self", ".", "add_authorisation", "(", "query_params", ")", "uri", "=", "self", ".", "build_uri", "(", "uri_path", ",", "query_params", ")", "allowed_methods", "=", "(", "\"POST\"", ",", "\"PUT\"", ",", "\"DELETE\"", ")", "if", "http_method", "in", "allowed_methods", "and", "'Content-Type'", "not", "in", "headers", ":", "headers", "[", "'Content-Type'", "]", "=", "'application/json'", "headers", "[", "'Accept'", "]", "=", "'application/json'", "response", ",", "content", "=", "self", ".", "client", ".", "request", "(", "uri", "=", "uri", ",", "method", "=", "http_method", ",", "body", "=", "body", ",", "headers", "=", "headers", ")", "self", ".", "check_errors", "(", "uri", ",", "response", ")", "return", "json", ".", "loads", "(", "content", ".", "decode", "(", "'utf-8'", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
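Client.fetch_json above is the single choke point for all HTTP traffic: add_authorisation injects the key/token, build_uri prepends the API root and urlencodes the parameters, and the response body is JSON-decoded after check_errors runs. Below is a minimal standalone sketch of that URI assembly (clean_path omitted, placeholder credentials); it mirrors the logic rather than calling the library.

```python
# Standalone sketch of the query assembly done by add_authorisation()
# and build_uri() before fetch_json() issues the request.
# 'KEY'/'TOKEN' are placeholders; clean_path() is skipped for brevity.
from urllib.parse import urlencode


def build_trello_uri(path, query_params, api_key, user_auth_token=None):
    params = dict(query_params)
    params['key'] = api_key            # what add_authorisation() adds
    if user_auth_token:
        params['token'] = user_auth_token
    return 'https://api.trello.com/1' + path + '?' + urlencode(params)


print(build_trello_uri('/boards/abc123/lists', {'name': 'Todo'},
                       api_key='KEY', user_auth_token='TOKEN'))
# https://api.trello.com/1/boards/abc123/lists?name=Todo&key=KEY&token=TOKEN
```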
test
Client.create_organisation
Create an Organisation object from a JSON object Returns: Organisation: The organisation from the given `organisation_json`.
trolly/client.py
def create_organisation(self, organisation_json): ''' Create an Organisation object from a JSON object Returns: Organisation: The organisation from the given `organisation_json`. ''' return trolly.organisation.Organisation( trello_client=self, organisation_id=organisation_json['id'], name=organisation_json['name'], data=organisation_json, )
def create_organisation(self, organisation_json): ''' Create an Organisation object from a JSON object Returns: Organisation: The organisation from the given `organisation_json`. ''' return trolly.organisation.Organisation( trello_client=self, organisation_id=organisation_json['id'], name=organisation_json['name'], data=organisation_json, )
[ "Create", "an", "Organisation", "object", "from", "a", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L98-L110
[ "def", "create_organisation", "(", "self", ",", "organisation_json", ")", ":", "return", "trolly", ".", "organisation", ".", "Organisation", "(", "trello_client", "=", "self", ",", "organisation_id", "=", "organisation_json", "[", "'id'", "]", ",", "name", "=", "organisation_json", "[", "'name'", "]", ",", "data", "=", "organisation_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_board
Create Board object from a JSON object Returns: Board: The board from the given `board_json`.
trolly/client.py
def create_board(self, board_json): ''' Create Board object from a JSON object Returns: Board: The board from the given `board_json`. ''' return trolly.board.Board( trello_client=self, board_id=board_json['id'], name=board_json['name'], data=board_json, )
def create_board(self, board_json): ''' Create Board object from a JSON object Returns: Board: The board from the given `board_json`. ''' return trolly.board.Board( trello_client=self, board_id=board_json['id'], name=board_json['name'], data=board_json, )
[ "Create", "Board", "object", "from", "a", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L112-L124
[ "def", "create_board", "(", "self", ",", "board_json", ")", ":", "return", "trolly", ".", "board", ".", "Board", "(", "trello_client", "=", "self", ",", "board_id", "=", "board_json", "[", "'id'", "]", ",", "name", "=", "board_json", "[", "'name'", "]", ",", "data", "=", "board_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_label
Create Label object from JSON object Returns: Label: The label from the given `label_json`.
trolly/client.py
def create_label(self, label_json): ''' Create Label object from JSON object Returns: Label: The label from the given `label_json`. ''' return trolly.label.Label( trello_client=self, label_id=label_json['id'], name=label_json['name'], data=label_json, )
def create_label(self, label_json): ''' Create Label object from JSON object Returns: Label: The label from the given `label_json`. ''' return trolly.label.Label( trello_client=self, label_id=label_json['id'], name=label_json['name'], data=label_json, )
[ "Create", "Label", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L126-L138
[ "def", "create_label", "(", "self", ",", "label_json", ")", ":", "return", "trolly", ".", "label", ".", "Label", "(", "trello_client", "=", "self", ",", "label_id", "=", "label_json", "[", "'id'", "]", ",", "name", "=", "label_json", "[", "'name'", "]", ",", "data", "=", "label_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_list
Create List object from JSON object Returns: List: The list from the given `list_json`.
trolly/client.py
def create_list(self, list_json): ''' Create List object from JSON object Returns: List: The list from the given `list_json`. ''' return trolly.list.List( trello_client=self, list_id=list_json['id'], name=list_json['name'], data=list_json, )
def create_list(self, list_json): ''' Create List object from JSON object Returns: List: The list from the given `list_json`. ''' return trolly.list.List( trello_client=self, list_id=list_json['id'], name=list_json['name'], data=list_json, )
[ "Create", "List", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L140-L152
[ "def", "create_list", "(", "self", ",", "list_json", ")", ":", "return", "trolly", ".", "list", ".", "List", "(", "trello_client", "=", "self", ",", "list_id", "=", "list_json", "[", "'id'", "]", ",", "name", "=", "list_json", "[", "'name'", "]", ",", "data", "=", "list_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_card
Create a Card object from JSON object Returns: Card: The card from the given `card_json`.
trolly/client.py
def create_card(self, card_json): ''' Create a Card object from JSON object Returns: Card: The card from the given `card_json`. ''' return trolly.card.Card( trello_client=self, card_id=card_json['id'], name=card_json['name'], data=card_json, )
def create_card(self, card_json): ''' Create a Card object from JSON object Returns: Card: The card from the given `card_json`. ''' return trolly.card.Card( trello_client=self, card_id=card_json['id'], name=card_json['name'], data=card_json, )
[ "Create", "a", "Card", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L154-L166
[ "def", "create_card", "(", "self", ",", "card_json", ")", ":", "return", "trolly", ".", "card", ".", "Card", "(", "trello_client", "=", "self", ",", "card_id", "=", "card_json", "[", "'id'", "]", ",", "name", "=", "card_json", "[", "'name'", "]", ",", "data", "=", "card_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_checklist
Create a Checklist object from JSON object Returns: Checklist: The checklist from the given `checklist_json`.
trolly/client.py
def create_checklist(self, checklist_json): ''' Create a Checklist object from JSON object Returns: Checklist: The checklist from the given `checklist_json`. ''' return trolly.checklist.Checklist( trello_client=self, checklist_id=checklist_json['id'], name=checklist_json['name'], data=checklist_json, )
def create_checklist(self, checklist_json): ''' Create a Checklist object from JSON object Returns: Checklist: The checklist from the given `checklist_json`. ''' return trolly.checklist.Checklist( trello_client=self, checklist_id=checklist_json['id'], name=checklist_json['name'], data=checklist_json, )
[ "Create", "a", "Checklist", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L168-L180
[ "def", "create_checklist", "(", "self", ",", "checklist_json", ")", ":", "return", "trolly", ".", "checklist", ".", "Checklist", "(", "trello_client", "=", "self", ",", "checklist_id", "=", "checklist_json", "[", "'id'", "]", ",", "name", "=", "checklist_json", "[", "'name'", "]", ",", "data", "=", "checklist_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_checklist_item
Create a ChecklistItem object from JSON object
trolly/client.py
def create_checklist_item(self, card_id, checklist_id, checklistitem_json): """ Create a ChecklistItem object from JSON object """ return trolly.checklist.ChecklistItem( trello_client=self, card_id=card_id, checklist_id=checklist_id, checklistitem_id=checklistitem_json['id'].encode('utf-8'), name=checklistitem_json['name'].encode('utf-8'), state=checklistitem_json['state'].encode('utf-8') )
def create_checklist_item(self, card_id, checklist_id, checklistitem_json): """ Create a ChecklistItem object from JSON object """ return trolly.checklist.ChecklistItem( trello_client=self, card_id=card_id, checklist_id=checklist_id, checklistitem_id=checklistitem_json['id'].encode('utf-8'), name=checklistitem_json['name'].encode('utf-8'), state=checklistitem_json['state'].encode('utf-8') )
[ "Create", "a", "ChecklistItem", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L182-L193
[ "def", "create_checklist_item", "(", "self", ",", "card_id", ",", "checklist_id", ",", "checklistitem_json", ")", ":", "return", "trolly", ".", "checklist", ".", "ChecklistItem", "(", "trello_client", "=", "self", ",", "card_id", "=", "card_id", ",", "checklist_id", "=", "checklist_id", ",", "checklistitem_id", "=", "checklistitem_json", "[", "'id'", "]", ".", "encode", "(", "'utf-8'", ")", ",", "name", "=", "checklistitem_json", "[", "'name'", "]", ".", "encode", "(", "'utf-8'", ")", ",", "state", "=", "checklistitem_json", "[", "'state'", "]", ".", "encode", "(", "'utf-8'", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.create_member
Create a Member object from JSON object Returns: Member: The member from the given `member_json`.
trolly/client.py
def create_member(self, member_json): ''' Create a Member object from JSON object Returns: Member: The member from the given `member_json`. ''' return trolly.member.Member( trello_client=self, member_id=member_json['id'], name=member_json['fullName'], data=member_json, )
def create_member(self, member_json): ''' Create a Member object from JSON object Returns: Member: The member from the given `member_json`. ''' return trolly.member.Member( trello_client=self, member_id=member_json['id'], name=member_json['fullName'], data=member_json, )
[ "Create", "a", "Member", "object", "from", "JSON", "object" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L195-L207
[ "def", "create_member", "(", "self", ",", "member_json", ")", ":", "return", "trolly", ".", "member", ".", "Member", "(", "trello_client", "=", "self", ",", "member_id", "=", "member_json", "[", "'id'", "]", ",", "name", "=", "member_json", "[", "'fullName'", "]", ",", "data", "=", "member_json", ",", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_organisation
Get an organisation Returns: Organisation: The organisation with the given `id`
trolly/client.py
def get_organisation(self, id, name=None): ''' Get an organisation Returns: Organisation: The organisation with the given `id` ''' return self.create_organisation(dict(id=id, name=name))
def get_organisation(self, id, name=None): ''' Get an organisation Returns: Organisation: The organisation with the given `id` ''' return self.create_organisation(dict(id=id, name=name))
[ "Get", "an", "organisation" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L209-L216
[ "def", "get_organisation", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_organisation", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_board
Get a board Returns: Board: The board with the given `id`
trolly/client.py
def get_board(self, id, name=None): ''' Get a board Returns: Board: The board with the given `id` ''' return self.create_board(dict(id=id, name=name))
def get_board(self, id, name=None): ''' Get a board Returns: Board: The board with the given `id` ''' return self.create_board(dict(id=id, name=name))
[ "Get", "a", "board" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L218-L225
[ "def", "get_board", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_board", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_list
Get a list Returns: List: The list with the given `id`
trolly/client.py
def get_list(self, id, name=None): ''' Get a list Returns: List: The list with the given `id` ''' return self.create_list(dict(id=id, name=name))
def get_list(self, id, name=None): ''' Get a list Returns: List: The list with the given `id` ''' return self.create_list(dict(id=id, name=name))
[ "Get", "a", "list" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L227-L234
[ "def", "get_list", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_list", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_card
Get a card Returns: Card: The card with the given `id`
trolly/client.py
def get_card(self, id, name=None): ''' Get a card Returns: Card: The card with the given `id` ''' return self.create_card(dict(id=id, name=name))
def get_card(self, id, name=None): ''' Get a card Returns: Card: The card with the given `id` ''' return self.create_card(dict(id=id, name=name))
[ "Get", "a", "card" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L236-L243
[ "def", "get_card", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_card", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_checklist
Get a checklist Returns: Checklist: The checklist with the given `id`
trolly/client.py
def get_checklist(self, id, name=None): ''' Get a checklist Returns: Checklist: The checklist with the given `id` ''' return self.create_checklist(dict(id=id, name=name))
def get_checklist(self, id, name=None): ''' Get a checklist Returns: Checklist: The checklist with the given `id` ''' return self.create_checklist(dict(id=id, name=name))
[ "Get", "a", "checklist" ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L245-L252
[ "def", "get_checklist", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_checklist", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
Client.get_member
Get a member or your current member if `id` wasn't given. Returns: Member: The member with the given `id`, defaults to the logged in member.
trolly/client.py
def get_member(self, id='me', name=None): ''' Get a member or your current member if `id` wasn't given. Returns: Member: The member with the given `id`, defaults to the logged in member. ''' return self.create_member(dict(id=id, fullName=name))
def get_member(self, id='me', name=None): ''' Get a member or your current member if `id` wasn't given. Returns: Member: The member with the given `id`, defaults to the logged in member. ''' return self.create_member(dict(id=id, fullName=name))
[ "Get", "a", "member", "or", "your", "current", "member", "if", "id", "wasn", "t", "given", "." ]
its-rigs/Trolly
python
https://github.com/its-rigs/Trolly/blob/483dc94c352df40dc05ead31820b059b2545cf82/trolly/client.py#L254-L262
[ "def", "get_member", "(", "self", ",", "id", "=", "'me'", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_member", "(", "dict", "(", "id", "=", "id", ",", "fullName", "=", "name", ")", ")" ]
483dc94c352df40dc05ead31820b059b2545cf82
test
domain_from_url
Get root domain from url. Will prune away query strings, url paths, protocol prefix and sub-domains Exceptions will be raised on invalid urls
similarweb/utils.py
def domain_from_url(url): """ Get root domain from url. Will prune away query strings, url paths, protocol prefix and sub-domains Exceptions will be raised on invalid urls """ ext = tldextract.extract(url) if not ext.suffix: raise InvalidURLException() new_url = ext.domain + "." + ext.suffix return new_url
def domain_from_url(url): """ Get root domain from url. Will prune away query strings, url paths, protocol prefix and sub-domains Exceptions will be raised on invalid urls """ ext = tldextract.extract(url) if not ext.suffix: raise InvalidURLException() new_url = ext.domain + "." + ext.suffix return new_url
[ "Get", "root", "domain", "from", "url", ".", "Will", "prune", "away", "query", "strings", "url", "paths", "protocol", "prefix", "and", "sub", "-", "domains", "Exceptions", "will", "be", "raised", "on", "invalid", "urls" ]
audiencepi/SimilarWeb-Python
python
https://github.com/audiencepi/SimilarWeb-Python/blob/4bf5e2315815cb622bd64714573f8df29339e615/similarweb/utils.py#L5-L15
[ "def", "domain_from_url", "(", "url", ")", ":", "ext", "=", "tldextract", ".", "extract", "(", "url", ")", "if", "not", "ext", ".", "suffix", ":", "raise", "InvalidURLException", "(", ")", "new_url", "=", "ext", ".", "domain", "+", "\".\"", "+", "ext", ".", "suffix", "return", "new_url" ]
4bf5e2315815cb622bd64714573f8df29339e615
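domain_from_url delegates the parsing to tldextract, which splits a URL into subdomain, registered domain and public suffix; the function keeps only domain + suffix and raises InvalidURLException when no suffix is found. A hedged usage sketch, assuming the similarweb package and its tldextract dependency are installed:

```python
# Hedged usage sketch for domain_from_url(); requires the similarweb
# package (and its tldextract dependency) to be installed.
from similarweb.utils import domain_from_url

print(domain_from_url("https://blog.example.co.uk/posts/1?ref=feed"))
# expected: example.co.uk  (scheme, sub-domain, path and query pruned)

# A value without a recognised public suffix (e.g. "localhost")
# should raise InvalidURLException instead of returning a domain.
```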
test
to_raw_text_markupless
A generator to convert raw text segments, without xml to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated.
ciseau/wiki_markup_processing.py
def to_raw_text_markupless(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, without xml to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ return sent_tokenize( remove_dates(_remove_urls(text)), keep_whitespace, normalize_ascii )
def to_raw_text_markupless(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, without xml to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ return sent_tokenize( remove_dates(_remove_urls(text)), keep_whitespace, normalize_ascii )
[ "A", "generator", "to", "convert", "raw", "text", "segments", "without", "xml", "to", "a", "list", "of", "words", "without", "any", "markup", ".", "Additionally", "dates", "are", "replaced", "by", "7777", "for", "normalization", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/wiki_markup_processing.py#L118-L140
[ "def", "to_raw_text_markupless", "(", "text", ",", "keep_whitespace", "=", "False", ",", "normalize_ascii", "=", "True", ")", ":", "return", "sent_tokenize", "(", "remove_dates", "(", "_remove_urls", "(", "text", ")", ")", ",", "keep_whitespace", ",", "normalize_ascii", ")" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
test
to_raw_text
A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated.
ciseau/wiki_markup_processing.py
def to_raw_text(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_urls(text) out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = _remove_brackets(out) out = remove_remaining_double_brackets(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_dates(out) out = remove_math_sections(out) out = remove_html(out) out = sent_tokenize(out, keep_whitespace, normalize_ascii) return out
def to_raw_text(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_urls(text) out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = _remove_brackets(out) out = remove_remaining_double_brackets(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_dates(out) out = remove_math_sections(out) out = remove_html(out) out = sent_tokenize(out, keep_whitespace, normalize_ascii) return out
[ "A", "generator", "to", "convert", "raw", "text", "segments", "with", "xml", "and", "other", "non", "-", "textual", "content", "to", "a", "list", "of", "words", "without", "any", "markup", ".", "Additionally", "dates", "are", "replaced", "by", "7777", "for", "normalization", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/wiki_markup_processing.py#L143-L175
[ "def", "to_raw_text", "(", "text", ",", "keep_whitespace", "=", "False", ",", "normalize_ascii", "=", "True", ")", ":", "out", "=", "text", "out", "=", "_remove_urls", "(", "text", ")", "out", "=", "_remove_mvar", "(", "out", ")", "out", "=", "_remove_squiggly_bracket", "(", "out", ")", "out", "=", "_remove_table", "(", "out", ")", "out", "=", "_remove_brackets", "(", "out", ")", "out", "=", "remove_remaining_double_brackets", "(", "out", ")", "out", "=", "remove_markup", "(", "out", ")", "out", "=", "remove_wikipedia_link", ".", "sub", "(", "anchor_replacer", ",", "out", ")", "out", "=", "remove_bullets_nbsps", ".", "sub", "(", "empty_space", ",", "out", ")", "out", "=", "remove_dates", "(", "out", ")", "out", "=", "remove_math_sections", "(", "out", ")", "out", "=", "remove_html", "(", "out", ")", "out", "=", "sent_tokenize", "(", "out", ",", "keep_whitespace", ",", "normalize_ascii", ")", "return", "out" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
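to_raw_text is a cleanup pipeline: URLs, {{mvar}} and other curly-brace templates, tables, brackets, wiki markup, anchors, bullets, dates, math sections and residual HTML are stripped in sequence before the text is sentence-tokenized. A hedged usage sketch, assuming the ciseau package is installed:

```python
# Hedged usage sketch for to_raw_text(); assumes ciseau is installed
# and that this module is importable as shown in the path above.
from ciseau.wiki_markup_processing import to_raw_text

wiki = "'''Paris''' is the capital of [[France]]."
for sentence in to_raw_text(wiki):
    # Each sentence is a list of word tokens with the wiki markup
    # removed; the exact output depends on the cleanup rules above.
    print(sentence)
```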
test
to_raw_text_pairings
A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization, along with wikipedia anchors kept. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated.
ciseau/wiki_markup_processing.py
def to_raw_text_pairings(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization, along with wikipedia anchors kept. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_math_sections(out) out = remove_html(out) for sentence in sent_tokenize(out, keep_whitespace, normalize_ascii): yield sentence
def to_raw_text_pairings(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization, along with wikipedia anchors kept. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_math_sections(out) out = remove_html(out) for sentence in sent_tokenize(out, keep_whitespace, normalize_ascii): yield sentence
[ "A", "generator", "to", "convert", "raw", "text", "segments", "with", "xml", "and", "other", "non", "-", "textual", "content", "to", "a", "list", "of", "words", "without", "any", "markup", ".", "Additionally", "dates", "are", "replaced", "by", "7777", "for", "normalization", "along", "with", "wikipedia", "anchors", "kept", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/wiki_markup_processing.py#L178-L207
[ "def", "to_raw_text_pairings", "(", "text", ",", "keep_whitespace", "=", "False", ",", "normalize_ascii", "=", "True", ")", ":", "out", "=", "text", "out", "=", "_remove_mvar", "(", "out", ")", "out", "=", "_remove_squiggly_bracket", "(", "out", ")", "out", "=", "_remove_table", "(", "out", ")", "out", "=", "remove_markup", "(", "out", ")", "out", "=", "remove_wikipedia_link", ".", "sub", "(", "anchor_replacer", ",", "out", ")", "out", "=", "remove_bullets_nbsps", ".", "sub", "(", "empty_space", ",", "out", ")", "out", "=", "remove_math_sections", "(", "out", ")", "out", "=", "remove_html", "(", "out", ")", "for", "sentence", "in", "sent_tokenize", "(", "out", ",", "keep_whitespace", ",", "normalize_ascii", ")", ":", "yield", "sentence" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
test
detect_sentence_boundaries
Subdivide an input list of strings (tokens) into multiple lists according to detected sentence boundaries. ``` detect_sentence_boundaries( ["Cat ", "sat ", "mat", ". ", "Cat ", "'s ", "named ", "Cool", "."] ) #=> [ ["Cat ", "sat ", "mat", ". "], ["Cat ", "'s ", "named ", "Cool", "."] ] ``` Arguments: ---------- tokens : list<str> Returns: -------- list<list<str>> : original list subdivided into multiple lists according to (detected) sentence boundaries.
ciseau/sentence_tokenizer.py
def detect_sentence_boundaries(tokens): """ Subdivide an input list of strings (tokens) into multiple lists according to detected sentence boundaries. ``` detect_sentence_boundaries( ["Cat ", "sat ", "mat", ". ", "Cat ", "'s ", "named ", "Cool", "."] ) #=> [ ["Cat ", "sat ", "mat", ". "], ["Cat ", "'s ", "named ", "Cool", "."] ] ``` Arguments: ---------- tokens : list<str> Returns: -------- list<list<str>> : original list subdivided into multiple lists according to (detected) sentence boundaries. """ tokenized = group_quoted_tokens(tokens) words = [] sentences = [] for i in range(len(tokenized)): # this is a parenthetical: end_sentence = False if isinstance(tokenized[i], list): if len(words) == 0: # end if a sentence finishes inside quoted section, # and no sentence was begun beforehand if is_end_symbol(tokenized[i][-2].rstrip()): end_sentence = True else: # end if a sentence finishes inside quote marks if (tokenized[i][0][0] == '"' and is_end_symbol(tokenized[i][-2].rstrip()) and not tokenized[i][1][0].isupper()): end_sentence = True words.extend(tokenized[i]) else: stripped_tokenized = tokenized[i].rstrip() if is_end_symbol(stripped_tokenized): words.append(tokenized[i]) not_last_word = i + 1 != len(tokenized) next_word_lowercase = ( not_last_word and tokenized[i+1][0].islower() ) next_word_continue_punct = ( not_last_word and tokenized[i+1][0] in CONTINUE_PUNCT_SYMBOLS ) end_sentence = not ( not_last_word and ( next_word_lowercase or next_word_continue_punct ) ) else: words.append(tokenized[i]) if end_sentence: sentences.append(words) words = [] # add final sentence, if it wasn't added yet. if len(words) > 0: sentences.append(words) # If the final word ends in a period: if len(sentences) > 0 and sentences[-1][-1]: alpha_word_piece = word_with_alpha_and_period.match(sentences[-1][-1]) if alpha_word_piece: sentences[-1][-1] = alpha_word_piece.group(1) sentences[-1].append(alpha_word_piece.group(2)) return sentences
def detect_sentence_boundaries(tokens): """ Subdivide an input list of strings (tokens) into multiple lists according to detected sentence boundaries. ``` detect_sentence_boundaries( ["Cat ", "sat ", "mat", ". ", "Cat ", "'s ", "named ", "Cool", "."] ) #=> [ ["Cat ", "sat ", "mat", ". "], ["Cat ", "'s ", "named ", "Cool", "."] ] ``` Arguments: ---------- tokens : list<str> Returns: -------- list<list<str>> : original list subdivided into multiple lists according to (detected) sentence boundaries. """ tokenized = group_quoted_tokens(tokens) words = [] sentences = [] for i in range(len(tokenized)): # this is a parenthetical: end_sentence = False if isinstance(tokenized[i], list): if len(words) == 0: # end if a sentence finishes inside quoted section, # and no sentence was begun beforehand if is_end_symbol(tokenized[i][-2].rstrip()): end_sentence = True else: # end if a sentence finishes inside quote marks if (tokenized[i][0][0] == '"' and is_end_symbol(tokenized[i][-2].rstrip()) and not tokenized[i][1][0].isupper()): end_sentence = True words.extend(tokenized[i]) else: stripped_tokenized = tokenized[i].rstrip() if is_end_symbol(stripped_tokenized): words.append(tokenized[i]) not_last_word = i + 1 != len(tokenized) next_word_lowercase = ( not_last_word and tokenized[i+1][0].islower() ) next_word_continue_punct = ( not_last_word and tokenized[i+1][0] in CONTINUE_PUNCT_SYMBOLS ) end_sentence = not ( not_last_word and ( next_word_lowercase or next_word_continue_punct ) ) else: words.append(tokenized[i]) if end_sentence: sentences.append(words) words = [] # add final sentence, if it wasn't added yet. if len(words) > 0: sentences.append(words) # If the final word ends in a period: if len(sentences) > 0 and sentences[-1][-1]: alpha_word_piece = word_with_alpha_and_period.match(sentences[-1][-1]) if alpha_word_piece: sentences[-1][-1] = alpha_word_piece.group(1) sentences[-1].append(alpha_word_piece.group(2)) return sentences
[ "Subdivide", "an", "input", "list", "of", "strings", "(", "tokens", ")", "into", "multiple", "lists", "according", "to", "detected", "sentence", "boundaries", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/sentence_tokenizer.py#L15-L96
[ "def", "detect_sentence_boundaries", "(", "tokens", ")", ":", "tokenized", "=", "group_quoted_tokens", "(", "tokens", ")", "words", "=", "[", "]", "sentences", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "tokenized", ")", ")", ":", "# this is a parenthetical:", "end_sentence", "=", "False", "if", "isinstance", "(", "tokenized", "[", "i", "]", ",", "list", ")", ":", "if", "len", "(", "words", ")", "==", "0", ":", "# end if a sentence finishes inside quoted section,", "# and no sentence was begun beforehand", "if", "is_end_symbol", "(", "tokenized", "[", "i", "]", "[", "-", "2", "]", ".", "rstrip", "(", ")", ")", ":", "end_sentence", "=", "True", "else", ":", "# end if a sentence finishes inside quote marks", "if", "(", "tokenized", "[", "i", "]", "[", "0", "]", "[", "0", "]", "==", "'\"'", "and", "is_end_symbol", "(", "tokenized", "[", "i", "]", "[", "-", "2", "]", ".", "rstrip", "(", ")", ")", "and", "not", "tokenized", "[", "i", "]", "[", "1", "]", "[", "0", "]", ".", "isupper", "(", ")", ")", ":", "end_sentence", "=", "True", "words", ".", "extend", "(", "tokenized", "[", "i", "]", ")", "else", ":", "stripped_tokenized", "=", "tokenized", "[", "i", "]", ".", "rstrip", "(", ")", "if", "is_end_symbol", "(", "stripped_tokenized", ")", ":", "words", ".", "append", "(", "tokenized", "[", "i", "]", ")", "not_last_word", "=", "i", "+", "1", "!=", "len", "(", "tokenized", ")", "next_word_lowercase", "=", "(", "not_last_word", "and", "tokenized", "[", "i", "+", "1", "]", "[", "0", "]", ".", "islower", "(", ")", ")", "next_word_continue_punct", "=", "(", "not_last_word", "and", "tokenized", "[", "i", "+", "1", "]", "[", "0", "]", "in", "CONTINUE_PUNCT_SYMBOLS", ")", "end_sentence", "=", "not", "(", "not_last_word", "and", "(", "next_word_lowercase", "or", "next_word_continue_punct", ")", ")", "else", ":", "words", ".", "append", "(", "tokenized", "[", "i", "]", ")", "if", "end_sentence", ":", "sentences", ".", "append", "(", "words", ")", "words", "=", "[", "]", "# add final sentence, if it wasn't added yet.", "if", "len", "(", "words", ")", ">", "0", ":", "sentences", ".", "append", "(", "words", ")", "# If the final word ends in a period:", "if", "len", "(", "sentences", ")", ">", "0", "and", "sentences", "[", "-", "1", "]", "[", "-", "1", "]", ":", "alpha_word_piece", "=", "word_with_alpha_and_period", ".", "match", "(", "sentences", "[", "-", "1", "]", "[", "-", "1", "]", ")", "if", "alpha_word_piece", ":", "sentences", "[", "-", "1", "]", "[", "-", "1", "]", "=", "alpha_word_piece", ".", "group", "(", "1", ")", "sentences", "[", "-", "1", "]", ".", "append", "(", "alpha_word_piece", ".", "group", "(", "2", ")", ")", "return", "sentences" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
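A minimal usage sketch for the `detect_sentence_boundaries` record above. It assumes the function is importable from `ciseau.sentence_tokenizer` (the path shown in the record) and that the package-level `ciseau.tokenize` produces the whitespace-carrying tokens the function expects; both import points are assumptions based on these records rather than verified against an installed copy.

```python
from ciseau import tokenize
from ciseau.sentence_tokenizer import detect_sentence_boundaries

# tokenize keeps trailing whitespace on each token, which is the shape
# of input detect_sentence_boundaries expects (see the docstring example).
tokens = tokenize("Cat sat mat. Cat's named Cool.")
sentences = detect_sentence_boundaries(tokens)

# The function only regroups the tokens it is given, so for an input like
# this, joining the groups back together reproduces the original text.
print(sentences)
```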
test
sent_tokenize
Perform sentence + word tokenization on the input text using regular expressions and english/french specific rules. Arguments: ---------- text : str, input string to tokenize keep_whitespace : bool, whether to strip out spaces and newlines. normalize_ascii : bool, perform some replacements on rare characters so that they become easier to process in a ascii pipeline (canonicalize dashes, replace œ -> oe, etc..) Returns: -------- list<list<str>> : sentences with their content held in a list of strings for each token.
ciseau/sentence_tokenizer.py
def sent_tokenize(text, keep_whitespace=False, normalize_ascii=True): """ Perform sentence + word tokenization on the input text using regular expressions and english/french specific rules. Arguments: ---------- text : str, input string to tokenize keep_whitespace : bool, whether to strip out spaces and newlines. normalize_ascii : bool, perform some replacements on rare characters so that they become easier to process in a ascii pipeline (canonicalize dashes, replace œ -> oe, etc..) Returns: -------- list<list<str>> : sentences with their content held in a list of strings for each token. """ sentences = detect_sentence_boundaries( tokenize( text, normalize_ascii ) ) if not keep_whitespace: sentences = remove_whitespace(sentences) return sentences
def sent_tokenize(text, keep_whitespace=False, normalize_ascii=True): """ Perform sentence + word tokenization on the input text using regular expressions and english/french specific rules. Arguments: ---------- text : str, input string to tokenize keep_whitespace : bool, whether to strip out spaces and newlines. normalize_ascii : bool, perform some replacements on rare characters so that they become easier to process in a ascii pipeline (canonicalize dashes, replace œ -> oe, etc..) Returns: -------- list<list<str>> : sentences with their content held in a list of strings for each token. """ sentences = detect_sentence_boundaries( tokenize( text, normalize_ascii ) ) if not keep_whitespace: sentences = remove_whitespace(sentences) return sentences
[ "Perform", "sentence", "+", "word", "tokenization", "on", "the", "input", "text", "using", "regular", "expressions", "and", "english", "/", "french", "specific", "rules", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/sentence_tokenizer.py#L116-L144
[ "def", "sent_tokenize", "(", "text", ",", "keep_whitespace", "=", "False", ",", "normalize_ascii", "=", "True", ")", ":", "sentences", "=", "detect_sentence_boundaries", "(", "tokenize", "(", "text", ",", "normalize_ascii", ")", ")", "if", "not", "keep_whitespace", ":", "sentences", "=", "remove_whitespace", "(", "sentences", ")", "return", "sentences" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
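A short usage sketch for `sent_tokenize`, matching the docstring above: one list of word tokens per detected sentence, with whitespace stripped unless `keep_whitespace=True`. The expected output shown in the comments is illustrative; the exact tokens depend on the abbreviation and punctuation rules in the other records.

```python
import ciseau

# keep_whitespace defaults to False, so tokens come back stripped.
sentences = ciseau.sent_tokenize("Dr. Smith arrived. She sat down.")
for sentence in sentences:
    print(sentence)
# Illustrative shape of the result:
#   ['Dr.', 'Smith', 'arrived', '.']
#   ['She', 'sat', 'down', '.']
```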
test
verbatim_tags
Javascript templates (jquery, handlebars.js, mustache.js) use constructs like: :: {{if condition}} print something{{/if}} This, of course, completely screws up Django templates, because Django thinks {{ and }} means something. The following code preserves {{ }} tokens. This version of verbatim template tag allows you to use tags like url {% url name %}. {% trans "foo" %} or {% csrf_token %} within.
templatetag_handlebars/templatetags/templatetag_handlebars.py
def verbatim_tags(parser, token, endtagname): """ Javascript templates (jquery, handlebars.js, mustache.js) use constructs like: :: {{if condition}} print something{{/if}} This, of course, completely screws up Django templates, because Django thinks {{ and }} means something. The following code preserves {{ }} tokens. This version of verbatim template tag allows you to use tags like url {% url name %}. {% trans "foo" %} or {% csrf_token %} within. """ text_and_nodes = [] while 1: token = parser.tokens.pop(0) if token.contents == endtagname: break if token.token_type == template.base.TOKEN_VAR: text_and_nodes.append('{{') text_and_nodes.append(token.contents) elif token.token_type == template.base.TOKEN_TEXT: text_and_nodes.append(token.contents) elif token.token_type == template.base.TOKEN_BLOCK: try: command = token.contents.split()[0] except IndexError: parser.empty_block_tag(token) try: compile_func = parser.tags[command] except KeyError: parser.invalid_block_tag(token, command, None) try: node = compile_func(parser, token) except template.TemplateSyntaxError as e: if not parser.compile_function_error(token, e): raise text_and_nodes.append(node) if token.token_type == template.base.TOKEN_VAR: text_and_nodes.append('}}') return text_and_nodes
def verbatim_tags(parser, token, endtagname): """ Javascript templates (jquery, handlebars.js, mustache.js) use constructs like: :: {{if condition}} print something{{/if}} This, of course, completely screws up Django templates, because Django thinks {{ and }} means something. The following code preserves {{ }} tokens. This version of verbatim template tag allows you to use tags like url {% url name %}. {% trans "foo" %} or {% csrf_token %} within. """ text_and_nodes = [] while 1: token = parser.tokens.pop(0) if token.contents == endtagname: break if token.token_type == template.base.TOKEN_VAR: text_and_nodes.append('{{') text_and_nodes.append(token.contents) elif token.token_type == template.base.TOKEN_TEXT: text_and_nodes.append(token.contents) elif token.token_type == template.base.TOKEN_BLOCK: try: command = token.contents.split()[0] except IndexError: parser.empty_block_tag(token) try: compile_func = parser.tags[command] except KeyError: parser.invalid_block_tag(token, command, None) try: node = compile_func(parser, token) except template.TemplateSyntaxError as e: if not parser.compile_function_error(token, e): raise text_and_nodes.append(node) if token.token_type == template.base.TOKEN_VAR: text_and_nodes.append('}}') return text_and_nodes
[ "Javascript", "templates", "(", "jquery", "handlebars", ".", "js", "mustache", ".", "js", ")", "use", "constructs", "like", ":" ]
makinacorpus/django-templatetag-handlebars
python
https://github.com/makinacorpus/django-templatetag-handlebars/blob/eed2d85dec8e9e36ad34137fb5fb97bf3e21396f/templatetag_handlebars/templatetags/templatetag_handlebars.py#L15-L64
[ "def", "verbatim_tags", "(", "parser", ",", "token", ",", "endtagname", ")", ":", "text_and_nodes", "=", "[", "]", "while", "1", ":", "token", "=", "parser", ".", "tokens", ".", "pop", "(", "0", ")", "if", "token", ".", "contents", "==", "endtagname", ":", "break", "if", "token", ".", "token_type", "==", "template", ".", "base", ".", "TOKEN_VAR", ":", "text_and_nodes", ".", "append", "(", "'{{'", ")", "text_and_nodes", ".", "append", "(", "token", ".", "contents", ")", "elif", "token", ".", "token_type", "==", "template", ".", "base", ".", "TOKEN_TEXT", ":", "text_and_nodes", ".", "append", "(", "token", ".", "contents", ")", "elif", "token", ".", "token_type", "==", "template", ".", "base", ".", "TOKEN_BLOCK", ":", "try", ":", "command", "=", "token", ".", "contents", ".", "split", "(", ")", "[", "0", "]", "except", "IndexError", ":", "parser", ".", "empty_block_tag", "(", "token", ")", "try", ":", "compile_func", "=", "parser", ".", "tags", "[", "command", "]", "except", "KeyError", ":", "parser", ".", "invalid_block_tag", "(", "token", ",", "command", ",", "None", ")", "try", ":", "node", "=", "compile_func", "(", "parser", ",", "token", ")", "except", "template", ".", "TemplateSyntaxError", "as", "e", ":", "if", "not", "parser", ".", "compile_function_error", "(", "token", ",", "e", ")", ":", "raise", "text_and_nodes", ".", "append", "(", "node", ")", "if", "token", ".", "token_type", "==", "template", ".", "base", ".", "TOKEN_VAR", ":", "text_and_nodes", ".", "append", "(", "'}}'", ")", "return", "text_and_nodes" ]
eed2d85dec8e9e36ad34137fb5fb97bf3e21396f
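A hedged sketch of how a helper like `verbatim_tags` could be wired into a custom Django template tag. The tag name, node class, and `render` body below are hypothetical placeholders, not part of django-templatetag-handlebars; only `template.Library`, `template.Node`, `@register.tag`, and the `(parser, token)` compile-function signature are standard Django APIs, and `verbatim_tags` is assumed importable from the module shown in the record's path.

```python
from django import template

from templatetag_handlebars.templatetags.templatetag_handlebars import verbatim_tags

register = template.Library()


class RawNode(template.Node):  # hypothetical node class, not from the library
    def __init__(self, text_and_nodes):
        self.text_and_nodes = text_and_nodes

    def render(self, context):
        # Render nested Django nodes (e.g. {% url %}, {% trans %}) and pass
        # plain text chunks, including the preserved {{ }} markers, through.
        return ''.join(
            piece.render(context) if isinstance(piece, template.Node) else piece
            for piece in self.text_and_nodes
        )


@register.tag
def rawhandlebars(parser, token):  # hypothetical tag name
    return RawNode(verbatim_tags(parser, token, endtagname='endrawhandlebars'))
```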
test
Keyring.set_password
Write the password in the file.
keyrings/cryptfile/file_base.py
def set_password(self, service, username, password): """Write the password in the file. """ assoc = self._generate_assoc(service, username) # encrypt the password password_encrypted = self.encrypt(password.encode('utf-8'), assoc) # encode with base64 and add line break to untangle config file password_base64 = '\n' + encodebytes(password_encrypted).decode() self._write_config_value(service, username, password_base64)
def set_password(self, service, username, password): """Write the password in the file. """ assoc = self._generate_assoc(service, username) # encrypt the password password_encrypted = self.encrypt(password.encode('utf-8'), assoc) # encode with base64 and add line break to untangle config file password_base64 = '\n' + encodebytes(password_encrypted).decode() self._write_config_value(service, username, password_base64)
[ "Write", "the", "password", "in", "the", "file", "." ]
frispete/keyrings.cryptfile
python
https://github.com/frispete/keyrings.cryptfile/blob/cfa80d4848a5c3c0aeee41a954b2b120c80e69b2/keyrings/cryptfile/file_base.py#L124-L133
[ "def", "set_password", "(", "self", ",", "service", ",", "username", ",", "password", ")", ":", "assoc", "=", "self", ".", "_generate_assoc", "(", "service", ",", "username", ")", "# encrypt the password", "password_encrypted", "=", "self", ".", "encrypt", "(", "password", ".", "encode", "(", "'utf-8'", ")", ",", "assoc", ")", "# encode with base64 and add line break to untangle config file", "password_base64", "=", "'\\n'", "+", "encodebytes", "(", "password_encrypted", ")", ".", "decode", "(", ")", "self", ".", "_write_config_value", "(", "service", ",", "username", ",", "password_base64", ")" ]
cfa80d4848a5c3c0aeee41a954b2b120c80e69b2
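A usage sketch for the encrypted file keyring whose `set_password` is shown above. `CryptFileKeyring` and the `keyring_key` attribute follow the usage documented in the keyrings.cryptfile README; treat the exact module path as an assumption if the installed version differs.

```python
from keyrings.cryptfile.cryptfile import CryptFileKeyring

kr = CryptFileKeyring()
kr.keyring_key = "master passphrase"  # unlocks (or initialises) the encrypted file

# set_password() encrypts the secret with an associated-data tag derived from
# service and username, base64-encodes it with a leading newline, and writes
# it under the service/username entry of the config file.
kr.set_password("imap.example.com", "alice", "s3cret")
assert kr.get_password("imap.example.com", "alice") == "s3cret"
```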
test
protect_shorthand
Annotate locations in a string that contain periods as being true periods or periods that are a part of shorthand (and thus should not be treated as punctuation marks). Arguments: ---------- text : str split_locations : list<int>, same length as text.
ciseau/word_tokenizer.py
def protect_shorthand(text, split_locations): """ Annotate locations in a string that contain periods as being true periods or periods that are a part of shorthand (and thus should not be treated as punctuation marks). Arguments: ---------- text : str split_locations : list<int>, same length as text. """ word_matches = list(re.finditer(word_with_period, text)) total_words = len(word_matches) for i, match in enumerate(word_matches): match_start = match.start() match_end = match.end() for char_pos in range(match_start, match_end): if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1: match_start = char_pos word = text[match_start:match_end] if not word.endswith('.'): # ensure that words contained within other words: # e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of' if (not word[0].isdigit() and split_locations[match_start] == UNDECIDED): split_locations[match_start] = SHOULD_SPLIT continue period_pos = match_end - 1 # this is not the last word, abbreviation # is not the final period of the sentence, # moreover: word_is_in_abbr = word[:-1].lower() in ABBR is_abbr_like = ( word_is_in_abbr or one_letter_long_or_repeating.match(word[:-1]) is not None ) is_digit = False if is_abbr_like else word[:-1].isdigit() is_last_word = i == (total_words - 1) is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace()) is_not_ending = not is_ending abbreviation_and_not_end = ( len(word) > 1 and is_abbr_like and is_not_ending ) if abbreviation_and_not_end and ( (not is_last_word and word_matches[i+1].group(0)[0].islower()) or (not is_last_word and word_matches[i+1].group(0) in PUNCT_SYMBOLS) or word[0].isupper() or word_is_in_abbr or len(word) == 2): # next word is lowercase (e.g. not a new sentence?), or next word # is punctuation or next word is totally uppercase (e.g. 'Mister. # ABAGNALE called to the stand') if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif (is_digit and len(word[:-1]) <= 2 and not is_last_word and word_matches[i+1].group(0).lower() in MONTHS): # a date or weird number with a period: if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif split_locations[period_pos] == UNDECIDED: # split this period into its own segment: split_locations[period_pos] = SHOULD_SPLIT
def protect_shorthand(text, split_locations): """ Annotate locations in a string that contain periods as being true periods or periods that are a part of shorthand (and thus should not be treated as punctuation marks). Arguments: ---------- text : str split_locations : list<int>, same length as text. """ word_matches = list(re.finditer(word_with_period, text)) total_words = len(word_matches) for i, match in enumerate(word_matches): match_start = match.start() match_end = match.end() for char_pos in range(match_start, match_end): if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1: match_start = char_pos word = text[match_start:match_end] if not word.endswith('.'): # ensure that words contained within other words: # e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of' if (not word[0].isdigit() and split_locations[match_start] == UNDECIDED): split_locations[match_start] = SHOULD_SPLIT continue period_pos = match_end - 1 # this is not the last word, abbreviation # is not the final period of the sentence, # moreover: word_is_in_abbr = word[:-1].lower() in ABBR is_abbr_like = ( word_is_in_abbr or one_letter_long_or_repeating.match(word[:-1]) is not None ) is_digit = False if is_abbr_like else word[:-1].isdigit() is_last_word = i == (total_words - 1) is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace()) is_not_ending = not is_ending abbreviation_and_not_end = ( len(word) > 1 and is_abbr_like and is_not_ending ) if abbreviation_and_not_end and ( (not is_last_word and word_matches[i+1].group(0)[0].islower()) or (not is_last_word and word_matches[i+1].group(0) in PUNCT_SYMBOLS) or word[0].isupper() or word_is_in_abbr or len(word) == 2): # next word is lowercase (e.g. not a new sentence?), or next word # is punctuation or next word is totally uppercase (e.g. 'Mister. # ABAGNALE called to the stand') if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif (is_digit and len(word[:-1]) <= 2 and not is_last_word and word_matches[i+1].group(0).lower() in MONTHS): # a date or weird number with a period: if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif split_locations[period_pos] == UNDECIDED: # split this period into its own segment: split_locations[period_pos] = SHOULD_SPLIT
[ "Annotate", "locations", "in", "a", "string", "that", "contain", "periods", "as", "being", "true", "periods", "or", "periods", "that", "are", "a", "part", "of", "shorthand", "(", "and", "thus", "should", "not", "be", "treated", "as", "punctuation", "marks", ")", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L37-L109
[ "def", "protect_shorthand", "(", "text", ",", "split_locations", ")", ":", "word_matches", "=", "list", "(", "re", ".", "finditer", "(", "word_with_period", ",", "text", ")", ")", "total_words", "=", "len", "(", "word_matches", ")", "for", "i", ",", "match", "in", "enumerate", "(", "word_matches", ")", ":", "match_start", "=", "match", ".", "start", "(", ")", "match_end", "=", "match", ".", "end", "(", ")", "for", "char_pos", "in", "range", "(", "match_start", ",", "match_end", ")", ":", "if", "split_locations", "[", "char_pos", "]", "==", "SHOULD_SPLIT", "and", "match_end", "-", "char_pos", ">", "1", ":", "match_start", "=", "char_pos", "word", "=", "text", "[", "match_start", ":", "match_end", "]", "if", "not", "word", ".", "endswith", "(", "'.'", ")", ":", "# ensure that words contained within other words:", "# e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of'", "if", "(", "not", "word", "[", "0", "]", ".", "isdigit", "(", ")", "and", "split_locations", "[", "match_start", "]", "==", "UNDECIDED", ")", ":", "split_locations", "[", "match_start", "]", "=", "SHOULD_SPLIT", "continue", "period_pos", "=", "match_end", "-", "1", "# this is not the last word, abbreviation", "# is not the final period of the sentence,", "# moreover:", "word_is_in_abbr", "=", "word", "[", ":", "-", "1", "]", ".", "lower", "(", ")", "in", "ABBR", "is_abbr_like", "=", "(", "word_is_in_abbr", "or", "one_letter_long_or_repeating", ".", "match", "(", "word", "[", ":", "-", "1", "]", ")", "is", "not", "None", ")", "is_digit", "=", "False", "if", "is_abbr_like", "else", "word", "[", ":", "-", "1", "]", ".", "isdigit", "(", ")", "is_last_word", "=", "i", "==", "(", "total_words", "-", "1", ")", "is_ending", "=", "is_last_word", "and", "(", "match_end", "==", "len", "(", "text", ")", "or", "text", "[", "match_end", ":", "]", ".", "isspace", "(", ")", ")", "is_not_ending", "=", "not", "is_ending", "abbreviation_and_not_end", "=", "(", "len", "(", "word", ")", ">", "1", "and", "is_abbr_like", "and", "is_not_ending", ")", "if", "abbreviation_and_not_end", "and", "(", "(", "not", "is_last_word", "and", "word_matches", "[", "i", "+", "1", "]", ".", "group", "(", "0", ")", "[", "0", "]", ".", "islower", "(", ")", ")", "or", "(", "not", "is_last_word", "and", "word_matches", "[", "i", "+", "1", "]", ".", "group", "(", "0", ")", "in", "PUNCT_SYMBOLS", ")", "or", "word", "[", "0", "]", ".", "isupper", "(", ")", "or", "word_is_in_abbr", "or", "len", "(", "word", ")", "==", "2", ")", ":", "# next word is lowercase (e.g. not a new sentence?), or next word", "# is punctuation or next word is totally uppercase (e.g. 
'Mister.", "# ABAGNALE called to the stand')", "if", "split_locations", "[", "period_pos", "]", "==", "SHOULD_SPLIT", "and", "period_pos", "+", "1", "<", "len", "(", "split_locations", ")", ":", "split_locations", "[", "period_pos", "+", "1", "]", "=", "SHOULD_SPLIT", "split_locations", "[", "period_pos", "]", "=", "SHOULD_NOT_SPLIT", "elif", "(", "is_digit", "and", "len", "(", "word", "[", ":", "-", "1", "]", ")", "<=", "2", "and", "not", "is_last_word", "and", "word_matches", "[", "i", "+", "1", "]", ".", "group", "(", "0", ")", ".", "lower", "(", ")", "in", "MONTHS", ")", ":", "# a date or weird number with a period:", "if", "split_locations", "[", "period_pos", "]", "==", "SHOULD_SPLIT", "and", "period_pos", "+", "1", "<", "len", "(", "split_locations", ")", ":", "split_locations", "[", "period_pos", "+", "1", "]", "=", "SHOULD_SPLIT", "split_locations", "[", "period_pos", "]", "=", "SHOULD_NOT_SPLIT", "elif", "split_locations", "[", "period_pos", "]", "==", "UNDECIDED", ":", "# split this period into its own segment:", "split_locations", "[", "period_pos", "]", "=", "SHOULD_SPLIT" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
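A sketch of how `protect_shorthand` fits into the per-character split-marker scheme used throughout `ciseau/word_tokenizer.py`. It assumes the marker constants (`UNDECIDED`, `SHOULD_SPLIT`, `SHOULD_NOT_SPLIT`) are importable from that module alongside the function, which these records suggest but do not prove.

```python
from ciseau.word_tokenizer import protect_shorthand, UNDECIDED

text = "Mr. Rogers arrived at 5 p.m. yesterday."
split_locations = [UNDECIDED] * len(text)  # one decision slot per character

protect_shorthand(text, split_locations)

# Periods belonging to shorthand such as "Mr." should now be marked
# SHOULD_NOT_SPLIT, while true sentence-ending periods get SHOULD_SPLIT,
# so later passes keep abbreviations attached to their word.
```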
test
split_with_locations
Use an integer list to split the string contained in `text`. Arguments: ---------- text : str, same length as locations. locations : list<int>, contains values 'SHOULD_SPLIT', 'UNDECIDED', and 'SHOULD_NOT_SPLIT'. Will create strings between each 'SHOULD_SPLIT' locations. Returns: -------- Generator<str> : the substrings of text corresponding to the slices given in locations.
ciseau/word_tokenizer.py
def split_with_locations(text, locations): """ Use an integer list to split the string contained in `text`. Arguments: ---------- text : str, same length as locations. locations : list<int>, contains values 'SHOULD_SPLIT', 'UNDECIDED', and 'SHOULD_NOT_SPLIT'. Will create strings between each 'SHOULD_SPLIT' locations. Returns: -------- Generator<str> : the substrings of text corresponding to the slices given in locations. """ start = 0 for pos, decision in enumerate(locations): if decision == SHOULD_SPLIT: if start != pos: yield text[start:pos] start = pos if start != len(text): yield text[start:]
def split_with_locations(text, locations): """ Use an integer list to split the string contained in `text`. Arguments: ---------- text : str, same length as locations. locations : list<int>, contains values 'SHOULD_SPLIT', 'UNDECIDED', and 'SHOULD_NOT_SPLIT'. Will create strings between each 'SHOULD_SPLIT' locations. Returns: -------- Generator<str> : the substrings of text corresponding to the slices given in locations. """ start = 0 for pos, decision in enumerate(locations): if decision == SHOULD_SPLIT: if start != pos: yield text[start:pos] start = pos if start != len(text): yield text[start:]
[ "Use", "an", "integer", "list", "to", "split", "the", "string", "contained", "in", "text", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L112-L138
[ "def", "split_with_locations", "(", "text", ",", "locations", ")", ":", "start", "=", "0", "for", "pos", ",", "decision", "in", "enumerate", "(", "locations", ")", ":", "if", "decision", "==", "SHOULD_SPLIT", ":", "if", "start", "!=", "pos", ":", "yield", "text", "[", "start", ":", "pos", "]", "start", "=", "pos", "if", "start", "!=", "len", "(", "text", ")", ":", "yield", "text", "[", "start", ":", "]" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
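A worked example of the `split_with_locations` generator above, with the marker constants assumed importable as in the previous sketch.

```python
from ciseau.word_tokenizer import (
    split_with_locations, UNDECIDED, SHOULD_SPLIT
)

text = "a,b"
locations = [UNDECIDED, SHOULD_SPLIT, SHOULD_SPLIT]  # split before and after ","

print(list(split_with_locations(text, locations)))
# ['a', ',', 'b'] -- each SHOULD_SPLIT starts a new substring,
# and the trailing remainder is yielded at the end.
```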
test
mark_regex
Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions.
ciseau/word_tokenizer.py
def mark_regex(regex, text, split_locations): """ Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions. """ for match in regex.finditer(text): end_match = match.end() if end_match < len(split_locations): split_locations[end_match] = SHOULD_SPLIT
def mark_regex(regex, text, split_locations): """ Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions. """ for match in regex.finditer(text): end_match = match.end() if end_match < len(split_locations): split_locations[end_match] = SHOULD_SPLIT
[ "Regex", "that", "adds", "a", "SHOULD_SPLIT", "marker", "at", "the", "end", "location", "of", "each", "matching", "group", "of", "the", "given", "regex", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L141-L155
[ "def", "mark_regex", "(", "regex", ",", "text", ",", "split_locations", ")", ":", "for", "match", "in", "regex", ".", "finditer", "(", "text", ")", ":", "end_match", "=", "match", ".", "end", "(", ")", "if", "end_match", "<", "len", "(", "split_locations", ")", ":", "split_locations", "[", "end_match", "]", "=", "SHOULD_SPLIT" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
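A small sketch of `mark_regex`: any compiled pattern can be used to stamp SHOULD_SPLIT right after each match. The comma pattern below is purely illustrative, not one of the module's real regexes, and the constants are assumed importable as above.

```python
import re
from ciseau.word_tokenizer import mark_regex, UNDECIDED

comma = re.compile(r",")
text = "a,b,c"
locations = [UNDECIDED] * len(text)

mark_regex(comma, text, locations)
# The positions just past each "," (indices 2 and 4) are now SHOULD_SPLIT;
# a match ending at the very end of the string is skipped by the bounds check.
```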
test
mark_begin_end_regex
Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex, and adds a 'SHOULD_SPLIT' at the beginning of the matching group. Each character within the matching group will be marked as 'SHOULD_NOT_SPLIT'. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions.
ciseau/word_tokenizer.py
def mark_begin_end_regex(regex, text, split_locations): """ Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex, and adds a 'SHOULD_SPLIT' at the beginning of the matching group. Each character within the matching group will be marked as 'SHOULD_NOT_SPLIT'. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions. """ for match in regex.finditer(text): end_match = match.end() begin_match = match.start() for i in range(begin_match+1, end_match): split_locations[i] = SHOULD_NOT_SPLIT if end_match < len(split_locations): if split_locations[end_match] == UNDECIDED: split_locations[end_match] = SHOULD_SPLIT if split_locations[begin_match] == UNDECIDED: split_locations[begin_match] = SHOULD_SPLIT
def mark_begin_end_regex(regex, text, split_locations): """ Regex that adds a 'SHOULD_SPLIT' marker at the end location of each matching group of the given regex, and adds a 'SHOULD_SPLIT' at the beginning of the matching group. Each character within the matching group will be marked as 'SHOULD_NOT_SPLIT'. Arguments --------- regex : re.Expression text : str, same length as split_locations split_locations : list<int>, split decisions. """ for match in regex.finditer(text): end_match = match.end() begin_match = match.start() for i in range(begin_match+1, end_match): split_locations[i] = SHOULD_NOT_SPLIT if end_match < len(split_locations): if split_locations[end_match] == UNDECIDED: split_locations[end_match] = SHOULD_SPLIT if split_locations[begin_match] == UNDECIDED: split_locations[begin_match] = SHOULD_SPLIT
[ "Regex", "that", "adds", "a", "SHOULD_SPLIT", "marker", "at", "the", "end", "location", "of", "each", "matching", "group", "of", "the", "given", "regex", "and", "adds", "a", "SHOULD_SPLIT", "at", "the", "beginning", "of", "the", "matching", "group", ".", "Each", "character", "within", "the", "matching", "group", "will", "be", "marked", "as", "SHOULD_NOT_SPLIT", "." ]
JonathanRaiman/ciseau
python
https://github.com/JonathanRaiman/ciseau/blob/f72d1c82d85eeb3d3ac9fac17690041725402175/ciseau/word_tokenizer.py#L158-L182
[ "def", "mark_begin_end_regex", "(", "regex", ",", "text", ",", "split_locations", ")", ":", "for", "match", "in", "regex", ".", "finditer", "(", "text", ")", ":", "end_match", "=", "match", ".", "end", "(", ")", "begin_match", "=", "match", ".", "start", "(", ")", "for", "i", "in", "range", "(", "begin_match", "+", "1", ",", "end_match", ")", ":", "split_locations", "[", "i", "]", "=", "SHOULD_NOT_SPLIT", "if", "end_match", "<", "len", "(", "split_locations", ")", ":", "if", "split_locations", "[", "end_match", "]", "==", "UNDECIDED", ":", "split_locations", "[", "end_match", "]", "=", "SHOULD_SPLIT", "if", "split_locations", "[", "begin_match", "]", "==", "UNDECIDED", ":", "split_locations", "[", "begin_match", "]", "=", "SHOULD_SPLIT" ]
f72d1c82d85eeb3d3ac9fac17690041725402175
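For contrast with `mark_regex`, a sketch of `mark_begin_end_regex`, which marks both boundaries of a match and protects its interior. The decimal-number pattern is only an illustration; the import of the marker constants is the same assumption as in the sketches above.

```python
import re
from ciseau.word_tokenizer import mark_begin_end_regex, UNDECIDED

number = re.compile(r"\d+\.\d+")  # illustrative pattern only
text = "pi is 3.14 exactly"
locations = [UNDECIDED] * len(text)

mark_begin_end_regex(number, text, locations)
# The start of "3.14" and the position right after it become SHOULD_SPLIT
# (if still undecided), while the characters inside the match after its
# first one are forced to SHOULD_NOT_SPLIT, so "3.14" stays a single token.
```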