INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Parse the next token in the stream.
def get_token(s, pos, brackets_are_chars=True, environments=True, **parse_flags):
    """
    Parse the next token in the stream.

    Returns a `LatexToken`. Raises `LatexWalkerEndOfStream` if the end of the
    stream is reached.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_token()` instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_token(pos=pos,
                            brackets_are_chars=brackets_are_chars,
                            environments=environments)
Reads a latex expression, e.g. a macro argument. This may be a single char, an escape sequence, or an expression placed in braces.
def get_latex_expression(s, pos, **parse_flags):
    """
    Reads a latex expression, e.g. a macro argument. This may be a single
    char, an escape sequence, or an expression placed in braces.

    Returns a tuple `(<LatexNode instance>, pos, len)`. `pos` is the first
    char of the expression, and `len` is its length.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_expression()` instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_expression(pos=pos)
Attempts to parse an optional argument. Returns a tuple (groupnode, pos, len) on success; otherwise returns None.
def get_latex_maybe_optional_arg(s, pos, **parse_flags):
    """
    Attempts to parse an optional argument. Returns a tuple
    `(groupnode, pos, len)` if successful, otherwise returns None.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_maybe_optional_arg()`
       instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_maybe_optional_arg(pos=pos)
Reads a latex expression enclosed in braces {...}. The first token of s[pos:] must be an opening brace.
def get_latex_braced_group(s, pos, brace_type='{', **parse_flags):
    """
    Reads a latex expression enclosed in braces {...}. The first token of
    `s[pos:]` must be an opening brace.

    Returns a tuple `(node, pos, len)`. `pos` is the first char of the
    expression (which has to be an opening brace), and `len` is its length,
    including the closing brace.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_braced_group()` instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_braced_group(pos=pos, brace_type=brace_type)
Reads a latex expression enclosed in a \\begin{environment}...\\end{environment}. The first token in the stream must be the \\begin{environment}.
def get_latex_environment(s, pos, environmentname=None, **parse_flags):
    """
    Reads a latex expression enclosed in a
    \\begin{environment}...\\end{environment}. The first token in the stream
    must be the \\begin{environment}.

    Returns a tuple (node, pos, len) with node being a
    :py:class:`LatexEnvironmentNode`.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_environment()` instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_environment(pos=pos,
                                        environmentname=environmentname)
Parses latex content s.
def get_latex_nodes(s, pos=0, stop_upon_closing_brace=None,
                    stop_upon_end_environment=None,
                    stop_upon_closing_mathmode=None, **parse_flags):
    """
    Parses latex content `s`.

    Returns a tuple `(nodelist, pos, len)` where nodelist is a list of
    `LatexNode` 's.

    If `stop_upon_closing_brace` is given, then `len` includes the closing
    brace, but the closing brace is not included in any of the nodes in the
    `nodelist`.

    .. deprecated:: 1.0
       Please use :py:meth:`LatexWalker.get_latex_nodes()` instead.
    """
    # Thin compatibility shim: delegate to a throw-away LatexWalker instance.
    walker = LatexWalker(s, **parse_flags)
    return walker.get_latex_nodes(
        stop_upon_closing_brace=stop_upon_closing_brace,
        stop_upon_end_environment=stop_upon_end_environment,
        stop_upon_closing_mathmode=stop_upon_closing_mathmode)
Parses the latex content given to the constructor (and stored in self.s), starting at position pos, to parse a single token as defined by :py:class:`LatexToken`.
def get_token(self, pos, brackets_are_chars=True, environments=True,
              keep_inline_math=None):
    """
    Parses the latex content given to the constructor (and stored in
    `self.s`), starting at position `pos`, to parse a single "token", as
    defined by :py:class:`LatexToken`.

    Parse the token in the stream pointed to at position `pos`.

    Returns a :py:class:`LatexToken`. Raises :py:exc:`LatexWalkerEndOfStream`
    if end of stream reached.

    If `brackets_are_chars=False`, then square bracket characters count as
    'brace_open' and 'brace_close' token types (see :py:class:`LatexToken`);
    otherwise (the default) they are considered just like other normal
    characters.

    If `environments=False`, then '\\begin' and '\\end' tokens count as
    regular 'macro' tokens (see :py:class:`LatexToken`); otherwise (the
    default) they are considered as the token types 'begin_environment' and
    'end_environment'.

    If `keep_inline_math` is not `None`, then that value overrides that of
    `self.keep_inline_math` for the duration of this method call.
    """
    s = self.s # shorthand

    with _PushPropOverride(self, 'keep_inline_math', keep_inline_math):

        # collect leading whitespace into `space`; it is reported as the
        # token's pre_space.
        space = ''
        while (pos < len(s) and s[pos].isspace()):
            space += s[pos]
            pos += 1
            if (space.endswith('\n\n')):
                # two \n's indicate new paragraph.  Adding pre-space is
                # overkill here I think.
                return LatexToken(tok='char', arg='\n\n', pos=pos-2, len=2,
                                  pre_space='')

        if (pos >= len(s)):
            raise LatexWalkerEndOfStream()

        if (s[pos] == '\\'):
            # escape sequence
            i = 2
            macro = s[pos+1] # next char is necessarily part of macro
            # following chars part of macro only if all are alphabetical
            isalphamacro = False
            if (s[pos+1].isalpha()):
                isalphamacro = True
                while pos+i<len(s) and s[pos+i].isalpha():
                    macro += s[pos+i]
                    i += 1
                # possibly followed by a star
                if (pos+i<len(s) and s[pos+i] == '*'):
                    macro += '*'
                    i += 1

            # see if we have a begin/end environment
            if (environments and (macro == 'begin' or macro == 'end')):
                # \begin{environment} or \end{environment}
                envmatch = re.match(r'^\s*\{([\w*]+)\}', s[pos+i:])
                if (envmatch is None):
                    raise LatexWalkerParseError(
                        s=s, pos=pos,
                        msg="Bad \\%s macro: expected {environment}" %(macro)
                    )
                return LatexToken(
                    tok=('begin_environment' if macro == 'begin'
                         else 'end_environment'),
                    arg=envmatch.group(1),
                    pos=pos,
                    len=i+envmatch.end(), # !!envmatch.end() counts from pos+i
                    pre_space=space
                )

            # get the following whitespace, and store it in the macro's
            # post_space
            post_space = ''
            if isalphamacro:
                # important, LaTeX does not consume space after non-alpha
                # macros, like \&
                while pos+i<len(s) and s[pos+i].isspace():
                    post_space += s[pos+i]
                    i += 1

            return LatexToken(tok='macro', arg=macro, pos=pos, len=i,
                              pre_space=space, post_space=post_space)

        if (s[pos] == '%'):
            # latex comment: runs until (and including) the end of the line,
            # plus any following indentation whitespace (kept as post_space).
            m = re.search(r'(\n|\r|\n\r)\s*', s[pos:])
            mlen = None
            if m is not None:
                arglen = m.start() # relative to pos already
                mlen = m.end() # relative to pos already
                mspace = m.group()
            else:
                # comment runs until the very end of the string
                arglen = len(s)-pos# [ ==len(s[pos:]) ]
                mlen = arglen
                mspace = ''
            return LatexToken(tok='comment', arg=s[pos+1:pos+arglen], pos=pos,
                              len=mlen, pre_space=space, post_space=mspace)

        # square brackets count as braces only when requested (e.g. when
        # parsing an optional argument).
        openbracechars = '{'
        closebracechars = '}'
        if not brackets_are_chars:
            openbracechars += '['
            closebracechars += ']'

        if s[pos] in openbracechars:
            return LatexToken(tok='brace_open', arg=s[pos], pos=pos, len=1,
                              pre_space=space)

        if s[pos] in closebracechars:
            return LatexToken(tok='brace_close', arg=s[pos], pos=pos, len=1,
                              pre_space=space)

        # check if it is an inline math char, if we care about inline math.
        if (s[pos] == '$' and self.keep_inline_math):
            # check that we don't have double-$$, which would be a display
            # environment.
            if not (pos+1 < len(s) and s[pos+1] == '$'):
                return LatexToken(tok='mathmode_inline', arg=s[pos], pos=pos,
                                  len=1, pre_space=space)
            # otherwise, proceed to 'char' type.

        # anything else: a single ordinary character token.
        return LatexToken(tok='char', arg=s[pos], pos=pos, len=1,
                          pre_space=space)
Parses the latex content given to the constructor (and stored in self.s), starting at position pos, to parse a single LaTeX expression.
def get_latex_expression(self, pos, strict_braces=None):
    """
    Parses the latex content given to the constructor (and stored in
    `self.s`), starting at position `pos`, to parse a single LaTeX
    expression.

    Reads a latex expression, e.g. macro argument. This may be a single
    char, an escape sequence, or a expression placed in braces. This is
    what TeX calls a "token" (and not what we call a token... anyway).

    Returns a tuple `(node, pos, len)`, where `pos` is the position of the
    first char of the expression and `len` the length of the expression.
    """
    with _PushPropOverride(self, 'strict_braces', strict_braces):

        tok = self.get_token(pos, environments=False, keep_inline_math=False)

        if (tok.tok == 'macro'):
            if (tok.arg == 'end'):
                if not self.tolerant_parsing:
                    # error, this should be an \end{environment}, not an
                    # argument in itself
                    raise LatexWalkerParseError("Expected expression, got \end",
                                                self.s, pos)
                else:
                    # tolerant mode: swallow the problem, yield an empty node
                    return (LatexCharsNode(chars=''), tok.pos, 0)
            return (LatexMacroNode(macroname=tok.arg, nodeoptarg=None,
                                   nodeargs=[],
                                   macro_post_space=tok.post_space),
                    tok.pos, tok.len)
        if (tok.tok == 'comment'):
            # skip comments and retry just past them
            return self.get_latex_expression(pos+tok.len)
        if (tok.tok == 'brace_open'):
            return self.get_latex_braced_group(tok.pos)
        if (tok.tok == 'brace_close'):
            if (self.strict_braces and not self.tolerant_parsing):
                raise LatexWalkerParseError(
                    "Expected expression, got closing brace!", self.s, pos)
            # lenient: treat the stray closing brace as an empty expression
            return (LatexCharsNode(chars=''), tok.pos, 0)
        if (tok.tok == 'char'):
            return (LatexCharsNode(chars=tok.arg), tok.pos, tok.len)

        raise LatexWalkerParseError("Unknown token type: %s" %(tok.tok),
                                    self.s, pos)
Parses the latex content given to the constructor ( and stored in self. s ) starting at position pos to attempt to parse an optional argument.
def get_latex_maybe_optional_arg(self, pos):
    """
    Parses the latex content given to the constructor (and stored in
    `self.s`), starting at position `pos`, to attempt to parse an optional
    argument.

    Attempts to parse an optional argument. If this is successful, we return
    a tuple `(node, pos, len)` if success where `node` is a
    :py:class:`LatexGroupNode`. Otherwise, this method returns None.
    """
    # Look at the next token with '[' treated as an opening brace; bail out
    # early if it is not an opening bracket.
    token = self.get_token(pos, brackets_are_chars=False, environments=False)
    if token.tok != 'brace_open' or token.arg != '[':
        return None
    return self.get_latex_braced_group(pos, brace_type='[')
Parses the latex content given to the constructor ( and stored in self. s ) starting at position pos to read a latex group delimited by braces.
def get_latex_braced_group(self, pos, brace_type='{'):
    """
    Parses the latex content given to the constructor (and stored in
    `self.s`), starting at position `pos`, to read a latex group delimited
    by braces.

    Reads a latex expression enclosed in braces ``{ ... }``. The first token
    of `s[pos:]` must be an opening brace.

    Returns a tuple `(node, pos, len)`, where `node` is a
    :py:class:`LatexGroupNode` instance, `pos` is the position of the first
    char of the expression (which has to be an opening brace), and `len` is
    the length of the group, including the closing brace (relative to the
    starting position).

    Raises :py:exc:`LatexWalkerParseError` if `brace_type` is not ``'{'`` or
    ``'['``, or if the first token is not the expected opening delimiter.
    """
    closing_brace = None
    if (brace_type == '{'):
        closing_brace = '}'
    elif (brace_type == '['):
        closing_brace = ']'
    else:
        # fix: error message previously misspelled "Uknown"
        raise LatexWalkerParseError(s=self.s, pos=pos,
                                    msg="Unknown brace type: %s" %(brace_type))

    # square brackets are only brace tokens while parsing an optional
    # argument; otherwise they are ordinary characters.
    brackets_are_chars = (brace_type != '[')

    firsttok = self.get_token(pos, brackets_are_chars=brackets_are_chars)
    if (firsttok.tok != 'brace_open' or firsttok.arg != brace_type):
        raise LatexWalkerParseError(
            s=self.s, pos=pos,
            msg='get_latex_braced_group: not an opening brace/bracket: %s'
                %(self.s[pos])
        )

    # parse the group contents; `len` returned by get_latex_nodes includes
    # the closing brace.
    (nodelist, npos, nlen) = self.get_latex_nodes(
        firsttok.pos + firsttok.len,
        stop_upon_closing_brace=closing_brace)

    return (LatexGroupNode(nodelist=nodelist), firsttok.pos,
            npos + nlen - firsttok.pos)
Parses the latex content given to the constructor (and stored in self.s), starting at position pos, to read a latex environment.
def get_latex_environment(self, pos, environmentname=None):
    r"""
    Parses the latex content given to the constructor (and stored in
    `self.s`), starting at position `pos`, to read a latex environment.

    Reads a latex expression enclosed in a
    ``\begin{environment}...\end{environment}``. The first token in the
    stream must be the ``\begin{environment}``.

    If `environmentname` is given and nonempty, then additionally a
    :py:exc:`LatexWalkerParseError` is raised if the environment in the
    input stream does not match the provided name.

    This function will attempt to heuristically parse an optional argument,
    and possibly a mandatory argument given to the environment. No space is
    allowed between ``\begin{environment}`` and an opening square bracket or
    opening brace.

    Returns a tuple (node, pos, len) with node being a
    :py:class:`LatexEnvironmentNode`.
    """
    startpos = pos

    firsttok = self.get_token(pos)
    if (firsttok.tok != 'begin_environment' or
        (environmentname is not None and firsttok.arg != environmentname)):
        raise LatexWalkerParseError(
            s=self.s, pos=pos,
            msg=r'get_latex_environment: expected \begin{%s}: %s' %(
                environmentname if environmentname is not None
                else '<environment name>',
                firsttok.arg
            ))
    if (environmentname is None):
        environmentname = firsttok.arg

    pos = firsttok.pos + firsttok.len

    optargs = []
    args = []

    # see if the \begin{environment} is immediately followed by some
    # options.  Important: Don't eat the brace of a commutator!!  Don't
    # allow any space between the environment and the open bracket.
    #
    # fix: guard `pos < len(self.s)` before indexing -- previously, input
    # ending right after ``\begin{environment}`` raised a raw IndexError
    # here instead of the end-of-stream handling further below.
    optargtuple = None
    if (pos < len(self.s) and self.s[pos] == '['):
        optargtuple = self.get_latex_maybe_optional_arg(pos)

    if (optargtuple is not None):
        optargs.append(optargtuple[0])
        pos = optargtuple[1]+optargtuple[2]
    else:
        # Try to see if we have a mandatory argument. Don't use get_token
        # as we don't want to skip any space.
        if pos < len(self.s) and self.s[pos] == '{':
            (argnode, apos, alen) = self.get_latex_braced_group(pos)
            args.append(argnode)
            pos = apos+alen

    # parse the environment body up to the matching \end{environment}
    (nodelist, npos, nlen) = self.get_latex_nodes(
        pos, stop_upon_end_environment=environmentname)

    return (LatexEnvironmentNode(envname=environmentname,
                                 nodelist=nodelist,
                                 optargs=optargs,
                                 args=args),
            startpos, npos+nlen-startpos)
Parses the latex content given to the constructor ( and stored in self. s ) into a list of nodes.
def get_latex_nodes(self, pos=0, stop_upon_closing_brace=None,
                    stop_upon_end_environment=None,
                    stop_upon_closing_mathmode=None):
    """
    Parses the latex content given to the constructor (and stored in
    `self.s`) into a list of nodes.

    Returns a tuple `(nodelist, pos, len)` where nodelist is a list of
    :py:class:`LatexNode`\ 's.

    If `stop_upon_closing_brace` is given and set to a character, then
    parsing stops once the given closing brace is encountered (but not
    inside a subgroup). The brace is given as a character, ']' or '}'. The
    returned `len` includes the closing brace, but the closing brace is not
    included in any of the nodes in the `nodelist`.

    If `stop_upon_end_environment` is provided, then parsing stops once the
    given environment was closed. If there is an environment mismatch, then
    a `LatexWalkerParseError` is raised except in tolerant parsing mode (see
    py:meth:`parse_flags()`). Again, the closing environment is included in
    the length count but not the nodes.

    If `stop_upon_closing_mathmode` is specified, then the parsing stops
    once the corresponding math mode (assumed already open) is closed.
    Currently, only inline math modes delimited by ``$`` are supported.
    I.e., currently, if set, only the value
    ``stop_upon_closing_mathmode='$'`` is valid.
    """
    nodelist = []

    # square brackets are brace tokens only while reading an optional
    # argument (i.e., when we are told to stop upon ']').
    brackets_are_chars = True
    if (stop_upon_closing_brace == ']'):
        brackets_are_chars = False

    origpos = pos

    # small mutable holder so the nested do_read() can advance the position
    # and accumulate pending plain characters across calls.
    class PosPointer:
        def __init__(self, pos=0, lastchars=''):
            self.pos = pos
            self.lastchars = lastchars

    p = PosPointer(pos)

    def do_read(nodelist, p):
        """
        Read a single token and process it, recursing into brace blocks and
        environments etc if needed, and appending stuff to nodelist.

        Return True whenever we should stop trying to read more. (e.g. upon
        reaching the a matched stop_upon_end_environment etc.)
        """
        try:
            tok = self.get_token(p.pos, brackets_are_chars=brackets_are_chars)
        except LatexWalkerEndOfStream:
            if self.tolerant_parsing:
                return True
            raise # re-raise

        p.pos = tok.pos + tok.len

        # if it's a char, just append it to the stream of last characters.
        if (tok.tok == 'char'):
            p.lastchars += tok.pre_space + tok.arg
            return False

        # if it's not a char, push the last `p.lastchars` into the node list
        # before anything else
        if len(p.lastchars):
            strnode = LatexCharsNode(chars=p.lastchars+tok.pre_space)
            nodelist.append(strnode)
            p.lastchars = ''
        elif len(tok.pre_space):
            # If we have pre_space, add a separate chars node that contains
            # the spaces. We do this seperately, so that latex2text can
            # ignore these groups by default to avoid too much space on the
            # output. This allows latex2text to implement the
            # `strict_latex_spaces=True` flag correctly.
            spacestrnode = LatexCharsNode(chars=tok.pre_space)
            nodelist.append(spacestrnode)

        # and see what the token is.

        if (tok.tok == 'brace_close'):
            # we've reached the end of the group. stop the parsing.
            if (tok.arg != stop_upon_closing_brace):
                if (not self.tolerant_parsing):
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg='Unexpected mismatching closing brace: `%s\''
                            %(tok.arg)
                    )
                return False
            return True

        if (tok.tok == 'end_environment'):
            # we've reached the end of an environment.
            if (tok.arg != stop_upon_end_environment):
                if (not self.tolerant_parsing):
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg=('Unexpected mismatching closing environment: '
                             '`%s\', expecting `%s\''
                             %(tok.arg, stop_upon_end_environment))
                    )
                return False
            return True

        if (tok.tok == 'mathmode_inline'):
            # if we care about keeping math mode inlines verbatim, gulp all
            # of the expression.
            if stop_upon_closing_mathmode is not None:
                if stop_upon_closing_mathmode != '$':
                    raise LatexWalkerParseError(
                        s=self.s, pos=tok.pos,
                        msg='Unexpected mismatching closing math mode: `$\''
                    )
                return True

            # we have encountered a new math inline, so gulp all of the math
            # expression
            (mathinline_nodelist, mpos, mlen) = self.get_latex_nodes(
                p.pos, stop_upon_closing_mathmode='$')
            p.pos = mpos + mlen

            nodelist.append(LatexMathNode(displaytype='inline',
                                          nodelist=mathinline_nodelist))
            return

        if (tok.tok == 'comment'):
            commentnode = LatexCommentNode(comment=tok.arg,
                                           comment_post_space=tok.post_space)
            nodelist.append(commentnode)
            return

        if (tok.tok == 'brace_open'):
            # another braced group to read.
            (groupnode, bpos, blen) = self.get_latex_braced_group(tok.pos)
            p.pos = bpos + blen
            nodelist.append(groupnode)
            return

        if (tok.tok == 'begin_environment'):
            # an environment to read.
            (envnode, epos, elen) = self.get_latex_environment(
                tok.pos, environmentname=tok.arg)
            p.pos = epos + elen
            # add node and continue.
            nodelist.append(envnode)
            return

        if (tok.tok == 'macro'):
            # read a macro. see if it has arguments.
            nodeoptarg = None
            nodeargs = []
            macname = tok.arg.rstrip('*') # for lookup in macro_dict
            if macname in self.macro_dict:
                mac = self.macro_dict[macname]

                def getoptarg(pos):
                    """
                    Gets a possibly optional argument. returns
                    (argnode, new-pos) where argnode might be `None` if the
                    argument was not specified.
                    """
                    optarginfotuple = self.get_latex_maybe_optional_arg(pos)
                    if optarginfotuple is not None:
                        (nodeoptarg, optargpos, optarglen) = optarginfotuple
                        return (nodeoptarg, optargpos+optarglen)
                    return (None, pos)

                def getarg(pos):
                    """
                    Gets a mandatory argument.

                    returns (argnode, new-pos)
                    """
                    (nodearg, npos, nlen) = self.get_latex_expression(
                        pos, strict_braces=False)
                    return (nodearg, npos + nlen)

                if mac.optarg:
                    (nodeoptarg, p.pos) = getoptarg(p.pos)

                if isinstance(mac.numargs, _basestring):
                    # specific argument specification: one spec char per
                    # argument, '{' = mandatory, '[' = optional
                    for arg in mac.numargs:
                        if arg == '{':
                            (node, p.pos) = getarg(p.pos)
                            nodeargs.append(node)
                        elif arg == '[':
                            (node, p.pos) = getoptarg(p.pos)
                            nodeargs.append(node)
                        else:
                            raise LatexWalkerError(
                                "Unknown macro argument kind for macro %s: %s"
                                % (mac.macroname, arg)
                            )
                else:
                    # numeric count of mandatory arguments
                    for n in range(mac.numargs):
                        (nodearg, p.pos) = getarg(p.pos)
                        nodeargs.append(nodearg)

            # unknown macros get no arguments parsed at all
            nodelist.append(LatexMacroNode(macroname=tok.arg,
                                           nodeoptarg=nodeoptarg,
                                           nodeargs=nodeargs,
                                           macro_post_space=tok.post_space))
            return None

        raise LatexWalkerParseError(s=self.s, pos=p.pos,
                                    msg="Unknown token: %r" %(tok))

    while True:
        try:
            r_endnow = do_read(nodelist, p)
        except LatexWalkerEndOfStream:
            if stop_upon_closing_brace or stop_upon_end_environment:
                # unexpected eof
                if (not self.tolerant_parsing):
                    raise LatexWalkerError("Unexpected end of stream!")
                else:
                    r_endnow = False
            else:
                r_endnow = True
        if (r_endnow):
            # add last chars
            if (p.lastchars):
                strnode = LatexCharsNode(chars=p.lastchars)
                nodelist.append(strnode)
            return (nodelist, origpos, p.pos - origpos)

    raise LatexWalkerError( # lgtm [py/unreachable-statement]
        "CONGRATULATIONS !! "
        "You are the first human to telepathically break an infinite loop !!!!!!!"
    )
Extracts text from content meant for database indexing. content is some LaTeX code.
def latex2text(content, tolerant_parsing=False, keep_inline_math=False,
               keep_comments=False):
    """
    Extracts text from `content` meant for database indexing. `content` is
    some LaTeX code.

    .. deprecated:: 1.0
       Please use :py:class:`LatexNodes2Text` instead.
    """
    # Parse the LaTeX into a node list, then hand it over to the (also
    # deprecated) node-list-to-text helper.
    (nodelist, _pos, _len) = latexwalker.get_latex_nodes(
        content,
        keep_inline_math=keep_inline_math,
        tolerant_parsing=tolerant_parsing)
    return latexnodes2text(nodelist,
                           keep_inline_math=keep_inline_math,
                           keep_comments=keep_comments)
Extracts text from a node list. nodelist is a list of nodes as returned by: py: func: pylatexenc. latexwalker. get_latex_nodes ().
def latexnodes2text(nodelist, keep_inline_math=False, keep_comments=False):
    """
    Extracts text from a node list. `nodelist` is a list of nodes as
    returned by :py:func:`pylatexenc.latexwalker.get_latex_nodes()`.

    .. deprecated:: 1.0
       Please use :py:class:`LatexNodes2Text` instead.
    """
    # Compatibility shim around the LatexNodes2Text class API.
    converter = LatexNodes2Text(keep_inline_math=keep_inline_math,
                                keep_comments=keep_comments)
    return converter.nodelist_to_text(nodelist)
Set where to look for input files when encountering the \\input or \\include macro.
def set_tex_input_directory(self, tex_input_directory,
                            latex_walker_init_args=None, strict_input=True):
    """
    Set where to look for input files when encountering the ``\\input`` or
    ``\\include`` macro.

    Alternatively, you may also override :py:meth:`read_input_file()` to
    implement a custom file lookup mechanism.

    The argument `tex_input_directory` is the directory relative to which to
    search for input files.

    If `strict_input` is set to `True`, then we always check that the
    referenced file lies within the subtree of `tex_input_directory`,
    prohibiting for instance hacks with '..' in filenames or using symbolic
    links to refer to files out of the directory tree.

    The argument `latex_walker_init_args` allows you to specify the parse
    flags passed to the constructor of
    :py:class:`pylatexenc.latexwalker.LatexWalker` when parsing the input
    file.
    """
    self.tex_input_directory = tex_input_directory
    self.latex_walker_init_args = (latex_walker_init_args
                                   if latex_walker_init_args
                                   else {})
    self.strict_input = strict_input

    # Install (or neutralize) the \input and \include macro handlers: when a
    # directory is set, both macros trigger the input callback; otherwise
    # their contents are simply discarded.
    for macroname in ('input', 'include'):
        if tex_input_directory:
            self.macro_dict[macroname] = MacroDef(
                macroname, lambda n: self._callback_input(n))
        else:
            self.macro_dict[macroname] = MacroDef(macroname, discard=True)
This method may be overridden to implement a custom lookup mechanism when encountering \\ input or \\ include directives.
def read_input_file(self, fn):
    """
    This method may be overridden to implement a custom lookup mechanism
    when encountering ``\\input`` or ``\\include`` directives.

    The default implementation looks for a file of the given name relative
    to the directory set by :py:meth:`set_tex_input_directory()`. If
    `strict_input=True` was set, we ensure strictly that the file resides in
    a subtree of the reference input directory (after canonicalizing the
    paths and resolving all symlinks).

    You may override this method to obtain the input data in however way you
    see fit. (In that case, a call to `set_tex_input_directory()` may not be
    needed as that function simply sets properties which are used by the
    default implementation of `read_input_file()`.)

    This function accepts the referred filename as argument (the argument to
    the ``\\input`` macro), and should return a string with the file
    contents (or generate a warning or raise an error).
    """
    fnfull = os.path.realpath(os.path.join(self.tex_input_directory, fn))
    if self.strict_input:
        # make sure that the input file is strictly within dirfull, and
        # didn't escape with '../..' tricks or via symlinks.
        dirfull = os.path.realpath(self.tex_input_directory)
        # fix: a bare startswith(dirfull) prefix test also accepted sibling
        # paths such as '/data/foo-secret' when the directory is
        # '/data/foo'; require the directory itself or a path under it
        # (separator included in the comparison).
        if fnfull != dirfull and not fnfull.startswith(dirfull + os.path.sep):
            logger.warning(
                "Can't access path '%s' leading outside of mandated directory [strict input mode]",
                fn
            )
            return ''

    # allow "\input{doc}" to find doc.tex / doc.latex
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.tex'):
        fnfull = fnfull + '.tex'
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.latex'):
        fnfull = fnfull + '.latex'
    if not os.path.isfile(fnfull):
        logger.warning(u"Error, file doesn't exist: '%s'", fn)
        return ''

    logger.debug("Reading input file %r", fnfull)

    try:
        with open(fnfull) as f:
            return f.read()
    except IOError as e:
        logger.warning(u"Error, can't access '%s': %s", fn, e)
        return ''
Parses the given latex code and returns its textual representation.
def latex_to_text(self, latex, **parse_flags):
    """
    Parses the given `latex` code and returns its textual representation.

    The `parse_flags` are the flags to give on to the
    :py:class:`pylatexenc.latexwalker.LatexWalker` constructor.
    """
    # Parse, then convert only the node list (first element of the returned
    # (nodelist, pos, len) tuple) to text.
    walker = latexwalker.LatexWalker(latex, **parse_flags)
    nodelist = walker.get_latex_nodes()[0]
    return self.nodelist_to_text(nodelist)
Extracts text from a node list. nodelist is a list of nodes as returned by: py: meth: pylatexenc. latexwalker. LatexWalker. get_latex_nodes ().
def nodelist_to_text(self, nodelist):
    """
    Extracts text from a node list. `nodelist` is a list of nodes as
    returned by
    :py:meth:`pylatexenc.latexwalker.LatexWalker.get_latex_nodes()`.

    In addition to converting each node in the list to text using
    `node_to_text()`, we apply some global replacements and fine-tuning to
    the resulting text to account for `text_replacements` (e.g., to fix
    quotes, tab alignment ``&`` chars, etc.)
    """
    text = self._nodelistcontents_to_text(nodelist)

    # now, perform suitable replacements.  A "pattern" is either a compiled
    # regular expression (detected via its .sub attribute) or a plain string.
    for pattern, replacement in self.text_replacements:
        if hasattr(pattern, 'sub'):
            text = pattern.sub(replacement, text)
        else:
            text = text.replace(pattern, replacement)

    if not self.keep_inline_math:
        # removing math mode inline signs, just keep their Unicode counterparts..
        text = text.replace('$', '')

    return text
Turn the node list into text representations of each node. Basically, apply node_to_text() to each node. (But not quite exactly, since we take some care as to where we add whitespace.)
def _nodelistcontents_to_text(self, nodelist): """ Turn the node list to text representations of each node. Basically apply `node_to_text()` to each node. (But not quite actually, since we take some care as to where we add whitespace.) """ s = '' prev_node = None for node in nodelist: if self._is_bare_macro_node(prev_node) and node.isNodeType(latexwalker.LatexCharsNode): if not self.strict_latex_spaces['between-macro-and-chars']: # after a macro with absolutely no arguments, include post_space # in output by default if there are other chars that follow. # This is for more breathing space (especially in equations(?)), # and for compatibility with earlier versions of pylatexenc (<= # 1.3). This is NOT LaTeX' default behavior (see issue #11), so # only do this if `strict_latex_spaces=False`. s += prev_node.macro_post_space s += self.node_to_text(node) prev_node = node return s
Return the textual representation of the given node.
def node_to_text(self, node, prev_node_hint=None):
    """
    Return the textual representation of the given `node`.

    If `prev_node_hint` is specified, then the current node is formatted
    suitably as following the node given in `prev_node_hint`. This might
    affect how much space we keep/discard, etc.
    """
    if node is None:
        return ""

    if node.isNodeType(latexwalker.LatexCharsNode):
        # Unless in strict latex spaces mode, ignore nodes consisting only
        # of empty chars, as this tends to produce too much space... These
        # have been inserted by LatexWalker() in some occasions to keep
        # track of all relevant pre_space of tokens, such as between two
        # braced groups ("{one} {two}") or other such situations.
        if (not self.strict_latex_spaces['between-latex-constructs']
                and len(node.chars.strip()) == 0):
            return ""
        return node.chars

    if node.isNodeType(latexwalker.LatexCommentNode):
        if self.keep_comments:
            if self.strict_latex_spaces['after-comment']:
                return '%' + node.comment + '\n'
            else:
                # default spaces, i.e., keep what spaces were already there
                # after the comment
                return '%' + node.comment + node.comment_post_space
        else:
            if self.strict_latex_spaces['after-comment']:
                return ""
            else:
                # default spaces, i.e., keep what spaces were already there
                # after the comment.  This can be useful to preserve
                # e.g. indentation of the next line
                return node.comment_post_space

    if node.isNodeType(latexwalker.LatexGroupNode):
        contents = self._groupnodecontents_to_text(node)
        if (self.keep_braced_groups
                and len(contents) >= self.keep_braced_groups_minlen):
            return "{" + contents + "}"
        return contents

    def apply_simplify_repl(node, simplify_repl, nodelistargs, what):
        # apply a macro/environment replacement spec: a callable (optionally
        # accepting an `l2tobj` keyword), a %-format string, or a plain
        # string.
        if callable(simplify_repl):
            if 'l2tobj' in getfullargspec(simplify_repl)[0]:
                # callable accepts an argument named 'l2tobj', provide
                # pointer to self
                return simplify_repl(node, l2tobj=self)
            return simplify_repl(node)
        if '%' in simplify_repl:
            try:
                return simplify_repl % tuple(
                    [self._groupnodecontents_to_text(nn)
                     for nn in nodelistargs])
            except (TypeError, ValueError):
                logger.warning(
                    "WARNING: Error in configuration: {} failed its substitution!"
                    .format(what)
                )
                return simplify_repl # too bad, keep the percent signs as they are...
        return simplify_repl

    if node.isNodeType(latexwalker.LatexMacroNode):
        # get macro behavior definition.
        macroname = node.macroname.rstrip('*')
        if macroname in self.macro_dict:
            mac = self.macro_dict[macroname]
        else:
            # no predefined behavior, use default:
            mac = self.macro_dict['']

        def get_macro_str_repl(node, macroname, mac):
            if mac.simplify_repl:
                return apply_simplify_repl(node, mac.simplify_repl,
                                           node.nodeargs,
                                           what="macro '%s'"%(macroname))
            if mac.discard:
                return ""
            # fix: the original called `a.prepend(...)` -- Python lists have
            # no `prepend` method, so this raised AttributeError whenever a
            # default-handled macro had an optional argument.  Use
            # insert(0, ...) on a copy, so the node's own argument list is
            # not mutated either.
            a = list(node.nodeargs)
            if (node.nodeoptarg):
                a.insert(0, node.nodeoptarg)
            return "".join([self._groupnodecontents_to_text(n) for n in a])

        macrostr = get_macro_str_repl(node, macroname, mac)
        return macrostr

    if node.isNodeType(latexwalker.LatexEnvironmentNode):
        # get environment behavior definition.
        envname = node.envname.rstrip('*')
        if (envname in self.env_dict):
            envdef = self.env_dict[envname]
        else:
            # no predefined behavior, use default:
            envdef = self.env_dict['']
        if envdef.simplify_repl:
            return apply_simplify_repl(node, envdef.simplify_repl,
                                       node.nodelist,
                                       what="environment '%s'"%(envname))
        if envdef.discard:
            return ""
        return self._nodelistcontents_to_text(node.nodelist)

    if node.isNodeType(latexwalker.LatexMathNode):
        if self.keep_inline_math:
            # we care about math modes and we should keep this verbatim
            return latexwalker.math_node_to_latex(node)
        else:
            # note, this here only happens if the latexwalker had
            # keep_inline_math=True
            with _PushEquationContext(self):
                return self._nodelistcontents_to_text(node.nodelist)

    logger.warning("LatexNodes2Text.node_to_text(): Unknown node: %r", node)

    # discard anything else.
    return ""
Encode a UTF-8 string to a LaTeX snippet.
def utf8tolatex(s, non_ascii_only=False, brackets=True, substitute_bad_chars=False,
                fail_bad_chars=False):
    u"""
    Encode a UTF-8 string to a LaTeX snippet.

    If `non_ascii_only` is set to `True`, then usual (ascii) characters such
    as ``#``, ``{``, ``}`` etc. will not be escaped. If set to `False` (the
    default), they are escaped to their respective LaTeX escape sequences.

    If `brackets` is set to `True` (the default), then LaTeX macros are
    enclosed in brackets. For example, ``sant\N{LATIN SMALL LETTER E WITH ACUTE}``
    is replaced by ``sant{\\'e}`` if `brackets=True` and by ``sant\\'e`` if
    `brackets=False`.

    .. warning::

        Using `brackets=False` might give you an invalid LaTeX string, so
        avoid it! (for instance, ``ma\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}tre``
        will be replaced incorrectly by ``ma\\^\\itre`` resulting in an unknown
        macro ``\\itre``).

    If `substitute_bad_chars=True`, then any non-ascii character for which no
    LaTeX escape sequence is known is replaced by a question mark in boldface.
    Otherwise (by default), the character is left as it is.

    If `fail_bad_chars=True`, then a `ValueError` is raised if we cannot find
    a character substitution for any non-ascii character.

    .. versionchanged:: 1.3
        Added `fail_bad_chars` switch
    """
    s = unicode(s)  # make sure s is unicode
    # NFC-normalize so that combining sequences match the precomposed keys
    # used in the utf82latex lookup table.
    s = unicodedata.normalize('NFC', s)

    if not s:
        return ""

    result = u""
    for ch in s:
        #log.longdebug("Encoding char %r", ch)
        if (non_ascii_only and ord(ch) < 127):
            # caller only wants non-ascii escaped; pass plain ascii through
            result += ch
        else:
            # look up a known LaTeX replacement for this code point
            lch = utf82latex.get(ord(ch), None)
            if (lch is not None):
                # add brackets if needed, i.e. if we have a substituting macro.
                # note: in condition, beware, that lch might be of zero length.
                result += ('{'+lch+'}' if brackets and lch[0:1] == '\\' else lch)
            elif ((ord(ch) >= 32 and ord(ch) <= 127) or (ch in "\n\r\t")):
                # ordinary printable ascii char, just add it
                result += ch
            else:
                # non-ascii char with no known replacement: fail, substitute,
                # or pass through unchanged depending on the switches.
                msg = u"Character cannot be encoded into LaTeX: U+%04X - `%s'" % (
                    ord(ch), ch)
                if fail_bad_chars:
                    raise ValueError(msg)

                log.warning(msg)
                if substitute_bad_chars:
                    result += r'{\bfseries ?}'
                else:
                    # keep unescaped char
                    result += ch

    return result
Unpack `\\uNNNN` escapes in s and encode the result as UTF-8.
def _unascii(s):
    """Unpack `\\uNNNN` escapes in 's' and encode the result as UTF-8

    This method takes the output of the JSONEncoder and expands any \\uNNNN
    escapes it finds (except for \\u0000 to \\u001F, which are converted to
    \\xNN escapes).

    For performance, it assumes that the input is valid JSON, and performs
    few sanity checks.
    """

    # make the fast path fast: if there are no matches in the string, the
    # whole thing is ascii. On python 2, that means we're done. On python 3,
    # we have to turn it into a bytes, which is quickest with encode('utf-8')
    m = _U_ESCAPE.search(s)
    if not m:
        return s if PY2 else s.encode('utf-8')

    # appending to a string (or a bytes) is slooow, so we accumulate sections
    # of string result in 'chunks', and join them all together later.
    # (It doesn't seem to make much difference whether we accumulate
    # utf8-encoded bytes, or strings which we utf-8 encode after rejoining)
    #
    chunks = []

    # 'pos' tracks the index in 's' that we have processed into 'chunks' so
    # far.
    pos = 0

    while m:
        start = m.start()
        end = m.end()

        g = m.group(1)

        if g is None:
            # escaped backslash: pass it through along with anything before the
            # match
            chunks.append(s[pos:end])
        else:
            # \uNNNN, but we have to watch out for surrogate pairs.
            #
            # On python 2, str.encode("utf-8") will decode utf-16 surrogates
            # before re-encoding, so it's fine for us to pass the surrogates
            # through. (Indeed we must, to deal with UCS-2 python builds, per
            # https://github.com/matrix-org/python-canonicaljson/issues/12).
            #
            # On python 3, str.encode("utf-8") complains about surrogates, so
            # we have to unpack them.
            c = int(g, 16)
            if c < 0x20:
                # leave as a \uNNNN escape
                chunks.append(s[pos:end])
            else:
                if PY3:  # pragma nocover
                    # high surrogate (0xD800-0xDBFF) followed by another
                    # \uNNNN escape: combine the pair into one code point.
                    if c & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u':
                        esc2 = s[end + 2:end + 6]
                        c2 = int(esc2, 16)
                        if c2 & 0xfc00 == 0xdc00:
                            c = 0x10000 + (((c - 0xd800) << 10) |
                                           (c2 - 0xdc00))
                            end += 6  # consume the low-surrogate escape too

                chunks.append(s[pos:start])
                chunks.append(unichr(c))

        pos = end
        m = _U_ESCAPE.search(s, pos)

    # pass through anything after the last match
    chunks.append(s[pos:])

    return (''.join(chunks)).encode("utf-8")
Get information for this organisation. Returns a dictionary of values.
def get_organisation_information(self, query_params=None): ''' Get information fot this organisation. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get all the boards for this organisation. Returns a list of Board s.
def get_boards(self, **query_params): ''' Get all the boards for this organisation. Returns a list of Board s. Returns: list(Board): The boards attached to this organisation ''' boards = self.get_boards_json(self.base_uri, query_params=query_params) boards_list = [] for board_json in boards: boards_list.append(self.create_board(board_json)) return boards_list
Get all members attached to this organisation. Returns a list of Member objects
def get_members(self, **query_params): ''' Get all members attached to this organisation. Returns a list of Member objects Returns: list(Member): The members attached to this organisation ''' members = self.get_members_json(self.base_uri, query_params=query_params) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
Update this organisations information. Returns a new organisation object.
def update_organisation(self, query_params=None): ''' Update this organisations information. Returns a new organisation object. ''' organisation_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_organisation(organisation_json)
Remove a member from the organisation. Returns JSON of all members if successful or raises an Unauthorised exception if not.
def remove_member(self, member_id): ''' Remove a member from the organisation.Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='DELETE' )
Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not.
def add_member_by_id(self, member_id, membership_type='normal'): ''' Add a member to the board using the id. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members/%s' % member_id, http_method='PUT', query_params={ 'type': membership_type } )
Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not.
def add_member(self, email, fullname, membership_type='normal'): ''' Add a member to the board. Membership type can be normal or admin. Returns JSON of all members if successful or raises an Unauthorised exception if not. ''' return self.fetch_json( uri_path=self.base_uri + '/members', http_method='PUT', query_params={ 'email': email, 'fullName': fullname, 'type': membership_type } )
Get information for this list. Returns a dictionary of values.
def get_list_information(self, query_params=None): ''' Get information for this list. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Create a card for this list. Returns a Card object.
def add_card(self, query_params=None): ''' Create a card for this list. Returns a Card object. ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards', http_method='POST', query_params=query_params or {} ) return self.create_card(card_json)
Get all information for this Label. Returns a dictionary of values.
def get_label_information(self, query_params=None): ''' Get all information for this Label. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get all the items for this label. Returns a list of dictionaries. Each dictionary has the values for an item.
def get_items(self, query_params=None): ''' Get all the items for this label. Returns a list of dictionaries. Each dictionary has the values for an item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', query_params=query_params or {} )
Update the current label's name. Returns a new Label object.
def _update_label_name(self, name): ''' Update the current label's name. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_label(label_json)
Update the current label. Returns a new Label object.
def _update_label_dict(self, query_params={}): ''' Update the current label. Returns a new Label object. ''' label_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params ) return self.create_label(label_json)
Returns a URL that needs to be opened in a browser to retrieve an access token.
def get_authorisation_url(self, application_name, token_expire='1day'): ''' Returns a URL that needs to be opened in a browser to retrieve an access token. ''' query_params = { 'name': application_name, 'expiration': token_expire, 'response_type': 'token', 'scope': 'read,write' } authorisation_url = self.build_uri( path='/authorize', query_params=self.add_authorisation(query_params) ) print('Please go to the following URL and get the user authorisation ' 'token:\n', authorisation_url) return authorisation_url
Get information for this card. Returns a dictionary of values.
def get_card_information(self, query_params=None): ''' Get information for this card. Returns a dictionary of values. ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get board information for this card. Returns a Board object.
def get_board(self, **query_params): ''' Get board information for this card. Returns a Board object. Returns: Board: The board this card is attached to ''' board_json = self.get_board_json(self.base_uri, query_params=query_params) return self.create_board(board_json)
Get list information for this card. Returns a List object.
def get_list(self, **query_params): ''' Get list information for this card. Returns a List object. Returns: List: The list this card is attached to ''' list_json = self.get_list_json(self.base_uri, query_params=query_params) return self.create_list(list_json)
Get the checklists for this card. Returns a list of Checklist objects.
def get_checklists(self, **query_params): ''' Get the checklists for this card. Returns a list of Checklist objects. Returns: list(Checklist): The checklists attached to this card ''' checklists = self.get_checklist_json(self.base_uri, query_params=query_params) checklists_list = [] for checklist_json in checklists: checklists_list.append(self.create_checklist(checklist_json)) return checklists_list
Adds a comment to this card by the current user.
def add_comment(self, comment_text): ''' Adds a comment to this card by the current user. ''' return self.fetch_json( uri_path=self.base_uri + '/actions/comments', http_method='POST', query_params={'text': comment_text} )
Adds an attachment to this card.
def add_attachment(self, filename, open_file): ''' Adds an attachment to this card. ''' fields = { 'api_key': self.client.api_key, 'token': self.client.user_auth_token } content_type, body = self.encode_multipart_formdata( fields=fields, filename=filename, file_values=open_file ) return self.fetch_json( uri_path=self.base_uri + '/attachments', http_method='POST', body=body, headers={'Content-Type': content_type}, )
Add a checklist to this card. Returns a Checklist object.
def add_checklist(self, query_params=None): ''' Add a checklist to this card. Returns a Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri + '/checklists', http_method='POST', query_params=query_params or {} ) return self.create_checklist(checklist_json)
Add a label to this card from a dictionary.
def _add_label_from_dict(self, query_params=None): ''' Add a label to this card, from a dictionary. ''' return self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} )
Add an existing label to this card.
def _add_label_from_class(self, label=None): ''' Add an existing label to this card. ''' return self.fetch_json( uri_path=self.base_uri + '/idLabels', http_method='POST', query_params={'value': label.id} )
Add a member to this card. Returns a list of Member objects.
def add_member(self, member_id): ''' Add a member to this card. Returns a list of Member objects. ''' members = self.fetch_json( uri_path=self.base_uri + '/idMembers', http_method='POST', query_params={'value': member_id} ) members_list = [] for member_json in members: members_list.append(self.create_member(member_json)) return members_list
Encodes data to upload a file to Trello. Fields is a dictionary of api_key and token. Filename is the name of the file, and file_values is the open(file).read() string.
def encode_multipart_formdata(self, fields, filename, file_values): ''' Encodes data to updload a file to Trello. Fields is a dictionary of api_key and token. Filename is the name of the file and file_values is the open(file).read() string. ''' boundary = '----------Trello_Boundary_$' crlf = '\r\n' data = [] for key in fields: data.append('--' + boundary) data.append('Content-Disposition: form-data; name="%s"' % key) data.append('') data.append(fields[key]) data.append('--' + boundary) data.append( 'Content-Disposition: form-data; name="file"; filename="%s"' % filename) data.append('Content-Type: %s' % self.get_content_type(filename)) data.append('') data.append(file_values) data.append('--' + boundary + '--') data.append('') # Try and avoid the damn unicode errors data = [str(segment) for segment in data] body = crlf.join(data) content_type = 'multipart/form-data; boundary=%s' % boundary return content_type, body
Get Information for a member. Returns a dictionary of values.
def get_member_information(self, query_params=None): ''' Get Information for a member. Returns a dictionary of values. Returns: dict ''' return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get all cards this member is attached to. Return a list of Card objects.
def get_cards(self, **query_params): ''' Get all cards this member is attached to. Return a list of Card objects. Returns: list(Card): Return all cards this member is attached to ''' cards = self.get_cards_json(self.base_uri, query_params=query_params) cards_list = [] for card_json in cards: cards_list.append(self.create_card(card_json)) return cards_list
Get all organisations this member is attached to. Return a list of Organisation objects.
def get_organisations(self, **query_params): ''' Get all organisations this member is attached to. Return a list of Organisation objects. Returns: list(Organisation): Return all organisations this member is attached to ''' organisations = self.get_organisations_json(self.base_uri, query_params=query_params) organisations_list = [] for organisation_json in organisations: organisations_list.append( self.create_organisation(organisation_json)) return organisations_list
Create a new board. name is required in query_params. Returns a Board object.
def create_new_board(self, query_params=None): ''' Create a new board. name is required in query_params. Returns a Board object. Returns: Board: Returns the created board ''' board_json = self.fetch_json( uri_path='/boards', http_method='POST', query_params=query_params or {} ) return self.create_board(board_json)
Enable singledispatch for class methods.
def singledispatchmethod(method): ''' Enable singledispatch for class methods. See http://stackoverflow.com/a/24602374/274318 ''' dispatcher = singledispatch(method) def wrapper(*args, **kw): return dispatcher.dispatch(args[1].__class__)(*args, **kw) wrapper.register = dispatcher.register update_wrapper(wrapper, dispatcher) return wrapper
Create a ChecklistItem object from JSON object
def create_checklist_item(self, card_id, checklist_id, checklistitem_json, **kwargs): ''' Create a ChecklistItem object from JSON object ''' return self.client.create_checklist_item(card_id, checklist_id, checklistitem_json, **kwargs)
Get all information for this board. Returns a dictionary of values.
def get_board_information(self, query_params=None): ''' Get all information for this board. Returns a dictionary of values. ''' return self.fetch_json( uri_path='/boards/' + self.id, query_params=query_params or {} )
Get the lists attached to this board. Returns a list of List objects.
def get_lists(self, **query_params): ''' Get the lists attached to this board. Returns a list of List objects. Returns: list(List): The lists attached to this board ''' lists = self.get_lists_json(self.base_uri, query_params=query_params) lists_list = [] for list_json in lists: lists_list.append(self.create_list(list_json)) return lists_list
Get the labels attached to this board. Returns a label of Label objects.
def get_labels(self, **query_params): ''' Get the labels attached to this board. Returns a label of Label objects. Returns: list(Label): The labels attached to this board ''' labels = self.get_labels_json(self.base_uri, query_params=query_params) labels_list = [] for label_json in labels: labels_list.append(self.create_label(label_json)) return labels_list
Get a Card for a given card id. Returns a Card object.
def get_card(self, card_id, **query_params): ''' Get a Card for a given card id. Returns a Card object. Returns: Card: The card with the given card_id ''' card_json = self.fetch_json( uri_path=self.base_uri + '/cards/' + card_id ) return self.create_card(card_json)
Get the checklists for this board. Returns a list of Checklist objects.
def get_checklists( self ): """ Get the checklists for this board. Returns a list of Checklist objects. """ checklists = self.getChecklistsJson( self.base_uri ) checklists_list = [] for checklist_json in checklists: checklists_list.append( self.createChecklist( checklist_json ) ) return checklists_list
Get the Organisation for this board. Returns Organisation object.
def get_organisation(self, **query_params): ''' Get the Organisation for this board. Returns Organisation object. Returns: list(Organisation): The organisation attached to this board ''' organisation_json = self.get_organisations_json( self.base_uri, query_params=query_params) return self.create_organisation(organisation_json)
Update this board s information. Returns a new board.
def update_board(self, query_params=None): ''' Update this board's information. Returns a new board. ''' board_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params=query_params or {} ) return self.create_board(board_json)
Create a list for a board. Returns a new List object.
def add_list(self, query_params=None): ''' Create a list for a board. Returns a new List object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/lists', http_method='POST', query_params=query_params or {} ) return self.create_list(list_json)
Create a label for a board. Returns a new Label object.
def add_label(self, query_params=None): ''' Create a label for a board. Returns a new Label object. ''' list_json = self.fetch_json( uri_path=self.base_uri + '/labels', http_method='POST', query_params=query_params or {} ) return self.create_label(list_json)
Get all information for this Checklist. Returns a dictionary of values.
def get_checklist_information(self, query_params=None): ''' Get all information for this Checklist. Returns a dictionary of values. ''' # We don't use trelloobject.TrelloObject.get_checklist_json, because # that is meant to return lists of checklists. return self.fetch_json( uri_path=self.base_uri, query_params=query_params or {} )
Get card this checklist is on.
def get_card(self): ''' Get card this checklist is on. ''' card_id = self.get_checklist_information().get('idCard', None) if card_id: return self.client.get_card(card_id)
Get the items for this checklist. Returns a list of ChecklistItem objects.
def get_item_objects(self, query_params=None): """ Get the items for this checklist. Returns a list of ChecklistItem objects. """ card = self.get_card() checklistitems_list = [] for checklistitem_json in self.get_items(query_params): checklistitems_list.append(self.create_checklist_item(card.id, self.id, checklistitem_json)) return checklistitems_list
Update the current checklist. Returns a new Checklist object.
def update_checklist(self, name): ''' Update the current checklist. Returns a new Checklist object. ''' checklist_json = self.fetch_json( uri_path=self.base_uri, http_method='PUT', query_params={'name': name} ) return self.create_checklist(checklist_json)
Add an item to this checklist. Returns a dictionary of values of new item.
def add_item(self, query_params=None): ''' Add an item to this checklist. Returns a dictionary of values of new item. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems', http_method='POST', query_params=query_params or {} )
Deletes an item from this checklist.
def remove_item(self, item_id): ''' Deletes an item from this checklist. ''' return self.fetch_json( uri_path=self.base_uri + '/checkItems/' + item_id, http_method='DELETE' )
Rename the current checklist item. Returns a new ChecklistItem object.
def update_name( self, name ): """ Rename the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/name', http_method = 'PUT', query_params = {'value': name} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
Set the state of the current checklist item. Returns a new ChecklistItem object.
def update_state(self, state): """ Set the state of the current checklist item. Returns a new ChecklistItem object. """ checklistitem_json = self.fetch_json( uri_path = self.base_uri + '/state', http_method = 'PUT', query_params = {'value': 'complete' if state else 'incomplete'} ) return self.create_checklist_item(self.idCard, self.idChecklist, checklistitem_json)
Adds the API key and user auth token to the query parameters
def add_authorisation(self, query_params): ''' Adds the API key and user auth token to the query parameters ''' query_params['key'] = self.api_key if self.user_auth_token: query_params['token'] = self.user_auth_token return query_params
Check the HTTP response for known errors.
def check_errors(self, uri, response): ''' Check HTTP reponse for known errors ''' if response.status == 401: raise trolly.Unauthorised(uri, response) if response.status != 200: raise trolly.ResourceUnavailable(uri, response)
Build the URI for the API call.
def build_uri(self, path, query_params): ''' Build the URI for the API call. ''' url = 'https://api.trello.com/1' + self.clean_path(path) url += '?' + urlencode(query_params) return url
Make a call to Trello API and capture JSON response. Raises an error when it fails.
def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None): ''' Make a call to Trello API and capture JSON response. Raises an error when it fails. Returns: dict: Dictionary with the JSON data ''' query_params = query_params or {} headers = headers or {} query_params = self.add_authorisation(query_params) uri = self.build_uri(uri_path, query_params) allowed_methods = ("POST", "PUT", "DELETE") if http_method in allowed_methods and 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' response, content = self.client.request( uri=uri, method=http_method, body=body, headers=headers ) self.check_errors(uri, response) return json.loads(content.decode('utf-8'))
Create an Organisation object from a JSON object
def create_organisation(self, organisation_json): ''' Create an Organisation object from a JSON object Returns: Organisation: The organisation from the given `organisation_json`. ''' return trolly.organisation.Organisation( trello_client=self, organisation_id=organisation_json['id'], name=organisation_json['name'], data=organisation_json, )
Create Board object from a JSON object
def create_board(self, board_json): ''' Create Board object from a JSON object Returns: Board: The board from the given `board_json`. ''' return trolly.board.Board( trello_client=self, board_id=board_json['id'], name=board_json['name'], data=board_json, )
Create Label object from JSON object
def create_label(self, label_json): ''' Create Label object from JSON object Returns: Label: The label from the given `label_json`. ''' return trolly.label.Label( trello_client=self, label_id=label_json['id'], name=label_json['name'], data=label_json, )
Create List object from JSON object
def create_list(self, list_json): ''' Create List object from JSON object Returns: List: The list from the given `list_json`. ''' return trolly.list.List( trello_client=self, list_id=list_json['id'], name=list_json['name'], data=list_json, )
Create a Card object from JSON object
def create_card(self, card_json): ''' Create a Card object from JSON object Returns: Card: The card from the given `card_json`. ''' return trolly.card.Card( trello_client=self, card_id=card_json['id'], name=card_json['name'], data=card_json, )
Create a Checklist object from JSON object
def create_checklist(self, checklist_json): ''' Create a Checklist object from JSON object Returns: Checklist: The checklist from the given `checklist_json`. ''' return trolly.checklist.Checklist( trello_client=self, checklist_id=checklist_json['id'], name=checklist_json['name'], data=checklist_json, )
Create a ChecklistItem object from JSON object
def create_checklist_item(self, card_id, checklist_id, checklistitem_json): """ Create a ChecklistItem object from JSON object """ return trolly.checklist.ChecklistItem( trello_client=self, card_id=card_id, checklist_id=checklist_id, checklistitem_id=checklistitem_json['id'].encode('utf-8'), name=checklistitem_json['name'].encode('utf-8'), state=checklistitem_json['state'].encode('utf-8') )
Create a Member object from JSON object
def create_member(self, member_json): ''' Create a Member object from JSON object Returns: Member: The member from the given `member_json`. ''' return trolly.member.Member( trello_client=self, member_id=member_json['id'], name=member_json['fullName'], data=member_json, )
Get an organisation
def get_organisation(self, id, name=None): ''' Get an organisation Returns: Organisation: The organisation with the given `id` ''' return self.create_organisation(dict(id=id, name=name))
Get a board
def get_board(self, id, name=None): ''' Get a board Returns: Board: The board with the given `id` ''' return self.create_board(dict(id=id, name=name))
Get a list
def get_list(self, id, name=None): ''' Get a list Returns: List: The list with the given `id` ''' return self.create_list(dict(id=id, name=name))
Get a card
def get_card(self, id, name=None): ''' Get a card Returns: Card: The card with the given `id` ''' return self.create_card(dict(id=id, name=name))
Get a checklist
def get_checklist(self, id, name=None): ''' Get a checklist Returns: Checklist: The checklist with the given `id` ''' return self.create_checklist(dict(id=id, name=name))
Get a member or your current member if id wasn t given.
def get_member(self, id='me', name=None): ''' Get a member or your current member if `id` wasn't given. Returns: Member: The member with the given `id`, defaults to the logged in member. ''' return self.create_member(dict(id=id, fullName=name))
Get the root domain from a url. Will prune away query strings, url paths, protocol prefixes and sub-domains. Exceptions will be raised on invalid urls.
def domain_from_url(url): """ Get root domain from url. Will prune away query strings, url paths, protocol prefix and sub-domains Exceptions will be raised on invalid urls """ ext = tldextract.extract(url) if not ext.suffix: raise InvalidURLException() new_url = ext.domain + "." + ext.suffix return new_url
A generator to convert raw text segments without xml to a list of words without any markup. Additionally dates are replaced by 7777 for normalization.
def to_raw_text_markupless(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, without xml to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ return sent_tokenize( remove_dates(_remove_urls(text)), keep_whitespace, normalize_ascii )
A generator to convert raw text segments with xml and other non - textual content to a list of words without any markup. Additionally dates are replaced by 7777 for normalization.
def to_raw_text(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_urls(text) out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = _remove_brackets(out) out = remove_remaining_double_brackets(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_dates(out) out = remove_math_sections(out) out = remove_html(out) out = sent_tokenize(out, keep_whitespace, normalize_ascii) return out
A generator to convert raw text segments with xml and other non - textual content to a list of words without any markup. Additionally dates are replaced by 7777 for normalization along with wikipedia anchors kept.
def to_raw_text_pairings(text, keep_whitespace=False, normalize_ascii=True): """ A generator to convert raw text segments, with xml, and other non-textual content to a list of words without any markup. Additionally dates are replaced by `7777` for normalization, along with wikipedia anchors kept. Arguments --------- text: str, input text to tokenize, strip of markup. keep_whitespace : bool, should the output retain the whitespace of the input (so that char offsets in the output correspond to those in the input). Returns ------- generator<list<list<str>>>, a generator for sentences, with within each sentence a list of the words separated. """ out = text out = _remove_mvar(out) out = _remove_squiggly_bracket(out) out = _remove_table(out) out = remove_markup(out) out = remove_wikipedia_link.sub(anchor_replacer, out) out = remove_bullets_nbsps.sub(empty_space, out) out = remove_math_sections(out) out = remove_html(out) for sentence in sent_tokenize(out, keep_whitespace, normalize_ascii): yield sentence
Subdivide an input list of strings ( tokens ) into multiple lists according to detected sentence boundaries.
def detect_sentence_boundaries(tokens):
    """
    Subdivide an input list of strings (tokens) into multiple lists
    according to detected sentence boundaries.

    ```
    detect_sentence_boundaries(
        ["Cat ", "sat ", "mat", ". ", "Cat ", "'s ", "named ", "Cool", "."]
    )
    #=> [
        ["Cat ", "sat ", "mat", ". "],
        ["Cat ", "'s ", "named ", "Cool", "."]
    ]
    ```

    Arguments:
    ----------
        tokens : list<str>

    Returns:
    --------
        list<list<str>> : original list subdivided into multiple lists
        according to (detected) sentence boundaries.
    """
    # Collapse quoted/parenthetical spans: each element of `tokenized`
    # is either a plain str token or a list of tokens forming one
    # quoted group (see group_quoted_tokens).
    tokenized = group_quoted_tokens(tokens)
    words = []      # tokens accumulated for the current sentence
    sentences = []  # completed sentences
    for i in range(len(tokenized)):
        # this is a parenthetical:
        end_sentence = False
        if isinstance(tokenized[i], list):
            if len(words) == 0:
                # end if a sentence finishes inside quoted section,
                # and no sentence was begun beforehand.
                # NOTE(review): [-2] assumes the quoted group contains at
                # least two tokens (content + closing mark) — confirm
                # group_quoted_tokens guarantees this.
                if is_end_symbol(tokenized[i][-2].rstrip()):
                    end_sentence = True
            else:
                # end if a sentence finishes inside quote marks:
                # opening token is a double quote, the second-to-last
                # token is an end symbol, and the token right after the
                # quote is not capitalized.
                if (tokenized[i][0][0] == '"' and
                    is_end_symbol(tokenized[i][-2].rstrip()) and
                    not tokenized[i][1][0].isupper()):
                    end_sentence = True
            # quoted group always stays inside the current sentence
            words.extend(tokenized[i])
        else:
            stripped_tokenized = tokenized[i].rstrip()
            if is_end_symbol(stripped_tokenized):
                words.append(tokenized[i])
                not_last_word = i + 1 != len(tokenized)
                # NOTE(review): tokenized[i+1] may itself be a list
                # (quoted group), in which case [0] is its first token
                # rather than a character — verify this is intended.
                next_word_lowercase = (
                    not_last_word and
                    tokenized[i+1][0].islower()
                )
                next_word_continue_punct = (
                    not_last_word and
                    tokenized[i+1][0] in CONTINUE_PUNCT_SYMBOLS
                )
                # keep the sentence going only when the next token
                # suggests a continuation (lowercase word or
                # continuation punctuation).
                end_sentence = not (
                    not_last_word and
                    (
                        next_word_lowercase or
                        next_word_continue_punct
                    )
                )
            else:
                words.append(tokenized[i])
        if end_sentence:
            sentences.append(words)
            words = []
    # add final sentence, if it wasn't added yet.
    if len(words) > 0:
        sentences.append(words)
    # If the final word ends in a period: split the trailing period
    # off into its own token (e.g. "Cool." -> "Cool", ".").
    if len(sentences) > 0 and sentences[-1][-1]:
        alpha_word_piece = word_with_alpha_and_period.match(sentences[-1][-1])
        if alpha_word_piece:
            sentences[-1][-1] = alpha_word_piece.group(1)
            sentences[-1].append(alpha_word_piece.group(2))
    return sentences
Perform sentence and word tokenization on the input text using regular expressions and English/French-specific rules.
def sent_tokenize(text, keep_whitespace=False, normalize_ascii=True):
    """
    Perform sentence + word tokenization on the input text using
    regular expressions and english/french specific rules.

    Arguments:
    ----------
        text : str, input string to tokenize
        keep_whitespace : bool, whether to strip out spaces
            and newlines.
        normalize_ascii : bool, perform some replacements
            on rare characters so that they become
            easier to process in a ascii pipeline
            (canonicalize dashes, replace œ -> oe, etc..)

    Returns:
    --------
        list<list<str>> : sentences with their content held
            in a list of strings for each token.
    """
    # word-level tokenization first, then sentence segmentation
    tokens = tokenize(text, normalize_ascii)
    segmented = detect_sentence_boundaries(tokens)
    if keep_whitespace:
        return segmented
    return remove_whitespace(segmented)
JavaScript templates (jQuery, Handlebars.js, Mustache.js) use constructs like:
def verbatim_tags(parser, token, endtagname):
    """
    Javascript templates (jquery, handlebars.js, mustache.js) use constructs
    like:

    ::

        {{if condition}} print something{{/if}}

    This, of course, completely screws up Django templates,
    because Django thinks {{ and }} means something.

    The following code preserves {{ }} tokens.

    This version of verbatim template tag allows you to use tags
    like url {% url name %}. {% trans "foo" %} or {% csrf_token %} within.
    """
    text_and_nodes = []
    # Consume tokens from the parser until the matching end tag.
    # NOTE(review): if `endtagname` never appears, pop(0) eventually
    # raises IndexError on an empty token list — confirm upstream
    # guarantees a closing tag.
    while 1:
        token = parser.tokens.pop(0)
        if token.contents == endtagname:
            break
        if token.token_type == template.base.TOKEN_VAR:
            # re-emit the literal braces Django stripped off
            text_and_nodes.append('{{')
            text_and_nodes.append(token.contents)
        elif token.token_type == template.base.TOKEN_TEXT:
            text_and_nodes.append(token.contents)
        elif token.token_type == template.base.TOKEN_BLOCK:
            # compile real Django block tags ({% url %}, {% trans %}, ...)
            # so they still render inside the verbatim section
            try:
                command = token.contents.split()[0]
            except IndexError:
                parser.empty_block_tag(token)
            try:
                compile_func = parser.tags[command]
            except KeyError:
                parser.invalid_block_tag(token, command, None)
            try:
                node = compile_func(parser, token)
            except template.TemplateSyntaxError as e:
                # NOTE(review): if compile_function_error returns truthy,
                # `node` may be unbound here — verify this path cannot
                # reach the append below in practice.
                if not parser.compile_function_error(token, e):
                    raise
            text_and_nodes.append(node)
        # close the braces opened for a variable token above
        if token.token_type == template.base.TOKEN_VAR:
            text_and_nodes.append('}}')
    return text_and_nodes
Write the password to the file.
def set_password(self, service, username, password):
    """Write the password in the file.
    """
    assoc = self._generate_assoc(service, username)
    # encrypt the password before persisting it
    ciphertext = self.encrypt(password.encode('utf-8'), assoc)
    # base64-encode; the leading line break untangles the config file
    encoded = '\n' + encodebytes(ciphertext).decode()
    self._write_config_value(service, username, encoded)
Annotate locations in a string that contain periods as being true periods or periods that are part of shorthand (and thus should not be treated as punctuation marks).
def protect_shorthand(text, split_locations):
    """
    Annotate locations in a string that contain
    periods as being true periods or periods
    that are a part of shorthand (and thus should
    not be treated as punctuation marks).

    Mutates `split_locations` in place: each period position is set
    to SHOULD_SPLIT (real sentence punctuation) or SHOULD_NOT_SPLIT
    (part of an abbreviation/date).

    Arguments:
    ----------
        text : str
        split_locations : list<int>, same length as text.
    """
    word_matches = list(re.finditer(word_with_period, text))
    total_words = len(word_matches)
    for i, match in enumerate(word_matches):
        match_start = match.start()
        match_end = match.end()
        # advance match_start past any position already marked
        # SHOULD_SPLIT, so `word` only covers the unsplit tail
        for char_pos in range(match_start, match_end):
            if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1:
                match_start = char_pos
        word = text[match_start:match_end]
        if not word.endswith('.'):
            # ensure that words contained within other words:
            # e.g. 'chocolate.Mountains of' -> 'chocolate. Mountains of'
            if (not word[0].isdigit() and
                    split_locations[match_start] == UNDECIDED):
                split_locations[match_start] = SHOULD_SPLIT
            continue
        period_pos = match_end - 1
        # this is not the last word, abbreviation
        # is not the final period of the sentence,
        # moreover:
        word_is_in_abbr = word[:-1].lower() in ABBR
        # abbreviation-like: a known abbreviation, a single letter,
        # or a repeating-letter pattern (e.g. initials)
        is_abbr_like = (
            word_is_in_abbr or
            one_letter_long_or_repeating.match(word[:-1]) is not None
        )
        is_digit = False if is_abbr_like else word[:-1].isdigit()
        is_last_word = i == (total_words - 1)
        # `is_ending`: this word closes the text (only whitespace after)
        is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace())
        is_not_ending = not is_ending
        abbreviation_and_not_end = (
            len(word) > 1 and
            is_abbr_like and
            is_not_ending
        )
        if abbreviation_and_not_end and (
                (not is_last_word and word_matches[i+1].group(0)[0].islower()) or
                (not is_last_word and word_matches[i+1].group(0) in PUNCT_SYMBOLS) or
                word[0].isupper() or
                word_is_in_abbr or
                len(word) == 2):
            # next word is lowercase (e.g. not a new sentence?), or next word
            # is punctuation or next word is totally uppercase (e.g. 'Mister.
            # ABAGNALE called to the stand')
            # -> treat this period as part of the shorthand, and move any
            #    pre-existing split decision one position to the right
            if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
                split_locations[period_pos + 1] = SHOULD_SPLIT
            split_locations[period_pos] = SHOULD_NOT_SPLIT
        elif (is_digit and
              len(word[:-1]) <= 2 and
              not is_last_word and
              word_matches[i+1].group(0).lower() in MONTHS):
            # a date or weird number with a period:
            # (short number followed by a month name, e.g. '24. December')
            if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations):
                split_locations[period_pos + 1] = SHOULD_SPLIT
            split_locations[period_pos] = SHOULD_NOT_SPLIT
        elif split_locations[period_pos] == UNDECIDED:
            # split this period into its own segment:
            split_locations[period_pos] = SHOULD_SPLIT
Use an integer list to split the string contained in `text`.
def split_with_locations(text, locations):
    """
    Use an integer list to split the string
    contained in `text`.

    Arguments:
    ----------
        text : str, same length as locations.
        locations : list<int>, contains values
            'SHOULD_SPLIT', 'UNDECIDED', and
            'SHOULD_NOT_SPLIT'. Will create
            strings between each 'SHOULD_SPLIT'
            locations.

    Returns:
    --------
        Generator<str> : the substrings of text
            corresponding to the slices given
            in locations.
    """
    segment_start = 0
    for index, decision in enumerate(locations):
        if decision != SHOULD_SPLIT:
            continue
        # emit the segment accumulated so far (skip empty segments)
        if segment_start != index:
            yield text[segment_start:index]
        segment_start = index
    # trailing segment after the last split point
    if segment_start != len(text):
        yield text[segment_start:]
Adds a `SHOULD_SPLIT` marker at the end location of each match of the given regex.
def mark_regex(regex, text, split_locations):
    """
    Regex that adds a 'SHOULD_SPLIT' marker at the end
    location of each matching group of the given regex.

    Arguments
    ---------
        regex : re.Expression
        text : str, same length as split_locations
        split_locations : list<int>, split decisions.
    """
    limit = len(split_locations)
    for found in regex.finditer(text):
        stop = found.end()
        # guard against a match ending exactly at the text boundary
        if stop < limit:
            split_locations[stop] = SHOULD_SPLIT
Adds a `SHOULD_SPLIT` marker at the end location and at the beginning of each match of the given regex. Each character within the match is marked as `SHOULD_NOT_SPLIT`.
def mark_begin_end_regex(regex, text, split_locations):
    """
    Regex that adds a 'SHOULD_SPLIT' marker at the end
    location of each matching group of the given regex,
    and adds a 'SHOULD_SPLIT' at the beginning of the
    matching group. Each character within the matching
    group will be marked as 'SHOULD_NOT_SPLIT'.

    Arguments
    ---------
        regex : re.Expression
        text : str, same length as split_locations
        split_locations : list<int>, split decisions.
    """
    limit = len(split_locations)
    for found in regex.finditer(text):
        start = found.start()
        stop = found.end()
        # interior of the match is glued together
        for inner in range(start + 1, stop):
            split_locations[inner] = SHOULD_NOT_SPLIT
        # split right after the match, unless already decided
        # (or the match ends at the text boundary)
        if stop < limit:
            if split_locations[stop] == UNDECIDED:
                split_locations[stop] = SHOULD_SPLIT
        # split right before the match, unless already decided
        if split_locations[start] == UNDECIDED:
            split_locations[start] = SHOULD_SPLIT