def html_annotate_merge_annotations(tokens_old, tokens_new):
    """Merge the annotations from tokens_old into tokens_new, when the
    tokens in the new document already existed in the old document.
    """
    s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
    commands = s.get_opcodes()
    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            eq_old = tokens_old[i1:i2]
            eq_new = tokens_new[j1:j2]
            copy_annotations(eq_old, eq_new)

def copy_annotations(src, dest):
    """Copy annotations from the tokens listed in src to the tokens in dest."""
    assert len(src) == len(dest)
    for src_tok, dest_tok in zip(src, dest):
        dest_tok.annotation = src_tok.annotation

def compress_tokens(tokens):
    """Combine adjacent tokens when there is no HTML between the tokens,
    and they share an annotation.
    """
    result = [tokens[0]]
    for tok in tokens[1:]:
        if (not result[-1].post_tags
                and not tok.pre_tags
                and result[-1].annotation == tok.annotation):
            compress_merge_back(result, tok)
        else:
            result.append(tok)
    return result

def compress_merge_back(tokens, tok):
    """Merge tok into the last element of tokens (modifying the list of
    tokens in-place).
    """
    last = tokens[-1]
    if type(last) is not token or type(tok) is not token:
        tokens.append(tok)
    else:
        text = _unicode(last)
        if last.trailing_whitespace:
            text += last.trailing_whitespace
        text += tok
        merged = token(text,
                       pre_tags=last.pre_tags,
                       post_tags=tok.post_tags,
                       trailing_whitespace=tok.trailing_whitespace)
        merged.annotation = last.annotation
        tokens[-1] = merged

def markup_serialize_tokens(tokens, markup_func):
    """Serialize the list of tokens into a list of text chunks, calling
    markup_func around text to add annotations.
    """
    for token in tokens:
        for pre in token.pre_tags:
            yield pre
        html = token.html()
        html = markup_func(html, token.annotation)
        if token.trailing_whitespace:
            html += token.trailing_whitespace
        yield html
        for post in token.post_tags:
            yield post

def htmldiff(old_html, new_html):
    ## FIXME: this should take parsed documents too, and use their body
    ## or other content.
    """Do a diff of the old and new document.  The documents are HTML
    *fragments* (str/UTF8 or unicode), they are not complete documents
    (i.e., no <html> tag).

    Returns HTML with <ins> and <del> tags added around the
    appropriate text.

    Markup is generally ignored, with the markup from new_html
    preserved, and possibly some markup from old_html (though it is
    considered acceptable to lose some of the old markup).  Only the
    words in the HTML are diffed.  The exception is <img> tags, which
    are treated like words, and the href attribute of <a> tags, which
    are noted inside the tag itself when there are changes.
    """
    old_html_tokens = tokenize(old_html)
    new_html_tokens = tokenize(new_html)
    result = htmldiff_tokens(old_html_tokens, new_html_tokens)
    result = ''.join(result).strip()
    return fixup_ins_del_tags(result)

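# Usage sketch for htmldiff (not part of the original module; the exact
# output markup is approximate and can vary between lxml versions):
def _demo_htmldiff():
    old = '<p>The quick brown fox</p>'
    new = '<p>The quick red fox</p>'
    # Words are diffed; changed words get wrapped in <ins>/<del>,
    # keeping the markup of the new fragment.
    print(htmldiff(old, new))
    # e.g. '<p>The quick <ins>red</ins> <del>brown</del> fox</p>'
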
def htmldiff_tokens(html1_tokens, html2_tokens):
    """Does a diff on the tokens themselves, returning a list of text
    chunks (not tokens).
    """
    # There are several passes as we do the differences.  The tokens
    # isolate the portion of the content we care to diff; difflib does
    # all the actual hard work at that point.
    #
    # Then we must create a valid document from pieces of both the old
    # document and the new document.  We generally prefer to take
    # markup from the new document, and only do a best effort attempt
    # to keep markup from the old document; anything that we can't
    # resolve we throw away.  Also we try to put the deletes as close
    # to the location where we think they would have been -- because
    # we are only keeping the markup from the new document, it can be
    # fuzzy where in the new document the old text would have gone.
    # Again we just do a best effort attempt.
    s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
    commands = s.get_opcodes()
    result = []
    for command, i1, i2, j1, j2 in commands:
        if command == 'equal':
            result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
            continue
        if command == 'insert' or command == 'replace':
            ins_tokens = expand_tokens(html2_tokens[j1:j2])
            merge_insert(ins_tokens, result)
        if command == 'delete' or command == 'replace':
            del_tokens = expand_tokens(html1_tokens[i1:i2])
            merge_delete(del_tokens, result)
    # If deletes were inserted directly as <del> then we'd have an
    # invalid document at this point.  Instead we put in special
    # markers, and when the complete diffed document has been created
    # we try to move the deletes around and resolve any problems.
    result = cleanup_delete(result)
    return result

def expand_tokens(tokens, equal=False):
    """Given a list of tokens, return a generator of the chunks of
    text for the data in the tokens.
    """
    for token in tokens:
        for pre in token.pre_tags:
            yield pre
        if not equal or not token.hide_when_equal:
            if token.trailing_whitespace:
                yield token.html() + token.trailing_whitespace
            else:
                yield token.html()
        for post in token.post_tags:
            yield post

def merge_insert(ins_chunks, doc):
    """doc is the already-handled document (as a list of text chunks);
    here we add <ins>ins_chunks</ins> to the end of that.
    """
    # Though we don't throw away unbalanced_start or unbalanced_end
    # (we assume there is accompanying markup later or earlier in the
    # document), we only put <ins> around the balanced portion.
    unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
    doc.extend(unbalanced_start)
    if doc and not doc[-1].endswith(' '):
        # Fix up the case where the word before the insert didn't end with
        # a space.
        doc[-1] += ' '
    doc.append('<ins>')
    if balanced and balanced[-1].endswith(' '):
        # We move the space outside of </ins>.
        balanced[-1] = balanced[-1][:-1]
    doc.extend(balanced)
    doc.append('</ins> ')
    doc.extend(unbalanced_end)

def merge_delete(del_chunks, doc):
    """Adds the text chunks in del_chunks to the document doc (another
    list of text chunks) with marker to show it is a delete.
    cleanup_delete later resolves these markers into <del> tags.
    """
    doc.append(DEL_START)
    doc.extend(del_chunks)
    doc.append(DEL_END)

def cleanup_delete(chunks):
    """Cleans up any DEL_START/DEL_END markers in the document, replacing
    them with <del></del>.  To do this while keeping the document
    valid, it may need to drop some tags (either start or end tags).

    It may also move the del into adjacent tags to try to move it to a
    similar location where it was originally located (e.g., moving a
    delete into a preceding <div> tag, if the del looks like
    (DEL_START, 'Text</div>', DEL_END)).
    """
    while 1:
        # Find a pending DEL_START/DEL_END, splitting the document
        # into stuff-preceding-DEL_START, stuff-inside, and
        # stuff-following-DEL_END.
        try:
            pre_delete, delete, post_delete = split_delete(chunks)
        except NoDeletes:
            # Nothing found, we've cleaned up the entire doc.
            break
        # The stuff-inside-DEL_START/END may not be well balanced
        # markup.  First we figure out what unbalanced portions there are:
        unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
        # Then we move the span forward and/or backward based on these
        # unbalanced portions:
        locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
        locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
        doc = pre_delete
        if doc and not doc[-1].endswith(' '):
            # Fix up case where the word before us didn't have a trailing space.
            doc[-1] += ' '
        doc.append('<del>')
        if balanced and balanced[-1].endswith(' '):
            # We move the space outside of </del>.
            balanced[-1] = balanced[-1][:-1]
        doc.extend(balanced)
        doc.append('</del> ')
        doc.extend(post_delete)
        chunks = doc
    return chunks

def split_unbalanced(chunks):
    """Return (unbalanced_start, balanced, unbalanced_end), where each is
    a list of text and tag chunks.

    unbalanced_start is a list of all the tags that are opened, but
    not closed in this span.  Similarly, unbalanced_end is a list of
    tags that are closed but were not opened.  Extracting these might
    mean some reordering of the chunks.
    """
    start = []
    end = []
    tag_stack = []
    balanced = []
    for chunk in chunks:
        if not chunk.startswith('<'):
            balanced.append(chunk)
            continue
        endtag = chunk[1] == '/'
        name = chunk.split()[0].strip('<>/')
        if name in empty_tags:
            balanced.append(chunk)
            continue
        if endtag:
            if tag_stack and tag_stack[-1][0] == name:
                balanced.append(chunk)
                name, pos, tag = tag_stack.pop()
                balanced[pos] = tag
            elif tag_stack:
                start.extend([tag for name, pos, tag in tag_stack])
                tag_stack = []
                end.append(chunk)
            else:
                end.append(chunk)
        else:
            tag_stack.append((name, len(balanced), chunk))
            balanced.append(None)
    start.extend([chunk for name, pos, chunk in tag_stack])
    balanced = [chunk for chunk in balanced if chunk is not None]
    return start, balanced, end

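# A small illustration of split_unbalanced (not part of the original
# module); the chunk strings mirror the tokenizer's output format:
def _demo_split_unbalanced():
    chunks = ['<b>', 'Hello ', '</b>', '</div>', '<p>', 'world']
    start, balanced, end = split_unbalanced(chunks)
    # <b>...</b> pairs up; </div> was closed but never opened in this
    # span, and <p> was opened but never closed.
    assert start == ['<p>']
    assert balanced == ['<b>', 'Hello ', '</b>', 'world']
    assert end == ['</div>']
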
def split_delete(chunks):
    """Returns (stuff_before_DEL_START, stuff_inside_DEL_START_END,
    stuff_after_DEL_END).  Returns the first case found (there may be
    more DEL_STARTs in stuff_after_DEL_END).  Raises NoDeletes if
    there's no DEL_START found.
    """
    try:
        pos = chunks.index(DEL_START)
    except ValueError:
        raise NoDeletes
    pos2 = chunks.index(DEL_END)
    return chunks[:pos], chunks[pos + 1:pos2], chunks[pos2 + 1:]

def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
    """pre_delete and post_delete implicitly point to a place in the
    document (where the two were split).  This moves that point (by
    popping items from one and pushing them onto the other).  It moves
    the point to try to find a place where unbalanced_start applies.

    As an example::

        >>> unbalanced_start = ['<div>']
        >>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
        >>> pre, post = doc[:3], doc[3:]
        >>> pre, post
        (['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
        >>> locate_unbalanced_start(unbalanced_start, pre, post)
        >>> pre, post
        (['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])

    As you can see, we moved the point so that the dangling <div> that
    we found will be effectively replaced by the div in the original
    document.  If this doesn't work out, we just throw away
    unbalanced_start without doing anything.
    """
    while 1:
        if not unbalanced_start:
            # We have totally succeeded in finding the position.
            break
        finding = unbalanced_start[0]
        finding_name = finding.split()[0].strip('<>')
        if not post_delete:
            break
        next = post_delete[0]
        if next is DEL_START or not next.startswith('<'):
            # Reached a word; we can't move the delete text forward.
            break
        if next[1] == '/':
            # Reached a closing tag; can we go further?  Maybe not...
            break
        name = next.split()[0].strip('<>')
        if name == 'ins':
            # Can't move into an insert.
            break
        assert name != 'del', (
            "Unexpected delete tag: %r" % next)
        if name == finding_name:
            unbalanced_start.pop(0)
            pre_delete.append(post_delete.pop(0))
        else:
            # Found a tag that doesn't match.
            break

def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
    """Like locate_unbalanced_start, except handling end tags and
    possibly moving the point earlier in the document.
    """
    while 1:
        if not unbalanced_end:
            # Success
            break
        finding = unbalanced_end[-1]
        finding_name = finding.split()[0].strip('<>/')
        if not pre_delete:
            break
        next = pre_delete[-1]
        if next is DEL_END or not next.startswith('</'):
            # A word or a start tag
            break
        name = next.split()[0].strip('<>/')
        if name == 'ins' or name == 'del':
            # Can't move into an insert or delete
            break
        if name == finding_name:
            unbalanced_end.pop()
            post_delete.insert(0, pre_delete.pop())
        else:
            # Found a tag that doesn't match
            break

def tokenize(html, include_hrefs=True):
    """Parse the given HTML and return token objects (words with
    attached tags).

    This parses only the content of a page; anything in the head is
    ignored, and the <head> and <body> elements are themselves
    optional.  The content is then parsed by lxml, which ensures the
    validity of the resulting parsed document (though lxml may make
    incorrect guesses when the markup is particularly bad).

    <ins> and <del> tags are also eliminated from the document, as
    that gets confusing.

    If include_hrefs is true, then the href attribute of <a> tags is
    included as a special kind of diffable token.
    """
    if etree.iselement(html):
        body_el = html
    else:
        body_el = parse_html(html, cleanup=True)
    # Then we split the document into text chunks for each tag, word, and end tag:
    chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
    # Finally re-join them into token objects:
    return fixup_chunks(chunks)

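# A sketch of what tokenize produces (not part of the original module;
# attribute details may differ slightly by version):
def _demo_tokenize():
    tokens = tokenize('<p>Hello <b>world</b></p>')
    # Tokens compare like their plain text, so difflib can treat them
    # as words; pre_tags/post_tags carry the surrounding markup.
    print([str(t) for t in tokens])   # ['Hello', 'world']
    print(tokens[1].pre_tags)         # ['<b>']
    print(tokens[1].post_tags)        # ['</b>', '</p>']
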
def parse_html(html, cleanup=True):
    """Parses an HTML fragment, returning an lxml element.  Note that
    the HTML will be wrapped in a <div> tag that was not in the
    original document.

    If cleanup is true, make sure there's no <head> or <body>, and get
    rid of any <ins> and <del> tags.
    """
    if cleanup:
        # This removes any extra markup or structure like <head>:
        html = cleanup_html(html)
    return fragment_fromstring(html, create_parent=True)

def cleanup_html(html):
    """This 'cleans' the HTML, meaning that any page structure is removed
    (only the contents of <body> are used, if there is any <body>).
    Also <ins> and <del> tags are removed.
    """
    match = _body_re.search(html)
    if match:
        html = html[match.end():]
    match = _end_body_re.search(html)
    if match:
        html = html[:match.start()]
    html = _ins_del_re.sub('', html)
    return html

def split_trailing_whitespace(word):
    """This function takes a word, such as 'test\n\n' and returns
    ('test', '\n\n').
    """
    stripped_length = len(word.rstrip())
    return word[0:stripped_length], word[stripped_length:]

def fixup_chunks(chunks):
    """This function takes a list of chunks and produces a list of tokens."""
    tag_accum = []
    cur_word = None
    result = []
    for chunk in chunks:
        if isinstance(chunk, tuple):
            if chunk[0] == 'img':
                src = chunk[1]
                tag, trailing_whitespace = split_trailing_whitespace(chunk[2])
                cur_word = tag_token('img', src, html_repr=tag,
                                     pre_tags=tag_accum,
                                     trailing_whitespace=trailing_whitespace)
                tag_accum = []
                result.append(cur_word)
            elif chunk[0] == 'href':
                href = chunk[1]
                cur_word = href_token(href, pre_tags=tag_accum,
                                      trailing_whitespace=" ")
                tag_accum = []
                result.append(cur_word)
            continue

        if is_word(chunk):
            chunk, trailing_whitespace = split_trailing_whitespace(chunk)
            cur_word = token(chunk, pre_tags=tag_accum,
                             trailing_whitespace=trailing_whitespace)
            tag_accum = []
            result.append(cur_word)
        elif is_start_tag(chunk):
            tag_accum.append(chunk)
        elif is_end_tag(chunk):
            if tag_accum:
                tag_accum.append(chunk)
            else:
                assert cur_word, (
                    "Weird state, cur_word=%r, result=%r, chunks=%r of %r"
                    % (cur_word, result, chunk, chunks))
                cur_word.post_tags.append(chunk)
        else:
            assert 0

    if not result:
        return [token('', pre_tags=tag_accum)]
    else:
        result[-1].post_tags.extend(tag_accum)
    return result

def flatten_el(el, include_hrefs, skip_tag=False):
    """Takes an lxml element el, and generates all the text chunks for
    that tag.  Each start tag is a chunk, each word is a chunk, and each
    end tag is a chunk.

    If skip_tag is true, then the outermost container tag is not
    returned (just its contents).
    """
    if not skip_tag:
        if el.tag == 'img':
            yield ('img', el.get('src'), start_tag(el))
        else:
            yield start_tag(el)
    if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
        return
    start_words = split_words(el.text)
    for word in start_words:
        yield html_escape(word)
    for child in el:
        for item in flatten_el(child, include_hrefs=include_hrefs):
            yield item
    if el.tag == 'a' and el.get('href') and include_hrefs:
        yield ('href', el.get('href'))
    if not skip_tag:
        yield end_tag(el)
        end_words = split_words(el.tail)
        for word in end_words:
            yield html_escape(word)

def split_words(text):
    """Splits some text into words.  Includes trailing whitespace
    on each word when appropriate.
    """
    if not text or not text.strip():
        return []
    words = split_words_re.findall(text)
    return words

def start_tag(el):
    """The text representation of the start tag for a tag."""
    return '<%s%s>' % (
        el.tag, ''.join([' %s="%s"' % (name, html_escape(value, True))
                         for name, value in el.attrib.items()]))

def end_tag(el):
    """The text representation of an end tag for a tag.  Includes
    trailing whitespace when appropriate.
    """
    if el.tail and start_whitespace_re.search(el.tail):
        extra = ' '
    else:
        extra = ''
    return '</%s>%s' % (el.tag, extra)

def fixup_ins_del_tags(html):
    """Given an html string, move any <ins> or <del> tags inside of any
    block-level elements, e.g. transform <ins><p>word</p></ins> to
    <p><ins>word</ins></p>.
    """
    doc = parse_html(html, cleanup=False)
    _fixup_ins_del_tags(doc)
    html = serialize_html_fragment(doc, skip_outer=True)
    return html

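# The transform described in the docstring, as a one-line sketch (not
# part of the original module):
def _demo_fixup_ins_del_tags():
    # <ins> wrapping a block-level <p> is pushed inside the block so
    # the output stays valid HTML.
    print(fixup_ins_del_tags('<ins><p>word</p></ins>'))
    # -> '<p><ins>word</ins></p>'
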
def serialize_html_fragment(el, skip_outer=False):
    """Serialize a single lxml element as HTML.  The serialized form
    includes the element's tail.

    If skip_outer is true, then don't serialize the outermost tag.
    """
    assert not isinstance(el, basestring), (
        "You should pass in an element, not a string like %r" % el)
    html = etree.tostring(el, method="html", encoding=_unicode)
    if skip_outer:
        # Get rid of the extra starting tag:
        html = html[html.find('>') + 1:]
        # Get rid of the extra end tag:
        html = html[:html.rfind('<')]
        return html.strip()
    else:
        return html

def _fixup_ins_del_tags(doc):
    """fixup_ins_del_tags that works on an lxml document in-place."""
    for tag in ['ins', 'del']:
        for el in doc.xpath('descendant-or-self::%s' % tag):
            if not _contains_block_level_tag(el):
                continue
            _move_el_inside_block(el, tag=tag)
            el.drop_tag()

def _contains_block_level_tag(el):
    """True if the element contains any block-level elements, like <p>,
    <td>, etc.
    """
    if el.tag in block_level_tags or el.tag in block_level_container_tags:
        return True
    for child in el:
        if _contains_block_level_tag(child):
            return True
    return False

def _move_el_inside_block(el, tag):
    """Helper for _fixup_ins_del_tags; actually takes the <ins> etc.
    tags and moves them inside any block-level tags.
    """
    for child in el:
        if _contains_block_level_tag(child):
            break
    else:
        # No block-level tags in any child
        children_tag = etree.Element(tag)
        children_tag.text = el.text
        el.text = None
        children_tag.extend(list(el))
        el[:] = [children_tag]
        return
    for child in list(el):
        if _contains_block_level_tag(child):
            _move_el_inside_block(child, tag)
            if child.tail:
                tail_tag = etree.Element(tag)
                tail_tag.text = child.tail
                child.tail = None
                el.insert(el.index(child) + 1, tail_tag)
        else:
            child_tag = etree.Element(tag)
            el.replace(child, child_tag)
            child_tag.append(child)
    if el.text:
        text_tag = etree.Element(tag)
        text_tag.text = el.text
        el.text = None
        el.insert(0, text_tag)

def _merge_element_contents(el):
    """Removes an element, but merges its contents into its place, e.g.,
    given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
    <p>Hi there!</p>.
    """
    parent = el.getparent()
    text = el.text or ''
    if el.tail:
        if not len(el):
            text += el.tail
        else:
            if el[-1].tail:
                el[-1].tail += el.tail
            else:
                el[-1].tail = el.tail
    index = parent.index(el)
    if text:
        if index == 0:
            previous = None
        else:
            previous = parent[index - 1]
        if previous is None:
            if parent.text:
                parent.text += text
            else:
                parent.text = text
        else:
            if previous.tail:
                previous.tail += text
            else:
                previous.tail = text
    parent[index:index + 1] = el.getchildren()

def _iter_code(code):
    """Yield '(op, arg)' pair for each operation in code object 'code'."""
    # NOTE: this walks the pre-Python-3.6 bytecode layout
    # (1-byte opcode followed by a 2-byte argument).
    from array import array
    from dis import HAVE_ARGUMENT, EXTENDED_ARG

    bytes = array('b', code.co_code)
    eof = len(code.co_code)
    ptr = 0
    extended_arg = 0

    while ptr < eof:
        op = bytes[ptr]
        if op >= HAVE_ARGUMENT:
            arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
            ptr += 3
            if op == EXTENDED_ARG:
                extended_arg = arg * compat.long_type(65536)
                continue
        else:
            arg = None
            ptr += 1
        yield op, arg

def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an
    expression, return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol'
    must be a global, or at least a non-"fast" local in the code block.
    That is, only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked,
    and 'symbol' must be present in 'code.co_names'.
    """
    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None
    name_idx = list(code.co_names).index(symbol)

    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default
    for op, arg in _iter_code(code):
        if op == LOAD_CONST:
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            return const
        else:
            const = default

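# Usage sketch for extract_constant (not part of the original module).
# It only works on interpreters with the Python-2-era bytecode layout
# that _iter_code assumes:
def _demo_extract_constant():
    code = compile("__version__ = '1.4'\n", 'setup.py', 'exec')
    print(extract_constant(code, '__version__'))  # -> '1.4'
    print(extract_constant(code, 'missing'))      # -> None
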
def cache_url(self, **kwargs):
    """A simplified URL to be used for caching the given query."""
    query = {
        'Operation': self.Operation,
        'Service': "AWSECommerceService",
        'Version': self.Version,
    }
    query.update(kwargs)
    service_domain = SERVICE_DOMAINS[self.Region][0]
    return "http://" + service_domain + "/onca/xml?" + _quote_query(query)

def autolink(el, link_regexes=_link_regexes,
             avoid_elements=_avoid_elements,
             avoid_hosts=_avoid_hosts,
             avoid_classes=_avoid_classes):
    """
    Turn any URLs into links.

    It will search for links identified by the given regular expressions
    (by default mailto and http(s) links).

    It won't link text in an element in avoid_elements, or an element
    with a class in avoid_classes.  It won't link to anything with a
    host that matches one of the regular expressions in avoid_hosts
    (default localhost and 127.0.0.1).

    If you pass in an element, the element's tail will not be
    substituted, only the contents of the element.
    """
    if el.tag in avoid_elements:
        return
    class_name = el.get('class')
    if class_name:
        class_name = class_name.split()
        for match_class in avoid_classes:
            if match_class in class_name:
                return
    for child in list(el):
        autolink(child, link_regexes=link_regexes,
                 avoid_elements=avoid_elements,
                 avoid_hosts=avoid_hosts,
                 avoid_classes=avoid_classes)
        if child.tail:
            text, tail_children = _link_text(
                child.tail, link_regexes, avoid_hosts, factory=el.makeelement)
            if tail_children:
                child.tail = text
                index = el.index(child)
                el[index + 1:index + 1] = tail_children
    if el.text:
        text, pre_children = _link_text(
            el.text, link_regexes, avoid_hosts, factory=el.makeelement)
        if pre_children:
            el.text = text
            el[:0] = pre_children

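# Usage sketch (not part of the original module): autolink mutates the
# parsed tree in place.
def _demo_autolink():
    import lxml.html
    doc = lxml.html.fromstring('<p>See http://example.com for details</p>')
    autolink(doc)
    print(lxml.html.tostring(doc))
    # -> <p>See <a href="http://example.com">http://example.com</a> ...</p>
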
def word_break(el, max_width=40,
               avoid_elements=_avoid_word_break_elements,
               avoid_classes=_avoid_word_break_classes,
               break_character=unichr(0x200b)):
    """Breaks any long words found in the body of the text (not
    attributes).

    Doesn't affect any of the tags in avoid_elements, by default
    ``<textarea>`` and ``<pre>``.

    Breaks words by inserting &#8203;, which is a unicode character
    for Zero Width Space character.  This generally takes up no space
    in rendering, but does copy as a space, and in monospace contexts
    usually takes up space.

    See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
    """
    # Character suggestion of &#8203 comes from:
    # http://www.cs.tut.fi/~jkorpela/html/nobr.html
    if el.tag in avoid_elements:
        return
    class_name = el.get('class')
    if class_name:
        dont_break = False
        class_name = class_name.split()
        for avoid in avoid_classes:
            if avoid in class_name:
                dont_break = True
                break
        if dont_break:
            return
    if el.text:
        el.text = _break_text(el.text, max_width, break_character)
    for child in el:
        word_break(child, max_width=max_width,
                   avoid_elements=avoid_elements,
                   avoid_classes=avoid_classes,
                   break_character=break_character)
        if child.tail:
            child.tail = _break_text(child.tail, max_width, break_character)

def kill_conditional_comments(self, doc):
    """IE conditional comments basically embed HTML that the parser
    doesn't normally see.  We can't allow anything like that, so we'll
    kill any comments that could be conditional.
    """
    self._kill_elements(
        doc, lambda el: _conditional_comment_re.search(el.text),
        etree.Comment)

def _has_sneaky_javascript(self, style):
    """Depending on the browser, stuff like ``e x p r e s s i o n(...)``
    can get interpreted, or ``expre/* stuff */ssion(...)``.  This checks
    for attempts to do stuff like this.

    Typically the response will be to kill the entire style; if you
    have just a bit of Javascript in the style another rule will catch
    that and remove only the Javascript from the style; this catches
    more sneaky attempts.
    """
    style = self._substitute_comments('', style)
    style = style.replace('\\', '')
    style = _substitute_whitespace('', style)
    style = style.lower()
    if 'javascript:' in style:
        return True
    if 'expression(' in style:
        return True
    return False

def document_fromstring(html, guess_charset=True, parser=None):
    """Parse a whole document, returning the root element."""
    if not isinstance(html, _strings):
        raise TypeError('string required')
    if parser is None:
        parser = html_parser
    return parser.parse(html, useChardet=guess_charset).getroot()

def fragments_fromstring(html, no_leading_text=False,
                         guess_charset=False, parser=None):
    """Parses several HTML elements, returning a list of elements.

    The first item in the list may be a string.  If no_leading_text is
    true, then it will be an error if there is leading text, and it
    will always be a list of only elements.

    If `guess_charset` is `True` and the text was not unicode but a
    bytestring, the `chardet` library will perform charset guessing on
    the string.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')
    if parser is None:
        parser = html_parser
    children = parser.parseFragment(html, 'div', useChardet=guess_charset)
    if children and isinstance(children[0], _strings):
        if no_leading_text:
            if children[0].strip():
                raise etree.ParserError('There is leading text: %r' %
                                        children[0])
            del children[0]
    return children

def fragment_fromstring(html, create_parent=False,
                        guess_charset=False, parser=None):
    """Parses a single HTML element; it is an error if there is more
    than one element, or if anything but whitespace precedes or follows
    the element.

    If create_parent is true (or is a tag name) then a parent node
    will be created to encapsulate the HTML in a single element.  In
    this case, leading or trailing text is allowed.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')

    accept_leading_text = bool(create_parent)

    elements = fragments_fromstring(
        html, guess_charset=guess_charset, parser=parser,
        no_leading_text=not accept_leading_text)

    if create_parent:
        if not isinstance(create_parent, _strings):
            create_parent = 'div'
        new_root = Element(create_parent)
        if elements:
            if isinstance(elements[0], _strings):
                new_root.text = elements[0]
                del elements[0]
            new_root.extend(elements)
        return new_root

    if not elements:
        raise etree.ParserError('No elements found')
    if len(elements) > 1:
        raise etree.ParserError('Multiple elements found')
    result = elements[0]
    if result.tail and result.tail.strip():
        raise etree.ParserError('Element followed by text: %r' % result.tail)
    result.tail = None
    return result

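# A short sketch of the two fragment entry points above (not part of
# the original module):
def _demo_fragment_parsing():
    el = fragment_fromstring('<p>one</p>')          # exactly one element
    div = fragment_fromstring('text <p>one</p>',    # wrapper <div> allows
                              create_parent=True)   # leading text
    parts = fragments_fromstring('a <p>b</p> <p>c</p>')
    # parts is the leading string 'a ' followed by two <p> elements.
    print(el.tag, div.tag, parts)
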
def fromstring(html, guess_charset=True, parser=None):
    """Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing
    if it is a fragment or a document.
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')
    doc = document_fromstring(html, parser=parser,
                              guess_charset=guess_charset)

    # document starts with doctype or <html>, full document!
    start = html[:50].lstrip().lower()
    if start.startswith('<html') or start.startswith('<!doctype'):
        return doc

    head = _find_tag(doc, 'head')

    # if the head is not empty we have a full document
    if len(head):
        return doc

    body = _find_tag(doc, 'body')

    # The body has just one element, so it was probably a single
    # element passed in
    if (len(body) == 1 and (not body.text or not body.text.strip())
            and (not body[-1].tail or not body[-1].tail.strip())):
        return body[0]

    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in.  We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = 'div'
    else:
        body.tag = 'span'
    return body

def parse(filename_url_or_file, guess_charset=True, parser=None):
    """Parse a filename, URL, or file-like object into an HTML document
    tree.  Note: this returns a tree, not an element.  Use
    ``parse(...).getroot()`` to get the document root.
    """
    if parser is None:
        parser = html_parser
    if not isinstance(filename_url_or_file, _strings):
        fp = filename_url_or_file
    elif _looks_like_url(filename_url_or_file):
        fp = urlopen(filename_url_or_file)
    else:
        fp = open(filename_url_or_file, 'rb')
    return parser.parse(fp, useChardet=guess_charset)

def api_accepts(fields):
    """
    Define the accept schema of an API (GET or POST).

    'fields' is a dict of Django form fields keyed by field name that
    specifies the form-urlencoded fields that the API accepts*.  The
    view function is then called with GET/POST data that has been
    cleaned by the Django form.

    In debug and test modes, failure to validate the fields will result
    in a 400 Bad Request response.  In production mode, failure to
    validate will just log a warning, unless overwritten by a 'strict'
    setting.

    For example:

    @api_accepts({
        'x': forms.IntegerField(min_value=0),
        'y': forms.IntegerField(min_value=0),
    })
    def add(request, *args, **kwargs):
        x = request.POST['x']
        y = request.POST['y']
        # x and y are integers already.
        return HttpResponse('%d' % (x + y))

    *: 'fields' can also include Django models as {'key': Model()}.  If
    present, api_accepts will look for the field keyed by '<key>-id' and
    pick the object that has that primary key.  For example, if the
    entry is {'course': Course()}, it will read the 'course-id' key from
    the request and find the object Course.objects.get(pk=course_id).
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            if request.method not in ['GET', 'POST']:
                return func(request, *args, **kwargs)

            # The fields dict passed into the type() function is modified,
            # so send in a copy instead.
            form_class = type('ApiForm', (forms.Form,), fields.copy())
            form = form_class(getattr(request, request.method))
            if not form.is_valid():
                if settings.DEBUG:
                    return JsonResponseBadRequest(
                        'failed to validate: %s' % dict(form.errors)
                    )
                else:
                    logger.warn(
                        'input to \'%s\' failed to validate: %s',
                        request.path, dict(form.errors)
                    )
                    return func(request, *args, **kwargs)

            # Clean any models.Model fields, by looking up object based on
            # primary key in request.
            for (field_name, field_instance) in fields.items():
                if isinstance(field_instance, models.Model):
                    field_type = type(field_instance)
                    # TODO: irregular, should we remove?
                    field_id = '%s-id' % field_name
                    if field_id not in request.REQUEST:
                        return JsonResponseBadRequest(
                            'field %s not present' % field_name
                        )
                    field_pk = int(request.REQUEST[field_id])
                    try:
                        field_value = field_type.objects.get(pk=field_pk)
                    except field_type.DoesNotExist:
                        return JsonResponseNotFound(
                            '%s with pk=%d does not exist' % (
                                field_type, field_pk
                            )
                        )
                    form.cleaned_data[field_name] = field_value

            validated_request = ValidatedRequest(request, form)
            return func(validated_request, *args, **kwargs)
        return wrapped_func
    return decorator

def api_returns(return_values):
    """
    Define the return schema of an API.

    'return_values' is a dictionary mapping
        HTTP return code => documentation

    In addition to validating that the status code of the response
    belongs to one of the accepted status codes, it also validates that
    the returned object is JSON (derived from JsonResponse).

    In debug and test modes, failure to validate the fields will result
    in a 400 Bad Request response.  In production mode, failure to
    validate will just log a warning, unless overwritten by a 'strict'
    setting.

    For example:

    @api_returns({
        200: 'Operation successful',
        403: 'User does not have permission',
        404: 'Resource or user not found',
    })
    def add(request, *args, **kwargs):
        if not request.user.is_superuser:
            return JsonResponseForbidden()  # 403

        return HttpResponse()  # 200
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            return_value = func(request, *args, **kwargs)

            if not isinstance(return_value, JsonResponse):
                if settings.DEBUG:
                    return JsonResponseBadRequest('API did not return JSON')
                else:
                    logger.warn('API did not return JSON')

            accepted_return_codes = list(return_values.keys())
            # Never block 500s - these should be handled by other
            # reporting mechanisms.
            accepted_return_codes.append(500)

            if return_value.status_code not in accepted_return_codes:
                if settings.DEBUG:
                    return JsonResponseBadRequest(
                        'API returned %d instead of acceptable values %s' %
                        (return_value.status_code, accepted_return_codes)
                    )
                else:
                    logger.warn(
                        'API returned %d instead of acceptable values %s',
                        return_value.status_code, accepted_return_codes,
                    )

            return return_value
        return wrapped_func
    return decorator

def api(accept_return_dict):
    """
    Wrapper that calls @api_accepts and @api_returns in sequence.

    For example:

    @api({
        'accepts': {
            'x': forms.IntegerField(min_value=0),
            'y': forms.IntegerField(min_value=0),
        },
        'returns': {
            200: 'Operation successful',
            403: 'User does not have permission',
            404: 'Resource or user not found',
        }
    })
    def add(request, *args, **kwargs):
        if not request.GET['x'] == 10:
            return JsonResponseForbidden()  # 403

        return HttpResponse()  # 200
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            @api_accepts(accept_return_dict['accepts'])
            @api_returns(accept_return_dict['returns'])
            def apid_fnc(request, *args, **kwargs):
                return func(request, *args, **kwargs)
            return apid_fnc(request, *args, **kwargs)
        return wrapped_func
    return decorator

def validate_json_request(required_fields):
    """
    Return a decorator that ensures that the request passed to the view
    function/method has a valid JSON request body with the given
    required fields.  The dict parsed from the JSON is then passed as
    the second argument to the decorated function/method.

    For example:

    @validate_json_request({'name', 'date'})
    def view_func(request, request_dict):
        ...
    """
    def decorator(func):
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            try:
                request_dict = json.loads(request.raw_post_data)
            except ValueError as e:
                return JsonResponseBadRequest('invalid POST JSON: %s' % e)
            for k in required_fields:
                if k not in request_dict:
                    return JsonResponseBadRequest(
                        'POST JSON must contain property \'%s\'' % k)
            return func(request, request_dict, *args, **kwargs)
        return wrapped_func
    return decorator

def getTreeWalker(treeType, implementation=None, **kwargs):
    """Get a TreeWalker class for various types of tree with built-in
    support

    treeType - the name of the tree type required (case-insensitive).
               Supported values are:

               "dom" - The xml.dom.minidom DOM implementation
               "pulldom" - The xml.dom.pulldom event stream
               "etree" - A generic walker for tree implementations
                         exposing an elementtree-like interface (known
                         to work with ElementTree, cElementTree and
                         lxml.etree).
               "lxml" - Optimized walker for lxml.etree
               "genshi" - a Genshi stream

    implementation - (Currently applies to the "etree" tree type only).
                     A module implementing the tree type e.g.
                     xml.etree.ElementTree or cElementTree.
    """
    treeType = treeType.lower()
    if treeType not in treeWalkerCache:
        if treeType in ("dom", "pulldom"):
            name = "%s.%s" % (__name__, treeType)
            __import__(name)
            mod = sys.modules[name]
            treeWalkerCache[treeType] = mod.TreeWalker
        elif treeType == "genshi":
            from . import genshistream
            treeWalkerCache[treeType] = genshistream.TreeWalker
        elif treeType == "lxml":
            from . import lxmletree
            treeWalkerCache[treeType] = lxmletree.TreeWalker
        elif treeType == "etree":
            from . import etree
            if implementation is None:
                implementation = default_etree
            # XXX: NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeWalker
    return treeWalkerCache.get(treeType)

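# Usage sketch (not part of the original module), assuming the html5lib
# package this function comes from:
def _demo_tree_walker():
    import html5lib
    tree = html5lib.parse('<p>Hello</p>')   # etree-based tree by default
    TreeWalker = getTreeWalker('etree')
    for tok in TreeWalker(tree):
        print(tok['type'])                  # StartTag, Characters, EndTag, ...
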
def get_include():
    """
    Returns a list of header include paths (for lxml itself, libxml2
    and libxslt) needed to compile C code against lxml if it was built
    with statically linked libraries.
    """
    import os
    lxml_path = __path__[0]
    include_path = os.path.join(lxml_path, 'includes')
    includes = [include_path, lxml_path]

    for name in os.listdir(include_path):
        path = os.path.join(include_path, name)
        if os.path.isdir(path):
            includes.append(path)

    return includes

def export(self, location):
    """Export the svn repository at the url to the destination location"""
    url, rev = self.get_url_rev()
    rev_options = get_rev_options(url, rev)
    logger.info('Exporting svn repository %s to %s', url, location)
    with indent_log():
        if os.path.exists(location):
            # Subversion doesn't like to check out over an existing
            # directory --force fixes this, but was only added in svn 1.5
            rmtree(location)
        self.run_command(
            ['export'] + rev_options + [url, location],
            show_stdout=False)

def get_revision(self, location):
    """
    Return the maximum revision for all files under a given location
    """
    # Note: taken from setuptools.command.egg_info
    revision = 0

    for base, dirs, files in os.walk(location):
        if self.dirname not in dirs:
            dirs[:] = []
            continue    # no sense walking uncontrolled subdirs
        dirs.remove(self.dirname)
        entries_fn = os.path.join(base, self.dirname, 'entries')
        if not os.path.exists(entries_fn):
            # FIXME: should we warn?
            continue

        dirurl, localrev = self._get_svn_url_rev(base)

        if base == location:
            base_url = dirurl + '/'   # save the root url
        elif not dirurl or not dirurl.startswith(base_url):
            dirs[:] = []
            continue    # not part of the same svn tree, skip it
        revision = max(revision, localrev)
    return revision

def setupmethod(f):
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    def wrapper_func(self, *args, **kwargs):
        if self.debug and self._got_first_request:
            raise AssertionError('A setup function was called after the '
                'first request was handled.  This usually indicates a bug '
                'in the application where a module was not imported '
                'and decorators or other functionality was called too late.\n'
                'To fix this make sure to import all your view modules, '
                'database models and everything related at a central place '
                'before the application starts serving requests.')
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)

def name(self):
    """The name of the application.  This is usually the import name
    with the difference that it's guessed from the run file if the
    import name is main.  This name is used as a display name when
    Flask needs the name of the application.  It can be set and
    overridden to change the value.

    .. versionadded:: 0.8
    """
    if self.import_name == '__main__':
        fn = getattr(sys.modules['__main__'], '__file__', None)
        if fn is None:
            return '__main__'
        return os.path.splitext(os.path.basename(fn))[0]
    return self.import_name

def propagate_exceptions(self):
    """Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
    value in case it's set, otherwise a sensible default is returned.

    .. versionadded:: 0.7
    """
    rv = self.config['PROPAGATE_EXCEPTIONS']
    if rv is not None:
        return rv
    return self.testing or self.debug

def logger(self):
    """A :class:`logging.Logger` object for this application.  The
    default configuration is to log to stderr if the application is
    in debug mode.  This logger can be used to (surprise) log messages.
    Here are some examples::

        app.logger.debug('A value for debugging')
        app.logger.warning('A warning occurred (%d apples)', 42)
        app.logger.error('An error occurred')

    .. versionadded:: 0.3
    """
    if self._logger and self._logger.name == self.logger_name:
        return self._logger
    with _logger_lock:
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        from flask.logging import create_logger
        self._logger = rv = create_logger(self)
        return rv

def make_config(self, instance_relative=False):
    """Used to create the config attribute by the Flask constructor.
    The `instance_relative` parameter is passed in from the constructor
    of Flask (there named `instance_relative_config`) and indicates if
    the config should be relative to the instance path or the root path
    of the application.

    .. versionadded:: 0.8
    """
    root_path = self.root_path
    if instance_relative:
        root_path = self.instance_path
    return Config(root_path, self.default_config)

def auto_find_instance_path(self):
    """Tries to locate the instance path if it was not provided to the
    constructor of the application class.  It will basically calculate
    the path to a folder named ``instance`` next to your main file or
    the package.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is None:
        return os.path.join(package_path, 'instance')
    return os.path.join(prefix, 'var', self.name + '-instance')

def open_instance_resource(self, resource, mode='rb'):
    """Opens a resource from the application's instance folder
    (:attr:`instance_path`).  Otherwise works like
    :meth:`open_resource`.  Instance resources can also be opened for
    writing.

    :param resource: the name of the resource.  To access resources
                     within subfolders use forward slashes as separator.
    :param mode: resource file opening mode, default is 'rb'.
    """
    return open(os.path.join(self.instance_path, resource), mode)

def create_jinja_environment(self):
    """Creates the Jinja2 environment based on :attr:`jinja_options`
    and :meth:`select_jinja_autoescape`.  Since 0.7 this also adds
    the Jinja2 globals and filters after initialization.  Override
    this function to customize the behavior.

    .. versionadded:: 0.5
    """
    options = dict(self.jinja_options)
    if 'autoescape' not in options:
        options['autoescape'] = self.select_jinja_autoescape
    rv = Environment(self, **options)
    rv.globals.update(
        url_for=url_for,
        get_flashed_messages=get_flashed_messages,
        config=self.config,
        # request, session and g are normally added with the
        # context processor for efficiency reasons but for imported
        # templates we also want the proxies in there.
        request=request,
        session=session,
        g=g
    )
    rv.filters['tojson'] = json.tojson_filter
    return rv

def update_template_context(self, context):
    """Update the template context with some commonly used variables.
    This injects request, session, config and g into the template
    context as well as everything template context processors want
    to inject.  Note that, as of Flask 0.6, the original values in the
    context will not be overridden if a context processor decides to
    return a value with the same key.

    :param context: the context as a dictionary that is updated in
                    place to add extra variables.
    """
    funcs = self.template_context_processors[None]
    reqctx = _request_ctx_stack.top
    if reqctx is not None:
        bp = reqctx.request.blueprint
        if bp is not None and bp in self.template_context_processors:
            funcs = chain(funcs, self.template_context_processors[bp])
    orig_ctx = context.copy()
    for func in funcs:
        context.update(func())
    # make sure the original values win.  This makes it easier to add
    # new variables in context processors without breaking existing
    # views.
    context.update(orig_ctx)

def run(self, host=None, port=None, debug=None, **options):
    """Runs the application on a local development server.  If the
    :attr:`debug` flag is set the server will automatically reload
    for code changes and show a debugger in case an exception happened.

    If you want to run the application in debug mode, but disable the
    code execution on the interactive debugger, you can pass
    ``use_evalex=False`` as parameter.  This will keep the debugger's
    traceback screen active, but disable code execution.

    .. admonition:: Keep in Mind

       Flask will suppress any server error with a generic error page
       unless it is in debug mode.  As such to enable just the
       interactive debugger without the code reloading, you have to
       invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
       Setting ``use_debugger`` to `True` without being in debug mode
       won't catch any exceptions because there won't be any to catch.

    .. versionchanged:: 0.10
       The default port is now picked from the ``SERVER_NAME`` variable.

    :param host: the hostname to listen on.  Set this to ``'0.0.0.0'``
                 to have the server available externally as well.
                 Defaults to ``'127.0.0.1'``.
    :param port: the port of the webserver.  Defaults to ``5000`` or
                 the port defined in the ``SERVER_NAME`` config variable
                 if present.
    :param debug: if given, enable or disable debug mode.
                  See :attr:`debug`.
    :param options: the options to be forwarded to the underlying
                    Werkzeug server.  See
                    :func:`werkzeug.serving.run_simple` for more
                    information.
    """
    from werkzeug.serving import run_simple
    if host is None:
        host = '127.0.0.1'
    if port is None:
        server_name = self.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    if debug is not None:
        self.debug = bool(debug)
    options.setdefault('use_reloader', self.debug)
    options.setdefault('use_debugger', self.debug)
    try:
        run_simple(host, port, self, **options)
    finally:
        # reset the first request information if the development server
        # exited normally.  This makes it possible to restart the server
        # without reloader and that stuff from an interactive shell.
        self._got_first_request = False

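# The canonical way to start the development server (a sketch, not part
# of the original module; assumes a module-level Flask app named `app`):
#
#     if __name__ == '__main__':
#         app.run(host='0.0.0.0', port=5000, debug=True)
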
def save_session(self, session, response):
    """Saves the session if it needs updates.  For the default
    implementation, check :meth:`open_session`.  Instead of overriding
    this method we recommend replacing the :class:`session_interface`.

    :param session: the session to be saved (a
                    :class:`~werkzeug.contrib.securecookie.SecureCookie`
                    object)
    :param response: an instance of :attr:`response_class`
    """
    return self.session_interface.save_session(self, session, response)

def register_module(self, module, **options):
    """Registers a module with this application.  The keyword arguments
    of this function are the same as the ones for the constructor of
    the :class:`Module` class and will override the values of the
    module if provided.

    .. versionchanged:: 0.7
       The module system was deprecated in favor of the blueprint
       system.
    """
    assert blueprint_is_module(module), 'register_module requires ' \
        'actual module objects.  Please upgrade to blueprints though.'
    if not self.enable_modules:
        raise RuntimeError('Module support was disabled but code '
            'attempted to register a module named %r' % module)
    else:
        from warnings import warn
        warn(DeprecationWarning('Modules are deprecated.  Upgrade to '
            'using blueprints.  Have a look into the documentation for '
            'more information.  If this module was registered by a '
            'Flask-Extension upgrade the extension or contact the author '
            'of that extension instead.  (Registered %r)' % module),
            stacklevel=2)
    self.register_blueprint(module, **options)

def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    """Connects a URL rule.  Works exactly like the :meth:`route`
    decorator.  If a view_func is provided it will be registered with
    the endpoint.

    Basically this example::

        @app.route('/')
        def index():
            pass

    Is equivalent to the following::

        def index():
            pass
        app.add_url_rule('/', 'index', index)

    If the view_func is not provided you will need to connect the
    endpoint to a view function like so::

        app.view_functions['index'] = index

    Internally :meth:`route` invokes :meth:`add_url_rule` so if you
    want to customize the behavior via subclassing you only need to
    change this method.

    For more information refer to :ref:`url-route-registrations`.

    .. versionchanged:: 0.2
       `view_func` parameter added.

    .. versionchanged:: 0.6
       `OPTIONS` is added automatically as method.

    :param rule: the URL rule as string
    :param endpoint: the endpoint for the registered URL rule.  Flask
                     itself assumes the name of the view function as
                     endpoint
    :param view_func: the function to call when serving a request to
                      the provided endpoint
    :param options: the options to be forwarded to the underlying
                    :class:`~werkzeug.routing.Rule` object.  A change
                    to Werkzeug is handling of method options.  methods
                    is a list of methods this rule should be limited
                    to (`GET`, `POST` etc.).  By default a rule just
                    listens for `GET` (and implicitly `HEAD`).
                    Starting with Flask 0.6, `OPTIONS` is implicitly
                    added and handled by the standard request handling.
    """
    if endpoint is None:
        endpoint = _endpoint_from_view_func(view_func)
    options['endpoint'] = endpoint
    methods = options.pop('methods', None)

    # if the methods are not given and the view_func object knows its
    # methods we can use that instead.  If neither exists, we go with
    # a tuple of only `GET` as default.
    if methods is None:
        methods = getattr(view_func, 'methods', None) or ('GET',)
    methods = set(methods)

    # Methods that should always be added
    required_methods = set(getattr(view_func, 'required_methods', ()))

    # starting with Flask 0.8 the view_func object can disable and
    # force-enable the automatic options handling.
    provide_automatic_options = getattr(view_func,
        'provide_automatic_options', None)

    if provide_automatic_options is None:
        if 'OPTIONS' not in methods:
            provide_automatic_options = True
            required_methods.add('OPTIONS')
        else:
            provide_automatic_options = False

    # Add the required methods now.
    methods |= required_methods

    # due to a werkzeug bug we need to make sure that the defaults are
    # None if they are an empty dictionary.  This should not be
    # necessary with Werkzeug 0.7
    options['defaults'] = options.get('defaults') or None

    rule = self.url_rule_class(rule, methods=methods, **options)
    rule.provide_automatic_options = provide_automatic_options

    self.url_map.add(rule)
    if view_func is not None:
        old_func = self.view_functions.get(endpoint)
        if old_func is not None and old_func != view_func:
            raise AssertionError('View function mapping is overwriting an '
                                 'existing endpoint function: %s' % endpoint)
        self.view_functions[endpoint] = view_func

def endpoint(self, endpoint):
    """A decorator to register a function as an endpoint.
    Example::

        @app.endpoint('example.endpoint')
        def example():
            return "example"

    :param endpoint: the name of the endpoint
    """
    def decorator(f):
        self.view_functions[endpoint] = f
        return f
    return decorator

def errorhandler(self, code_or_exception):
    """A decorator that is used to register a function to handle a
    given error code.  Example::

        @app.errorhandler(404)
        def page_not_found(error):
            return 'This page does not exist', 404

    You can also register handlers for arbitrary exceptions::

        @app.errorhandler(DatabaseError)
        def special_exception_handler(error):
            return 'Database connection failed', 500

    You can also register a function as error handler without using
    the :meth:`errorhandler` decorator.  The following example is
    equivalent to the one above::

        def page_not_found(error):
            return 'This page does not exist', 404
        app.error_handler_spec[None][404] = page_not_found

    Setting error handlers via assignments to
    :attr:`error_handler_spec` however is discouraged as it requires
    fiddling with nested dictionaries and the special case for
    arbitrary exception types.

    The first `None` refers to the active blueprint.  If the error
    handler should be application wide `None` shall be used.

    .. versionadded:: 0.7
       One can now additionally also register custom exception types
       that do not necessarily have to be a subclass of the
       :class:`~werkzeug.exceptions.HTTPException` class.

    :param code_or_exception: the code as integer for the handler, or
                              an arbitrary exception type
    """
    def decorator(f):
        self._register_error_handler(None, code_or_exception, f)
        return f
    return decorator

def template_filter(self, name=None):
    """A decorator that is used to register a custom template filter.
    You can specify a name for the filter, otherwise the function name
    will be used.  Example::

        @app.template_filter()
        def reverse(s):
            return s[::-1]

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    def decorator(f):
        self.add_template_filter(f, name=name)
        return f
    return decorator

def add_template_filter(self, f, name=None):
    """Register a custom template filter.  Works exactly like the
    :meth:`template_filter` decorator.

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    self.jinja_env.filters[name or f.__name__] = f

def template_global(self, name=None):
    """A decorator that is used to register a custom template global
    function.  You can specify a name for the global function,
    otherwise the function name will be used.  Example::

        @app.template_global()
        def double(n):
            return 2 * n

    .. versionadded:: 0.10

    :param name: the optional name of the global function, otherwise
                 the function name will be used.
    """
    def decorator(f):
        self.add_template_global(f, name=name)
        return f
    return decorator

def add_template_global(self, f, name=None):
    """Register a custom template global function.  Works exactly like
    the :meth:`template_global` decorator.

    .. versionadded:: 0.10

    :param name: the optional name of the global function, otherwise
                 the function name will be used.
    """
    self.jinja_env.globals[name or f.__name__] = f

def handle_http_exception(self, e):
    """Handles an HTTP exception.  By default this will invoke the
    registered error handlers and fall back to returning the
    exception as response.

    .. versionadded:: 0.3
    """
    handlers = self.error_handler_spec.get(request.blueprint)
    # Proxy exceptions don't have error codes.  We want to always
    # return those unchanged as errors
    if e.code is None:
        return e
    if handlers and e.code in handlers:
        handler = handlers[e.code]
    else:
        handler = self.error_handler_spec[None].get(e.code)
    if handler is None:
        return e
    return handler(e)

def trap_http_exception(self, e):
    """Checks if an HTTP exception should be trapped or not.  By
    default this will return `False` for all exceptions except for a
    bad request key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to
    `True`.  It also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set
    to `True`.

    This is called for all HTTP exceptions raised by a view function.
    If it returns `True` for any exception the error handler for this
    exception is not called and it shows up as regular exception in
    the traceback.  This is helpful for debugging implicitly raised
    HTTP exceptions.

    .. versionadded:: 0.8
    """
    if self.config['TRAP_HTTP_EXCEPTIONS']:
        return True
    if self.config['TRAP_BAD_REQUEST_ERRORS']:
        return isinstance(e, BadRequest)
    return False

def handle_user_exception(self, e):
    """This method is called whenever an exception occurs that should
    be handled.  A special case are
    :class:`~werkzeug.exception.HTTPException`\s which are forwarded by
    this function to the :meth:`handle_http_exception` method.  This
    function will either return a response value or reraise the
    exception with the same traceback.

    .. versionadded:: 0.7
    """
    exc_type, exc_value, tb = sys.exc_info()
    assert exc_value is e

    # ensure not to trash sys.exc_info() at that point in case someone
    # wants the traceback preserved in handle_http_exception.  Of
    # course we cannot prevent users from trashing it themselves in a
    # custom trap_http_exception method so that's their fault then.
    if isinstance(e, HTTPException) and not self.trap_http_exception(e):
        return self.handle_http_exception(e)

    blueprint_handlers = ()
    handlers = self.error_handler_spec.get(request.blueprint)
    if handlers is not None:
        blueprint_handlers = handlers.get(None, ())
    app_handlers = self.error_handler_spec[None].get(None, ())
    for typecheck, handler in chain(blueprint_handlers, app_handlers):
        if isinstance(e, typecheck):
            return handler(e)

    reraise(exc_type, exc_value, tb)

def handle_exception(self, e):
    """Default exception handling that kicks in when an exception
    occurs that is not caught.  In debug mode the exception will
    be re-raised immediately, otherwise it is logged and the handler
    for a 500 internal server error is used.  If no such handler
    exists, a default 500 internal server error message is displayed.

    .. versionadded:: 0.3
    """
    exc_type, exc_value, tb = sys.exc_info()

    got_request_exception.send(self, exception=e)
    handler = self.error_handler_spec[None].get(500)

    if self.propagate_exceptions:
        # if we want to repropagate the exception, we can attempt to
        # raise it with the whole traceback in case we can do that
        # (the function was actually called from the except part)
        # otherwise, we just raise the error again
        if exc_value is e:
            reraise(exc_type, exc_value, tb)
        else:
            raise e

    self.log_exception((exc_type, exc_value, tb))
    if handler is None:
        return InternalServerError()
    return handler(e)

def log_exception(self, exc_info):
    """Logs an exception.  This is called by :meth:`handle_exception`
    if debugging is disabled and right before the handler is called.
    The default implementation logs the exception as error on the
    :attr:`logger`.

    .. versionadded:: 0.8
    """
    self.logger.error('Exception on %s [%s]' % (
        request.path,
        request.method
    ), exc_info=exc_info)

def raise_routing_exception(self, request):
    """Exceptions that are recorded during routing are reraised with
    this method.  During debug we are not reraising redirect requests
    for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're
    raising a different error instead to help debug situations.

    :internal:
    """
    if not self.debug \
            or not isinstance(request.routing_exception, RequestRedirect) \
            or request.method in ('GET', 'HEAD', 'OPTIONS'):
        raise request.routing_exception

    from .debughelpers import FormDataRoutingRedirect
    raise FormDataRoutingRedirect(request)

def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object.  In order to convert the return value to a
    proper response object, call :func:`make_response`.

    .. versionchanged:: 0.7
       This no longer does the exception handling, this code was
       moved to the new :meth:`full_dispatch_request`.
    """
    req = _request_ctx_stack.top.request
    if req.routing_exception is not None:
        self.raise_routing_exception(req)
    rule = req.url_rule
    # if we provide automatic options for this URL and the
    # request came with the OPTIONS method, reply automatically
    if getattr(rule, 'provide_automatic_options', False) \
            and req.method == 'OPTIONS':
        return self.make_default_options_response()
    # otherwise dispatch to the handler for that endpoint
    return self.view_functions[rule.endpoint](**req.view_args)

def full_dispatch_request(self):
    """Dispatches the request and, on top of that, performs request
    pre- and post-processing as well as HTTP exception catching and
    error handling.

    .. versionadded:: 0.7
    """
    self.try_trigger_before_first_request_functions()
    try:
        request_started.send(self)
        rv = self.preprocess_request()
        if rv is None:
            rv = self.dispatch_request()
    except Exception as e:
        rv = self.handle_user_exception(e)
    response = self.make_response(rv)
    response = self.process_response(response)
    request_finished.send(self, response=response)
    return response
def try_trigger_before_first_request_functions(self):
    """Called before each request; ensures that the functions in
    :attr:`before_first_request_funcs` are triggered exactly once per
    application instance (which usually means once per process).

    :internal:
    """
    if self._got_first_request:
        return
    with self._before_request_lock:
        if self._got_first_request:
            return
        self._got_first_request = True
        for func in self.before_first_request_funcs:
            func()
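A small sketch, assuming Flask is importable; the registered function lands in before_first_request_funcs and is run exactly once by the method above:

from flask import Flask

app = Flask(__name__)

@app.before_first_request
def warm_up():
    # runs once per application instance; the double-checked
    # _got_first_request flag guards against concurrent first requests
    app.logger.info('first request seen')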
def make_default_options_response(self):
    """This method is called to create the default `OPTIONS` response.
    This can be changed through subclassing to change the default
    behavior of `OPTIONS` responses.

    .. versionadded:: 0.7
    """
    adapter = _request_ctx_stack.top.url_adapter
    if hasattr(adapter, 'allowed_methods'):
        methods = adapter.allowed_methods()
    else:
        # fallback for Werkzeug < 0.7: probe with an invalid method so
        # the router reports the valid ones via MethodNotAllowed
        methods = []
        try:
            adapter.match(method='--')
        except MethodNotAllowed as e:
            methods = e.valid_methods
        except HTTPException:
            pass
    rv = self.response_class()
    rv.allow.update(methods)
    return rv
def make_response(self, rv):
    """Converts the return value from a view function to a real
    response object that is an instance of :attr:`response_class`.

    The following types are allowed for `rv`:

    .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|

    ======================= ===========================================
    :attr:`response_class`  the object is returned unchanged
    :class:`str`            a response object is created with the
                            string as body
    :class:`unicode`        a response object is created with the
                            string encoded to utf-8 as body
    a WSGI function         the function is called as WSGI application
                            and buffered as response object
    :class:`tuple`          A tuple in the form ``(response, status,
                            headers)`` where `response` is any of the
                            types defined here, `status` is a string
                            or an integer and `headers` is a list or
                            a dictionary with header values.
    ======================= ===========================================

    :param rv: the return value from the view function

    .. versionchanged:: 0.9
       Previously a tuple was interpreted as the arguments for the
       response object.
    """
    status = headers = None
    if isinstance(rv, tuple):
        rv, status, headers = rv + (None,) * (3 - len(rv))

    if rv is None:
        raise ValueError('View function did not return a response')

    if not isinstance(rv, self.response_class):
        # When we create a response object directly, we let the constructor
        # set the headers and status.  We do this because there can be
        # some extra logic involved when creating these objects with
        # specific values (like default content type selection).
        if isinstance(rv, (text_type, bytes, bytearray)):
            rv = self.response_class(rv, headers=headers, status=status)
            headers = status = None
        else:
            rv = self.response_class.force_type(rv, request.environ)

    if status is not None:
        if isinstance(status, string_types):
            rv.status = status
        else:
            rv.status_code = status
    if headers:
        rv.headers.extend(headers)

    return rv
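The documented return types correspond to views like these (a hedged sketch, assuming Flask is importable):

from flask import Flask

app = Flask(__name__)

@app.route('/text')
def text():
    return 'plain body'  # str -> response with status 200

@app.route('/created')
def created():
    return 'stored', 201  # (response, status); headers padded to None

@app.route('/headers')
def with_headers():
    # full (response, status, headers) tuple; make_response() pads
    # shorter tuples via rv + (None,) * (3 - len(rv))
    return 'ok', 200, {'X-Example': '1'}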
def create_url_adapter(self, request):
    """Creates a URL adapter for the given request.  The URL adapter
    is created at a point where the request context is not yet set up
    so the request is passed explicitly.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9
       This can now also be called without a request object when the
       URL adapter is created for the application context.
    """
    if request is not None:
        return self.url_map.bind_to_environ(request.environ,
            server_name=self.config['SERVER_NAME'])
    # We need at the very least the server name to be set for this
    # to work.
    if self.config['SERVER_NAME'] is not None:
        return self.url_map.bind(
            self.config['SERVER_NAME'],
            script_name=self.config['APPLICATION_ROOT'] or '/',
            url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
    """Injects the URL defaults for the given endpoint directly into
    the values dictionary passed.  This is used internally and
    automatically called on URL building.

    .. versionadded:: 0.7
    """
    funcs = self.url_default_functions.get(None, ())
    if '.' in endpoint:
        bp = endpoint.rsplit('.', 1)[0]
        funcs = chain(funcs, self.url_default_functions.get(bp, ()))
    for func in funcs:
        func(endpoint, values)
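For illustration, a url_defaults callback that inject_url_defaults() will invoke during URL building (assuming Flask is importable; lang_code is a hypothetical URL parameter):

from flask import Flask

app = Flask(__name__)

@app.url_defaults
def add_language_code(endpoint, values):
    # mutate `values` in place to supply parameters the caller
    # of url_for() left out
    values.setdefault('lang_code', 'en')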
def handle_url_build_error(self, error, endpoint, values):
    """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
    """
    exc_type, exc_value, tb = sys.exc_info()
    for handler in self.url_build_error_handlers:
        try:
            rv = handler(error, endpoint, values)
            if rv is not None:
                return rv
        except BuildError as e:
            # reassign so the name survives the except block (Python 3
            # deletes the exception variable when the block ends)
            error = e

    # At this point we want to reraise the exception.  If the error is
    # still the same one we can reraise it with the original traceback,
    # otherwise we raise it from here.
    if error is exc_value:
        reraise(exc_type, exc_value, tb)
    raise error
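A sketch of a build-error handler consumed by the loop above (assuming Flask is importable; the endpoint name and URL are hypothetical):

from flask import Flask

app = Flask(__name__)

def external_url_handler(error, endpoint, values):
    # return a URL string to recover from the BuildError, or re-raise
    # to let handle_url_build_error() fall through to its reraise logic
    if endpoint == 'docs':
        return 'https://example.com/docs'
    raise error

app.url_build_error_handlers.append(external_url_handler)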
def preprocess_request(self):
    """Called before the actual request dispatching; calls every
    function decorated with :meth:`before_request`.  If any of these
    functions returns a value, it's handled as if it was the return
    value from the view and further request handling is stopped.

    This also triggers the :meth:`url_value_preprocessor` functions
    before the actual :meth:`before_request` functions are called.
    """
    bp = _request_ctx_stack.top.request.blueprint

    funcs = self.url_value_preprocessors.get(None, ())
    if bp is not None and bp in self.url_value_preprocessors:
        funcs = chain(funcs, self.url_value_preprocessors[bp])
    for func in funcs:
        func(request.endpoint, request.view_args)

    funcs = self.before_request_funcs.get(None, ())
    if bp is not None and bp in self.before_request_funcs:
        funcs = chain(funcs, self.before_request_funcs[bp])
    for func in funcs:
        rv = func()
        if rv is not None:
            return rv
def process_response(self, response):
    """Can be overridden in order to modify the response object
    before it's sent to the WSGI server.  By default this will
    call all the :meth:`after_request` decorated functions.

    .. versionchanged:: 0.5
       As of Flask 0.5 the functions registered for after request
       execution are called in reverse order of registration.

    :param response: a :attr:`response_class` object.
    :return: a new response object or the same, has to be an
             instance of :attr:`response_class`.
    """
    ctx = _request_ctx_stack.top
    bp = ctx.request.blueprint
    funcs = ctx._after_request_functions
    if bp is not None and bp in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
    if None in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[None]))
    for handler in funcs:
        response = handler(response)
    if not self.session_interface.is_null_session(ctx.session):
        self.save_session(ctx.session, response)
    return response
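An after_request hook as consumed by process_response(); it must return a response object because the loop above reassigns response = handler(response). A sketch assuming Flask is importable; the header is hypothetical:

from flask import Flask

app = Flask(__name__)

@app.after_request
def add_audit_header(response):
    # hooks run in reverse registration order
    response.headers['X-Audited'] = 'yes'
    return response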
def do_teardown_request(self, exc=None):
    """Called after the actual request dispatching; calls every
    function decorated with :meth:`teardown_request`.  This is not
    actually called by the :class:`Flask` object itself but is always
    triggered when the request context is popped.  That way we have a
    tighter control over certain resources under testing environments.

    .. versionchanged:: 0.9
       Added the `exc` argument.  Previously this was always using the
       current exception information.
    """
    if exc is None:
        exc = sys.exc_info()[1]
    funcs = reversed(self.teardown_request_funcs.get(None, ()))
    bp = _request_ctx_stack.top.request.blueprint
    if bp is not None and bp in self.teardown_request_funcs:
        funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
    for func in funcs:
        func(exc)
    request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
    """Called when an application context is popped.  This works pretty
    much the same as :meth:`do_teardown_request` but for the application
    context.

    .. versionadded:: 0.9
    """
    if exc is None:
        exc = sys.exc_info()[1]
    for func in reversed(self.teardown_appcontext_funcs):
        func(exc)
    appcontext_tearing_down.send(self, exc=exc)
def wsgi_app(self, environ, start_response):
    """The actual WSGI application.  This is not implemented in
    `__call__` so that middlewares can be applied without losing a
    reference to the class.  So instead of doing this::

        app = MyMiddleware(app)

    It's a better idea to do this instead::

        app.wsgi_app = MyMiddleware(app.wsgi_app)

    Then you still have the original application object around and
    can continue to call methods on it.

    .. versionchanged:: 0.7
       The behavior of the before and after request callbacks was changed
       under error conditions and a new callback was added that will
       always execute at the end of the request, independent of whether
       an error occurred or not.  See :ref:`callbacks-and-errors`.

    :param environ: a WSGI environment
    :param start_response: a callable accepting a status code,
                           a list of headers and an optional
                           exception context to start the response
    """
    ctx = self.request_context(environ)
    ctx.push()
    error = None
    try:
        try:
            response = self.full_dispatch_request()
        except Exception as e:
            error = e
            response = self.make_response(self.handle_exception(e))
        return response(environ, start_response)
    finally:
        if self.should_ignore_error(error):
            error = None
        ctx.auto_pop(error)
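A minimal middleware wrapped the recommended way (LoggingMiddleware is a hypothetical name; assumes Flask is importable):

from flask import Flask

app = Flask(__name__)

class LoggingMiddleware(object):
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        # log the path, then delegate to the wrapped WSGI callable
        print('handling %s' % environ.get('PATH_INFO'))
        return self.wsgi_app(environ, start_response)

# wrap only wsgi_app so `app` keeps its methods (route(), run(), ...)
app.wsgi_app = LoggingMiddleware(app.wsgi_app)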
def unique(iterable):
    """ Yield unique values in iterable, preserving order. """
    seen = set()
    for value in iterable:
        if value not in seen:
            seen.add(value)
            yield value
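For example:

print(list(unique([3, 1, 3, 2, 1])))   # -> [3, 1, 2]
print(''.join(unique('abracadabra')))  # -> 'abrcd'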
def handle_requires(metadata, pkg_info, key):
    """
    Place the runtime requirements from pkg_info into metadata.
    """
    may_requires = defaultdict(list)
    for value in pkg_info.get_all(key):
        extra_match = EXTRA_RE.search(value)
        if extra_match:
            groupdict = extra_match.groupdict()
            condition = groupdict['condition']
            extra = groupdict['extra']
            package = groupdict['package']
            if condition.endswith(' and '):
                condition = condition[:-5]
        else:
            condition, extra = None, None
            package = value
        key = MayRequiresKey(condition, extra)
        may_requires[key].append(package)

    if may_requires:
        metadata['run_requires'] = []
        for key, value in may_requires.items():
            may_requirement = {'requires': value}
            if key.extra:
                may_requirement['extra'] = key.extra
            if key.condition:
                may_requirement['environment'] = key.condition
            metadata['run_requires'].append(may_requirement)

        if 'extras' not in metadata:
            metadata['extras'] = []
        metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
def pkginfo_to_dict(path, distribution=None):
    """
    Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.

    The description is included under the key ['description'] rather than
    being written to a separate file.

    path: path to PKG-INFO file
    distribution: optional distutils Distribution()
    """
    metadata = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
    metadata["generator"] = "bdist_wheel (" + wheel.__version__ + ")"
    try:
        unicode  # probe: defined on Python 2, raises NameError on Python 3
        pkg_info = read_pkg_info(path)
    except NameError:
        with open(path, 'rb') as pkg_info_file:
            pkg_info = email.parser.Parser().parsestr(
                pkg_info_file.read().decode('utf-8'))
    description = None

    if pkg_info['Summary']:
        metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
        del pkg_info['Summary']

    if pkg_info['Description']:
        description = dedent_description(pkg_info)
        del pkg_info['Description']
    else:
        payload = pkg_info.get_payload()
        if isinstance(payload, bytes):
            # Avoid a Python 2 Unicode error.
            # We still suffer ? glyphs on Python 3.
            payload = payload.decode('utf-8')
        if payload:
            description = payload

    if description:
        pkg_info['description'] = description

    for key in unique(k.lower() for k in pkg_info.keys()):
        low_key = key.replace('-', '_')

        if low_key in SKIP_FIELDS:
            continue

        if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
            continue

        if low_key in PLURAL_FIELDS:
            metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
        elif low_key == "requires_dist":
            handle_requires(metadata, pkg_info, key)
        elif low_key == 'provides_extra':
            if 'extras' not in metadata:
                metadata['extras'] = []
            metadata['extras'].extend(pkg_info.get_all(key))
        elif low_key == 'home_page':
            metadata['extensions']['python.details']['project_urls'] = {'Home': pkg_info[key]}
        elif low_key == 'keywords':
            metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
        else:
            metadata[low_key] = pkg_info[key]

    metadata['metadata_version'] = METADATA_VERSION

    if 'extras' in metadata:
        metadata['extras'] = sorted(set(metadata['extras']))

    # include more information if distribution is available
    if distribution:
        for requires, attr in (('test_requires', 'tests_require'),):
            try:
                requirements = getattr(distribution, attr)
                if isinstance(requirements, list):
                    new_requirements = list(convert_requirements(requirements))
                    metadata[requires] = [{'requires': new_requirements}]
            except AttributeError:
                pass

    # handle contacts
    contacts = []
    for contact_type, role in CONTACT_FIELDS:
        contact = {}
        for key in contact_type:
            if contact_type[key] in metadata:
                contact[key] = metadata.pop(contact_type[key])
        if contact:
            contact['role'] = role
            contacts.append(contact)
    if contacts:
        metadata['extensions']['python.details']['contacts'] = contacts

    # convert entry points to exports
    try:
        with open(os.path.join(os.path.dirname(path),
                               "entry_points.txt"), "r") as ep_file:
            ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
        exports = {}
        for group, items in ep_map.items():
            exports[group] = {}
            for item in items.values():
                name, export = str(item).split(' = ', 1)
                exports[group][name] = export
        if exports:
            metadata['extensions']['python.exports'] = exports
    except IOError:
        pass

    # copy console_scripts entry points to commands
    if 'python.exports' in metadata['extensions']:
        for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
                                         ('gui_scripts', 'wrap_gui')):
            if ep_script in metadata['extensions']['python.exports']:
                metadata['extensions']['python.commands'][wrap_script] = \
                    metadata['extensions']['python.exports'][ep_script]

    return metadata
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion."""
    requires_dist = []
    for op, ver in requirement.specs:
        requires_dist.append(op + ver)
    if not requires_dist:
        return ''
    return " (%s)" % ','.join(requires_dist)
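For example, with pkg_resources requirements (Requirement.specs yields (op, version) pairs), assuming pkg_resources is importable:

import pkg_resources

print(requires_to_requires_dist(pkg_resources.Requirement.parse('requests>=2.0')))
# -> " (>=2.0)"
print(requires_to_requires_dist(pkg_resources.Requirement.parse('six')))
# -> "" (no version predicates)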
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
    old-draft Metadata 2.0 format.
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.0')
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        for extra, reqs in pkg_resources.split_sections(requires):
            condition = ''
            if extra and ':' in extra:  # setuptools extra:condition syntax
                extra, condition = extra.split(':', 1)
            if extra:
                pkg_info['Provides-Extra'] = extra
                if condition:
                    condition += " and "
                condition += 'extra == %s' % repr(extra)
            if condition:
                condition = '; ' + condition
            for new_req in convert_requirements(reqs):
                pkg_info['Requires-Dist'] = new_req + condition

    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
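A runnable sketch of just the extra/condition translation above, mirroring how a setuptools section header such as [security:python_version < "3"] becomes a Requires-Dist environment marker:

extra, condition = 'security', 'python_version < "3"'
if condition:
    condition += " and "
condition += 'extra == %s' % repr(extra)
print('; ' + condition)
# -> ; python_version < "3" and extra == 'security'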
def set_possible(self):
    ''' break up a module path into its various parts (prefix, module,
    class, method)

    this uses PEP 8 conventions, so foo.Bar would be foo module with class Bar

    return -- list -- a list of possible interpretations of the module path
        (eg, foo.bar can be bar module in foo module, or bar method in foo
        module)
    '''
    possible = []
    name = self.name
    logger.debug('Guessing test name: {}'.format(name))

    name_f = self.name.lower()
    filepath = ""
    if name_f.endswith(".py") or ".py:" in name_f:
        # path/something:Class.method
        bits = name.split(":", 1)
        filepath = bits[0]
        logger.debug('Found filepath: {}'.format(filepath))

        name = bits[1] if len(bits) > 1 else ""
        if name:
            logger.debug('Found test name: {} for filepath: {}'.format(name, filepath))

    bits = name.split('.')
    basedir = self.basedir
    method_prefix = self.method_prefix

    # check if the last bit is a Class
    if re.search(r'^\*?[A-Z]', bits[-1]):
        logger.debug('Found class in name: {}'.format(bits[-1]))
        possible.append(PathFinder(basedir, method_prefix, **{
            'class_name': bits[-1],
            'module_name': bits[-2] if len(bits) > 1 else '',
            'prefix': os.sep.join(bits[0:-2]),
            'filepath': filepath,
        }))
    elif len(bits) > 1 and re.search(r'^\*?[A-Z]', bits[-2]):
        logger.debug('Found class in name: {}'.format(bits[-2]))
        possible.append(PathFinder(basedir, method_prefix, **{
            'class_name': bits[-2],
            'method_name': bits[-1],
            'module_name': bits[-3] if len(bits) > 2 else '',
            'prefix': os.sep.join(bits[0:-3]),
            'filepath': filepath,
        }))
    else:
        if self.name:
            if filepath:
                if len(bits):
                    possible.append(PathFinder(basedir, method_prefix, **{
                        'filepath': filepath,
                        'method_name': bits[0],
                    }))
                else:
                    possible.append(PathFinder(basedir, method_prefix, **{
                        'filepath': filepath,
                    }))
            else:
                logger.debug('Test name is ambiguous')
                possible.append(PathFinder(basedir, method_prefix, **{
                    'module_name': bits[-1],
                    'prefix': os.sep.join(bits[0:-1]),
                    'filepath': filepath,
                }))
                possible.append(PathFinder(basedir, method_prefix, **{
                    'method_name': bits[-1],
                    'module_name': bits[-2] if len(bits) > 1 else '',
                    'prefix': os.sep.join(bits[0:-2]),
                    'filepath': filepath,
                }))
                possible.append(PathFinder(basedir, method_prefix, **{
                    'prefix': os.sep.join(bits),
                    'filepath': filepath,
                }))
        else:
            possible.append(PathFinder(basedir, method_prefix, filepath=filepath))

    logger.debug("Found {} possible test names".format(len(possible)))
    self.possible = possible
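A runnable sketch of the class-detection rule used above: a dotted bit starting with an optional "*" followed by an uppercase letter is treated as a class name (PEP 8 naming):

import re

for bit in ('foo', 'BarTest', '*BarTest', 'test_baz'):
    print(bit, bool(re.search(r'^\*?[A-Z]', bit)))
# -> only 'BarTest' and '*BarTest' are treated as class names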
def modules(self):
    """return modules that match module_name"""
    # since the module has to be importable we go ahead and put the
    # basepath as the very first path to check as that should minimize
    # namespace collisions, this is what unittest does also
    sys.path.insert(0, self.basedir)

    for p in self.paths():
        # http://stackoverflow.com/questions/67631/
        try:
            module_name = self.module_path(p)
            logger.debug("Importing {} from path {}".format(module_name, p))
            m = importlib.import_module(module_name)
            yield m

        except Exception as e:
            logger.warning('Caught exception while importing {}: {}'.format(p, e))
            logger.warning(e, exc_info=True)
            error_info = getattr(self, 'error_info', None)
            if not error_info:
                # remember the first failure so callers can re-raise it later
                self.error_info = sys.exc_info()
            continue

    sys.path.pop(0)
def classes(self):
    """the partial self.class_name will be used to find actual TestCase classes"""
    for module in self.modules():
        cs = inspect.getmembers(module, inspect.isclass)
        class_name = getattr(self, 'class_name', '')
        class_regex = ''
        if class_name:
            if class_name.startswith("*"):
                class_name = class_name.strip("*")
                class_regex = re.compile(r'.*?{}'.format(class_name), re.I)
            else:
                class_regex = re.compile(r'^{}'.format(class_name), re.I)

        for c_name, c in cs:
            can_yield = True
            if class_regex and not class_regex.match(c_name):
                can_yield = False

            if can_yield and issubclass(c, unittest.TestCase):
                if c is not unittest.TestCase:  # ignore the actual TestCase class
                    logger.debug('class: {} matches {}'.format(c_name, class_name))
                    yield c
def method_names(self):
    """return the actual test methods that matched self.method_name"""
    for c in self.classes():
        # getmembers() with ismethod alone misses plain functions on
        # Python 3: http://stackoverflow.com/questions/17019949/
        ms = inspect.getmembers(c, lambda f: inspect.ismethod(f) or inspect.isfunction(f))
        method_name = getattr(self, 'method_name', '')
        method_regex = ''
        if method_name:
            if method_name.startswith(self.method_prefix):
                method_regex = re.compile(r'^{}'.format(method_name), flags=re.I)
            else:
                if method_name.startswith("*"):
                    method_name = method_name.strip("*")
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}.*?{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )
                else:
                    method_regex = re.compile(
                        r'^{}[_]{{0,1}}{}'.format(self.method_prefix, method_name),
                        flags=re.I
                    )

        for m_name, m in ms:
            if not m_name.startswith(self.method_prefix):
                continue

            can_yield = True
            if method_regex and not method_regex.match(m_name):
                can_yield = False

            if can_yield:
                logger.debug('method: {} matches {}'.format(m_name, method_name))
                yield c, m_name
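A runnable sketch of the non-glob matching rule, assuming method_prefix == 'test' and a partial name 'upload':

import re

method_prefix, partial = 'test', 'upload'
method_regex = re.compile(r'^{}[_]{{0,1}}{}'.format(method_prefix, partial), flags=re.I)
for name in ('test_upload', 'test_upload_large', 'testupload', 'test_download'):
    print(name, bool(method_regex.match(name)))
# -> everything except 'test_download' matches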
def _find_basename(self, name, basenames, is_prefix=False):
    """check if name combined with test prefixes or postfixes is found
    anywhere in the list of basenames

    :param name: string, the name you're searching for
    :param basenames: list, a list of basenames to check
    :param is_prefix: bool, True if this is a prefix search, which means it
        will also check if name matches any of the basenames without the
        prefixes or postfixes, if it is False then the prefixes or postfixes
        must be present (ie, the module we're looking for is the actual test
        module, not the parent modules it's contained in)
    :returns: string, the basename if it is found
    """
    ret = ""
    fileroots = [(os.path.splitext(n)[0], n) for n in basenames]
    glob = False
    if name.startswith("*"):
        glob = True
        name = name.strip("*")

    for fileroot, basename in fileroots:
        if name in fileroot or fileroot in name:
            for pf in self.module_postfixes:
                logger.debug(
                    'Checking if basename {} starts with {} and ends with {}'.format(
                        basename, name, pf
                    ))
                if glob:
                    if name in fileroot and fileroot.endswith(pf):
                        ret = basename
                        break
                else:
                    if fileroot.startswith(name) and fileroot.endswith(pf):
                        ret = basename
                        break

            if not ret:
                for pf in self.module_prefixes:
                    n = pf + name
                    logger.debug('Checking if basename {} starts with {}'.format(basename, n))
                    if glob:
                        if fileroot.startswith(pf) and name in fileroot:
                            ret = basename
                            break
                    else:
                        if fileroot.startswith(n):
                            ret = basename
                            break

            if not ret:
                if is_prefix:
                    logger.debug('Checking if basename {} starts with {}'.format(basename, name))
                    if basename.startswith(name) or (glob and name in basename):
                        ret = basename
                else:
                    logger.debug(
                        'Checking if basename {} starts with {} and is a test module'.format(
                            basename, name
                        ))
                    if glob:
                        if name in basename and self._is_module_path(basename):
                            ret = basename
                    else:
                        if basename.startswith(name) and self._is_module_path(basename):
                            ret = basename

            if ret:
                logger.debug('Found basename {}'.format(ret))
                break

    return ret
def _find_prefix_path(self, basedir, prefix):
    """Similar to _find_prefix_paths() but only returns the first match"""
    ret = ""
    for ret in self._find_prefix_paths(basedir, prefix):
        break

    if not ret:
        raise IOError("Could not find prefix {} in path {}".format(prefix, basedir))

    return ret
def _is_module_path(self, path):
    """Returns true if the passed in path is a test module path

    :param path: string, the path to check, will need to start or end with
        the module test prefixes or postfixes to be considered valid
    :returns: boolean, True if a test module path, False otherwise
    """
    ret = False
    basename = os.path.basename(path)
    fileroot = os.path.splitext(basename)[0]
    for pf in self.module_postfixes:
        if fileroot.endswith(pf):
            ret = True
            break

    if not ret:
        for pf in self.module_prefixes:
            if fileroot.startswith(pf):
                ret = True
                break

    return ret