Dataset schema (column name, dtype, observed range / cardinality):

id                     int64     0 to 458k
file_name              string    length 4 to 119
file_path              string    length 14 to 227
content                string    length 24 to 9.96M
size                   int64     24 to 9.96M
language               string    1 distinct value
extension              string    14 distinct values
total_lines            int64     1 to 219k
avg_line_length        float64   2.52 to 4.63M
max_line_length        int64     5 to 9.91M
alphanum_fraction      float64   0 to 1
repo_name              string    length 7 to 101
repo_stars             int64     100 to 139k
repo_forks             int64     0 to 26.4k
repo_open_issues       int64     0 to 2.27k
repo_license           string    12 distinct values
repo_extraction_date   string    433 distinct values
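For orientation, a minimal sketch of how rows matching this schema could be consumed. It assumes the table has been exported as JSON Lines with one row per line; the file name dataset.jsonl and the helper names are illustrative, not part of the dataset.

```python
import json

def iter_rows(path):
    """Yield one dict per row from a JSON Lines export of the table above."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

def python_rows(rows, min_stars=100):
    """Keep Python files from repositories with at least `min_stars` stars."""
    for row in rows:
        if row["language"] == "Python" and row["repo_stars"] >= min_stars:
            yield row

if __name__ == "__main__":
    # Hypothetical export path; adjust to wherever the rows actually live.
    for row in python_rows(iter_rows("dataset.jsonl")):
        print(row["file_path"], row["total_lines"], row["repo_license"])
```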
id: 7,500 · file_name: __init__.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/__init__.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2011 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals __version__ = '0.6.2' __all__ = ['Guess', 'Language', 'guess_file_info', 'guess_video_info', 'guess_movie_info', 'guess_episode_info'] # Do python3 detection before importing any other module, to be sure that # it will then always be available # with code from http://lucumr.pocoo.org/2011/1/22/forwards-compatible-python/ import sys if sys.version_info[0] >= 3: PY3 = True unicode_text_type = str native_text_type = str base_text_type = str def u(x): return str(x) def s(x): return x class UnicodeMixin(object): __str__ = lambda x: x.__unicode__() import binascii def to_hex(x): return binascii.hexlify(x).decode('utf-8') else: PY3 = False __all__ = [ str(s) for s in __all__ ] # fix imports for python2 unicode_text_type = unicode native_text_type = str base_text_type = basestring def u(x): if isinstance(x, str): return x.decode('utf-8') return unicode(x) def s(x): if isinstance(x, unicode): return x.encode('utf-8') if isinstance(x, list): return [ s(y) for y in x ] if isinstance(x, tuple): return tuple(s(y) for y in x) if isinstance(x, dict): return dict((s(key), s(value)) for key, value in x.items()) return x class UnicodeMixin(object): __str__ = lambda x: unicode(x).encode('utf-8') def to_hex(x): return x.encode('hex') from guessit.guess import Guess, merge_all from guessit.language import Language from guessit.matcher import IterativeMatcher from guessit.textutils import clean_string import logging import json log = logging.getLogger(__name__) class NullHandler(logging.Handler): def emit(self, record): pass # let's be a nicely behaving library h = NullHandler() log.addHandler(h) def _guess_filename(filename, filetype): def find_nodes(tree, props): """Yields all nodes containing any of the given props.""" if isinstance(props, base_text_type): props = [props] for node in tree.nodes(): if any(prop in node.guess for prop in props): yield node def warning(title): log.warning('%s, guesses: %s - %s' % (title, m.nice_string(), m2.nice_string())) return m mtree = IterativeMatcher(filename, filetype=filetype) m = mtree.matched() second_pass_opts = [] second_pass_transfo_opts = {} # if there are multiple possible years found, we assume the first one is # part of the title, reparse the tree taking this into account years = set(n.value for n in find_nodes(mtree.match_tree, 'year')) if len(years) >= 2: second_pass_opts.append('skip_first_year') to_skip_language_nodes = [] title_nodes = set(n for n in find_nodes(mtree.match_tree, ['title', 'series'])) title_spans = {} for title_node in title_nodes: title_spans[title_node.span[0]] = title_node title_spans[title_node.span[1]] = title_node for lang_key in ('language', 'subtitleLanguage'): langs = {} lang_nodes = set(n for n in 
find_nodes(mtree.match_tree, lang_key)) for lang_node in lang_nodes: lang = lang_node.guess.get(lang_key, None) if len(lang_node.value) > 3 and (lang_node.span[0] in title_spans.keys() or lang_node.span[1] in title_spans.keys()): # Language is next or before title, and is not a language code. Add to skip for 2nd pass. # if filetype is subtitle and the language appears last, just before # the extension, then it is likely a subtitle language parts = clean_string(lang_node.root.value).split() if m['type'] in ['moviesubtitle', 'episodesubtitle'] and (parts.index(lang_node.value) == len(parts) - 2): continue to_skip_language_nodes.append(lang_node) elif not lang in langs: langs[lang] = lang_node else: # The same language was found. Keep the more confident one, and add others to skip for 2nd pass. existing_lang_node = langs[lang] to_skip = None if existing_lang_node.guess.confidence('language') >= lang_node.guess.confidence('language'): # lang_node is to remove to_skip = lang_node else: # existing_lang_node is to remove langs[lang] = lang_node to_skip = existing_lang_node to_skip_language_nodes.append(to_skip) if to_skip_language_nodes: second_pass_transfo_opts['guess_language'] = ( ((), { 'skip': [ { 'node_idx': node.parent.node_idx, 'span': node.span } for node in to_skip_language_nodes ] })) if second_pass_opts or second_pass_transfo_opts: # 2nd pass is needed log.info("Running 2nd pass with options: %s" % second_pass_opts) log.info("Transfo options: %s" % second_pass_transfo_opts) mtree = IterativeMatcher(filename, filetype=filetype, opts=second_pass_opts, transfo_opts=second_pass_transfo_opts) m = mtree.matched() if 'language' not in m and 'subtitleLanguage' not in m or 'title' not in m: return m # if we found some language, make sure we didn't cut a title or sth... mtree2 = IterativeMatcher(filename, filetype=filetype, opts=['nolanguage', 'nocountry']) m2 = mtree2.matched() if m.get('title') != m2.get('title'): title = next(find_nodes(mtree.match_tree, 'title')) title2 = next(find_nodes(mtree2.match_tree, 'title')) # if a node is in an explicit group, then the correct title is probably # the other one if title.root.node_at(title.node_idx[:2]).is_explicit(): return m2 elif title2.root.node_at(title2.node_idx[:2]).is_explicit(): return m return m def guess_file_info(filename, filetype, info=None): """info can contain the names of the various plugins, such as 'filename' to detect filename info, or 'hash_md5' to get the md5 hash of the file. 
>>> guess_file_info('tests/dummy.srt', 'autodetect', info = ['hash_md5', 'hash_sha1']) {'hash_md5': 'e781de9b94ba2753a8e2945b2c0a123d', 'hash_sha1': 'bfd18e2f4e5d59775c2bc14d80f56971891ed620'} """ result = [] hashers = [] # Force unicode as soon as possible filename = u(filename) if info is None: info = ['filename'] if isinstance(info, base_text_type): info = [info] for infotype in info: if infotype == 'filename': result.append(_guess_filename(filename, filetype)) elif infotype == 'hash_mpc': from guessit.hash_mpc import hash_file try: result.append(Guess({'hash_mpc': hash_file(filename)}, confidence=1.0)) except Exception as e: log.warning('Could not compute MPC-style hash because: %s' % e) elif infotype == 'hash_ed2k': from guessit.hash_ed2k import hash_file try: result.append(Guess({'hash_ed2k': hash_file(filename)}, confidence=1.0)) except Exception as e: log.warning('Could not compute ed2k hash because: %s' % e) elif infotype.startswith('hash_'): import hashlib hashname = infotype[5:] try: hasher = getattr(hashlib, hashname)() hashers.append((infotype, hasher)) except AttributeError: log.warning('Could not compute %s hash because it is not available from python\'s hashlib module' % hashname) else: log.warning('Invalid infotype: %s' % infotype) # do all the hashes now, but on a single pass if hashers: try: blocksize = 8192 hasherobjs = dict(hashers).values() with open(filename, 'rb') as f: chunk = f.read(blocksize) while chunk: for hasher in hasherobjs: hasher.update(chunk) chunk = f.read(blocksize) for infotype, hasher in hashers: result.append(Guess({infotype: hasher.hexdigest()}, confidence=1.0)) except Exception as e: log.warning('Could not compute hash because: %s' % e) result = merge_all(result) # last minute adjustments # if country is in the guessed properties, make it part of the filename if 'series' in result and 'country' in result: result['series'] += ' (%s)' % result['country'].alpha2.upper() return result def guess_video_info(filename, info=None): return guess_file_info(filename, 'autodetect', info) def guess_movie_info(filename, info=None): return guess_file_info(filename, 'movie', info) def guess_episode_info(filename, info=None): return guess_file_info(filename, 'episode', info)
size: 10,198 · language: Python · extension: .py · total_lines: 230 · avg_line_length: 34.986957 · max_line_length: 129 · alphanum_fraction: 0.610354
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,501 · file_name: fileutils.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/fileutils.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2011 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import s, u import os.path import zipfile import io import re def split_path(path): r"""Splits the given path into the list of folders and the filename (or the last folder if you gave it a folder path. If the given path was an absolute path, the first element will always be: - the '/' root folder on Unix systems - the drive letter on Windows systems (eg: r'C:\') - the mount point '\\' on Windows systems (eg: r'\\host\share') >>> s(split_path('/usr/bin/smewt')) ['/', 'usr', 'bin', 'smewt'] >>> s(split_path('relative_path/to/my_folder/')) ['relative_path', 'to', 'my_folder'] """ result = [] while True: head, tail = os.path.split(path) headlen = len(head) # if a string has a : in position 1 it gets splitted in everycase, also if # there is not a valid drive letter and also if : is not followed by \ if headlen >= 2 and headlen <= 3 and head[1] == ':' and ( head + tail == path ) and ( head[1:] != ':\\' or not re.match("^[a-zA-Z]:\\\\", head) ): tail = path head = '' headlen = 0 # on Unix systems, the root folder is '/' if head and head == '/'*headlen and tail == '': return ['/'] + result # on Windows, the root folder is a drive letter (eg: 'C:\') or for shares \\ if ((headlen == 3 and head[1:] == ':\\') or (headlen == 2 and head == '\\\\')) and tail == '': return [head] + result if head == '' and tail == '': return result # we just split a directory ending with '/', so tail is empty if not tail: path = head continue # otherwise, add the last path fragment and keep splitting result = [tail] + result path = head def file_in_same_dir(ref_file, desired_file): """Return the path for a file in the same dir as a given reference file. >>> s(file_in_same_dir('~/smewt/smewt.db', 'smewt.settings')) '~/smewt/smewt.settings' """ return os.path.join(*(split_path(ref_file)[:-1] + [desired_file])) def load_file_in_same_dir(ref_file, filename): """Load a given file. Works even when the file is contained inside a zip.""" from couchpotato.core.helpers.encoding import toUnicode path = split_path(toUnicode(ref_file))[:-1] + [filename] for i, p in enumerate(path): if p.endswith('.zip'): zfilename = os.path.join(*path[:i + 1]) zfile = zipfile.ZipFile(zfilename) return zfile.read('/'.join(path[i + 1:])) return u(io.open(os.path.join(*path), encoding='utf-8').read())
size: 3,548 · language: Python · extension: .py · total_lines: 78 · avg_line_length: 39.294872 · max_line_length: 154 · alphanum_fraction: 0.628779
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,502 · file_name: hash_mpc.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/hash_mpc.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2011 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals import struct import os def hash_file(filename): """This function is taken from: http://trac.opensubtitles.org/projects/opensubtitles/wiki/HashSourceCodes and is licensed under the GPL.""" longlongformat = 'q' # long long bytesize = struct.calcsize(longlongformat) f = open(filename, "rb") filesize = os.path.getsize(filename) hash_value = filesize if filesize < 65536 * 2: raise Exception("SizeError: size is %d, should be > 132K..." % filesize) for x in range(65536 / bytesize): buf = f.read(bytesize) (l_value,) = struct.unpack(longlongformat, buf) hash_value += l_value hash_value = hash_value & 0xFFFFFFFFFFFFFFFF #to remain as 64bit number f.seek(max(0, filesize - 65536), 0) for x in range(65536 / bytesize): buf = f.read(bytesize) (l_value,) = struct.unpack(longlongformat, buf) hash_value += l_value hash_value = hash_value & 0xFFFFFFFFFFFFFFFF f.close() return "%016x" % hash_value
size: 1,884 · language: Python · extension: .py · total_lines: 46 · avg_line_length: 36.934783 · max_line_length: 80 · alphanum_fraction: 0.710454
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,503 · file_name: country.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/country.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import UnicodeMixin, base_text_type, u from guessit.fileutils import load_file_in_same_dir import logging __all__ = [ 'Country' ] log = logging.getLogger(__name__) # parsed from http://en.wikipedia.org/wiki/ISO_3166-1 # # Description of the fields: # "An English name, an alpha-2 code (when given), # an alpha-3 code (when given), a numeric code, and an ISO 31666-2 code # are all separated by pipe (|) characters." _iso3166_contents = load_file_in_same_dir(__file__, 'ISO-3166-1_utf8.txt') country_matrix = [ l.strip().split('|') for l in _iso3166_contents.strip().split('\n') ] country_matrix += [ [ 'Unknown', 'un', 'unk', '', '' ], [ 'Latin America', '', 'lat', '', '' ] ] country_to_alpha3 = dict((c[0].lower(), c[2].lower()) for c in country_matrix) country_to_alpha3.update(dict((c[1].lower(), c[2].lower()) for c in country_matrix)) country_to_alpha3.update(dict((c[2].lower(), c[2].lower()) for c in country_matrix)) # add here exceptions / non ISO representations # Note: remember to put those exceptions in lower-case, they won't work otherwise country_to_alpha3.update({ 'latinoamérica': 'lat', 'brazilian': 'bra', 'españa': 'esp', 'uk': 'gbr' }) country_alpha3_to_en_name = dict((c[2].lower(), c[0]) for c in country_matrix) country_alpha3_to_alpha2 = dict((c[2].lower(), c[1].lower()) for c in country_matrix) class Country(UnicodeMixin): """This class represents a country. You can initialize it with pretty much anything, as it knows conversion from ISO-3166 2-letter and 3-letter codes, and an English name. """ def __init__(self, country, strict=False): country = u(country.strip().lower()) self.alpha3 = country_to_alpha3.get(country) if self.alpha3 is None and strict: msg = 'The given string "%s" could not be identified as a country' raise ValueError(msg % country) if self.alpha3 is None: self.alpha3 = 'unk' @property def alpha2(self): return country_alpha3_to_alpha2[self.alpha3] @property def english_name(self): return country_alpha3_to_en_name[self.alpha3] def __hash__(self): return hash(self.alpha3) def __eq__(self, other): if isinstance(other, Country): return self.alpha3 == other.alpha3 if isinstance(other, base_text_type): try: return self == Country(other) except ValueError: return False return False def __ne__(self, other): return not self == other def __unicode__(self): return self.english_name def __repr__(self): return 'Country(%s)' % self.english_name
size: 3,723 · language: Python · extension: .py · total_lines: 85 · avg_line_length: 37.388235 · max_line_length: 85 · alphanum_fraction: 0.647271
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,504 · file_name: matcher.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/matcher.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import PY3, u, base_text_type from guessit.matchtree import MatchTree from guessit.textutils import normalize_unicode, clean_string import logging log = logging.getLogger(__name__) class IterativeMatcher(object): def __init__(self, filename, filetype='autodetect', opts=None, transfo_opts=None): """An iterative matcher tries to match different patterns that appear in the filename. The 'filetype' argument indicates which type of file you want to match. If it is 'autodetect', the matcher will try to see whether it can guess that the file corresponds to an episode, or otherwise will assume it is a movie. The recognized 'filetype' values are: [ autodetect, subtitle, info, movie, moviesubtitle, movieinfo, episode, episodesubtitle, episodeinfo ] The IterativeMatcher works mainly in 2 steps: First, it splits the filename into a match_tree, which is a tree of groups which have a semantic meaning, such as episode number, movie title, etc... The match_tree created looks like the following: 0000000000000000000000000000000000000000000000000000000000000000000000000000000000 111 0000011111111111112222222222222233333333444444444444444455555555666777777778888888 000 0000000000000000000000000000000001111112011112222333333401123334000011233340000000 000 __________________(The.Prestige).______.[____.HP.______.{__-___}.St{__-___}.Chaps].___ xxxxxttttttttttttt ffffff vvvv xxxxxx ll lll xx xxx ccc [XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv The first 3 lines indicates the group index in which a char in the filename is located. So for instance, x264 is the group (0, 4, 1), and it corresponds to a video codec, denoted by the letter'v' in the 4th line. (for more info, see guess.matchtree.to_string) Second, it tries to merge all this information into a single object containing all the found properties, and does some (basic) conflict resolution when they arise. When you create the Matcher, you can pass it: - a list 'opts' of option names, that act as global flags - a dict 'transfo_opts' of { transfo_name: (transfo_args, transfo_kwargs) } with which to call the transfo.process() function. """ valid_filetypes = ('autodetect', 'subtitle', 'info', 'video', 'movie', 'moviesubtitle', 'movieinfo', 'episode', 'episodesubtitle', 'episodeinfo') if filetype not in valid_filetypes: raise ValueError("filetype needs to be one of %s" % valid_filetypes) if not PY3 and not isinstance(filename, unicode): log.warning('Given filename to matcher is not unicode...') filename = filename.decode('utf-8') filename = normalize_unicode(filename) if opts is None: opts = [] if not isinstance(opts, list): raise ValueError('opts must be a list of option names! 
Received: type=%s val=%s', type(opts), opts) if transfo_opts is None: transfo_opts = {} if not isinstance(transfo_opts, dict): raise ValueError('transfo_opts must be a dict of { transfo_name: (args, kwargs) }. '+ 'Received: type=%s val=%s', type(transfo_opts), transfo_opts) self.match_tree = MatchTree(filename) # sanity check: make sure we don't process a (mostly) empty string if clean_string(filename) == '': return mtree = self.match_tree mtree.guess.set('type', filetype, confidence=1.0) def apply_transfo(transfo_name, *args, **kwargs): transfo = __import__('guessit.transfo.' + transfo_name, globals=globals(), locals=locals(), fromlist=['process'], level=0) default_args, default_kwargs = transfo_opts.get(transfo_name, ((), {})) all_args = args or default_args all_kwargs = dict(default_kwargs) all_kwargs.update(kwargs) # keep all kwargs merged together transfo.process(mtree, *all_args, **all_kwargs) # 1- first split our path into dirs + basename + ext apply_transfo('split_path_components') # 2- guess the file type now (will be useful later) apply_transfo('guess_filetype', filetype) if mtree.guess['type'] == 'unknown': return # 3- split each of those into explicit groups (separated by parentheses # or square brackets) apply_transfo('split_explicit_groups') # 4- try to match information for specific patterns # NOTE: order needs to comply to the following: # - website before language (eg: tvu.org.ru vs russian) # - language before episodes_rexps # - properties before language (eg: he-aac vs hebrew) # - release_group before properties (eg: XviD-?? vs xvid) if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'): strategy = [ 'guess_date', 'guess_website', 'guess_release_group', 'guess_properties', 'guess_language', 'guess_video_rexps', 'guess_episodes_rexps', 'guess_weak_episodes_rexps' ] else: strategy = [ 'guess_date', 'guess_website', 'guess_release_group', 'guess_properties', 'guess_language', 'guess_video_rexps' ] if 'nolanguage' in opts: strategy.remove('guess_language') for name in strategy: apply_transfo(name) # more guessers for both movies and episodes apply_transfo('guess_bonus_features') apply_transfo('guess_year', skip_first_year=('skip_first_year' in opts)) if 'nocountry' not in opts: apply_transfo('guess_country') apply_transfo('guess_idnumber') # split into '-' separated subgroups (with required separator chars # around the dash) apply_transfo('split_on_dash') # 5- try to identify the remaining unknown groups by looking at their # position relative to other known elements if mtree.guess['type'] in ('episode', 'episodesubtitle', 'episodeinfo'): apply_transfo('guess_episode_info_from_position') else: apply_transfo('guess_movie_title_from_position') # 6- perform some post-processing steps apply_transfo('post_process') log.debug('Found match tree:\n%s' % u(mtree)) def matched(self): return self.match_tree.matched()
size: 7,768 · language: Python · extension: .py · total_lines: 141 · avg_line_length: 45.163121 · max_line_length: 97 · alphanum_fraction: 0.639299
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,505 · file_name: guess_website.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_website.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.patterns import websites import logging log = logging.getLogger(__name__) def guess_website(string): low = string.lower() for site in websites: pos = low.find(site.lower()) if pos != -1: return {'website': site}, (pos, pos + len(site)) return None, None def process(mtree): SingleNodeGuesser(guess_website, 1.0, log).process(mtree)
size: 1,293 · language: Python · extension: .py · total_lines: 33 · avg_line_length: 36.666667 · max_line_length: 74 · alphanum_fraction: 0.744019
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,506 · file_name: guess_properties.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_properties.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.patterns import find_properties import logging log = logging.getLogger(__name__) def guess_properties(string): try: prop, value, pos, end = find_properties(string)[0] return { prop: value }, (pos, end) except IndexError: return None, None def process(mtree): SingleNodeGuesser(guess_properties, 1.0, log).process(mtree)
size: 1,273 · language: Python · extension: .py · total_lines: 32 · avg_line_length: 37.46875 · max_line_length: 74 · alphanum_fraction: 0.757085
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,507 · file_name: guess_episodes_rexps.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_episodes_rexps.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.patterns import episode_rexps import re import logging log = logging.getLogger(__name__) def number_list(s): l = [ int(n) for n in re.sub('[^0-9]+', ' ', s).split() ] if len(l) == 2: # it is an episode interval, return all numbers in between return range(l[0], l[1]+1) return l def guess_episodes_rexps(string): for rexp, confidence, span_adjust in episode_rexps: match = re.search(rexp, string, re.IGNORECASE) if match: span = (match.start() + span_adjust[0], match.end() + span_adjust[1]) guess = Guess(match.groupdict(), confidence=confidence, raw=string[span[0]:span[1]]) # decide whether we have only a single episode number or an # episode list if guess.get('episodeNumber'): eplist = number_list(guess['episodeNumber']) guess.set('episodeNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]]) if len(eplist) > 1: guess.set('episodeList', eplist, confidence=confidence, raw=string[span[0]:span[1]]) if guess.get('bonusNumber'): eplist = number_list(guess['bonusNumber']) guess.set('bonusNumber', eplist[0], confidence=confidence, raw=string[span[0]:span[1]]) return guess, span return None, None def process(mtree): SingleNodeGuesser(guess_episodes_rexps, None, log).process(mtree)
size: 2,416 · language: Python · extension: .py · total_lines: 53 · avg_line_length: 39.415094 · max_line_length: 105 · alphanum_fraction: 0.676596
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,508 · file_name: guess_language.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_language.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.language import search_language import logging log = logging.getLogger(__name__) def guess_language(string, node, skip=None): if skip: relative_skip = [] for entry in skip: node_idx = entry['node_idx'] span = entry['span'] if node_idx == node.node_idx[:len(node_idx)]: relative_span = (span[0] - node.offset + 1, span[1] - node.offset + 1) relative_skip.append(relative_span) skip = relative_skip language, span, confidence = search_language(string, skip=skip) if language: return (Guess({'language': language}, confidence=confidence, raw= string[span[0]:span[1]]), span) return None, None guess_language.use_node = True def process(mtree, *args, **kwargs): SingleNodeGuesser(guess_language, None, log, *args, **kwargs).process(mtree) # Note: 'language' is promoted to 'subtitleLanguage' in the post_process transfo
size: 1,946 · language: Python · extension: .py · total_lines: 46 · avg_line_length: 37.108696 · max_line_length: 86 · alphanum_fraction: 0.693813
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,509 · file_name: guess_date.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_date.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.date import search_date import logging log = logging.getLogger(__name__) def guess_date(string): date, span = search_date(string) if date: return { 'date': date }, span else: return None, None def process(mtree): SingleNodeGuesser(guess_date, 1.0, log).process(mtree)
size: 1,217 · language: Python · extension: .py · total_lines: 32 · avg_line_length: 35.84375 · max_line_length: 74 · alphanum_fraction: 0.755725
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,510 · file_name: split_explicit_groups.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/split_explicit_groups.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.textutils import find_first_level_groups from guessit.patterns import group_delimiters import functools import logging log = logging.getLogger(__name__) def process(mtree): """return the string split into explicit groups, that is, those either between parenthese, square brackets or curly braces, and those separated by a dash.""" for c in mtree.children: groups = find_first_level_groups(c.value, group_delimiters[0]) for delimiters in group_delimiters: flatten = lambda l, x: l + find_first_level_groups(x, delimiters) groups = functools.reduce(flatten, groups, []) # do not do this at this moment, it is not strong enough and can break other # patterns, such as dates, etc... #groups = functools.reduce(lambda l, x: l + x.split('-'), groups, []) c.split_on_components(groups)
size: 1,738 · language: Python · extension: .py · total_lines: 38 · avg_line_length: 42.263158 · max_line_length: 84 · alphanum_fraction: 0.731995
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,511 · file_name: guess_idnumber.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_idnumber.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2013 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.patterns import find_properties import re import logging log = logging.getLogger(__name__) def guess_properties(string): try: prop, value, pos, end = find_properties(string)[0] return { prop: value }, (pos, end) except IndexError: return None, None _idnum = re.compile(r'(?P<idNumber>[a-zA-Z0-9-]{10,})') # 1.0, (0, 0)) def guess_idnumber(string): match = _idnum.search(string) if match is not None: result = match.groupdict() switch_count = 0 DIGIT = 0 LETTER = 1 OTHER = 2 last = LETTER for c in result['idNumber']: if c in '0123456789': ci = DIGIT elif c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': ci = LETTER else: ci = OTHER if ci != last: switch_count += 1 last = ci switch_ratio = float(switch_count) / len(result['idNumber']) # only return the result as probable if we alternate often between # char type (more likely for hash values than for common words) if switch_ratio > 0.4: return result, match.span() return None, None def process(mtree): SingleNodeGuesser(guess_idnumber, 0.4, log).process(mtree)
size: 2,224 · language: Python · extension: .py · total_lines: 59 · avg_line_length: 31.881356 · max_line_length: 77 · alphanum_fraction: 0.669299
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,512 · file_name: split_path_components.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/split_path_components.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import fileutils import os.path import logging log = logging.getLogger(__name__) def process(mtree): """Returns the filename split into [ dir*, basename, ext ].""" components = fileutils.split_path(mtree.value) basename = components.pop(-1) components += list(os.path.splitext(basename)) components[-1] = components[-1][1:] # remove the '.' from the extension mtree.split_on_components(components)
size: 1,292 · language: Python · extension: .py · total_lines: 31 · avg_line_length: 39.741935 · max_line_length: 75 · alphanum_fraction: 0.753981
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,513 · file_name: guess_video_rexps.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_video_rexps.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.patterns import video_rexps, sep import re import logging log = logging.getLogger(__name__) def guess_video_rexps(string): string = '-' + string + '-' for rexp, confidence, span_adjust in video_rexps: match = re.search(sep + rexp + sep, string, re.IGNORECASE) if match: metadata = match.groupdict() # is this the better place to put it? (maybe, as it is at least # the soonest that we can catch it) if metadata.get('cdNumberTotal', -1) is None: del metadata['cdNumberTotal'] span = (match.start() + span_adjust[0], match.end() + span_adjust[1] - 2) return (Guess(metadata, confidence=confidence, raw=string[span[0]:span[1]]), span) return None, None def process(mtree): SingleNodeGuesser(guess_video_rexps, None, log).process(mtree)
size: 1,837 · language: Python · extension: .py · total_lines: 43 · avg_line_length: 37.837209 · max_line_length: 88 · alphanum_fraction: 0.697258
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,514 · file_name: guess_release_group.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_release_group.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import SingleNodeGuesser from guessit.patterns import prop_multi, compute_canonical_form, _dash, _psep import re import logging log = logging.getLogger(__name__) def get_patterns(property_name): return [ p.replace(_dash, _psep) for patterns in prop_multi[property_name].values() for p in patterns ] CODECS = get_patterns('videoCodec') FORMATS = get_patterns('format') VAPIS = get_patterns('videoApi') # RG names following a codec or format, with a potential space or dash inside the name GROUP_NAMES = [ r'(?P<videoCodec>' + codec + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]' for codec in CODECS ] GROUP_NAMES += [ r'(?P<format>' + fmt + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]' for fmt in FORMATS ] GROUP_NAMES += [ r'(?P<videoApi>' + api + r')[ \.-](?P<releaseGroup>.+?([- \.].*?)??)[ \.]' for api in VAPIS ] GROUP_NAMES2 = [ r'\.(?P<videoCodec>' + codec + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]' for codec in CODECS ] GROUP_NAMES2 += [ r'\.(?P<format>' + fmt + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]' for fmt in FORMATS ] GROUP_NAMES2 += [ r'\.(?P<videoApi>' + vapi + r')-(?P<releaseGroup>.*?)(-(.*?))?[ \.]' for vapi in VAPIS ] GROUP_NAMES = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES ] GROUP_NAMES2 = [ re.compile(r, re.IGNORECASE) for r in GROUP_NAMES2 ] def adjust_metadata(md): return dict((property_name, compute_canonical_form(property_name, value) or value) for property_name, value in md.items()) def guess_release_group(string): # first try to see whether we have both a known codec and a known release group for rexp in GROUP_NAMES: match = rexp.search(string) while match: metadata = match.groupdict() # make sure this is an actual release group we caught release_group = (compute_canonical_form('releaseGroup', metadata['releaseGroup']) or compute_canonical_form('weakReleaseGroup', metadata['releaseGroup'])) if release_group: return adjust_metadata(metadata), (match.start(1), match.end(2)) # we didn't find anything conclusive, keep searching match = rexp.search(string, match.span()[0]+1) # pick anything as releaseGroup as long as we have a codec in front # this doesn't include a potential dash ('-') ending the release group # eg: [...].X264-HiS@SiLUHD-English.[...] for rexp in GROUP_NAMES2: match = rexp.search(string) if match: return adjust_metadata(match.groupdict()), (match.start(1), match.end(2)) return None, None def process(mtree): SingleNodeGuesser(guess_release_group, 0.8, log).process(mtree)
size: 3,681 · language: Python · extension: .py · total_lines: 71 · avg_line_length: 46.126761 · max_line_length: 108 · alphanum_fraction: 0.648957
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,515 · file_name: __init__.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/__init__.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import base_text_type, Guess from guessit.patterns import canonical_form from guessit.textutils import clean_string import logging log = logging.getLogger(__name__) def found_property(node, name, confidence): node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value) log.debug('Found with confidence %.2f: %s' % (confidence, node.guess)) def format_guess(guess): """Format all the found values to their natural type. For instance, a year would be stored as an int value, etc... Note that this modifies the dictionary given as input. """ for prop, value in guess.items(): if prop in ('season', 'episodeNumber', 'year', 'cdNumber', 'cdNumberTotal', 'bonusNumber', 'filmNumber'): guess[prop] = int(guess[prop]) elif isinstance(value, base_text_type): if prop in ('edition',): value = clean_string(value) guess[prop] = canonical_form(value).replace('\\', '') return guess def find_and_split_node(node, strategy, logger): string = ' %s ' % node.value # add sentinels for matcher, confidence, args, kwargs in strategy: all_args = [string] if getattr(matcher, 'use_node', False): all_args.append(node) if args: all_args.append(args) if kwargs: result, span = matcher(*all_args, **kwargs) else: result, span = matcher(*all_args) if result: # readjust span to compensate for sentinels span = (span[0] - 1, span[1] - 1) if isinstance(result, Guess): if confidence is None: confidence = result.confidence(list(result.keys())[0]) else: if confidence is None: confidence = 1.0 guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1])) msg = 'Found with confidence %.2f: %s' % (confidence, guess) (logger or log).debug(msg) node.partition(span) absolute_span = (span[0] + node.offset, span[1] + node.offset) for child in node.children: if child.span == absolute_span: child.guess = guess else: find_and_split_node(child, strategy, logger) return class SingleNodeGuesser(object): def __init__(self, guess_func, confidence, logger, *args, **kwargs): self.guess_func = guess_func self.confidence = confidence self.logger = logger self.args = args self.kwargs = kwargs def process(self, mtree): # strategy is a list of pairs (guesser, confidence) # - if the guesser returns a guessit.Guess and confidence is specified, # it will override it, otherwise it will leave the guess confidence # - if the guesser returns a simple dict as a guess and confidence is # specified, it will use it, or 1.0 otherwise strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ] for node in mtree.unidentified_leaves(): find_and_split_node(node, strategy, self.logger)
size: 4,117 · language: Python · extension: .py · total_lines: 90 · avg_line_length: 37.688889 · max_line_length: 107 · alphanum_fraction: 0.641717
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,516 · file_name: guess_weak_episodes_rexps.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_weak_episodes_rexps.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.transfo import SingleNodeGuesser from guessit.patterns import weak_episode_rexps import re import logging log = logging.getLogger(__name__) def guess_weak_episodes_rexps(string, node): if 'episodeNumber' in node.root.info: return None, None for rexp, span_adjust in weak_episode_rexps: match = re.search(rexp, string, re.IGNORECASE) if match: metadata = match.groupdict() span = (match.start() + span_adjust[0], match.end() + span_adjust[1]) epnum = int(metadata['episodeNumber']) if epnum > 100: season, epnum = epnum // 100, epnum % 100 # episodes which have a season > 25 are most likely errors # (Simpsons is at 23!) if season > 25: continue return Guess({ 'season': season, 'episodeNumber': epnum }, confidence=0.6, raw=string[span[0]:span[1]]), span else: return Guess(metadata, confidence=0.3, raw=string[span[0]:span[1]]), span return None, None guess_weak_episodes_rexps.use_node = True def process(mtree): SingleNodeGuesser(guess_weak_episodes_rexps, 0.6, log).process(mtree)
size: 2,184 · language: Python · extension: .py · total_lines: 51 · avg_line_length: 35.803922 · max_line_length: 89 · alphanum_fraction: 0.662111
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,517 · file_name: guess_filetype.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_filetype.py
content:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <wackou@gmail.com> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import Guess from guessit.patterns import (subtitle_exts, info_exts, video_exts, episode_rexps, find_properties, compute_canonical_form) from guessit.date import valid_year from guessit.textutils import clean_string import os.path import re import mimetypes import logging log = logging.getLogger(__name__) # List of well known movies and series, hardcoded because they cannot be # guessed appropriately otherwise MOVIES = [ 'OSS 117' ] SERIES = [ 'Band of Brothers' ] MOVIES = [ m.lower() for m in MOVIES ] SERIES = [ s.lower() for s in SERIES ] def guess_filetype(mtree, filetype): # put the filetype inside a dummy container to be able to have the # following functions work correctly as closures # this is a workaround for python 2 which doesn't have the # 'nonlocal' keyword (python 3 does have it) filetype_container = [filetype] other = {} filename = mtree.string def upgrade_episode(): if filetype_container[0] == 'video': filetype_container[0] = 'episode' elif filetype_container[0] == 'subtitle': filetype_container[0] = 'episodesubtitle' elif filetype_container[0] == 'info': filetype_container[0] = 'episodeinfo' def upgrade_movie(): if filetype_container[0] == 'video': filetype_container[0] = 'movie' elif filetype_container[0] == 'subtitle': filetype_container[0] = 'moviesubtitle' elif filetype_container[0] == 'info': filetype_container[0] = 'movieinfo' def upgrade_subtitle(): if 'movie' in filetype_container[0]: filetype_container[0] = 'moviesubtitle' elif 'episode' in filetype_container[0]: filetype_container[0] = 'episodesubtitle' else: filetype_container[0] = 'subtitle' def upgrade_info(): if 'movie' in filetype_container[0]: filetype_container[0] = 'movieinfo' elif 'episode' in filetype_container[0]: filetype_container[0] = 'episodeinfo' else: filetype_container[0] = 'info' def upgrade(type='unknown'): if filetype_container[0] == 'autodetect': filetype_container[0] = type # look at the extension first fileext = os.path.splitext(filename)[1][1:].lower() if fileext in subtitle_exts: upgrade_subtitle() other = { 'container': fileext } elif fileext in info_exts: upgrade_info() other = { 'container': fileext } elif fileext in video_exts: upgrade(type='video') other = { 'container': fileext } else: upgrade(type='unknown') other = { 'extension': fileext } # check whether we are in a 'Movies', 'Tv Shows', ... 
folder folder_rexps = [ (r'Movies?', upgrade_movie), (r'Tv[ _-]?Shows?', upgrade_episode), (r'Series', upgrade_episode) ] for frexp, upgrade_func in folder_rexps: frexp = re.compile(frexp, re.IGNORECASE) for pathgroup in mtree.children: if frexp.match(pathgroup.value): upgrade_func() # check for a few specific cases which will unintentionally make the # following heuristics confused (eg: OSS 117 will look like an episode, # season 1, epnum 17, when it is in fact a movie) fname = clean_string(filename).lower() for m in MOVIES: if m in fname: log.debug('Found in exception list of movies -> type = movie') upgrade_movie() for s in SERIES: if s in fname: log.debug('Found in exception list of series -> type = episode') upgrade_episode() # now look whether there are some specific hints for episode vs movie if filetype_container[0] in ('video', 'subtitle', 'info'): # if we have an episode_rexp (eg: s02e13), it is an episode for rexp, _, _ in episode_rexps: match = re.search(rexp, filename, re.IGNORECASE) if match: log.debug('Found matching regexp: "%s" (string = "%s") -> type = episode', rexp, match.group()) upgrade_episode() break # if we have a 3-4 digit number that's not a year, maybe an episode match = re.search(r'[^0-9]([0-9]{3,4})[^0-9]', filename) if match: fullnumber = int(match.group()[1:-1]) #season = fullnumber // 100 epnumber = fullnumber % 100 possible = True # check for validity if epnumber > 40: possible = False if valid_year(fullnumber): possible = False if possible: log.debug('Found possible episode number: %s (from string "%s") -> type = episode', epnumber, match.group()) upgrade_episode() # if we have certain properties characteristic of episodes, it is an ep for prop, value, _, _ in find_properties(filename): log.debug('prop: %s = %s' % (prop, value)) if prop == 'episodeFormat': log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() break elif compute_canonical_form('format', value) == 'DVB': log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() break # origin-specific type if 'tvu.org.ru' in filename: log.debug('Found characteristic property of episodes: %s = "%s"', prop, value) upgrade_episode() # if no episode info found, assume it's a movie log.debug('Nothing characteristic found, assuming type = movie') upgrade_movie() filetype = filetype_container[0] return filetype, other def process(mtree, filetype='autodetect'): filetype, other = guess_filetype(mtree, filetype) mtree.guess.set('type', filetype, confidence=1.0) log.debug('Found with confidence %.2f: %s' % (1.0, mtree.guess)) filetype_info = Guess(other, confidence=1.0) # guess the mimetype of the filename # TODO: handle other mimetypes not found on the default type_maps # mimetypes.types_map['.srt']='text/subtitle' mime, _ = mimetypes.guess_type(mtree.string, strict=False) if mime is not None: filetype_info.update({'mimetype': mime}, confidence=1.0) node_ext = mtree.node_at((-1,)) node_ext.guess = filetype_info log.debug('Found with confidence %.2f: %s' % (1.0, node_ext.guess))
size: 7,514 · language: Python · extension: .py · total_lines: 169 · avg_line_length: 36.248521 · max_line_length: 124 · alphanum_fraction: 0.629118
repo_name: CouchPotato/CouchPotatoServer · repo_stars: 3,869 · repo_forks: 1,214 · repo_open_issues: 1,266 · repo_license: GPL-3.0 · repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,518 · file_name: guess_episode_info_from_position.py · file_path: CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_episode_info_from_position.py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.transfo import found_property
from guessit.patterns import non_episode_title, unlikely_series
import logging

log = logging.getLogger(__name__)


def match_from_epnum_position(mtree, node):
    epnum_idx = node.node_idx

    # a few helper functions to be able to filter using high-level semantics
    def before_epnum_in_same_pathgroup():
        return [leaf for leaf in mtree.unidentified_leaves()
                if (leaf.node_idx[0] == epnum_idx[0] and
                    leaf.node_idx[1:] < epnum_idx[1:])]

    def after_epnum_in_same_pathgroup():
        return [leaf for leaf in mtree.unidentified_leaves()
                if (leaf.node_idx[0] == epnum_idx[0] and
                    leaf.node_idx[1:] > epnum_idx[1:])]

    def after_epnum_in_same_explicitgroup():
        return [leaf for leaf in mtree.unidentified_leaves()
                if (leaf.node_idx[:2] == epnum_idx[:2] and
                    leaf.node_idx[2:] > epnum_idx[2:])]

    # epnumber is the first group and there are only 2 after it in same
    # path group
    # -> series title - episode title
    title_candidates = [n for n in after_epnum_in_same_pathgroup()
                        if n.clean_value.lower() not in non_episode_title]
    if ('title' not in mtree.info and               # no title
        before_epnum_in_same_pathgroup() == [] and  # no groups before
        len(title_candidates) == 2):                # only 2 groups after

        found_property(title_candidates[0], 'series', confidence=0.4)
        found_property(title_candidates[1], 'title', confidence=0.4)
        return

    # if we have at least 1 valid group before the episodeNumber, then it's
    # probably the series name
    series_candidates = before_epnum_in_same_pathgroup()
    if len(series_candidates) >= 1:
        found_property(series_candidates[0], 'series', confidence=0.7)

    # only 1 group after (in the same path group) and it's probably the
    # episode title
    title_candidates = [n for n in after_epnum_in_same_pathgroup()
                        if n.clean_value.lower() not in non_episode_title]
    if len(title_candidates) == 1:
        found_property(title_candidates[0], 'title', confidence=0.5)
        return
    else:
        # try in the same explicit group, with lower confidence
        title_candidates = [n for n in after_epnum_in_same_explicitgroup()
                            if n.clean_value.lower() not in non_episode_title]
        if len(title_candidates) == 1:
            found_property(title_candidates[0], 'title', confidence=0.4)
            return
        elif len(title_candidates) > 1:
            found_property(title_candidates[0], 'title', confidence=0.3)
            return

    # get the one with the longest value
    title_candidates = [n for n in after_epnum_in_same_pathgroup()
                        if n.clean_value.lower() not in non_episode_title]
    if title_candidates:
        maxidx = -1
        maxv = -1
        for i, c in enumerate(title_candidates):
            if len(c.clean_value) > maxv:
                maxidx = i
                maxv = len(c.clean_value)
        found_property(title_candidates[maxidx], 'title', confidence=0.3)


def process(mtree):
    eps = [node for node in mtree.leaves() if 'episodeNumber' in node.guess]
    if eps:
        match_from_epnum_position(mtree, eps[0])

    else:
        # if we don't have the episode number, but at least 2 groups in the
        # basename, then it's probably series - eptitle
        basename = mtree.node_at((-2,))
        title_candidates = [n for n in basename.unidentified_leaves()
                            if n.clean_value.lower() not in non_episode_title]

        if len(title_candidates) >= 2:
            found_property(title_candidates[0], 'series', 0.4)
            found_property(title_candidates[1], 'title', 0.4)
        elif len(title_candidates) == 1:
            # but if there's only one candidate, it's probably the series name
            found_property(title_candidates[0], 'series', 0.4)

    # if we only have 1 remaining valid group in the folder containing the
    # file, then it's likely that it is the series name
    try:
        series_candidates = mtree.node_at((-3,)).unidentified_leaves()
    except ValueError:
        series_candidates = []

    if len(series_candidates) == 1:
        found_property(series_candidates[0], 'series', 0.3)

    # if there's a path group that only contains the season info, then the
    # previous one is most likely the series title (ie: ../series/season X/..)
    eps = [node for node in mtree.nodes()
           if 'season' in node.guess and 'episodeNumber' not in node.guess]
    if eps:
        previous = [node for node in mtree.unidentified_leaves()
                    if node.node_idx[0] == eps[0].node_idx[0] - 1]
        if len(previous) == 1:
            found_property(previous[0], 'series', 0.5)

    # reduce the confidence of unlikely series
    for node in mtree.nodes():
        if 'series' in node.guess:
            if node.guess['series'].lower() in unlikely_series:
                new_confidence = node.guess.confidence('series') * 0.5
                node.guess.set_confidence('series', new_confidence)
6,168
Python
.py
124
41.032258
78
0.632182
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
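Annotation (not part of the dataset record above): the before/after filters in this transformer rely on plain Python tuple ordering of node_idx values, so the "same path group, earlier/later position" tests can be illustrated with bare tuples. A minimal self-contained sketch; the index values are hypothetical stand-ins for guessit match-tree nodes:

# Hypothetical node_idx values; the real ones come from the match tree.
epnum_idx = (2, 1, 0)                  # pretend the episode number lives here
leaves = [(2, 0, 0), (2, 2, 0), (3, 0, 0)]

before = [idx for idx in leaves
          if idx[0] == epnum_idx[0] and idx[1:] < epnum_idx[1:]]
after = [idx for idx in leaves
         if idx[0] == epnum_idx[0] and idx[1:] > epnum_idx[1:]]

assert before == [(2, 0, 0)]           # same path group, earlier position
assert after == [(2, 2, 0)]            # same path group, later position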
7,519
guess_bonus_features.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_bonus_features.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.transfo import found_property
import logging

log = logging.getLogger(__name__)


def process(mtree):
    def previous_group(g):
        for leaf in mtree.unidentified_leaves()[::-1]:
            if leaf.node_idx < g.node_idx:
                return leaf

    def next_group(g):
        for leaf in mtree.unidentified_leaves():
            if leaf.node_idx > g.node_idx:
                return leaf

    def same_group(g1, g2):
        return g1.node_idx[:2] == g2.node_idx[:2]

    bonus = [node for node in mtree.leaves() if 'bonusNumber' in node.guess]
    if bonus:
        bonusTitle = next_group(bonus[0])
        if same_group(bonusTitle, bonus[0]):
            found_property(bonusTitle, 'bonusTitle', 0.8)

    filmNumber = [node for node in mtree.leaves()
                  if 'filmNumber' in node.guess]
    if filmNumber:
        filmSeries = previous_group(filmNumber[0])
        found_property(filmSeries, 'filmSeries', 0.9)

        title = next_group(filmNumber[0])
        found_property(title, 'title', 0.9)

    season = [node for node in mtree.leaves() if 'season' in node.guess]
    if season and 'bonusNumber' in mtree.info:
        series = previous_group(season[0])
        if same_group(series, season[0]):
            found_property(series, 'series', 0.9)
2,155
Python
.py
51
36.686275
78
0.679083
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,520
split_on_dash.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/split_on_dash.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.patterns import sep
import re
import logging

log = logging.getLogger(__name__)


def process(mtree):
    for node in mtree.unidentified_leaves():
        # collect the start/end offsets of every "sep-dash-sep" occurrence,
        # then partition the leaf at those offsets
        indices = []

        pattern = re.compile(sep + '-' + sep)
        match = pattern.search(node.value)
        while match:
            span = match.span()
            indices.extend([span[0], span[1]])
            match = pattern.search(node.value, span[1])

        if indices:
            node.partition(indices)
1,356
Python
.py
36
33.722222
74
0.70624
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
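Annotation (not part of the dataset record above): the index-collection loop is easy to trace on a concrete string. A minimal self-contained sketch; the real `sep` character class comes from guessit.patterns, so the one below is only an assumption for illustration:

import re

sep = r'[ ._]'                          # hypothetical separator class
pattern = re.compile(sep + '-' + sep)

value = 'Show Name - Episode Title'
indices, match = [], pattern.search(value)
while match:
    span = match.span()
    indices.extend([span[0], span[1]])
    match = pattern.search(value, span[1])

assert indices == [9, 12]               # split around ' - '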
7,521
guess_movie_title_from_position.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_movie_title_from_position.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit import Guess
import unicodedata
import logging

log = logging.getLogger(__name__)


def process(mtree):
    def found_property(node, name, value, confidence):
        node.guess = Guess({name: value},
                           confidence=confidence, raw=value)
        log.debug('Found with confidence %.2f: %s' % (confidence, node.guess))

    def found_title(node, confidence):
        found_property(node, 'title', node.clean_value, confidence)

    basename = mtree.node_at((-2,))
    all_valid = lambda leaf: len(leaf.clean_value) > 0
    basename_leftover = basename.unidentified_leaves(valid=all_valid)

    try:
        folder = mtree.node_at((-3,))
        folder_leftover = folder.unidentified_leaves()
    except ValueError:
        folder = None
        folder_leftover = []

    log.debug('folder: %s' % folder_leftover)
    log.debug('basename: %s' % basename_leftover)

    # specific cases:
    # if we find the same group both in the folder name and the filename,
    # it's a good candidate for title
    if (folder_leftover and basename_leftover and
        folder_leftover[0].clean_value == basename_leftover[0].clean_value):

        found_title(folder_leftover[0], confidence=0.8)
        return

    # specific cases:
    # if the basename contains a number first followed by an unidentified
    # group, and the folder only contains 1 unidentified one, then we have
    # a series
    # ex: Millenium Trilogy (2009)/(1)The Girl With The Dragon Tattoo(2009).mkv
    try:
        series = folder_leftover[0]
        filmNumber = basename_leftover[0]
        title = basename_leftover[1]

        basename_leaves = basename.leaves()

        num = int(filmNumber.clean_value)

        log.debug('series: %s' % series.clean_value)
        log.debug('title: %s' % title.clean_value)
        if (series.clean_value != title.clean_value and
            series.clean_value != filmNumber.clean_value and
            basename_leaves.index(filmNumber) == 0 and
            basename_leaves.index(title) == 1):

            found_title(title, confidence=0.6)
            found_property(series, 'filmSeries',
                           series.clean_value, confidence=0.6)
            found_property(filmNumber, 'filmNumber',
                           num, confidence=0.6)
            return
    except Exception:
        pass

    # specific cases:
    #  - movies/tttttt (yyyy)/tttttt.ccc
    try:
        if mtree.node_at((-4, 0)).value.lower() == 'movies':
            folder = mtree.node_at((-3,))

            # Note: too generic, might solve all the unittests as they all
            # contain 'movies' in their path
            #
            #if containing_folder.is_leaf() and not containing_folder.guess:
            #    containing_folder.guess = (
            #        Guess({ 'title': clean_string(containing_folder.value) },
            #              confidence=0.7))

            year_group = folder.first_leaf_containing('year')
            groups_before = folder.previous_unidentified_leaves(year_group)

            found_title(groups_before[0], confidence=0.8)
            return
    except Exception:
        pass

    # if we have either format or videoCodec in the folder containing the
    # file or one of its parents, then we should probably look for the title
    # in there rather than in the basename
    try:
        props = mtree.previous_leaves_containing(mtree.children[-2],
                                                 ['videoCodec', 'format',
                                                  'language'])
    except IndexError:
        props = []

    if props:
        group_idx = props[0].node_idx[0]
        if all(g.node_idx[0] == group_idx for g in props):
            # if they're all in the same group, take leftover info from there
            leftover = mtree.node_at((group_idx,)).unidentified_leaves()

            if leftover:
                found_title(leftover[0], confidence=0.7)
                return

    # look for title in basename if there are some remaining unidentified
    # groups there
    if basename_leftover:
        title_candidate = basename_leftover[0]

        # if basename is only one word and the containing folder has at least
        # 3 words in it, we should take the title from the folder name
        # ex: Movies/Alice in Wonderland DVDRip.XviD-DiAMOND/dmd-aw.avi
        # ex: Movies/Somewhere.2010.DVDRip.XviD-iLG/i-smwhr.avi  <-- TODO: gets caught here?
        if (title_candidate.clean_value.count(' ') == 0 and
            folder_leftover and
            folder_leftover[0].clean_value.count(' ') >= 2):

            found_title(folder_leftover[0], confidence=0.7)
            return

        # if there are only 2 unidentified groups, the first of which is
        # inside brackets or parentheses, we take the second one for the
        # title:
        # ex: Movies/[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi
        if len(basename_leftover) == 2 and basename_leftover[0].is_explicit():
            found_title(basename_leftover[1], confidence=0.8)
            return

        # if all else fails, take the first remaining unidentified group in
        # the basename as title
        found_title(title_candidate, confidence=0.6)
        return

    # if there are no leftover groups in the basename, look in the folder name
    if folder_leftover:
        found_title(folder_leftover[0], confidence=0.5)
        return

    # if nothing worked, look if we have a very small group at the beginning
    # of the basename
    basename = mtree.node_at((-2,))
    basename_leftover = basename.unidentified_leaves(valid=lambda leaf: True)
    if basename_leftover:
        found_title(basename_leftover[0], confidence=0.4)
        return
6,628
Python
.py
144
37.305556
92
0.640819
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,522
guess_country.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_country.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.country import Country
from guessit import Guess
import logging

log = logging.getLogger(__name__)

# list of common words which could be interpreted as countries, but which
# are far too common to be able to say they represent a country
country_common_words = frozenset(['bt', 'bb'])


def process(mtree):
    for node in mtree.unidentified_leaves():
        if len(node.node_idx) == 2:
            c = node.value[1:-1].lower()
            if c in country_common_words:
                continue

            # only keep explicit groups (enclosed in parentheses/brackets)
            if node.value[0] + node.value[-1] not in ['()', '[]', '{}']:
                continue

            try:
                country = Country(c, strict=True)
            except ValueError:
                continue

            node.guess = Guess(country=country, confidence=1.0, raw=c)
1,733
Python
.py
41
37.195122
74
0.690208
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,523
post_process.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/post_process.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.patterns import subtitle_exts
from guessit.textutils import reorder_title, find_words
import logging

log = logging.getLogger(__name__)


def process(mtree):
    # 1- try to promote language to subtitle language where it makes sense
    for node in mtree.nodes():
        if 'language' not in node.guess:
            continue

        def promote_subtitle():  # pylint: disable=W0631
            node.guess.set('subtitleLanguage', node.guess['language'],
                           confidence=node.guess.confidence('language'))
            del node.guess['language']

        # - if we matched a language in a file with a sub extension and that
        #   the group is the last group of the filename, it is probably the
        #   language of the subtitle
        #   (eg: 'xxx.english.srt')
        if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
            node == mtree.leaves()[-2]):
            promote_subtitle()

        # - if we find the word 'sub' before the language, and in the same
        #   explicit group, then upgrade the language
        explicit_group = mtree.node_at(node.node_idx[:2])
        group_str = explicit_group.value.lower()

        if ('sub' in find_words(group_str) and
            0 <= group_str.find('sub') < (node.span[0] -
                                          explicit_group.span[0])):
            promote_subtitle()

        # - if a language is in an explicit group just preceded by "st",
        #   it is a subtitle language (eg: '...st[fr-eng]...')
        try:
            idx = node.node_idx
            previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
            if previous.value.lower()[-2:] == 'st':
                promote_subtitle()
        except IndexError:
            pass

    # 2- ", the" at the end of a series title should be prepended to it
    for node in mtree.nodes():
        if 'series' not in node.guess:
            continue

        node.guess['series'] = reorder_title(node.guess['series'])
2,827
Python
.py
62
38.629032
83
0.649237
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,524
guess_year.py
CouchPotato_CouchPotatoServer/libs/guessit/transfo/guess_year.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

from __future__ import unicode_literals
from guessit.transfo import SingleNodeGuesser
from guessit.date import search_year
import logging

log = logging.getLogger(__name__)


def guess_year(string):
    year, span = search_year(string)
    if year:
        return {'year': year}, span
    else:
        return None, None


def guess_year_skip_first(string):
    year, span = search_year(string)
    if year:
        year2, span2 = guess_year(string[span[1]:])
        if year2:
            return year2, (span2[0] + span[1], span2[1] + span[1])

    return None, None


def process(mtree, skip_first_year=False):
    if skip_first_year:
        SingleNodeGuesser(guess_year_skip_first, 1.0, log).process(mtree)
    else:
        SingleNodeGuesser(guess_year, 1.0, log).process(mtree)
1,594
Python
.py
42
34.571429
74
0.726036
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
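Annotation (not part of the dataset record above): the span arithmetic in guess_year_skip_first is worth tracing once, since the second search runs on a sliced string and its span must be shifted back into the original string's coordinates. A minimal self-contained sketch; the stub search_year below is an assumption for illustration only (the real one lives in guessit.date):

import re

def search_year(string):
    # hypothetical stand-in: first 19xx/20xx number, with its span
    m = re.search(r'\b(19|20)\d{2}\b', string)
    return (int(m.group()), m.span()) if m else (None, None)

s = '1984 Nineteen Eighty-Four 2009'
year, span = search_year(s)               # first year: 1984, span (0, 4)
year2, span2 = search_year(s[span[1]:])   # second year, span relative to slice
second = (year2, (span2[0] + span[1], span2[1] + span[1]))
assert second == (2009, (26, 30))         # shifted back to absolute offsets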
7,525
__init__.py
CouchPotato_CouchPotatoServer/libs/bencode/__init__.py
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License).  You may not copy or use this file, in either
# source code or executable form, except in compliance with the License.  You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
# for the specific language governing rights and limitations under the
# License.

# Written by Petru Paler

from BTL import BTFailure


def decode_int(x, f):
    f += 1
    newf = x.index('e', f)
    n = int(x[f:newf])
    if x[f] == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and newf != f + 1:
        raise ValueError
    return (n, newf + 1)

def decode_string(x, f):
    colon = x.index(':', f)
    n = int(x[f:colon])
    if x[f] == '0' and colon != f + 1:
        raise ValueError
    colon += 1
    return (x[colon:colon + n], colon + n)

def decode_list(x, f):
    r, f = [], f + 1
    while x[f] != 'e':
        v, f = decode_func[x[f]](x, f)
        r.append(v)
    return (r, f + 1)

def decode_dict(x, f):
    r, f = {}, f + 1
    while x[f] != 'e':
        k, f = decode_string(x, f)
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)

decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string

def bdecode(x):
    try:
        r, l = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure("not a valid bencoded string")
    if l != len(x):
        raise BTFailure("invalid bencoded value (data after valid prefix)")
    return r

from types import StringType, IntType, LongType, DictType, ListType, TupleType


class Bencached(object):

    __slots__ = ['bencoded']

    def __init__(self, s):
        self.bencoded = s

def encode_bencached(x, r):
    r.append(x.bencoded)

def encode_int(x, r):
    r.extend(('i', str(x), 'e'))

def encode_bool(x, r):
    if x:
        encode_int(1, r)
    else:
        encode_int(0, r)

def encode_string(x, r):
    r.extend((str(len(x)), ':', x))

def encode_list(x, r):
    r.append('l')
    for i in x:
        encode_func[type(i)](i, r)
    r.append('e')

def encode_dict(x, r):
    r.append('d')
    ilist = x.items()
    ilist.sort()
    for k, v in ilist:
        r.extend((str(len(k)), ':', k))
        encode_func[type(v)](v, r)
    r.append('e')

encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict

try:
    from types import BooleanType
    encode_func[BooleanType] = encode_bool
except ImportError:
    pass

def bencode(x):
    r = []
    encode_func[type(x)](x, r)
    return ''.join(r)
3,305
Python
.py
108
26.462963
78
0.635186
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
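Annotation (not part of the dataset record above): a minimal usage sketch (Python 2) of a bencode/bdecode round trip. Dict keys are sorted by encode_dict, so the output is canonical:

from bencode import bencode, bdecode

data = {'num': 3, 'spam': ['a', 'b']}
encoded = bencode(data)
assert encoded == 'd3:numi3e4:spaml1:a1:bee'   # sorted keys: num, spam
assert bdecode(encoded) == data                # lossless round trip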
7,526
common.py
CouchPotato_CouchPotatoServer/libs/oauthlib/common.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

"""
oauthlib.common
~~~~~~~~~~~~~~

This module provides data structures and utilities common
to all implementations of OAuth.
"""

import re
import urllib
import urlparse


always_safe = (u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               u'abcdefghijklmnopqrstuvwxyz'
               u'0123456789'
               u'_.-')


def quote(s, safe=u'/'):
    encoded = s.encode("utf-8")
    quoted = urllib.quote(encoded, safe)
    return quoted.decode("utf-8")


def unquote(s):
    encoded = s.encode("utf-8")
    unquoted = urllib.unquote(encoded)
    return unquoted.decode("utf-8")


def urlencode(params):
    utf8_params = encode_params_utf8(params)
    urlencoded = urllib.urlencode(utf8_params)
    return urlencoded.decode("utf-8")


def encode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are encoded
    to bytestrings using UTF-8
    """
    encoded = []
    for k, v in params:
        encoded.append((
            k.encode('utf-8') if isinstance(k, unicode) else k,
            v.encode('utf-8') if isinstance(v, unicode) else v))
    return encoded


def decode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are decoded
    to unicode using UTF-8.
    """
    decoded = []
    for k, v in params:
        decoded.append((
            k.decode('utf-8') if isinstance(k, str) else k,
            v.decode('utf-8') if isinstance(v, str) else v))
    return decoded


urlencoded = set(always_safe) | set(u'=&;%+~')


def urldecode(query):
    """Decode a query string in x-www-form-urlencoded format into a sequence
    of two-element tuples.

    Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce
    correct formatting of the query string by validation. If validation fails
    a ValueError will be raised. urllib.parse_qsl will only raise errors if
    any of name-value pairs omits the equals sign.
    """
    # Check if query contains invalid characters
    if query and not set(query) <= urlencoded:
        raise ValueError('Invalid characters in query string.')

    # Check for correctly hex encoded values using a regular expression
    # All encoded values begin with % followed by two hex characters
    # correct = %00, %A0, %0A, %FF
    # invalid = %G0, %5H, %PO
    invalid_hex = u'%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]'
    if len(re.findall(invalid_hex, query)):
        raise ValueError('Invalid hex encoding in query string.')

    query = query.decode('utf-8') if isinstance(query, str) else query

    # We want to allow queries such as "c2" whereas urlparse.parse_qsl
    # with the strict_parsing flag will not.
    params = urlparse.parse_qsl(query, keep_blank_values=True)

    # unicode all the things
    return decode_params_utf8(params)


def extract_params(raw):
    """Extract parameters and return them as a list of 2-tuples.

    Will successfully extract parameters from urlencoded query strings,
    dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
    empty list of parameters. Any other input will result in a return
    value of None.
    """
    if isinstance(raw, basestring):
        try:
            params = urldecode(raw)
        except ValueError:
            params = None
    elif hasattr(raw, '__iter__'):
        try:
            dict(raw)
        except ValueError:
            params = None
        except TypeError:
            params = None
        else:
            params = list(raw.items() if isinstance(raw, dict) else raw)
            params = decode_params_utf8(params)
    else:
        params = None

    return params


class Request(object):
    """A malleable representation of a signable HTTP request.

    Body argument may contain any data, but parameters will only be decoded
    if they are one of:

    * urlencoded query string
    * dict
    * list of 2-tuples

    Anything else will be treated as raw body data to be passed through
    unmolested.
    """
    def __init__(self, uri, http_method=u'GET', body=None, headers=None):
        self.uri = uri
        self.http_method = http_method
        self.headers = headers or {}
        self.body = body
        self.decoded_body = extract_params(body)
        self.oauth_params = []

    @property
    def uri_query(self):
        return urlparse.urlparse(self.uri).query

    @property
    def uri_query_params(self):
        return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
                                  strict_parsing=True)
4,551
Python
.py
120
31.533333
79
0.661965
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
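Annotation (not part of the dataset record above): a minimal usage sketch (Python 2) of extract_params, which normalizes a query string, dict, or list of 2-tuples into a list of unicode 2-tuples, and returns None for anything else:

from oauthlib.common import extract_params

assert extract_params(u'a=1&b=%20') == [(u'a', u'1'), (u'b', u' ')]
assert extract_params({u'a': u'1'}) == [(u'a', u'1')]
assert extract_params(u'') == []            # empty input -> empty list
assert extract_params(object()) is None     # unsupported input -> None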
7,527
__init__.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth1/__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

"""
oauthlib.oauth1
~~~~~~~~~~~~~~

This module is a wrapper for the most recent implementation of OAuth 1.0
Client and Server classes.
"""

from .rfc5849 import Client, Server
242
Python
.py
9
25.444444
79
0.716157
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,528
parameters.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth1/rfc5849/parameters.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

"""
oauthlib.parameters
~~~~~~~~~~~~~~~~~~~

This module contains methods related to `section 3.5`_ of the OAuth 1.0a
spec.

.. _`section 3.5`: http://tools.ietf.org/html/rfc5849#section-3.5
"""

from urlparse import urlparse, urlunparse
from . import utils
from oauthlib.common import extract_params, urlencode


# TODO: do we need filter_params now that oauth_params are handled by Request?
#       We can easily pass in just oauth protocol params.
@utils.filter_params
def prepare_headers(oauth_params, headers=None, realm=None):
    """**Prepare the Authorization header.**
    Per `section 3.5.1`_ of the spec.

    Protocol parameters can be transmitted using the HTTP "Authorization"
    header field as defined by `RFC2617`_ with the auth-scheme name set to
    "OAuth" (case insensitive).

    For example::

        Authorization: OAuth realm="Example",
            oauth_consumer_key="0685bd9184jfhq22",
            oauth_token="ad180jjd733klru7",
            oauth_signature_method="HMAC-SHA1",
            oauth_signature="wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
            oauth_timestamp="137131200",
            oauth_nonce="4572616e48616d6d65724c61686176",
            oauth_version="1.0"

    .. _`section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
    .. _`RFC2617`: http://tools.ietf.org/html/rfc2617
    """
    headers = headers or {}

    # Protocol parameters SHALL be included in the "Authorization" header
    # field as follows:
    authorization_header_parameters_parts = []
    for oauth_parameter_name, value in oauth_params:
        # 1.  Parameter names and values are encoded per Parameter Encoding
        #     (`Section 3.6`_)
        #
        # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
        escaped_name = utils.escape(oauth_parameter_name)
        escaped_value = utils.escape(value)

        # 2.  Each parameter's name is immediately followed by an "="
        #     character (ASCII code 61), a """ character (ASCII code 34),
        #     the parameter value (MAY be empty), and another """ character
        #     (ASCII code 34).
        part = u'{0}="{1}"'.format(escaped_name, escaped_value)

        authorization_header_parameters_parts.append(part)

    # 3.  Parameters are separated by a "," character (ASCII code 44) and
    #     OPTIONAL linear whitespace per `RFC2617`_.
    #
    # .. _`RFC2617`: http://tools.ietf.org/html/rfc2617
    authorization_header_parameters = ', '.join(
        authorization_header_parameters_parts)

    # 4.  The OPTIONAL "realm" parameter MAY be added and interpreted per
    #     `RFC2617 section 1.2`_.
    #
    # .. _`RFC2617 section 1.2`: http://tools.ietf.org/html/rfc2617#section-1.2
    if realm:
        # NOTE: realm should *not* be escaped
        authorization_header_parameters = (u'realm="%s", ' % realm +
                                           authorization_header_parameters)

    # the auth-scheme name set to "OAuth" (case insensitive).
    authorization_header = u'OAuth %s' % authorization_header_parameters

    # contribute the Authorization header to the given headers
    full_headers = {}
    full_headers.update(headers)
    full_headers[u'Authorization'] = authorization_header
    return full_headers


def _append_params(oauth_params, params):
    """Append OAuth params to an existing set of parameters.

    Both params and oauth_params must be lists of 2-tuples.

    Per `section 3.5.2`_ and `3.5.3`_ of the spec.

    .. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
    .. _`3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
    """
    merged = list(params)
    merged.extend(oauth_params)
    # The request URI / entity-body MAY include other request-specific
    # parameters, in which case, the protocol parameters SHOULD be appended
    # following the request-specific parameters, properly separated by an "&"
    # character (ASCII code 38)
    merged.sort(key=lambda i: i[0].startswith('oauth_'))
    return merged


def prepare_form_encoded_body(oauth_params, body):
    """Prepare the Form-Encoded Body.

    Per `section 3.5.2`_ of the spec.

    .. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
    """
    # append OAuth params to the existing body
    return _append_params(oauth_params, body)


def prepare_request_uri_query(oauth_params, uri):
    """Prepare the Request URI Query.

    Per `section 3.5.3`_ of the spec.

    .. _`section 3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
    """
    # append OAuth params to the existing set of query components
    sch, net, path, par, query, fra = urlparse(uri)
    query = urlencode(_append_params(oauth_params,
                                     extract_params(query) or []))
    return urlunparse((sch, net, path, par, query, fra))
4,817
Python
.py
100
42.31
80
0.677344
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
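Annotation (not part of the dataset record above): a minimal usage sketch (Python 2) of prepare_headers rendering protocol parameters into an Authorization header; the key value is a placeholder. Note that the @filter_params decorator silently drops any non-oauth_ parameters passed in:

from oauthlib.oauth1.rfc5849.parameters import prepare_headers

headers = prepare_headers([(u'oauth_consumer_key', u'abc123')],
                          realm=u'Example')
assert headers[u'Authorization'] == (
    u'OAuth realm="Example", oauth_consumer_key="abc123"')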
7,529
utils.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth1/rfc5849/utils.py
# -*- coding: utf-8 -*-

"""
oauthlib.utils
~~~~~~~~~~~~~~

This module contains utility methods used by various parts of the OAuth
spec.
"""

import string
import time
import urllib2

from random import getrandbits, choice

from oauthlib.common import quote, unquote

UNICODE_ASCII_CHARACTER_SET = (string.ascii_letters.decode('ascii') +
                               string.digits.decode('ascii'))


def filter_params(target):
    """Decorator which filters params to remove non-oauth_* parameters

    Assumes the decorated method takes a params dict or list of tuples as its
    first argument.
    """
    def wrapper(params, *args, **kwargs):
        params = filter_oauth_params(params)
        return target(params, *args, **kwargs)

    wrapper.__doc__ = target.__doc__
    return wrapper


def filter_oauth_params(params):
    """Removes all non oauth parameters from a dict or a list of params."""
    is_oauth = lambda kv: kv[0].startswith(u"oauth_")
    if isinstance(params, dict):
        return filter(is_oauth, params.items())
    else:
        return filter(is_oauth, params)


def generate_timestamp():
    """Get seconds since epoch (UTC).

    Per `section 3.3`_ of the spec.

    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    return unicode(int(time.time()))


def generate_nonce():
    """Generate pseudorandom nonce that is unlikely to repeat.

    Per `section 3.3`_ of the spec.

    A random 64-bit number is appended to the epoch timestamp for both
    randomness and to decrease the likelihood of collisions.

    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    return unicode(getrandbits(64)) + generate_timestamp()


def generate_token(length=20, chars=UNICODE_ASCII_CHARACTER_SET):
    """Generates a generic OAuth token

    According to `section 2`_ of the spec, the method of token construction
    is undefined. This implementation is simply a random selection of
    `length` choices from `chars`.

    Credit to Ignacio Vazquez-Abrams for his excellent `Stackoverflow
    answer`_

    .. _`Stackoverflow answer` : http://stackoverflow.com/questions/2257441/
        python-random-string-generation-with-upper-case-letters-and-digits
    """
    return u''.join(choice(chars) for x in range(length))


def escape(u):
    """Escape a unicode string in an OAuth-compatible fashion.

    Per `section 3.6`_ of the spec.

    .. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    """
    if not isinstance(u, unicode):
        raise ValueError('Only unicode objects are escapable.')
    # Letters, digits, and the characters '_.-' are already treated as safe
    # by urllib.quote(). We need to add '~' to fully support rfc5849.
    return quote(u, safe='~')


def unescape(u):
    if not isinstance(u, unicode):
        raise ValueError('Only unicode objects are unescapable.')
    return unquote(u)


def urlencode(query):
    """Encode a sequence of two-element tuples or dictionary into a URL
    query string.

    Operates using an OAuth-safe escape() method, in contrast to
    urllib.urlencode.
    """
    # Convert dictionaries to list of tuples
    if isinstance(query, dict):
        query = query.items()
    return u"&".join([u'='.join([escape(k), escape(v)]) for k, v in query])


def parse_keqv_list(l):
    """A unicode-safe version of urllib2.parse_keqv_list"""
    encoded_list = [u.encode('utf-8') for u in l]
    encoded_parsed = urllib2.parse_keqv_list(encoded_list)
    return dict((k.decode('utf-8'), v.decode('utf-8'))
                for k, v in encoded_parsed.items())


def parse_http_list(u):
    """A unicode-safe version of urllib2.parse_http_list"""
    encoded_str = u.encode('utf-8')
    encoded_list = urllib2.parse_http_list(encoded_str)
    return [s.decode('utf-8') for s in encoded_list]


def parse_authorization_header(authorization_header):
    """Parse an OAuth authorization header into a list of 2-tuples"""
    auth_scheme = u'OAuth '
    if authorization_header.startswith(auth_scheme):
        authorization_header = authorization_header.replace(auth_scheme,
                                                            u'', 1)
    items = parse_http_list(authorization_header)
    try:
        return parse_keqv_list(items).items()
    except ValueError:
        raise ValueError('Malformed authorization header')
4,269
Python
.py
98
38.612245
85
0.699612
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
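Annotation (not part of the dataset record above): a minimal usage sketch (Python 2) of the two helpers most relevant to signature construction, escape (RFC 5849 percent-encoding, with '~' kept safe) and filter_oauth_params:

from oauthlib.oauth1.rfc5849.utils import escape, filter_oauth_params

assert escape(u'hello world~') == u'hello%20world~'

params = [(u'oauth_nonce', u'abc'), (u'name', u'value')]
assert filter_oauth_params(params) == [(u'oauth_nonce', u'abc')]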
7,530
__init__.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth1/rfc5849/__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~

This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""

import logging
import urlparse

from oauthlib.common import Request, urlencode
from . import parameters, signature, utils

SIGNATURE_HMAC = u"HMAC-SHA1"
SIGNATURE_RSA = u"RSA-SHA1"
SIGNATURE_PLAINTEXT = u"PLAINTEXT"
SIGNATURE_METHODS = (SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_PLAINTEXT)

SIGNATURE_TYPE_AUTH_HEADER = u'AUTH_HEADER'
SIGNATURE_TYPE_QUERY = u'QUERY'
SIGNATURE_TYPE_BODY = u'BODY'

CONTENT_TYPE_FORM_URLENCODED = u'application/x-www-form-urlencoded'


class Client(object):
    """A client used to sign OAuth 1.0 RFC 5849 requests"""
    def __init__(self, client_key,
            client_secret=None,
            resource_owner_key=None,
            resource_owner_secret=None,
            callback_uri=None,
            signature_method=SIGNATURE_HMAC,
            signature_type=SIGNATURE_TYPE_AUTH_HEADER,
            rsa_key=None, verifier=None):
        self.client_key = client_key
        self.client_secret = client_secret
        self.resource_owner_key = resource_owner_key
        self.resource_owner_secret = resource_owner_secret
        self.signature_method = signature_method
        self.signature_type = signature_type
        self.callback_uri = callback_uri
        self.rsa_key = rsa_key
        self.verifier = verifier

        if self.signature_method == SIGNATURE_RSA and self.rsa_key is None:
            raise ValueError('rsa_key is required when using RSA signature method.')

    def get_oauth_signature(self, request):
        """Get an OAuth signature to be used in signing a request
        """
        if self.signature_method == SIGNATURE_PLAINTEXT:
            # fast-path
            return signature.sign_plaintext(self.client_secret,
                self.resource_owner_secret)

        uri, headers, body = self._render(request)

        collected_params = signature.collect_parameters(
            uri_query=urlparse.urlparse(uri).query,
            body=body,
            headers=headers)
        logging.debug("Collected params: {0}".format(collected_params))

        normalized_params = signature.normalize_parameters(collected_params)
        normalized_uri = signature.normalize_base_string_uri(request.uri)
        logging.debug("Normalized params: {0}".format(normalized_params))
        logging.debug("Normalized URI: {0}".format(normalized_uri))

        base_string = signature.construct_base_string(request.http_method,
            normalized_uri, normalized_params)

        logging.debug("Base signing string: {0}".format(base_string))

        if self.signature_method == SIGNATURE_HMAC:
            sig = signature.sign_hmac_sha1(base_string, self.client_secret,
                self.resource_owner_secret)
        elif self.signature_method == SIGNATURE_RSA:
            sig = signature.sign_rsa_sha1(base_string, self.rsa_key)
        else:
            sig = signature.sign_plaintext(self.client_secret,
                self.resource_owner_secret)

        logging.debug("Signature: {0}".format(sig))
        return sig

    def get_oauth_params(self):
        """Get the basic OAuth parameters to be used in generating a
        signature.
        """
        params = [
            (u'oauth_nonce', utils.generate_nonce()),
            (u'oauth_timestamp', utils.generate_timestamp()),
            (u'oauth_version', u'1.0'),
            (u'oauth_signature_method', self.signature_method),
            (u'oauth_consumer_key', self.client_key),
        ]
        if self.resource_owner_key:
            params.append((u'oauth_token', self.resource_owner_key))
        if self.callback_uri:
            params.append((u'oauth_callback', self.callback_uri))
        if self.verifier:
            params.append((u'oauth_verifier', self.verifier))

        return params

    def _render(self, request, formencode=False):
        """Render a signed request according to signature type

        Returns a 3-tuple containing the request URI, headers, and body.

        If the formencode argument is True and the body contains parameters,
        it is escaped and returned as a valid formencoded string.
        """
        # TODO what if there are body params on a header-type auth?
        # TODO what if there are query params on a body-type auth?

        uri, headers, body = request.uri, request.headers, request.body

        # TODO: right now these prepare_* methods are very narrow in
        # scope--they only affect their little thing. In some cases (for
        # example, with header auth) it might be advantageous to allow these
        # methods to touch other parts of the request, like the headers—so
        # the prepare_headers method could also set the Content-Type header
        # to x-www-form-urlencoded like the spec requires. This would be a
        # fundamental change though, and I'm not sure how I feel about it.
        if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER:
            headers = parameters.prepare_headers(request.oauth_params,
                request.headers)
        elif (self.signature_type == SIGNATURE_TYPE_BODY and
                request.decoded_body is not None):
            body = parameters.prepare_form_encoded_body(request.oauth_params,
                request.decoded_body)
            if formencode:
                body = urlencode(body)
            headers['Content-Type'] = u'application/x-www-form-urlencoded'
        elif self.signature_type == SIGNATURE_TYPE_QUERY:
            uri = parameters.prepare_request_uri_query(request.oauth_params,
                request.uri)
        else:
            raise ValueError('Unknown signature type specified.')

        return uri, headers, body

    def sign(self, uri, http_method=u'GET', body=None, headers=None):
        """Sign a request

        Signs an HTTP request with the specified parts.

        Returns a 3-tuple of the signed request's URI, headers, and body.
        Note that http_method is not returned as it is unaffected by the
        OAuth signing process.

        The body argument may be a dict, a list of 2-tuples, or a formencoded
        string. The Content-Type header must be
        'application/x-www-form-urlencoded' if it is present.

        If the body argument is not one of the above, it will be returned
        verbatim as it is unaffected by the OAuth signing process. Attempting
        to sign a request with non-formencoded data using the OAuth body
        signature type is invalid and will raise an exception.

        If the body does contain parameters, it will be returned as a
        properly-formatted formencoded string.

        All string data MUST be unicode. This includes strings inside body
        dicts, for example.
        """
        # normalize request data
        request = Request(uri, http_method, body, headers)

        # sanity check
        content_type = request.headers.get('Content-Type', None)
        multipart = content_type and content_type.startswith('multipart/')
        should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED
        has_params = request.decoded_body is not None

        # 3.4.1.3.1.  Parameter Sources
        # [Parameters are collected from the HTTP request entity-body, but
        # only if [...]:
        #    *  The entity-body is single-part.
        if multipart and has_params:
            raise ValueError("Headers indicate a multipart body but body contains parameters.")
        #    *  The entity-body follows the encoding requirements of the
        #       "application/x-www-form-urlencoded" content-type as defined
        #       by [W3C.REC-html40-19980424].
        elif should_have_params and not has_params:
            raise ValueError("Headers indicate a formencoded body but body was not decodable.")
        #    *  The HTTP request entity-header includes the "Content-Type"
        #       header field set to "application/x-www-form-urlencoded".
        elif not should_have_params and has_params:
            raise ValueError("Body contains parameters but Content-Type header was not set.")

        # 3.5.2.  Form-Encoded Body
        # Protocol parameters can be transmitted in the HTTP request entity-
        # body, but only if the following REQUIRED conditions are met:
        # o  The entity-body is single-part.
        # o  The entity-body follows the encoding requirements of the
        #    "application/x-www-form-urlencoded" content-type as defined by
        #    [W3C.REC-html40-19980424].
        # o  The HTTP request entity-header includes the "Content-Type"
        #    header field set to "application/x-www-form-urlencoded".
        elif self.signature_type == SIGNATURE_TYPE_BODY and not (
                should_have_params and has_params and not multipart):
            raise ValueError('Body signatures may only be used with form-urlencoded content')

        # generate the basic OAuth parameters
        request.oauth_params = self.get_oauth_params()

        # generate the signature
        request.oauth_params.append((u'oauth_signature',
            self.get_oauth_signature(request)))

        # render the signed request and return it
        return self._render(request, formencode=True)


class Server(object):
    """A server used to verify OAuth 1.0 RFC 5849 requests"""
    def __init__(self, signature_method=SIGNATURE_HMAC, rsa_key=None):
        self.signature_method = signature_method
        self.rsa_key = rsa_key

    def get_client_secret(self, client_key):
        raise NotImplementedError("Subclasses must implement this function.")

    def get_resource_owner_secret(self, resource_owner_key):
        raise NotImplementedError("Subclasses must implement this function.")

    def get_signature_type_and_params(self, uri_query, headers, body):
        signature_types_with_oauth_params = filter(lambda s: s[1], (
            (SIGNATURE_TYPE_AUTH_HEADER, utils.filter_oauth_params(
                signature.collect_parameters(headers=headers,
                    exclude_oauth_signature=False))),
            (SIGNATURE_TYPE_BODY, utils.filter_oauth_params(
                signature.collect_parameters(body=body,
                    exclude_oauth_signature=False))),
            (SIGNATURE_TYPE_QUERY, utils.filter_oauth_params(
                signature.collect_parameters(uri_query=uri_query,
                    exclude_oauth_signature=False))),
        ))

        if len(signature_types_with_oauth_params) > 1:
            raise ValueError('oauth_ params must come from only 1 signature type but were found in %s' % ', '.join(
                [s[0] for s in signature_types_with_oauth_params]))
        try:
            signature_type, params = signature_types_with_oauth_params[0]
        except IndexError:
            raise ValueError('oauth_ params are missing. Could not determine signature type.')

        return signature_type, dict(params)

    def check_client_key(self, client_key):
        raise NotImplementedError("Subclasses must implement this function.")

    def check_resource_owner_key(self, client_key, resource_owner_key):
        raise NotImplementedError("Subclasses must implement this function.")

    def check_timestamp_and_nonce(self, timestamp, nonce):
        raise NotImplementedError("Subclasses must implement this function.")

    def check_request_signature(self, uri, http_method=u'GET', body='',
            headers=None):
        """Check a request's supplied signature to make sure the request is
        valid.

        Servers should return HTTP status 400 if a ValueError exception
        is raised and HTTP status 401 on return value False.

        Per `section 3.2`_ of the spec.

        .. _`section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
        """
        headers = headers or {}
        signature_type = None
        # FIXME: urlparse does not return unicode!
        uri_query = urlparse.urlparse(uri).query

        signature_type, params = self.get_signature_type_and_params(uri_query,
            headers, body)

        # the parameters may not include duplicate oauth entries
        filtered_params = utils.filter_oauth_params(params)
        if len(filtered_params) != len(params):
            raise ValueError("Duplicate OAuth entries.")

        params = dict(params)
        request_signature = params.get(u'oauth_signature')
        client_key = params.get(u'oauth_consumer_key')
        resource_owner_key = params.get(u'oauth_token')
        nonce = params.get(u'oauth_nonce')
        timestamp = params.get(u'oauth_timestamp')
        callback_uri = params.get(u'oauth_callback')
        verifier = params.get(u'oauth_verifier')
        signature_method = params.get(u'oauth_signature_method')

        # ensure all mandatory parameters are present
        if not all((request_signature, client_key, nonce,
                    timestamp, signature_method)):
            raise ValueError("Missing OAuth parameters.")

        # if version is supplied, it must be "1.0"
        if u'oauth_version' in params and params[u'oauth_version'] != u'1.0':
            raise ValueError("Invalid OAuth version.")

        # signature method must be valid
        if not signature_method in SIGNATURE_METHODS:
            raise ValueError("Invalid signature method.")

        # ensure client key is valid
        if not self.check_client_key(client_key):
            return False

        # ensure resource owner key is valid and not expired
        if not self.check_resource_owner_key(client_key, resource_owner_key):
            return False

        # ensure the nonce and timestamp haven't been used before
        if not self.check_timestamp_and_nonce(timestamp, nonce):
            return False

        # FIXME: extract realm, then self.check_realm

        # oauth_client parameters depend on client chosen signature method
        # which may vary for each request, section 3.4
        # HMAC-SHA1 and PLAINTEXT share parameters
        if signature_method == SIGNATURE_RSA:
            oauth_client = Client(client_key,
                resource_owner_key=resource_owner_key,
                callback_uri=callback_uri,
                signature_method=signature_method,
                signature_type=signature_type,
                rsa_key=self.rsa_key, verifier=verifier)
        else:
            client_secret = self.get_client_secret(client_key)
            resource_owner_secret = self.get_resource_owner_secret(
                resource_owner_key)
            oauth_client = Client(client_key,
                client_secret=client_secret,
                resource_owner_key=resource_owner_key,
                resource_owner_secret=resource_owner_secret,
                callback_uri=callback_uri,
                signature_method=signature_method,
                signature_type=signature_type,
                verifier=verifier)

        request = Request(uri, http_method, body, headers)
        request.oauth_params = params

        client_signature = oauth_client.get_oauth_signature(request)

        # FIXME: use near constant time string compare to avoid timing attacks
        return client_signature == request_signature
15,232
Python
.py
284
43.725352
115
0.661672
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
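Annotation (not part of the dataset record above): a minimal usage sketch (Python 2) of signing a GET request with the defaults (HMAC-SHA1, header signature type); the key and secret values are placeholders, and all inputs must be unicode:

from oauthlib.oauth1 import Client

client = Client(u'my_key', client_secret=u'my_secret')
uri, headers, body = client.sign(u'http://example.com/resource?q=1')
# headers[u'Authorization'] now holds the OAuth header with
# oauth_nonce, oauth_timestamp, oauth_signature, etc.
print(headers[u'Authorization'])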
7,531
signature.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth1/rfc5849/signature.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import

"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This module represents a direct implementation of `section 3.4`_ of the spec.

Terminology:
 * Client: software interfacing with an OAuth API
 * Server: the API provider
 * Resource Owner: the user who is granting authorization to the client

Steps for signing a request:

1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
   construct the base string
5. Pass the base string and any keys needed to a signing function

.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""

import binascii
import hashlib
import hmac
import urlparse

from . import utils
from oauthlib.common import extract_params


def construct_base_string(http_method, base_string_uri,
        normalized_encoded_request_parameters):
    """**String Construction**
    Per `section 3.4.1.1`_ of the spec.

    For example, the HTTP request::

        POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
        Host: example.com
        Content-Type: application/x-www-form-urlencoded
        Authorization: OAuth realm="Example",
            oauth_consumer_key="9djdj82h48djs9d2",
            oauth_token="kkk9d7dh3k39sjv7",
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp="137131201",
            oauth_nonce="7d8f3e4a",
            oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"

        c2&a3=2+q

    is represented by the following signature base string (line breaks
    are for display purposes only)::

        POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
        %26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
        key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
        ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
        9d7dh3k39sjv7

    .. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
    """

    # The signature base string is constructed by concatenating together,
    # in order, the following HTTP request elements:

    # 1.  The HTTP request method in uppercase.  For example: "HEAD",
    #     "GET", "POST", etc.  If the request uses a custom HTTP method, it
    #     MUST be encoded (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    base_string = utils.escape(http_method.upper())

    # 2.  An "&" character (ASCII code 38).
    base_string += u'&'

    # 3.  The base string URI from `Section 3.4.1.2`_, after being encoded
    #     (`Section 3.6`_).
    #
    # .. _`Section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
    # .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
    base_string += utils.escape(base_string_uri)

    # 4.  An "&" character (ASCII code 38).
    base_string += u'&'

    # 5.  The request parameters as normalized in `Section 3.4.1.3.2`_,
    #     after being encoded (`Section 3.6`).
    #
    # .. _`Section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    # .. _`Section 3.4.6`: http://tools.ietf.org/html/rfc5849#section-3.4.6
    base_string += utils.escape(normalized_encoded_request_parameters)

    return base_string


def normalize_base_string_uri(uri):
    """**Base String URI**
    Per `section 3.4.1.2`_ of the spec.

    For example, the HTTP request::

        GET /r%20v/X?id=123 HTTP/1.1
        Host: EXAMPLE.COM:80

    is represented by the base string URI: "http://example.com/r%20v/X".

    In another example, the HTTPS request::

        GET /?q=1 HTTP/1.1
        Host: www.example.net:8080

    is represented by the base string URI:
    "https://www.example.net:8080/".

    .. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2
    """
    if not isinstance(uri, unicode):
        raise ValueError('uri must be a unicode object.')

    # FIXME: urlparse does not support unicode
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)

    # The scheme, authority, and path of the request resource URI `RFC3986`
    # are included by constructing an "http" or "https" URI representing
    # the request resource (without the query or fragment) as follows:
    #
    # .. _`RFC2616`: http://tools.ietf.org/html/rfc3986

    # 1.  The scheme and host MUST be in lowercase.
    scheme = scheme.lower()
    netloc = netloc.lower()

    # 2.  The host and port values MUST match the content of the HTTP
    #     request "Host" header field.
    # TODO: enforce this constraint

    # 3.  The port MUST be included if it is not the default port for the
    #     scheme, and MUST be excluded if it is the default.  Specifically,
    #     the port MUST be excluded when making an HTTP request `RFC2616`_
    #     to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
    #     All other non-default port numbers MUST be included.
    #
    # .. _`RFC2616`: http://tools.ietf.org/html/rfc2616
    # .. _`RFC2818`: http://tools.ietf.org/html/rfc2818
    default_ports = (
        (u'http', u'80'),
        (u'https', u'443'),
    )
    if u':' in netloc:
        host, port = netloc.split(u':', 1)
        if (scheme, port) in default_ports:
            netloc = host

    return urlparse.urlunparse((scheme, netloc, path, u'', u'', u''))


# ** Request Parameters **
#
#    Per `section 3.4.1.3`_ of the spec.
#
#    In order to guarantee a consistent and reproducible representation of
#    the request parameters, the parameters are collected and decoded to
#    their original decoded form.  They are then sorted and encoded in a
#    particular manner that is often different from their original
#    encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3

def collect_parameters(uri_query='', body=[], headers=None,
        exclude_oauth_signature=True):
    """**Parameter Sources**

    Parameters starting with `oauth_` will be unescaped.

    Body parameters must be supplied as a dict, a list of 2-tuples, or a
    formencoded query string.

    Headers must be supplied as a dict.

    Per `section 3.4.1.3.1`_ of the spec.

    For example, the HTTP request::

        POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
        Host: example.com
        Content-Type: application/x-www-form-urlencoded
        Authorization: OAuth realm="Example",
            oauth_consumer_key="9djdj82h48djs9d2",
            oauth_token="kkk9d7dh3k39sjv7",
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp="137131201",
            oauth_nonce="7d8f3e4a",
            oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"

        c2&a3=2+q

    contains the following (fully decoded) parameters used in the
    signature base string::

        +------------------------+------------------+
        |          Name          |       Value      |
        +------------------------+------------------+
        |           b5           |       =%3D       |
        |           a3           |         a        |
        |           c@           |                  |
        |           a2           |        r b       |
        |   oauth_consumer_key   | 9djdj82h48djs9d2 |
        |       oauth_token      | kkk9d7dh3k39sjv7 |
        | oauth_signature_method |     HMAC-SHA1    |
        |     oauth_timestamp    |     137131201    |
        |       oauth_nonce      |     7d8f3e4a     |
        |           c2           |                  |
        |           a3           |        2 q       |
        +------------------------+------------------+

    Note that the value of "b5" is "=%3D" and not "==".  Both "c@" and
    "c2" have empty values.  While the encoding rules specified in this
    specification for the purpose of constructing the signature base
    string exclude the use of a "+" character (ASCII code 43) to
    represent an encoded space character (ASCII code 32), this practice
    is widely used in "application/x-www-form-urlencoded" encoded values,
    and MUST be properly decoded, as demonstrated by one of the "a3"
    parameter instances (the "a3" parameter is used twice in this
    request).

    .. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    headers = headers or {}
    params = []

    # The parameters from the following sources are collected into a single
    # list of name/value pairs:

    # *  The query component of the HTTP request URI as defined by
    #    `RFC3986, Section 3.4`_.  The query component is parsed into a list
    #    of name/value pairs by treating it as an
    #    "application/x-www-form-urlencoded" string, separating the names
    #    and values and decoding them as defined by
    #    `W3C.REC-html40-19980424`_, Section 17.13.4.
    #
    # .. _`RFC3986, Section 3.4`: http://tools.ietf.org/html/rfc3986#section-3.4
    # .. _`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
    if uri_query:
        params.extend(urlparse.parse_qsl(uri_query, keep_blank_values=True))

    # *  The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
    #    present.  The header's content is parsed into a list of name/value
    #    pairs excluding the "realm" parameter if present.  The parameter
    #    values are decoded as defined by `Section 3.5.1`_.
    #
    # .. _`Section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get(u'authorization')
        if authorization_header is not None:
            params.extend([i for i in utils.parse_authorization_header(
                authorization_header) if i[0] != u'realm'])

    # *  The HTTP request entity-body, but only if all of the following
    #    conditions are met:
    #     *  The entity-body is single-part.
    #
    #     *  The entity-body follows the encoding requirements of the
    #        "application/x-www-form-urlencoded" content-type as defined by
    #        `W3C.REC-html40-19980424`_.

    #     *  The HTTP request entity-header includes the "Content-Type"
    #        header field set to "application/x-www-form-urlencoded".
    #
    # .._`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424

    # TODO: enforce header param inclusion conditions
    bodyparams = extract_params(body) or []
    params.extend(bodyparams)

    # ensure all oauth params are unescaped
    unescaped_params = []
    for k, v in params:
        if k.startswith(u'oauth_'):
            v = utils.unescape(v)
        unescaped_params.append((k, v))

    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = filter(lambda i: i[0] != u'oauth_signature',
            unescaped_params)

    return unescaped_params


def normalize_parameters(params):
    """**Parameters Normalization**
    Per `section 3.4.1.3.2`_ of the spec.

    For example, the list of parameters from the previous section would
    be normalized as follows:

    Encoded::

        +------------------------+------------------+
        |          Name          |       Value      |
        +------------------------+------------------+
        |           b5           |     %3D%253D     |
        |           a3           |         a        |
        |          c%40          |                  |
        |           a2           |       r%20b      |
        |   oauth_consumer_key   | 9djdj82h48djs9d2 |
        |       oauth_token      | kkk9d7dh3k39sjv7 |
        | oauth_signature_method |     HMAC-SHA1    |
        |     oauth_timestamp    |     137131201    |
        |       oauth_nonce      |     7d8f3e4a     |
        |           c2           |                  |
        |           a3           |       2%20q      |
        +------------------------+------------------+

    Sorted::

        +------------------------+------------------+
        |          Name          |       Value      |
        +------------------------+------------------+
        |           a2           |       r%20b      |
        |           a3           |       2%20q      |
        |           a3           |         a        |
        |           b5           |     %3D%253D     |
        |          c%40          |                  |
        |           c2           |                  |
        |   oauth_consumer_key   | 9djdj82h48djs9d2 |
        |       oauth_nonce      |     7d8f3e4a     |
        | oauth_signature_method |     HMAC-SHA1    |
        |     oauth_timestamp    |     137131201    |
        |       oauth_token      | kkk9d7dh3k39sjv7 |
        +------------------------+------------------+

    Concatenated Pairs::

        +-------------------------------------+
        |              Name=Value             |
        +-------------------------------------+
        |               a2=r%20b              |
        |               a3=2%20q              |
        |                 a3=a                |
        |             b5=%3D%253D             |
        |                c%40=                |
        |                 c2=                 |
        | oauth_consumer_key=9djdj82h48djs9d2 |
        |        oauth_nonce=7d8f3e4a         |
        |   oauth_signature_method=HMAC-SHA1  |
        |      oauth_timestamp=137131201      |
        |     oauth_token=kkk9d7dh3k39sjv7    |
        +-------------------------------------+

    and concatenated together into a single string (line breaks are for
    display purposes only)::

        a2=r%20b&a3=2%20q&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9dj
        dj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1
        &oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7

    .. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """

    # The parameters collected in `Section 3.4.1.3`_ are normalized into a
    # single string as follows:
    #
    # .. _`Section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3

    # 1.  First, the name and value of each parameter are encoded
    #     (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    key_values = [(utils.escape(k), utils.escape(v)) for k, v in params]

    # 2.  The parameters are sorted by name, using ascending byte value
    #     ordering.  If two or more parameters share the same name, they
    #     are sorted by their value.
    key_values.sort()

    # 3.  The name of each parameter is concatenated to its corresponding
    #     value using an "=" character (ASCII code 61) as a separator, even
    #     if the value is empty.
    parameter_parts = [u'{0}={1}'.format(k, v) for k, v in key_values]

    # 4.  The sorted name/value pairs are concatenated together into a
    #     single string by using an "&" character (ASCII code 38) as
    #     separator.
    return u'&'.join(parameter_parts)


def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA1**

    The "HMAC-SHA1" signature method uses the HMAC-SHA1 signature
    algorithm as defined in `RFC2104`_::

        digest = HMAC-SHA1 (key, text)

    Per `section 3.4.2`_ of the spec.

    .. _`RFC2104`: http://tools.ietf.org/html/rfc2104
    .. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
    """

    # The HMAC-SHA1 function variables are used in following way:

    # text is set to the value of the signature base string from
    # `Section 3.4.1.1`_.
    #
    # .. _`Section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
    text = base_string

    # key is set to the concatenated values of:
    # 1.  The client shared-secret, after being encoded (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    key = utils.escape(client_secret or u'')

    # 2.  An "&" character (ASCII code 38), which MUST be included
    #     even when either secret is empty.
    key += u'&'

    # 3.  The token shared-secret, after being encoded (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    key += utils.escape(resource_owner_secret or u'')

    # FIXME: HMAC does not support unicode!
    key_utf8 = key.encode('utf-8')
    text_utf8 = text.encode('utf-8')
    signature = hmac.new(key_utf8, text_utf8, hashlib.sha1)

    # digest is used to set the value of the "oauth_signature" protocol
    # parameter, after the result octet string is base64-encoded
    # per `RFC2045, Section 6.8`.
    #
    # .. _`RFC2045, Section 6.8`: http://tools.ietf.org/html/rfc2045#section-6.8
    return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')


def sign_rsa_sha1(base_string, rsa_private_key):
    """**RSA-SHA1**

    Per `section 3.4.3`_ of the spec.

    The "RSA-SHA1" signature method uses the RSASSA-PKCS1-v1_5 signature
    algorithm as defined in `RFC3447, Section 8.2`_ (also known as
    PKCS#1), using SHA-1 as the hash function for EMSA-PKCS1-v1_5.  To
    use this method, the client MUST have established client credentials
    with the server that included its RSA public key (in a manner that is
    beyond the scope of this specification).

    NOTE: this method requires the python-rsa library.

    .. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: http://tools.ietf.org/html/rfc3447#section-8.2
    """
    # TODO: finish RSA documentation
    import rsa
    key = rsa.PrivateKey.load_pkcs1(rsa_private_key)
    sig = rsa.sign(base_string, key, 'SHA-1')
    return binascii.b2a_base64(sig)[:-1]


def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext.

    Per `section 3.4.4`_ of the spec.

    The "PLAINTEXT" method does not employ a signature algorithm.  It
    MUST be used with a transport-layer mechanism such as TLS or SSL (or
    sent over a secure channel with equivalent protections).  It does not
    utilize the signature base string or the "oauth_timestamp" and
    "oauth_nonce" parameters.

    .. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
    """

    # The "oauth_signature" protocol parameter is set to the concatenated
    # value of:

    # 1.  The client shared-secret, after being encoded (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    signature = utils.escape(client_secret or u'')

    # 2.  An "&" character (ASCII code 38), which MUST be included even
    #     when either secret is empty.
    signature += u'&'

    # 3.  The token shared-secret, after being encoded (`Section 3.6`_).
    #
    # .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
    signature += utils.escape(resource_owner_secret or u'')

    return signature
19,020
Python
.py
392
42.778061
99
0.599438
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
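A minimal, self-contained sketch of the parameter-normalization and HMAC-SHA1 steps documented in the signature module above. It uses only the Python standard library and illustrative helper names (not the vendored module's API); the sample parameters follow the RFC 5849 example and the secrets are placeholders.

try:
    from urllib import quote          # Python 2
except ImportError:
    from urllib.parse import quote    # Python 3
import binascii
import hashlib
import hmac

def _escape(s):
    # Percent-encode per RFC 5849, Section 3.6: only unreserved characters and '~' stay literal.
    return quote(s.encode('utf-8'), safe='~')

def normalize(params):
    # 1. escape names and values, 2. sort by name then value, 3. join as name=value with '&'.
    pairs = sorted((_escape(k), _escape(v)) for k, v in params)
    return '&'.join('%s=%s' % (k, v) for k, v in pairs)

def hmac_sha1_signature(base_string, client_secret, token_secret=''):
    # Key is "escaped client secret & escaped token secret"; the '&' is kept even when a secret is empty.
    key = '%s&%s' % (_escape(client_secret), _escape(token_secret))
    digest = hmac.new(key.encode('utf-8'), base_string.encode('utf-8'), hashlib.sha1)
    return binascii.b2a_base64(digest.digest())[:-1].decode('utf-8')

params = [('b5', '=%3D'), ('a3', 'a'), ('a2', 'r b'), ('c2', '')]
print(normalize(params))  # a2=r%20b&a3=a&b5=%3D%253D&c2=

# Base string = uppercase method & escaped base URI & escaped normalized parameters.
base_string = '&'.join(['GET', _escape('http://example.com/request'), _escape(normalize(params))])
print(hmac_sha1_signature(base_string, 'client-secret', 'token-secret'))
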
7,532
__init__.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth2/__init__.py
# -*- coding: utf-8 -*- """ oauthlib.oauth2 ~~~~~~~~~~~~~~~ This module is a wrapper for the most recent implementation of OAuth 2.0 Client and Server classes. """ from __future__ import absolute_import from .draft25 import Client, Server
242
Python
.py
9
25.444444
79
0.716157
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,533
tokens.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth2/draft25/tokens.py
from __future__ import absolute_import """ oauthlib.oauth2.draft25.tokens ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains methods for adding two types of access tokens to requests. - Bearer http://tools.ietf.org/html/draft-ietf-oauth-saml2-bearer-08 - MAC http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00 """ from binascii import b2a_base64 import hashlib import hmac from urlparse import urlparse from . import utils def prepare_mac_header(token, uri, key, http_method, nonce=None, headers=None, body=None, ext=u'', hash_algorithm=u'hmac-sha-1'): """Add an `MAC Access Authentication`_ signature to headers. Unlike OAuth 1, this HMAC signature does not require inclusion of the request payload/body, neither does it use a combination of client_secret and token_secret but rather a mac_key provided together with the access token. Currently two algorithms are supported, "hmac-sha-1" and "hmac-sha-256", `extension algorithms`_ are not supported. Example MAC Authorization header, linebreaks added for clarity Authorization: MAC id="h480djs93hd8", nonce="1336363200:dj83hs9s", mac="bhCQXTVyfj5cmA9uKkPFx1zeOXM=" .. _`MAC Access Authentication`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01 .. _`extension algorithms`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-7.1 :param uri: Request URI. :param headers: Request headers as a dictionary. :param http_method: HTTP Request method. :param key: MAC given provided by token endpoint. :param algorithm: HMAC algorithm provided by token endpoint. :return: headers dictionary with the authorization field added. """ http_method = http_method.upper() host, port = utils.host_from_uri(uri) if hash_algorithm.lower() == u'hmac-sha-1': h = hashlib.sha1 else: h = hashlib.sha256 nonce = nonce or u'{0}:{1}'.format(utils.generate_nonce(), utils.generate_timestamp()) sch, net, path, par, query, fra = urlparse(uri) if query: request_uri = path + u'?' + query else: request_uri = path # Hash the body/payload if body is not None: bodyhash = b2a_base64(h(body).digest())[:-1].decode('utf-8') else: bodyhash = u'' # Create the normalized base string base = [] base.append(nonce) base.append(http_method.upper()) base.append(request_uri) base.append(host) base.append(port) base.append(bodyhash) base.append(ext) base_string = '\n'.join(base) + u'\n' # hmac struggles with unicode strings - http://bugs.python.org/issue5285 if isinstance(key, unicode): key = key.encode('utf-8') sign = hmac.new(key, base_string, h) sign = b2a_base64(sign.digest())[:-1].decode('utf-8') header = [] header.append(u'MAC id="%s"' % token) header.append(u'nonce="%s"' % nonce) if bodyhash: header.append(u'bodyhash="%s"' % bodyhash) if ext: header.append(u'ext="%s"' % ext) header.append(u'mac="%s"' % sign) headers = headers or {} headers[u'Authorization'] = u', '.join(header) return headers def prepare_bearer_uri(token, uri): """Add a `Bearer Token`_ to the request URI. Not recommended, use only if client can't use authorization header or body. http://www.example.com/path?access_token=h480djs93hd8 .. _`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ return utils.add_params_to_uri(uri, [((u'access_token', token))]) def prepare_bearer_headers(token, headers=None): """Add a `Bearer Token`_ to the request URI. Recommended method of passing bearer tokens. Authorization: Bearer h480djs93hd8 .. 
_`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ headers = headers or {} headers[u'Authorization'] = u'Bearer %s' % token return headers def prepare_bearer_body(token, body=u''): """Add a `Bearer Token`_ to the request body. access_token=h480djs93hd8 .. _`Bearer Token`: http://tools.ietf.org/html/draft-ietf-oauth-v2-bearer-18 """ return utils.add_params_to_qs(body, [((u'access_token', token))])
4,258
Python
.py
99
37.585859
102
0.677974
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
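A short usage sketch for the token helpers above. The import path is an assumption based on this file's location under libs/, and all token, key, and URI values are placeholders.

from oauthlib.oauth2.draft25 import tokens

# Recommended way to pass a bearer token: the Authorization header.
headers = tokens.prepare_bearer_headers(u'h480djs93hd8')
# {u'Authorization': u'Bearer h480djs93hd8'}

# Fallbacks: query string or form-encoded body.
uri = tokens.prepare_bearer_uri(u'h480djs93hd8', u'https://api.example.com/resource')
body = tokens.prepare_bearer_body(u'h480djs93hd8')

# MAC-style signing: nonce, body hash, and base string are assembled as described above.
mac_headers = tokens.prepare_mac_header(
    token=u'h480djs93hd8',
    uri=u'https://api.example.com/resource/1?b=1&a=2',
    key=u'489dks293j39',
    http_method=u'POST',
    body=u'hello=world%21',
    ext=u'a,b,c')
print(mac_headers[u'Authorization'])
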
7,534
utils.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth2/draft25/utils.py
""" oauthlib.utils ~~~~~~~~~~~~~~ This module contains utility methods used by various parts of the OAuth 2 spec. """ import random import string import time import urllib from urlparse import urlparse, urlunparse, parse_qsl UNICODE_ASCII_CHARACTER_SET = (string.ascii_letters.decode('ascii') + string.digits.decode('ascii')) def add_params_to_qs(query, params): """Extend a query with a list of two-tuples. :param query: Query string. :param params: List of two-tuples. :return: extended query """ queryparams = parse_qsl(query, keep_blank_values=True) queryparams.extend(params) return urlencode(queryparams) def add_params_to_uri(uri, params): """Add a list of two-tuples to the uri query components. :param uri: Full URI. :param params: List of two-tuples. :return: uri with extended query """ sch, net, path, par, query, fra = urlparse(uri) query = add_params_to_qs(query, params) return urlunparse((sch, net, path, par, query, fra)) def escape(u): """Escape a string in an OAuth-compatible fashion. Per `section 3.6`_ of the spec. .. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6 """ if not isinstance(u, unicode): raise ValueError('Only unicode objects are escapable.') return urllib.quote(u.encode('utf-8'), safe='~') def generate_nonce(): """Generate pseudorandom nonce that is unlikely to repeat. Per `section 3.2.1`_ of the MAC Access Authentication spec. A random 64-bit number is appended to the epoch timestamp for both randomness and to decrease the likelihood of collisions. .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1 """ return unicode(unicode(random.getrandbits(64)) + generate_timestamp()) def generate_timestamp(): """Get seconds since epoch (UTC). Per `section 3.2.1`_ of the MAC Access Authentication spec. .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1 """ return unicode(int(time.time())) def generate_token(length=20, chars=UNICODE_ASCII_CHARACTER_SET): """Generates a generic OAuth 2 token According to `section 1.4`_ and `section 1.5` of the spec, the method of token construction is undefined. This implementation is simply a random selection of `length` choices from `chars`. SystemRandom is used since it provides higher entropy than random.choice. .. _`section 1.4`: http://tools.ietf.org/html/draft-ietf-oauth-v2-25#section-1.4 .. _`section 1.5`: http://tools.ietf.org/html/draft-ietf-oauth-v2-25#section-1.5 """ rand = random.SystemRandom() return u''.join(rand.choice(chars) for x in range(length)) def host_from_uri(uri): """Extract hostname and port from URI. Will use default port for HTTP and HTTPS if none is present in the URI. >>> host_from_uri(u'https://www.example.com/path?query') u'www.example.com', u'443' >>> host_from_uri(u'http://www.example.com:8080/path?query') u'www.example.com', u'8080' :param uri: Full URI. :param http_method: HTTP request method. :return: hostname, port """ default_ports = { u'HTTP' : u'80', u'HTTPS' : u'443', } sch, netloc, path, par, query, fra = urlparse(uri) if u':' in netloc: netloc, port = netloc.split(u':', 1) else: port = default_ports.get(sch.upper()) return netloc, port def urlencode(query): """Encode a sequence of two-element tuples or dictionary into a URL query string. Operates using an OAuth-safe escape() method, in contrast to urllib.urlenocde. """ # Convert dictionaries to list of tuples if isinstance(query, dict): query = query.items() return "&".join(['='.join([escape(k), escape(v)]) for k, v in query])
3,876
Python
.py
92
37.282609
97
0.684899
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
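A usage sketch for the utility functions above (Python 2, matching the module's urlparse and unicode handling); the import path is an assumption and escape() only accepts unicode input.

from oauthlib.oauth2.draft25 import utils

# Append an access token to an existing query string / URI.
print(utils.add_params_to_uri(u'https://example.com/cb?state=xyz',
                              [(u'access_token', u'h480djs93hd8')]))
# https://example.com/cb?state=xyz&access_token=h480djs93hd8

# Hostname/port extraction falls back to the scheme's default port when none is given.
print(utils.host_from_uri(u'http://www.example.com:8080/path'))  # (u'www.example.com', u'8080')

# OAuth-safe percent-encoding: only unreserved characters and '~' stay literal.
print(utils.escape(u'hello world!'))  # hello%20world%21

# 20-character random token drawn from SystemRandom.
print(utils.generate_token())
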
7,535
__init__.py
CouchPotato_CouchPotatoServer/libs/oauthlib/oauth2/draft25/__init__.py
""" oauthlib.oauth2.draft_25 ~~~~~~~~~~~~~~ This module is an implementation of various logic needed for signing and checking OAuth 2.0 draft 25 requests. """ class Client(object): pass class Server(object): pass
225
Python
.py
10
20.3
56
0.729858
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,536
api.py
CouchPotato_CouchPotatoServer/libs/pio/api.py
# -*- coding: utf-8 -*- # Changed # Removed iso8601 library requirement # Added CP logging import os import json import binascii import webbrowser try: from urllib import urlencode except ImportError: from urllib.parse import urlencode from datetime import datetime import tus import requests from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.retry import Retry from couchpotato import CPLog KB = 1024 MB = 1024 * KB # Read and write operations are limited to this chunk size. # This can make a big difference when dealing with large files. CHUNK_SIZE = 256 * KB BASE_URL = 'https://api.put.io/v2' UPLOAD_URL = 'https://upload.put.io/v2/files/upload' TUS_UPLOAD_URL = 'https://upload.put.io/files/' ACCESS_TOKEN_URL = 'https://api.put.io/v2/oauth2/access_token' AUTHENTICATION_URL = 'https://api.put.io/v2/oauth2/authenticate' log = CPLog(__name__) class APIError(Exception): pass class ClientError(APIError): pass class ServerError(APIError): pass class AuthHelper(object): def __init__(self, client_id, client_secret, redirect_uri, type='code'): self.client_id = client_id self.client_secret = client_secret self.callback_url = redirect_uri self.type = type @property def authentication_url(self): """Redirect your users to here to authenticate them.""" params = { 'client_id': self.client_id, 'response_type': self.type, 'redirect_uri': self.callback_url } return AUTHENTICATION_URL + "?" + urlencode(params) def open_authentication_url(self): webbrowser.open(self.authentication_url) def get_access_token(self, code): params = { 'client_id': self.client_id, 'client_secret': self.client_secret, 'grant_type': 'authorization_code', 'redirect_uri': self.callback_url, 'code': code } response = requests.get(ACCESS_TOKEN_URL, params=params) log.debug(response) assert response.status_code == 200 return response.json()['access_token'] class Client(object): def __init__(self, access_token, use_retry=False): self.access_token = access_token self.session = requests.session() if use_retry: # Retry maximum 10 times, backoff on each retry # Sleeps 1s, 2s, 4s, 8s, etc to a maximum of 120s between retries # Retries on HTTP status codes 500, 502, 503, 504 retries = Retry(total=10, backoff_factor=1, status_forcelist=[500, 502, 503, 504]) # Use the retry strategy for all HTTPS requests self.session.mount('https://', HTTPAdapter(max_retries=retries)) # Keep resource classes as attributes of client. # Pass client to resource classes so resource object # can use the client. attributes = {'client': self} self.File = type('File', (_File,), attributes) self.Transfer = type('Transfer', (_Transfer,), attributes) self.Account = type('Account', (_Account,), attributes) def request(self, path, method='GET', params=None, data=None, files=None, headers=None, raw=False, allow_redirects=True, stream=False): """ Wrapper around requests.request() Prepends BASE_URL to path. Inserts oauth_token to query params. Parses response as JSON and returns it. 
""" if not params: params = {} if not headers: headers = {} # All requests must include oauth_token params['oauth_token'] = self.access_token headers['Accept'] = 'application/json' if path.startswith('https://'): url = path else: url = BASE_URL + path log.debug('url: %s', url) response = self.session.request( method, url, params=params, data=data, files=files, headers=headers, allow_redirects=allow_redirects, stream=stream) log.debug('response: %s', response) if raw: return response log.debug('content: %s', response.content) try: body = json.loads(response.content.decode()) except ValueError: raise ServerError('InvalidJSON', response.content) if body['status'] == 'ERROR': log.error("API returned error: %s", body) exception_class = {'4': ClientError, '5': ServerError}[str(response.status_code)[0]] raise exception_class(body['error_type'], body['error_message']) return body class _BaseResource(object): client = None def __init__(self, resource_dict): """Constructs the object from a dict.""" # All resources must have id and name attributes self.id = None self.name = None self.__dict__.update(resource_dict) try: self.created_at = strptime(self.created_at) except Exception: self.created_at = None def __str__(self): return self.name.encode('utf-8') def __repr__(self): # shorten name for display name = self.name[:17] + '...' if len(self.name) > 20 else self.name return '<%s id=%r, name=%r>' % ( self.__class__.__name__, self.id, name) class _File(_BaseResource): @classmethod def get(cls, id): d = cls.client.request('/files/%i' % id, method='GET') t = d['file'] return cls(t) @classmethod def list(cls, parent_id=0): d = cls.client.request('/files/list', params={'parent_id': parent_id}) files = d['files'] return [cls(f) for f in files] @classmethod def upload(cls, path, name=None, parent_id=0): with open(path) as f: if name: files = {'file': (name, f)} else: files = {'file': f} d = cls.client.request(UPLOAD_URL, method='POST', data={'parent_id': parent_id}, files=files) f = d['file'] return cls(f) @classmethod def upload_tus(cls, path, name=None, parent_id=0): headers = {'Authorization': 'token %s' % cls.client.access_token} metadata = {'parent_id': str(parent_id)} if name: metadata['name'] = name with open(path) as f: tus.upload(f, TUS_UPLOAD_URL, file_name=name, headers=headers, metadata=metadata) def dir(self): """List the files under directory.""" return self.list(parent_id=self.id) def download(self, dest='.', delete_after_download=False, chunk_size=CHUNK_SIZE): if self.content_type == 'application/x-directory': self._download_directory(dest, delete_after_download, chunk_size) else: self._download_file(dest, delete_after_download, chunk_size) def _download_directory(self, dest, delete_after_download, chunk_size): name = _str(self.name) dest = os.path.join(dest, name) if not os.path.exists(dest): os.mkdir(dest) for sub_file in self.dir(): sub_file.download(dest, delete_after_download, chunk_size) if delete_after_download: self.delete() def _verify_file(self, filepath): log.info('verifying crc32...') filesize = os.path.getsize(filepath) if self.size != filesize: logging.error('file %s is %d bytes, should be %s bytes' % (filepath, filesize, self.size)) return False crcbin = 0 with open(filepath, 'rb') as f: while True: chunk = f.read(CHUNK_SIZE) if not chunk: break crcbin = binascii.crc32(chunk, crcbin) & 0xffffffff crc32 = '%08x' % crcbin if crc32 != self.crc32: logging.error('file %s CRC32 is %s, should be %s' % (filepath, crc32, self.crc32)) return False return True def _download_file(self, 
dest, delete_after_download, chunk_size): name = _str(self.name) filepath = os.path.join(dest, name) if os.path.exists(filepath): first_byte = os.path.getsize(filepath) if first_byte == self.size: log.warning('file %s exists and is the correct size %d' % (filepath, self.size)) else: first_byte = 0 log.debug('file %s is currently %d, should be %d' % (filepath, first_byte, self.size)) if self.size == 0: # Create an empty file open(filepath, 'w').close() log.debug('created empty file %s' % filepath) else: if first_byte < self.size: with open(filepath, 'ab') as f: headers = {'Range': 'bytes=%d-' % first_byte} log.debug('request range: bytes=%d-' % first_byte) response = self.client.request('/files/%s/download' % self.id, headers=headers, raw=True, stream=True) for chunk in response.iter_content(chunk_size=chunk_size): if chunk: # filter out keep-alive new chunks f.write(chunk) if self._verify_file(filepath): if delete_after_download: self.delete() def delete(self): return self.client.request('/files/delete', method='POST', data={'file_id': str(self.id)}) @classmethod def delete_multi(cls, ids): return cls.client.request('/files/delete', method='POST', data={'file_ids': ','.join(map(str, ids))}) def move(self, parent_id): return self.client.request('/files/move', method='POST', data={'file_ids': str(self.id), 'parent_id': str(parent_id)}) def rename(self, name): return self.client.request('/files/rename', method='POST', data={'file_id': str(self.id), 'name': str(name)}) class _Transfer(_BaseResource): @classmethod def list(cls): d = cls.client.request('/transfers/list') transfers = d['transfers'] return [cls(t) for t in transfers] @classmethod def get(cls, id): d = cls.client.request('/transfers/%i' % id, method='GET') t = d['transfer'] return cls(t) @classmethod def add_url(cls, url, parent_id=0, extract=False, callback_url=None): log.debug('callback_url is %s', callback_url) d = cls.client.request('/transfers/add', method='POST', data=dict( url=url, save_parent_id=parent_id, extract=extract, callback_url=callback_url)) t = d['transfer'] return cls(t) @classmethod def add_torrent(cls, path, parent_id=0, extract=False, callback_url=None): with open(path, 'rb') as f: files = {'file': f} d = cls.client.request('/files/upload', method='POST', files=files, data=dict(parent_id=parent_id, extract=extract, callback_url=callback_url)) t = d['transfer'] return cls(t) @classmethod def clean(cls): return cls.client.request('/transfers/clean', method='POST') def cancel(self): return self.client.request('/transfers/cancel', method='POST', data={'transfer_ids': self.id}) @classmethod def cancel_multi(cls, ids): return cls.client.request('/transfers/cancel', method='POST', data={'transfer_ids': ','.join(map(str, ids))}) class _Account(_BaseResource): @classmethod def info(cls): return cls.client.request('/account/info', method='GET') @classmethod def settings(cls): return cls.client.request('/account/settings', method='GET') # Due to a nasty bug in datetime module, datetime.strptime calls # are not thread-safe and can throw a TypeError. Details: https://bugs.python.org/issue7980 # Here we are implementing simple RFC3339 parser which is used in Put.io APIv2. 
def strptime(date): """Return a datetime object from the given date string, which is in a specific format: YYYY-MM-ddTHH:mm:ss""" d = { 'year': date[0:4], 'month': date[5:7], 'day': date[8:10], 'hour': date[11:13], 'minute': date[14:16], 'second': date[17:], } d = dict((k, int(v)) for k, v in d.items()) return datetime(**d) def _str(s): """Python 3 compatibility function for converting to str.""" try: if isinstance(s, unicode): return s.encode('utf-8', 'replace') except NameError: pass return s
13,183
Python
.py
317
31.223975
105
0.577221
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
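A usage sketch for the put.io client above. The import path assumes the vendored libs/ directory is on sys.path (the module also expects CouchPotato's CPLog to be importable); the client id, secret, and callback code are placeholders.

from pio import api

# One-time OAuth dance: send the user to the authentication URL, then trade the code for a token.
helper = api.AuthHelper('MY_CLIENT_ID', 'MY_CLIENT_SECRET', 'http://localhost/callback')
print(helper.authentication_url)
access_token = helper.get_access_token('CODE_FROM_CALLBACK')

# use_retry=True enables the backoff/retry strategy wired up in Client.__init__.
client = api.Client(access_token, use_retry=True)

for f in client.File.list(parent_id=0):      # list the root folder
    print(f.id, f.name)

client.Transfer.add_url('http://example.com/file.torrent')  # queue a remote download
print(client.Account.info())
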
7,537
subtitles.py
CouchPotato_CouchPotatoServer/libs/subliminal/subtitles.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .language import Language from .utils import to_unicode import os.path __all__ = ['Subtitle', 'EmbeddedSubtitle', 'ExternalSubtitle', 'ResultSubtitle', 'get_subtitle_path'] #: Subtitles extensions EXTENSIONS = ['.srt', '.sub', '.txt', '.ass'] class Subtitle(object): """Base class for subtitles :param string path: path to the subtitle :param language: language of the subtitle :type language: :class:`~subliminal.language.Language` """ def __init__(self, path, language): if not isinstance(language, Language): raise TypeError('%r is not an instance of Language') self.path = path self.language = language @property def exists(self): """Whether the subtitle exists or not""" if self.path: return os.path.exists(self.path) return False def __unicode__(self): return to_unicode(self.path) def __str__(self): return unicode(self).encode('utf-8') def __repr__(self): return '%s(%s, %s)' % (self.__class__.__name__, self, self.language) class EmbeddedSubtitle(Subtitle): """Subtitle embedded in a container :param string path: path to the subtitle :param language: language of the subtitle :type language: :class:`~subliminal.language.Language` :param int track_id: id of the subtitle track in the container """ def __init__(self, path, language, track_id): super(EmbeddedSubtitle, self).__init__(path, language) self.track_id = track_id @classmethod def from_enzyme(cls, path, subtitle): language = Language(subtitle.language, strict=False) return cls(path, language, subtitle.trackno) class ExternalSubtitle(Subtitle): """Subtitle in a file next to the video file""" @classmethod def from_path(cls, path): """Create an :class:`ExternalSubtitle` from path""" extension = None for e in EXTENSIONS: if path.endswith(e): extension = e break if extension is None: raise ValueError('Not a supported subtitle extension') language = Language(os.path.splitext(path[:len(path) - len(extension)])[1][1:], strict=False) return cls(path, language) class ResultSubtitle(ExternalSubtitle): """Subtitle found using :mod:`~subliminal.services` :param string path: path to the subtitle :param language: language of the subtitle :type language: :class:`~subliminal.language.Language` :param string service: name of the service :param string link: download link for the subtitle :param string release: release name of the video :param float confidence: confidence that the subtitle matches the video according to the service :param set keywords: keywords that describe the subtitle """ def __init__(self, path, language, service, link, release=None, confidence=1, keywords=None): super(ResultSubtitle, self).__init__(path, language) self.service = service self.link = link self.release = release self.confidence = confidence self.keywords = keywords or set() @property def single(self): """Whether this 
is a single subtitle or not. A single subtitle does not have a language indicator in its file name :rtype: bool """ return self.language == Language('Undetermined') def __repr__(self): if not self.release: return 'ResultSubtitle(%s, %s, %s, %.2f)' % (self.path, self.language, self.service, self.confidence) return 'ResultSubtitle(%s, %s, %s, %.2f, release=%s)' % (self.path, self.language, self.service, self.confidence, self.release.encode('ascii', 'ignore')) def get_subtitle_path(video_path, language, multi): """Create the subtitle path from the given video path using language if multi :param string video_path: path to the video :param language: language of the subtitle :type language: :class:`~subliminal.language.Language` :param bool multi: whether to use multi language naming or not :return: path of the subtitle :rtype: string """ if not os.path.exists(video_path): path = os.path.splitext(os.path.basename(video_path))[0] else: path = os.path.splitext(video_path)[0] if multi and language: return path + '.%s%s' % (language.alpha2, EXTENSIONS[0]) return path + '%s' % EXTENSIONS[0]
5,252
Python
.py
119
38.109244
161
0.681168
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
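A usage sketch for the subtitle helpers above; the paths are placeholders and the imports assume the vendored subliminal package layout shown in the file paths.

from subliminal.language import Language
from subliminal.subtitles import ExternalSubtitle, get_subtitle_path

# Multi-language naming inserts the alpha2 code before the ".srt" extension.
print(get_subtitle_path('/movies/Sintel.2010.mkv', Language('en'), multi=True))
# '<video stem>.en.srt'; the directory is kept only when the video file actually exists

print(get_subtitle_path('/movies/Sintel.2010.mkv', Language('en'), multi=False))
# '<video stem>.srt'

# Guess the language of an external subtitle from the suffix in its file name.
sub = ExternalSubtitle.from_path('/movies/Sintel.2010.en.srt')
print(sub.language)  # the Language guessed from the '.en' suffix
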
7,538
infos.py
CouchPotato_CouchPotatoServer/libs/subliminal/infos.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. __version__ = '0.6.2'
793
Python
.py
18
43.055556
77
0.766452
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,539
language.py
CouchPotato_CouchPotatoServer/libs/subliminal/language.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .utils import to_unicode import re import logging logger = logging.getLogger(__name__) COUNTRIES = [('AF', 'AFG', '004', u'Afghanistan'), ('AX', 'ALA', '248', u'Åland Islands'), ('AL', 'ALB', '008', u'Albania'), ('DZ', 'DZA', '012', u'Algeria'), ('AS', 'ASM', '016', u'American Samoa'), ('AD', 'AND', '020', u'Andorra'), ('AO', 'AGO', '024', u'Angola'), ('AI', 'AIA', '660', u'Anguilla'), ('AQ', 'ATA', '010', u'Antarctica'), ('AG', 'ATG', '028', u'Antigua and Barbuda'), ('AR', 'ARG', '032', u'Argentina'), ('AM', 'ARM', '051', u'Armenia'), ('AW', 'ABW', '533', u'Aruba'), ('AU', 'AUS', '036', u'Australia'), ('AT', 'AUT', '040', u'Austria'), ('AZ', 'AZE', '031', u'Azerbaijan'), ('BS', 'BHS', '044', u'Bahamas'), ('BH', 'BHR', '048', u'Bahrain'), ('BD', 'BGD', '050', u'Bangladesh'), ('BB', 'BRB', '052', u'Barbados'), ('BY', 'BLR', '112', u'Belarus'), ('BE', 'BEL', '056', u'Belgium'), ('BZ', 'BLZ', '084', u'Belize'), ('BJ', 'BEN', '204', u'Benin'), ('BM', 'BMU', '060', u'Bermuda'), ('BT', 'BTN', '064', u'Bhutan'), ('BO', 'BOL', '068', u'Bolivia, Plurinational State of'), ('BQ', 'BES', '535', u'Bonaire, Sint Eustatius and Saba'), ('BA', 'BIH', '070', u'Bosnia and Herzegovina'), ('BW', 'BWA', '072', u'Botswana'), ('BV', 'BVT', '074', u'Bouvet Island'), ('BR', 'BRA', '076', u'Brazil'), ('IO', 'IOT', '086', u'British Indian Ocean Territory'), ('BN', 'BRN', '096', u'Brunei Darussalam'), ('BG', 'BGR', '100', u'Bulgaria'), ('BF', 'BFA', '854', u'Burkina Faso'), ('BI', 'BDI', '108', u'Burundi'), ('KH', 'KHM', '116', u'Cambodia'), ('CM', 'CMR', '120', u'Cameroon'), ('CA', 'CAN', '124', u'Canada'), ('CV', 'CPV', '132', u'Cape Verde'), ('KY', 'CYM', '136', u'Cayman Islands'), ('CF', 'CAF', '140', u'Central African Republic'), ('TD', 'TCD', '148', u'Chad'), ('CL', 'CHL', '152', u'Chile'), ('CN', 'CHN', '156', u'China'), ('CX', 'CXR', '162', u'Christmas Island'), ('CC', 'CCK', '166', u'Cocos (Keeling) Islands'), ('CO', 'COL', '170', u'Colombia'), ('KM', 'COM', '174', u'Comoros'), ('CG', 'COG', '178', u'Congo'), ('CD', 'COD', '180', u'Congo, The Democratic Republic of the'), ('CK', 'COK', '184', u'Cook Islands'), ('CR', 'CRI', '188', u'Costa Rica'), ('CI', 'CIV', '384', u'Côte d\'Ivoire'), ('HR', 'HRV', '191', u'Croatia'), ('CU', 'CUB', '192', u'Cuba'), ('CW', 'CUW', '531', u'Curaçao'), ('CY', 'CYP', '196', u'Cyprus'), ('CZ', 'CZE', '203', u'Czech Republic'), ('DK', 'DNK', '208', u'Denmark'), ('DJ', 'DJI', '262', u'Djibouti'), ('DM', 'DMA', '212', u'Dominica'), ('DO', 'DOM', '214', u'Dominican Republic'), ('EC', 'ECU', '218', u'Ecuador'), ('EG', 'EGY', '818', u'Egypt'), ('SV', 'SLV', '222', u'El Salvador'), ('GQ', 'GNQ', '226', u'Equatorial Guinea'), ('ER', 'ERI', '232', u'Eritrea'), ('EE', 'EST', '233', u'Estonia'), ('ET', 'ETH', '231', u'Ethiopia'), 
('FK', 'FLK', '238', u'Falkland Islands (Malvinas)'), ('FO', 'FRO', '234', u'Faroe Islands'), ('FJ', 'FJI', '242', u'Fiji'), ('FI', 'FIN', '246', u'Finland'), ('FR', 'FRA', '250', u'France'), ('GF', 'GUF', '254', u'French Guiana'), ('PF', 'PYF', '258', u'French Polynesia'), ('TF', 'ATF', '260', u'French Southern Territories'), ('GA', 'GAB', '266', u'Gabon'), ('GM', 'GMB', '270', u'Gambia'), ('GE', 'GEO', '268', u'Georgia'), ('DE', 'DEU', '276', u'Germany'), ('GH', 'GHA', '288', u'Ghana'), ('GI', 'GIB', '292', u'Gibraltar'), ('GR', 'GRC', '300', u'Greece'), ('GL', 'GRL', '304', u'Greenland'), ('GD', 'GRD', '308', u'Grenada'), ('GP', 'GLP', '312', u'Guadeloupe'), ('GU', 'GUM', '316', u'Guam'), ('GT', 'GTM', '320', u'Guatemala'), ('GG', 'GGY', '831', u'Guernsey'), ('GN', 'GIN', '324', u'Guinea'), ('GW', 'GNB', '624', u'Guinea-Bissau'), ('GY', 'GUY', '328', u'Guyana'), ('HT', 'HTI', '332', u'Haiti'), ('HM', 'HMD', '334', u'Heard Island and McDonald Islands'), ('VA', 'VAT', '336', u'Holy See (Vatican City State)'), ('HN', 'HND', '340', u'Honduras'), ('HK', 'HKG', '344', u'Hong Kong'), ('HU', 'HUN', '348', u'Hungary'), ('IS', 'ISL', '352', u'Iceland'), ('IN', 'IND', '356', u'India'), ('ID', 'IDN', '360', u'Indonesia'), ('IR', 'IRN', '364', u'Iran, Islamic Republic of'), ('IQ', 'IRQ', '368', u'Iraq'), ('IE', 'IRL', '372', u'Ireland'), ('IM', 'IMN', '833', u'Isle of Man'), ('IL', 'ISR', '376', u'Israel'), ('IT', 'ITA', '380', u'Italy'), ('JM', 'JAM', '388', u'Jamaica'), ('JP', 'JPN', '392', u'Japan'), ('JE', 'JEY', '832', u'Jersey'), ('JO', 'JOR', '400', u'Jordan'), ('KZ', 'KAZ', '398', u'Kazakhstan'), ('KE', 'KEN', '404', u'Kenya'), ('KI', 'KIR', '296', u'Kiribati'), ('KP', 'PRK', '408', u'Korea, Democratic People\'s Republic of'), ('KR', 'KOR', '410', u'Korea, Republic of'), ('KW', 'KWT', '414', u'Kuwait'), ('KG', 'KGZ', '417', u'Kyrgyzstan'), ('LA', 'LAO', '418', u'Lao People\'s Democratic Republic'), ('LV', 'LVA', '428', u'Latvia'), ('LB', 'LBN', '422', u'Lebanon'), ('LS', 'LSO', '426', u'Lesotho'), ('LR', 'LBR', '430', u'Liberia'), ('LY', 'LBY', '434', u'Libya'), ('LI', 'LIE', '438', u'Liechtenstein'), ('LT', 'LTU', '440', u'Lithuania'), ('LU', 'LUX', '442', u'Luxembourg'), ('MO', 'MAC', '446', u'Macao'), ('MK', 'MKD', '807', u'Macedonia, Republic of'), ('MG', 'MDG', '450', u'Madagascar'), ('MW', 'MWI', '454', u'Malawi'), ('MY', 'MYS', '458', u'Malaysia'), ('MV', 'MDV', '462', u'Maldives'), ('ML', 'MLI', '466', u'Mali'), ('MT', 'MLT', '470', u'Malta'), ('MH', 'MHL', '584', u'Marshall Islands'), ('MQ', 'MTQ', '474', u'Martinique'), ('MR', 'MRT', '478', u'Mauritania'), ('MU', 'MUS', '480', u'Mauritius'), ('YT', 'MYT', '175', u'Mayotte'), ('MX', 'MEX', '484', u'Mexico'), ('FM', 'FSM', '583', u'Micronesia, Federated States of'), ('MD', 'MDA', '498', u'Moldova, Republic of'), ('MC', 'MCO', '492', u'Monaco'), ('MN', 'MNG', '496', u'Mongolia'), ('ME', 'MNE', '499', u'Montenegro'), ('MS', 'MSR', '500', u'Montserrat'), ('MA', 'MAR', '504', u'Morocco'), ('MZ', 'MOZ', '508', u'Mozambique'), ('MM', 'MMR', '104', u'Myanmar'), ('NA', 'NAM', '516', u'Namibia'), ('NR', 'NRU', '520', u'Nauru'), ('NP', 'NPL', '524', u'Nepal'), ('NL', 'NLD', '528', u'Netherlands'), ('NC', 'NCL', '540', u'New Caledonia'), ('NZ', 'NZL', '554', u'New Zealand'), ('NI', 'NIC', '558', u'Nicaragua'), ('NE', 'NER', '562', u'Niger'), ('NG', 'NGA', '566', u'Nigeria'), ('NU', 'NIU', '570', u'Niue'), ('NF', 'NFK', '574', u'Norfolk Island'), ('MP', 'MNP', '580', u'Northern Mariana Islands'), ('NO', 'NOR', '578', u'Norway'), ('OM', 'OMN', 
'512', u'Oman'), ('PK', 'PAK', '586', u'Pakistan'), ('PW', 'PLW', '585', u'Palau'), ('PS', 'PSE', '275', u'Palestinian Territory, Occupied'), ('PA', 'PAN', '591', u'Panama'), ('PG', 'PNG', '598', u'Papua New Guinea'), ('PY', 'PRY', '600', u'Paraguay'), ('PE', 'PER', '604', u'Peru'), ('PH', 'PHL', '608', u'Philippines'), ('PN', 'PCN', '612', u'Pitcairn'), ('PL', 'POL', '616', u'Poland'), ('PT', 'PRT', '620', u'Portugal'), ('PR', 'PRI', '630', u'Puerto Rico'), ('QA', 'QAT', '634', u'Qatar'), ('RE', 'REU', '638', u'Réunion'), ('RO', 'ROU', '642', u'Romania'), ('RU', 'RUS', '643', u'Russian Federation'), ('RW', 'RWA', '646', u'Rwanda'), ('BL', 'BLM', '652', u'Saint Barthélemy'), ('SH', 'SHN', '654', u'Saint Helena, Ascension and Tristan da Cunha'), ('KN', 'KNA', '659', u'Saint Kitts and Nevis'), ('LC', 'LCA', '662', u'Saint Lucia'), ('MF', 'MAF', '663', u'Saint Martin (French part)'), ('PM', 'SPM', '666', u'Saint Pierre and Miquelon'), ('VC', 'VCT', '670', u'Saint Vincent and the Grenadines'), ('WS', 'WSM', '882', u'Samoa'), ('SM', 'SMR', '674', u'San Marino'), ('ST', 'STP', '678', u'Sao Tome and Principe'), ('SA', 'SAU', '682', u'Saudi Arabia'), ('SN', 'SEN', '686', u'Senegal'), ('RS', 'SRB', '688', u'Serbia'), ('SC', 'SYC', '690', u'Seychelles'), ('SL', 'SLE', '694', u'Sierra Leone'), ('SG', 'SGP', '702', u'Singapore'), ('SX', 'SXM', '534', u'Sint Maarten (Dutch part)'), ('SK', 'SVK', '703', u'Slovakia'), ('SI', 'SVN', '705', u'Slovenia'), ('SB', 'SLB', '090', u'Solomon Islands'), ('SO', 'SOM', '706', u'Somalia'), ('ZA', 'ZAF', '710', u'South Africa'), ('GS', 'SGS', '239', u'South Georgia and the South Sandwich Islands'), ('ES', 'ESP', '724', u'Spain'), ('LK', 'LKA', '144', u'Sri Lanka'), ('SD', 'SDN', '729', u'Sudan'), ('SR', 'SUR', '740', u'Suriname'), ('SS', 'SSD', '728', u'South Sudan'), ('SJ', 'SJM', '744', u'Svalbard and Jan Mayen'), ('SZ', 'SWZ', '748', u'Swaziland'), ('SE', 'SWE', '752', u'Sweden'), ('CH', 'CHE', '756', u'Switzerland'), ('SY', 'SYR', '760', u'Syrian Arab Republic'), ('TW', 'TWN', '158', u'Taiwan, Province of China'), ('TJ', 'TJK', '762', u'Tajikistan'), ('TZ', 'TZA', '834', u'Tanzania, United Republic of'), ('TH', 'THA', '764', u'Thailand'), ('TL', 'TLS', '626', u'Timor-Leste'), ('TG', 'TGO', '768', u'Togo'), ('TK', 'TKL', '772', u'Tokelau'), ('TO', 'TON', '776', u'Tonga'), ('TT', 'TTO', '780', u'Trinidad and Tobago'), ('TN', 'TUN', '788', u'Tunisia'), ('TR', 'TUR', '792', u'Turkey'), ('TM', 'TKM', '795', u'Turkmenistan'), ('TC', 'TCA', '796', u'Turks and Caicos Islands'), ('TV', 'TUV', '798', u'Tuvalu'), ('UG', 'UGA', '800', u'Uganda'), ('UA', 'UKR', '804', u'Ukraine'), ('AE', 'ARE', '784', u'United Arab Emirates'), ('GB', 'GBR', '826', u'United Kingdom'), ('US', 'USA', '840', u'United States'), ('UM', 'UMI', '581', u'United States Minor Outlying Islands'), ('UY', 'URY', '858', u'Uruguay'), ('UZ', 'UZB', '860', u'Uzbekistan'), ('VU', 'VUT', '548', u'Vanuatu'), ('VE', 'VEN', '862', u'Venezuela, Bolivarian Republic of'), ('VN', 'VNM', '704', u'Viet Nam'), ('VG', 'VGB', '092', u'Virgin Islands, British'), ('VI', 'VIR', '850', u'Virgin Islands, U.S.'), ('WF', 'WLF', '876', u'Wallis and Futuna'), ('EH', 'ESH', '732', u'Western Sahara'), ('YE', 'YEM', '887', u'Yemen'), ('ZM', 'ZMB', '894', u'Zambia'), ('ZW', 'ZWE', '716', u'Zimbabwe')] LANGUAGES = [('aar', '', 'aa', u'Afar', u'afar'), ('abk', '', 'ab', u'Abkhazian', u'abkhaze'), ('ace', '', '', u'Achinese', u'aceh'), ('ach', '', '', u'Acoli', u'acoli'), ('ada', '', '', u'Adangme', u'adangme'), ('ady', '', '', u'Adyghe; 
Adygei', u'adyghé'), ('afa', '', '', u'Afro-Asiatic languages', u'afro-asiatiques, langues'), ('afh', '', '', u'Afrihili', u'afrihili'), ('afr', '', 'af', u'Afrikaans', u'afrikaans'), ('ain', '', '', u'Ainu', u'aïnou'), ('aka', '', 'ak', u'Akan', u'akan'), ('akk', '', '', u'Akkadian', u'akkadien'), ('alb', 'sqi', 'sq', u'Albanian', u'albanais'), ('ale', '', '', u'Aleut', u'aléoute'), ('alg', '', '', u'Algonquian languages', u'algonquines, langues'), ('alt', '', '', u'Southern Altai', u'altai du Sud'), ('amh', '', 'am', u'Amharic', u'amharique'), ('ang', '', '', u'English, Old (ca.450-1100)', u'anglo-saxon (ca.450-1100)'), ('anp', '', '', u'Angika', u'angika'), ('apa', '', '', u'Apache languages', u'apaches, langues'), ('ara', '', 'ar', u'Arabic', u'arabe'), ('arc', '', '', u'Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)', u'araméen d\'empire (700-300 BCE)'), ('arg', '', 'an', u'Aragonese', u'aragonais'), ('arm', 'hye', 'hy', u'Armenian', u'arménien'), ('arn', '', '', u'Mapudungun; Mapuche', u'mapudungun; mapuche; mapuce'), ('arp', '', '', u'Arapaho', u'arapaho'), ('art', '', '', u'Artificial languages', u'artificielles, langues'), ('arw', '', '', u'Arawak', u'arawak'), ('asm', '', 'as', u'Assamese', u'assamais'), ('ast', '', '', u'Asturian; Bable; Leonese; Asturleonese', u'asturien; bable; léonais; asturoléonais'), ('ath', '', '', u'Athapascan languages', u'athapascanes, langues'), ('aus', '', '', u'Australian languages', u'australiennes, langues'), ('ava', '', 'av', u'Avaric', u'avar'), ('ave', '', 'ae', u'Avestan', u'avestique'), ('awa', '', '', u'Awadhi', u'awadhi'), ('aym', '', 'ay', u'Aymara', u'aymara'), ('aze', '', 'az', u'Azerbaijani', u'azéri'), ('bad', '', '', u'Banda languages', u'banda, langues'), ('bai', '', '', u'Bamileke languages', u'bamiléké, langues'), ('bak', '', 'ba', u'Bashkir', u'bachkir'), ('bal', '', '', u'Baluchi', u'baloutchi'), ('bam', '', 'bm', u'Bambara', u'bambara'), ('ban', '', '', u'Balinese', u'balinais'), ('baq', 'eus', 'eu', u'Basque', u'basque'), ('bas', '', '', u'Basa', u'basa'), ('bat', '', '', u'Baltic languages', u'baltes, langues'), ('bej', '', '', u'Beja; Bedawiyet', u'bedja'), ('bel', '', 'be', u'Belarusian', u'biélorusse'), ('bem', '', '', u'Bemba', u'bemba'), ('ben', '', 'bn', u'Bengali', u'bengali'), ('ber', '', '', u'Berber languages', u'berbères, langues'), ('bho', '', '', u'Bhojpuri', u'bhojpuri'), ('bih', '', 'bh', u'Bihari languages', u'langues biharis'), ('bik', '', '', u'Bikol', u'bikol'), ('bin', '', '', u'Bini; Edo', u'bini; edo'), ('bis', '', 'bi', u'Bislama', u'bichlamar'), ('bla', '', '', u'Siksika', u'blackfoot'), ('bnt', '', '', u'Bantu (Other)', u'bantoues, autres langues'), ('bos', '', 'bs', u'Bosnian', u'bosniaque'), ('bra', '', '', u'Braj', u'braj'), ('bre', '', 'br', u'Breton', u'breton'), ('btk', '', '', u'Batak languages', u'batak, langues'), ('bua', '', '', u'Buriat', u'bouriate'), ('bug', '', '', u'Buginese', u'bugi'), ('bul', '', 'bg', u'Bulgarian', u'bulgare'), ('bur', 'mya', 'my', u'Burmese', u'birman'), ('byn', '', '', u'Blin; Bilin', u'blin; bilen'), ('cad', '', '', u'Caddo', u'caddo'), ('cai', '', '', u'Central American Indian languages', u'amérindiennes de L\'Amérique centrale, langues'), ('car', '', '', u'Galibi Carib', u'karib; galibi; carib'), ('cat', '', 'ca', u'Catalan; Valencian', u'catalan; valencien'), ('cau', '', '', u'Caucasian languages', u'caucasiennes, langues'), ('ceb', '', '', u'Cebuano', u'cebuano'), ('cel', '', '', u'Celtic languages', u'celtiques, langues; celtes, langues'), 
('cha', '', 'ch', u'Chamorro', u'chamorro'), ('chb', '', '', u'Chibcha', u'chibcha'), ('che', '', 'ce', u'Chechen', u'tchétchène'), ('chg', '', '', u'Chagatai', u'djaghataï'), ('chi', 'zho', 'zh', u'Chinese', u'chinois'), ('chk', '', '', u'Chuukese', u'chuuk'), ('chm', '', '', u'Mari', u'mari'), ('chn', '', '', u'Chinook jargon', u'chinook, jargon'), ('cho', '', '', u'Choctaw', u'choctaw'), ('chp', '', '', u'Chipewyan; Dene Suline', u'chipewyan'), ('chr', '', '', u'Cherokee', u'cherokee'), ('chu', '', 'cu', u'Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic', u'slavon d\'église; vieux slave; slavon liturgique; vieux bulgare'), ('chv', '', 'cv', u'Chuvash', u'tchouvache'), ('chy', '', '', u'Cheyenne', u'cheyenne'), ('cmc', '', '', u'Chamic languages', u'chames, langues'), ('cop', '', '', u'Coptic', u'copte'), ('cor', '', 'kw', u'Cornish', u'cornique'), ('cos', '', 'co', u'Corsican', u'corse'), ('cpe', '', '', u'Creoles and pidgins, English based', u'créoles et pidgins basés sur l\'anglais'), ('cpf', '', '', u'Creoles and pidgins, French-based ', u'créoles et pidgins basés sur le français'), ('cpp', '', '', u'Creoles and pidgins, Portuguese-based ', u'créoles et pidgins basés sur le portugais'), ('cre', '', 'cr', u'Cree', u'cree'), ('crh', '', '', u'Crimean Tatar; Crimean Turkish', u'tatar de Crimé'), ('crp', '', '', u'Creoles and pidgins ', u'créoles et pidgins'), ('csb', '', '', u'Kashubian', u'kachoube'), ('cus', '', '', u'Cushitic languages', u'couchitiques, langues'), ('cze', 'ces', 'cs', u'Czech', u'tchèque'), ('dak', '', '', u'Dakota', u'dakota'), ('dan', '', 'da', u'Danish', u'danois'), ('dar', '', '', u'Dargwa', u'dargwa'), ('day', '', '', u'Land Dayak languages', u'dayak, langues'), ('del', '', '', u'Delaware', u'delaware'), ('den', '', '', u'Slave (Athapascan)', u'esclave (athapascan)'), ('dgr', '', '', u'Dogrib', u'dogrib'), ('din', '', '', u'Dinka', u'dinka'), ('div', '', 'dv', u'Divehi; Dhivehi; Maldivian', u'maldivien'), ('doi', '', '', u'Dogri', u'dogri'), ('dra', '', '', u'Dravidian languages', u'dravidiennes, langues'), ('dsb', '', '', u'Lower Sorbian', u'bas-sorabe'), ('dua', '', '', u'Duala', u'douala'), ('dum', '', '', u'Dutch, Middle (ca.1050-1350)', u'néerlandais moyen (ca. 
1050-1350)'), ('dut', 'nld', 'nl', u'Dutch; Flemish', u'néerlandais; flamand'), ('dyu', '', '', u'Dyula', u'dioula'), ('dzo', '', 'dz', u'Dzongkha', u'dzongkha'), ('efi', '', '', u'Efik', u'efik'), ('egy', '', '', u'Egyptian (Ancient)', u'égyptien'), ('eka', '', '', u'Ekajuk', u'ekajuk'), ('elx', '', '', u'Elamite', u'élamite'), ('eng', '', 'en', u'English', u'anglais'), ('enm', '', '', u'English, Middle (1100-1500)', u'anglais moyen (1100-1500)'), ('epo', '', 'eo', u'Esperanto', u'espéranto'), ('est', '', 'et', u'Estonian', u'estonien'), ('ewe', '', 'ee', u'Ewe', u'éwé'), ('ewo', '', '', u'Ewondo', u'éwondo'), ('fan', '', '', u'Fang', u'fang'), ('fao', '', 'fo', u'Faroese', u'féroïen'), ('fat', '', '', u'Fanti', u'fanti'), ('fij', '', 'fj', u'Fijian', u'fidjien'), ('fil', '', '', u'Filipino; Pilipino', u'filipino; pilipino'), ('fin', '', 'fi', u'Finnish', u'finnois'), ('fiu', '', '', u'Finno-Ugrian languages', u'finno-ougriennes, langues'), ('fon', '', '', u'Fon', u'fon'), ('fre', 'fra', 'fr', u'French', u'français'), ('frm', '', '', u'French, Middle (ca.1400-1600)', u'français moyen (1400-1600)'), ('fro', '', '', u'French, Old (842-ca.1400)', u'français ancien (842-ca.1400)'), ('frr', '', '', u'Northern Frisian', u'frison septentrional'), ('frs', '', '', u'Eastern Frisian', u'frison oriental'), ('fry', '', 'fy', u'Western Frisian', u'frison occidental'), ('ful', '', 'ff', u'Fulah', u'peul'), ('fur', '', '', u'Friulian', u'frioulan'), ('gaa', '', '', u'Ga', u'ga'), ('gay', '', '', u'Gayo', u'gayo'), ('gba', '', '', u'Gbaya', u'gbaya'), ('gem', '', '', u'Germanic languages', u'germaniques, langues'), ('geo', 'kat', 'ka', u'Georgian', u'géorgien'), ('ger', 'deu', 'de', u'German', u'allemand'), ('gez', '', '', u'Geez', u'guèze'), ('gil', '', '', u'Gilbertese', u'kiribati'), ('gla', '', 'gd', u'Gaelic; Scottish Gaelic', u'gaélique; gaélique écossais'), ('gle', '', 'ga', u'Irish', u'irlandais'), ('glg', '', 'gl', u'Galician', u'galicien'), ('glv', '', 'gv', u'Manx', u'manx; mannois'), ('gmh', '', '', u'German, Middle High (ca.1050-1500)', u'allemand, moyen haut (ca. 1050-1500)'), ('goh', '', '', u'German, Old High (ca.750-1050)', u'allemand, vieux haut (ca. 
750-1050)'), ('gon', '', '', u'Gondi', u'gond'), ('gor', '', '', u'Gorontalo', u'gorontalo'), ('got', '', '', u'Gothic', u'gothique'), ('grb', '', '', u'Grebo', u'grebo'), ('grc', '', '', u'Greek, Ancient (to 1453)', u'grec ancien (jusqu\'à 1453)'), ('gre', 'ell', 'el', u'Greek, Modern (1453-)', u'grec moderne (après 1453)'), ('grn', '', 'gn', u'Guarani', u'guarani'), ('gsw', '', '', u'Swiss German; Alemannic; Alsatian', u'suisse alémanique; alémanique; alsacien'), ('guj', '', 'gu', u'Gujarati', u'goudjrati'), ('gwi', '', '', u'Gwich\'in', u'gwich\'in'), ('hai', '', '', u'Haida', u'haida'), ('hat', '', 'ht', u'Haitian; Haitian Creole', u'haïtien; créole haïtien'), ('hau', '', 'ha', u'Hausa', u'haoussa'), ('haw', '', '', u'Hawaiian', u'hawaïen'), ('heb', '', 'he', u'Hebrew', u'hébreu'), ('her', '', 'hz', u'Herero', u'herero'), ('hil', '', '', u'Hiligaynon', u'hiligaynon'), ('him', '', '', u'Himachali languages; Western Pahari languages', u'langues himachalis; langues paharis occidentales'), ('hin', '', 'hi', u'Hindi', u'hindi'), ('hit', '', '', u'Hittite', u'hittite'), ('hmn', '', '', u'Hmong; Mong', u'hmong'), ('hmo', '', 'ho', u'Hiri Motu', u'hiri motu'), ('hrv', '', 'hr', u'Croatian', u'croate'), ('hsb', '', '', u'Upper Sorbian', u'haut-sorabe'), ('hun', '', 'hu', u'Hungarian', u'hongrois'), ('hup', '', '', u'Hupa', u'hupa'), ('iba', '', '', u'Iban', u'iban'), ('ibo', '', 'ig', u'Igbo', u'igbo'), ('ice', 'isl', 'is', u'Icelandic', u'islandais'), ('ido', '', 'io', u'Ido', u'ido'), ('iii', '', 'ii', u'Sichuan Yi; Nuosu', u'yi de Sichuan'), ('ijo', '', '', u'Ijo languages', u'ijo, langues'), ('iku', '', 'iu', u'Inuktitut', u'inuktitut'), ('ile', '', 'ie', u'Interlingue; Occidental', u'interlingue'), ('ilo', '', '', u'Iloko', u'ilocano'), ('ina', '', 'ia', u'Interlingua (International Auxiliary Language Association)', u'interlingua (langue auxiliaire internationale)'), ('inc', '', '', u'Indic languages', u'indo-aryennes, langues'), ('ind', '', 'id', u'Indonesian', u'indonésien'), ('ine', '', '', u'Indo-European languages', u'indo-européennes, langues'), ('inh', '', '', u'Ingush', u'ingouche'), ('ipk', '', 'ik', u'Inupiaq', u'inupiaq'), ('ira', '', '', u'Iranian languages', u'iraniennes, langues'), ('iro', '', '', u'Iroquoian languages', u'iroquoises, langues'), ('ita', '', 'it', u'Italian', u'italien'), ('jav', '', 'jv', u'Javanese', u'javanais'), ('jbo', '', '', u'Lojban', u'lojban'), ('jpn', '', 'ja', u'Japanese', u'japonais'), ('jpr', '', '', u'Judeo-Persian', u'judéo-persan'), ('jrb', '', '', u'Judeo-Arabic', u'judéo-arabe'), ('kaa', '', '', u'Kara-Kalpak', u'karakalpak'), ('kab', '', '', u'Kabyle', u'kabyle'), ('kac', '', '', u'Kachin; Jingpho', u'kachin; jingpho'), ('kal', '', 'kl', u'Kalaallisut; Greenlandic', u'groenlandais'), ('kam', '', '', u'Kamba', u'kamba'), ('kan', '', 'kn', u'Kannada', u'kannada'), ('kar', '', '', u'Karen languages', u'karen, langues'), ('kas', '', 'ks', u'Kashmiri', u'kashmiri'), ('kau', '', 'kr', u'Kanuri', u'kanouri'), ('kaw', '', '', u'Kawi', u'kawi'), ('kaz', '', 'kk', u'Kazakh', u'kazakh'), ('kbd', '', '', u'Kabardian', u'kabardien'), ('kha', '', '', u'Khasi', u'khasi'), ('khi', '', '', u'Khoisan languages', u'khoïsan, langues'), ('khm', '', 'km', u'Central Khmer', u'khmer central'), ('kho', '', '', u'Khotanese; Sakan', u'khotanais; sakan'), ('kik', '', 'ki', u'Kikuyu; Gikuyu', u'kikuyu'), ('kin', '', 'rw', u'Kinyarwanda', u'rwanda'), ('kir', '', 'ky', u'Kirghiz; Kyrgyz', u'kirghiz'), ('kmb', '', '', u'Kimbundu', u'kimbundu'), ('kok', '', '', u'Konkani', 
u'konkani'), ('kom', '', 'kv', u'Komi', u'kom'), ('kon', '', 'kg', u'Kongo', u'kongo'), ('kor', '', 'ko', u'Korean', u'coréen'), ('kos', '', '', u'Kosraean', u'kosrae'), ('kpe', '', '', u'Kpelle', u'kpellé'), ('krc', '', '', u'Karachay-Balkar', u'karatchai balkar'), ('krl', '', '', u'Karelian', u'carélien'), ('kro', '', '', u'Kru languages', u'krou, langues'), ('kru', '', '', u'Kurukh', u'kurukh'), ('kua', '', 'kj', u'Kuanyama; Kwanyama', u'kuanyama; kwanyama'), ('kum', '', '', u'Kumyk', u'koumyk'), ('kur', '', 'ku', u'Kurdish', u'kurde'), ('kut', '', '', u'Kutenai', u'kutenai'), ('lad', '', '', u'Ladino', u'judéo-espagnol'), ('lah', '', '', u'Lahnda', u'lahnda'), ('lam', '', '', u'Lamba', u'lamba'), ('lao', '', 'lo', u'Lao', u'lao'), ('lat', '', 'la', u'Latin', u'latin'), ('lav', '', 'lv', u'Latvian', u'letton'), ('lez', '', '', u'Lezghian', u'lezghien'), ('lim', '', 'li', u'Limburgan; Limburger; Limburgish', u'limbourgeois'), ('lin', '', 'ln', u'Lingala', u'lingala'), ('lit', '', 'lt', u'Lithuanian', u'lituanien'), ('lol', '', '', u'Mongo', u'mongo'), ('loz', '', '', u'Lozi', u'lozi'), ('ltz', '', 'lb', u'Luxembourgish; Letzeburgesch', u'luxembourgeois'), ('lua', '', '', u'Luba-Lulua', u'luba-lulua'), ('lub', '', 'lu', u'Luba-Katanga', u'luba-katanga'), ('lug', '', 'lg', u'Ganda', u'ganda'), ('lui', '', '', u'Luiseno', u'luiseno'), ('lun', '', '', u'Lunda', u'lunda'), ('luo', '', '', u'Luo (Kenya and Tanzania)', u'luo (Kenya et Tanzanie)'), ('lus', '', '', u'Lushai', u'lushai'), ('mac', 'mkd', 'mk', u'Macedonian', u'macédonien'), ('mad', '', '', u'Madurese', u'madourais'), ('mag', '', '', u'Magahi', u'magahi'), ('mah', '', 'mh', u'Marshallese', u'marshall'), ('mai', '', '', u'Maithili', u'maithili'), ('mak', '', '', u'Makasar', u'makassar'), ('mal', '', 'ml', u'Malayalam', u'malayalam'), ('man', '', '', u'Mandingo', u'mandingue'), ('mao', 'mri', 'mi', u'Maori', u'maori'), ('map', '', '', u'Austronesian languages', u'austronésiennes, langues'), ('mar', '', 'mr', u'Marathi', u'marathe'), ('mas', '', '', u'Masai', u'massaï'), ('may', 'msa', 'ms', u'Malay', u'malais'), ('mdf', '', '', u'Moksha', u'moksa'), ('mdr', '', '', u'Mandar', u'mandar'), ('men', '', '', u'Mende', u'mendé'), ('mga', '', '', u'Irish, Middle (900-1200)', u'irlandais moyen (900-1200)'), ('mic', '', '', u'Mi\'kmaq; Micmac', u'mi\'kmaq; micmac'), ('min', '', '', u'Minangkabau', u'minangkabau'), ('mkh', '', '', u'Mon-Khmer languages', u'môn-khmer, langues'), ('mlg', '', 'mg', u'Malagasy', u'malgache'), ('mlt', '', 'mt', u'Maltese', u'maltais'), ('mnc', '', '', u'Manchu', u'mandchou'), ('mni', '', '', u'Manipuri', u'manipuri'), ('mno', '', '', u'Manobo languages', u'manobo, langues'), ('moh', '', '', u'Mohawk', u'mohawk'), ('mon', '', 'mn', u'Mongolian', u'mongol'), ('mos', '', '', u'Mossi', u'moré'), ('mun', '', '', u'Munda languages', u'mounda, langues'), ('mus', '', '', u'Creek', u'muskogee'), ('mwl', '', '', u'Mirandese', u'mirandais'), ('mwr', '', '', u'Marwari', u'marvari'), ('myn', '', '', u'Mayan languages', u'maya, langues'), ('myv', '', '', u'Erzya', u'erza'), ('nah', '', '', u'Nahuatl languages', u'nahuatl, langues'), ('nai', '', '', u'North American Indian languages', u'nord-amérindiennes, langues'), ('nap', '', '', u'Neapolitan', u'napolitain'), ('nau', '', 'na', u'Nauru', u'nauruan'), ('nav', '', 'nv', u'Navajo; Navaho', u'navaho'), ('nbl', '', 'nr', u'Ndebele, South; South Ndebele', u'ndébélé du Sud'), ('nde', '', 'nd', u'Ndebele, North; North Ndebele', u'ndébélé du Nord'), ('ndo', '', 'ng', u'Ndonga', 
u'ndonga'), ('nds', '', '', u'Low German; Low Saxon; German, Low; Saxon, Low', u'bas allemand; bas saxon; allemand, bas; saxon, bas'), ('nep', '', 'ne', u'Nepali', u'népalais'), ('new', '', '', u'Nepal Bhasa; Newari', u'nepal bhasa; newari'), ('nia', '', '', u'Nias', u'nias'), ('nic', '', '', u'Niger-Kordofanian languages', u'nigéro-kordofaniennes, langues'), ('niu', '', '', u'Niuean', u'niué'), ('nno', '', 'nn', u'Norwegian Nynorsk; Nynorsk, Norwegian', u'norvégien nynorsk; nynorsk, norvégien'), ('nob', '', 'nb', u'Bokmål, Norwegian; Norwegian Bokmål', u'norvégien bokmål'), ('nog', '', '', u'Nogai', u'nogaï; nogay'), ('non', '', '', u'Norse, Old', u'norrois, vieux'), ('nor', '', 'no', u'Norwegian', u'norvégien'), ('nqo', '', '', u'N\'Ko', u'n\'ko'), ('nso', '', '', u'Pedi; Sepedi; Northern Sotho', u'pedi; sepedi; sotho du Nord'), ('nub', '', '', u'Nubian languages', u'nubiennes, langues'), ('nwc', '', '', u'Classical Newari; Old Newari; Classical Nepal Bhasa', u'newari classique'), ('nya', '', 'ny', u'Chichewa; Chewa; Nyanja', u'chichewa; chewa; nyanja'), ('nym', '', '', u'Nyamwezi', u'nyamwezi'), ('nyn', '', '', u'Nyankole', u'nyankolé'), ('nyo', '', '', u'Nyoro', u'nyoro'), ('nzi', '', '', u'Nzima', u'nzema'), ('oci', '', 'oc', u'Occitan (post 1500); Provençal', u'occitan (après 1500); provençal'), ('oji', '', 'oj', u'Ojibwa', u'ojibwa'), ('ori', '', 'or', u'Oriya', u'oriya'), ('orm', '', 'om', u'Oromo', u'galla'), ('osa', '', '', u'Osage', u'osage'), ('oss', '', 'os', u'Ossetian; Ossetic', u'ossète'), ('ota', '', '', u'Turkish, Ottoman (1500-1928)', u'turc ottoman (1500-1928)'), ('oto', '', '', u'Otomian languages', u'otomi, langues'), ('paa', '', '', u'Papuan languages', u'papoues, langues'), ('pag', '', '', u'Pangasinan', u'pangasinan'), ('pal', '', '', u'Pahlavi', u'pahlavi'), ('pam', '', '', u'Pampanga; Kapampangan', u'pampangan'), ('pan', '', 'pa', u'Panjabi; Punjabi', u'pendjabi'), ('pap', '', '', u'Papiamento', u'papiamento'), ('pau', '', '', u'Palauan', u'palau'), ('peo', '', '', u'Persian, Old (ca.600-400 B.C.)', u'perse, vieux (ca. 600-400 av. 
J.-C.)'), ('per', 'fas', 'fa', u'Persian', u'persan'), ('phi', '', '', u'Philippine languages', u'philippines, langues'), ('phn', '', '', u'Phoenician', u'phénicien'), ('pli', '', 'pi', u'Pali', u'pali'), ('pol', '', 'pl', u'Polish', u'polonais'), ('pon', '', '', u'Pohnpeian', u'pohnpei'), ('por', '', 'pt', u'Portuguese', u'portugais'), ('pra', '', '', u'Prakrit languages', u'prâkrit, langues'), ('pro', '', '', u'Provençal, Old (to 1500)', u'provençal ancien (jusqu\'à 1500)'), ('pus', '', 'ps', u'Pushto; Pashto', u'pachto'), ('que', '', 'qu', u'Quechua', u'quechua'), ('raj', '', '', u'Rajasthani', u'rajasthani'), ('rap', '', '', u'Rapanui', u'rapanui'), ('rar', '', '', u'Rarotongan; Cook Islands Maori', u'rarotonga; maori des îles Cook'), ('roa', '', '', u'Romance languages', u'romanes, langues'), ('roh', '', 'rm', u'Romansh', u'romanche'), ('rom', '', '', u'Romany', u'tsigane'), ('rum', 'ron', 'ro', u'Romanian; Moldavian; Moldovan', u'roumain; moldave'), ('run', '', 'rn', u'Rundi', u'rundi'), ('rup', '', '', u'Aromanian; Arumanian; Macedo-Romanian', u'aroumain; macédo-roumain'), ('rus', '', 'ru', u'Russian', u'russe'), ('sad', '', '', u'Sandawe', u'sandawe'), ('sag', '', 'sg', u'Sango', u'sango'), ('sah', '', '', u'Yakut', u'iakoute'), ('sai', '', '', u'South American Indian (Other)', u'indiennes d\'Amérique du Sud, autres langues'), ('sal', '', '', u'Salishan languages', u'salishennes, langues'), ('sam', '', '', u'Samaritan Aramaic', u'samaritain'), ('san', '', 'sa', u'Sanskrit', u'sanskrit'), ('sas', '', '', u'Sasak', u'sasak'), ('sat', '', '', u'Santali', u'santal'), ('scn', '', '', u'Sicilian', u'sicilien'), ('sco', '', '', u'Scots', u'écossais'), ('sel', '', '', u'Selkup', u'selkoupe'), ('sem', '', '', u'Semitic languages', u'sémitiques, langues'), ('sga', '', '', u'Irish, Old (to 900)', u'irlandais ancien (jusqu\'à 900)'), ('sgn', '', '', u'Sign Languages', u'langues des signes'), ('shn', '', '', u'Shan', u'chan'), ('sid', '', '', u'Sidamo', u'sidamo'), ('sin', '', 'si', u'Sinhala; Sinhalese', u'singhalais'), ('sio', '', '', u'Siouan languages', u'sioux, langues'), ('sit', '', '', u'Sino-Tibetan languages', u'sino-tibétaines, langues'), ('sla', '', '', u'Slavic languages', u'slaves, langues'), ('slo', 'slk', 'sk', u'Slovak', u'slovaque'), ('slv', '', 'sl', u'Slovenian', u'slovène'), ('sma', '', '', u'Southern Sami', u'sami du Sud'), ('sme', '', 'se', u'Northern Sami', u'sami du Nord'), ('smi', '', '', u'Sami languages', u'sames, langues'), ('smj', '', '', u'Lule Sami', u'sami de Lule'), ('smn', '', '', u'Inari Sami', u'sami d\'Inari'), ('smo', '', 'sm', u'Samoan', u'samoan'), ('sms', '', '', u'Skolt Sami', u'sami skolt'), ('sna', '', 'sn', u'Shona', u'shona'), ('snd', '', 'sd', u'Sindhi', u'sindhi'), ('snk', '', '', u'Soninke', u'soninké'), ('sog', '', '', u'Sogdian', u'sogdien'), ('som', '', 'so', u'Somali', u'somali'), ('son', '', '', u'Songhai languages', u'songhai, langues'), ('sot', '', 'st', u'Sotho, Southern', u'sotho du Sud'), ('spa', '', 'es', u'Spanish; Castilian', u'espagnol; castillan'), ('srd', '', 'sc', u'Sardinian', u'sarde'), ('srn', '', '', u'Sranan Tongo', u'sranan tongo'), ('srp', '', 'sr', u'Serbian', u'serbe'), ('srr', '', '', u'Serer', u'sérère'), ('ssa', '', '', u'Nilo-Saharan languages', u'nilo-sahariennes, langues'), ('ssw', '', 'ss', u'Swati', u'swati'), ('suk', '', '', u'Sukuma', u'sukuma'), ('sun', '', 'su', u'Sundanese', u'soundanais'), ('sus', '', '', u'Susu', u'soussou'), ('sux', '', '', u'Sumerian', u'sumérien'), ('swa', '', 'sw', u'Swahili', 
u'swahili'), ('swe', '', 'sv', u'Swedish', u'suédois'), ('syc', '', '', u'Classical Syriac', u'syriaque classique'), ('syr', '', '', u'Syriac', u'syriaque'), ('tah', '', 'ty', u'Tahitian', u'tahitien'), ('tai', '', '', u'Tai languages', u'tai, langues'), ('tam', '', 'ta', u'Tamil', u'tamoul'), ('tat', '', 'tt', u'Tatar', u'tatar'), ('tel', '', 'te', u'Telugu', u'télougou'), ('tem', '', '', u'Timne', u'temne'), ('ter', '', '', u'Tereno', u'tereno'), ('tet', '', '', u'Tetum', u'tetum'), ('tgk', '', 'tg', u'Tajik', u'tadjik'), ('tgl', '', 'tl', u'Tagalog', u'tagalog'), ('tha', '', 'th', u'Thai', u'thaï'), ('tib', 'bod', 'bo', u'Tibetan', u'tibétain'), ('tig', '', '', u'Tigre', u'tigré'), ('tir', '', 'ti', u'Tigrinya', u'tigrigna'), ('tiv', '', '', u'Tiv', u'tiv'), ('tkl', '', '', u'Tokelau', u'tokelau'), ('tlh', '', '', u'Klingon; tlhIngan-Hol', u'klingon'), ('tli', '', '', u'Tlingit', u'tlingit'), ('tmh', '', '', u'Tamashek', u'tamacheq'), ('tog', '', '', u'Tonga (Nyasa)', u'tonga (Nyasa)'), ('ton', '', 'to', u'Tonga (Tonga Islands)', u'tongan (Îles Tonga)'), ('tpi', '', '', u'Tok Pisin', u'tok pisin'), ('tsi', '', '', u'Tsimshian', u'tsimshian'), ('tsn', '', 'tn', u'Tswana', u'tswana'), ('tso', '', 'ts', u'Tsonga', u'tsonga'), ('tuk', '', 'tk', u'Turkmen', u'turkmène'), ('tum', '', '', u'Tumbuka', u'tumbuka'), ('tup', '', '', u'Tupi languages', u'tupi, langues'), ('tur', '', 'tr', u'Turkish', u'turc'), ('tut', '', '', u'Altaic languages', u'altaïques, langues'), ('tvl', '', '', u'Tuvalu', u'tuvalu'), ('twi', '', 'tw', u'Twi', u'twi'), ('tyv', '', '', u'Tuvinian', u'touva'), ('udm', '', '', u'Udmurt', u'oudmourte'), ('uga', '', '', u'Ugaritic', u'ougaritique'), ('uig', '', 'ug', u'Uighur; Uyghur', u'ouïgour'), ('ukr', '', 'uk', u'Ukrainian', u'ukrainien'), ('umb', '', '', u'Umbundu', u'umbundu'), ('und', '', '', u'Undetermined', u'indéterminée'), ('urd', '', 'ur', u'Urdu', u'ourdou'), ('uzb', '', 'uz', u'Uzbek', u'ouszbek'), ('vai', '', '', u'Vai', u'vaï'), ('ven', '', 've', u'Venda', u'venda'), ('vie', '', 'vi', u'Vietnamese', u'vietnamien'), ('vol', '', 'vo', u'Volapük', u'volapük'), ('vot', '', '', u'Votic', u'vote'), ('wak', '', '', u'Wakashan languages', u'wakashanes, langues'), ('wal', '', '', u'Walamo', u'walamo'), ('war', '', '', u'Waray', u'waray'), ('was', '', '', u'Washo', u'washo'), ('wel', 'cym', 'cy', u'Welsh', u'gallois'), ('wen', '', '', u'Sorbian languages', u'sorabes, langues'), ('wln', '', 'wa', u'Walloon', u'wallon'), ('wol', '', 'wo', u'Wolof', u'wolof'), ('xal', '', '', u'Kalmyk; Oirat', u'kalmouk; oïrat'), ('xho', '', 'xh', u'Xhosa', u'xhosa'), ('yao', '', '', u'Yao', u'yao'), ('yap', '', '', u'Yapese', u'yapois'), ('yid', '', 'yi', u'Yiddish', u'yiddish'), ('yor', '', 'yo', u'Yoruba', u'yoruba'), ('ypk', '', '', u'Yupik languages', u'yupik, langues'), ('zap', '', '', u'Zapotec', u'zapotèque'), ('zbl', '', '', u'Blissymbols; Blissymbolics; Bliss', u'symboles Bliss; Bliss'), ('zen', '', '', u'Zenaga', u'zenaga'), ('zha', '', 'za', u'Zhuang; Chuang', u'zhuang; chuang'), ('znd', '', '', u'Zande languages', u'zandé, langues'), ('zul', '', 'zu', u'Zulu', u'zoulou'), ('zun', '', '', u'Zuni', u'zuni'), ('zza', '', '', u'Zaza; Dimili; Dimli; Kirdki; Kirmanjki; Zazaki', u'zaza; dimili; dimli; kirdki; kirmanjki; zazaki')] class Country(object): """Country according to ISO-3166 :param string country: country name, alpha2 code, alpha3 code or numeric code :param list countries: all countries :type countries: see :data:`~subliminal.language.COUNTRIES` """ def __init__(self, 
country, countries=None): countries = countries or COUNTRIES country = to_unicode(country.strip().lower()) country_tuple = None # Try to find the country if len(country) == 2: country_tuple = dict((c[0].lower(), c) for c in countries).get(country) elif len(country) == 3 and not country.isdigit(): country_tuple = dict((c[1].lower(), c) for c in countries).get(country) elif len(country) == 3 and country.isdigit(): country_tuple = dict((c[2].lower(), c) for c in countries).get(country) if country_tuple is None: country_tuple = dict((c[3].lower(), c) for c in countries).get(country) # Raise ValueError if nothing is found if country_tuple is None: raise ValueError('Country %s does not exist' % country) # Set default attrs self.alpha2 = country_tuple[0] self.alpha3 = country_tuple[1] self.numeric = country_tuple[2] self.name = country_tuple[3] def __hash__(self): return hash(self.alpha3) def __eq__(self, other): if isinstance(other, Country): return self.alpha3 == other.alpha3 return False def __ne__(self, other): return not self == other def __unicode__(self): return self.name def __str__(self): return unicode(self).encode('utf-8') def __repr__(self): return 'Country(%s)' % self class Language(object): """Language according to ISO-639 :param string language: language name (english or french), alpha2 code, alpha3 code, terminologic code or numeric code, eventually with a country :param country: country of the language :type country: :class:`Country` or string :param languages: all languages :type languages: see :data:`~subliminal.language.LANGUAGES` :param countries: all countries :type countries: see :data:`~subliminal.language.COUNTRIES` :param bool strict: whether to raise a ValueError on unknown language or not :class:`Language` implements the inclusion test, with the ``in`` keyword:: >>> Language('pt-BR') in Language('pt') # Portuguese (Brazil) is included in Portuguese True >>> Language('pt') in Language('pt-BR') # Portuguese is not included in Portuguese (Brazil) False """ with_country_regexps = [re.compile('(.*)\((.*)\)'), re.compile('(.*)[-_](.*)')] def __init__(self, language, country=None, languages=None, countries=None, strict=True): languages = languages or LANGUAGES countries = countries or COUNTRIES # Get the country self.country = None if isinstance(country, Country): self.country = country elif isinstance(country, basestring): try: self.country = Country(country, countries) except ValueError: logger.warning(u'Country %s could not be identified' % country) if strict: raise # Language + Country format #TODO: Improve this part if country is None: for regexp in [r.match(language) for r in self.with_country_regexps]: if regexp: language = regexp.group(1) try: self.country = Country(regexp.group(2), countries) except ValueError: logger.warning(u'Country %s could not be identified' % country) if strict: raise break # Try to find the language language = to_unicode(language.strip().lower()) language_tuple = None if len(language) == 2: language_tuple = dict((l[2].lower(), l) for l in languages).get(language) elif len(language) == 3: language_tuple = dict((l[0].lower(), l) for l in languages).get(language) if language_tuple is None: language_tuple = dict((l[1].lower(), l) for l in languages).get(language) if language_tuple is None: language_tuple = dict((l[3].split('; ')[0].lower(), l) for l in languages).get(language) if language_tuple is None: language_tuple = dict((l[4].split('; ')[0].lower(), l) for l in languages).get(language) # Raise ValueError if strict or continue with 
Undetermined if language_tuple is None: if strict: raise ValueError('Language %s does not exist' % language) language_tuple = dict((l[0].lower(), l) for l in languages).get('und') # Set attributes self.alpha2 = language_tuple[2] self.alpha3 = language_tuple[0] self.terminologic = language_tuple[1] self.name = language_tuple[3] self.french_name = language_tuple[4] def __hash__(self): if self.country is None: return hash(self.alpha3) return hash(self.alpha3 + self.country.alpha3) def __eq__(self, other): if isinstance(other, Language): return self.alpha3 == other.alpha3 and self.country == other.country return False def __contains__(self, item): if isinstance(item, Language): if self == item: return True if self.country is None: return self.alpha3 == item.alpha3 return False def __ne__(self, other): return not self == other def __nonzero__(self): return self.alpha3 != 'und' def __unicode__(self): if self.country is None: return self.name return '%s (%s)' % (self.name, self.country) def __str__(self): return unicode(self).encode('utf-8') def __repr__(self): if self.country is None: return 'Language(%s)' % self.name.encode('utf-8') return 'Language(%s, country=%s)' % (self.name.encode('utf-8'), self.country) class language_set(set): """Set of :class:`Language` with some specificities. :param iterable: where to take elements from :type iterable: iterable of :class:`Languages <Language>` or string :param languages: all languages :type languages: see :data:`~subliminal.language.LANGUAGES` :param bool strict: whether to raise a ValueError on invalid language or not The following redefinitions are meant to reflect the inclusion logic in :class:`Language` * Inclusion test, with the ``in`` keyword * Intersection * Substraction Here is an illustration of the previous points:: >>> Language('en') in language_set(['en-US', 'en-CA']) False >>> Language('en-US') in language_set(['en', 'fr']) True >>> language_set(['en']) & language_set(['en-US', 'en-CA']) language_set([Language(English, country=Canada), Language(English, country=United States)]) >>> language_set(['en-US', 'en-CA', 'fr']) - language_set(['en']) language_set([Language(French)]) """ def __init__(self, iterable=None, languages=None, strict=True): iterable = iterable or [] languages = languages or LANGUAGES items = [] for i in iterable: if isinstance(i, Language): items.append(i) continue if isinstance(i, tuple): items.append(Language(i[0], languages=languages, strict=strict)) continue items.append(Language(i, languages=languages, strict=strict)) super(language_set, self).__init__(items) def __contains__(self, item): for i in self: if item in i: return True return super(language_set, self).__contains__(item) def __and__(self, other): results = language_set() for i in self: for j in other: if i in j: results.add(i) for i in other: for j in self: if i in j: results.add(i) return results def __sub__(self, other): results = language_set() for i in self: if i not in other: results.add(i) return results class language_list(list): """List of :class:`Language` with some specificities. 
:param iterable: where to take elements from :type iterable: iterable of :class:`Languages <Language>` or string :param languages: all languages :type languages: see :data:`~subliminal.language.LANGUAGES` :param bool strict: whether to raise a ValueError on invalid language or not The following redefinitions are meant to reflect the inclusion logic in :class:`Language` * Inclusion test, with the ``in`` keyword * Index Here is an illustration of the previous points:: >>> Language('en') in language_list(['en-US', 'en-CA']) False >>> Language('en-US') in language_list(['en', 'fr-BE']) True >>> language_list(['en', 'fr-BE']).index(Language('en-US')) 0 """ def __init__(self, iterable=None, languages=None, strict=True): iterable = iterable or [] languages = languages or LANGUAGES items = [] for i in iterable: if isinstance(i, Language): items.append(i) continue if isinstance(i, tuple): items.append(Language(i[0], languages=languages, strict=strict)) continue items.append(Language(i, languages=languages, strict=strict)) super(language_list, self).__init__(items) def __contains__(self, item): for i in self: if item in i: return True return super(language_list, self).__contains__(item) def index(self, x, strict=False): if not strict: for i in range(len(self)): if x in self[i]: return i return super(language_list, self).index(x)
54,658
Python
.py
987
42.483283
184
0.453318
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
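The Country and Language classes in the record above wrap the ISO-3166 and ISO-639 tables and add country-aware inclusion, which language_set and language_list then reuse for their set and list operations. A minimal usage sketch (Python 2, like the library; only classes defined in that file are used, the values in the comments follow its docstrings):

```python
from subliminal.language import Language, Country, language_set

english = Language('en')           # two-letter input resolves via the alpha2 column
brazilian = Language('pt-BR')      # "xx-YY" is split by with_country_regexps into language + country

print english.alpha3               # 'eng'
print brazilian                    # 'Portuguese (Brazil)'
print Country('fr').name           # resolved from the COUNTRIES table defined earlier in the file

# Inclusion is country-aware, exactly as the class docstring describes:
print brazilian in Language('pt')  # True  (pt-BR is included in pt)
print Language('pt') in brazilian  # False

# language_set redefines `in`, & and - on top of that inclusion logic:
wanted = language_set(['en', 'fr'])
print Language('en-US') in wanted          # True
print wanted - language_set(['en'])        # language_set([Language(French)])
```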
7,540
tasks.py
CouchPotato_CouchPotatoServer/libs/subliminal/tasks.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. __all__ = ['Task', 'ListTask', 'DownloadTask', 'StopTask'] class Task(object): """Base class for tasks to use in subliminal""" pass class ListTask(Task): """List task used by the worker to search for subtitles :param video: video to search subtitles for :type video: :class:`~subliminal.videos.Video` :param list languages: languages to search for :param string service: name of the service to use :param config: configuration for the service :type config: :class:`~subliminal.services.ServiceConfig` """ def __init__(self, video, languages, service, config): super(ListTask, self).__init__() self.video = video self.service = service self.languages = languages self.config = config def __repr__(self): return 'ListTask(%r, %r, %s, %r)' % (self.video, self.languages, self.service, self.config) class DownloadTask(Task): """Download task used by the worker to download subtitles :param video: video to download subtitles for :type video: :class:`~subliminal.videos.Video` :param subtitles: subtitles to download in order of preference :type subtitles: list of :class:`~subliminal.subtitles.Subtitle` """ def __init__(self, video, subtitles): super(DownloadTask, self).__init__() self.video = video self.subtitles = subtitles def __repr__(self): return 'DownloadTask(%r, %r)' % (self.video, self.subtitles) class StopTask(Task): """Stop task that will stop the worker""" pass
2,328
Python
.py
54
38.740741
99
0.70531
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
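The task classes above are plain data carriers; the Worker thread in async.py and consume_task() in core.py decide what to do from the task type. A hedged sketch of how a queue is typically filled; `video`, `langs` and `config` are placeholders for objects built elsewhere in the library (videos.py, language.py, services.ServiceConfig):

```python
import Queue
from subliminal.tasks import ListTask, StopTask

def fill_queue(video, langs, config, services=('opensubtitles', 'addic7ed')):
    tasks = Queue.Queue()
    # one ListTask per (video, service) pair, mirroring what core.create_list_tasks() produces
    for name in services:
        tasks.put(ListTask(video, langs, name, config))
    # a StopTask is the sentinel that makes a Worker thread leave its run() loop
    tasks.put(StopTask())
    return tasks
```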
7,541
api.py
CouchPotato_CouchPotatoServer/libs/subliminal/api.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE, create_list_tasks, consume_task, create_download_tasks, group_by_video, key_subtitles) from .language import language_set, language_list, LANGUAGES import logging __all__ = ['list_subtitles', 'download_subtitles'] logger = logging.getLogger(__name__) def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None): """List subtitles in given paths according to the criteria :param paths: path(s) to video file or folder :type paths: string or list :param languages: languages to search for, in preferred order :type languages: list of :class:`~subliminal.language.Language` or string :param list services: services to use for the search, in preferred order :param bool force: force searching for subtitles even if some are detected :param bool multi: search multiple languages for the same video :param string cache_dir: path to the cache directory to use :param int max_depth: maximum depth for scanning entries :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``) :return: found subtitles :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`] """ services = services or SERVICES languages = language_set(languages) if languages is not None else language_set(LANGUAGES) if isinstance(paths, basestring): paths = [paths] if any([not isinstance(p, unicode) for p in paths]): logger.warning(u'Not all entries are unicode') results = [] service_instances = {} tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for task in tasks: try: result = consume_task(task, service_instances) results.append((task.video, result)) except: logger.error(u'Error consuming task %r' % task, exc_info=True) for service_instance in service_instances.itervalues(): service_instance.terminate() return group_by_video(results) def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None): """Download subtitles in given paths according to the criteria :param paths: path(s) to video file or folder :type paths: string or list :param languages: languages to search for, in preferred order :type languages: list of :class:`~subliminal.language.Language` or string :param list services: services to use for the search, in preferred order :param bool force: force searching for subtitles even if some are detected :param bool multi: search multiple languages for the same video :param string cache_dir: path to the cache directory to use :param 
int max_depth: maximum depth for scanning entries :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``) :param order: preferred order for subtitles sorting :type list: list of :data:`~subliminal.core.LANGUAGE_INDEX`, :data:`~subliminal.core.SERVICE_INDEX`, :data:`~subliminal.core.SERVICE_CONFIDENCE`, :data:`~subliminal.core.MATCHING_CONFIDENCE` :return: downloaded subtitles :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`] .. note:: If you use ``multi=True``, :data:`~subliminal.core.LANGUAGE_INDEX` has to be the first item of the ``order`` list or you might get unexpected results. """ services = services or SERVICES languages = language_list(languages) if languages is not None else language_list(LANGUAGES) if isinstance(paths, basestring): paths = [paths] order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE] subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for video, subtitles in subtitles_by_video.iteritems(): subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True) results = [] service_instances = {} tasks = create_download_tasks(subtitles_by_video, languages, multi) for task in tasks: try: result = consume_task(task, service_instances) results.append((task.video, result)) except: logger.error(u'Error consuming task %r' % task, exc_info=True) for service_instance in service_instances.itervalues(): service_instance.terminate() return group_by_video(results)
5,646
Python
.py
97
53.082474
194
0.729998
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
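The two functions above are the synchronous entry points of the library. A minimal call sketch (Python 2); the path, language codes and cache directory are illustrative only:

```python
from subliminal import list_subtitles, download_subtitles

paths = [u'/data/videos/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4']

# list what the default services can offer, grouped by video
found = list_subtitles(paths, languages=['en', 'fr'], cache_dir='/tmp/subliminal')
for video, subs in found.iteritems():
    print video, len(subs)

# or download the best-ranked subtitle per video (multi=False keeps a single language)
downloaded = download_subtitles(paths, languages=['en'], cache_dir='/tmp/subliminal')
```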
7,542
videos.py
CouchPotato_CouchPotatoServer/libs/subliminal/videos.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import subtitles from .language import Language from .utils import to_unicode import enzyme.core import guessit import hashlib import logging import mimetypes import os import struct __all__ = ['EXTENSIONS', 'MIMETYPES', 'Video', 'Episode', 'Movie', 'UnknownVideo', 'scan', 'hash_opensubtitles', 'hash_thesubdb'] logger = logging.getLogger(__name__) #: Video extensions EXTENSIONS = ['.avi', '.mkv', '.mpg', '.mp4', '.m4v', '.mov', '.ogm', '.ogv', '.wmv', '.divx', '.asf'] #: Video mimetypes MIMETYPES = ['video/mpeg', 'video/mp4', 'video/quicktime', 'video/x-ms-wmv', 'video/x-msvideo', 'video/x-flv', 'video/x-matroska', 'video/x-matroska-3d'] class Video(object): """Base class for videos :param string path: path :param guess: guessed informations :type guess: :class:`~guessit.guess.Guess` :param string imdbid: imdbid """ def __init__(self, path, guess, imdbid=None): self.release = path self.guess = guess self.imdbid = imdbid self._path = None self.hashes = {} if os.path.exists(path): self._path = path self.size = os.path.getsize(self._path) self._compute_hashes() @classmethod def from_path(cls, path): """Create a :class:`Video` subclass guessing all informations from the given path :param string path: path :return: video object :rtype: :class:`Episode` or :class:`Movie` or :class:`UnknownVideo` """ guess = guessit.guess_file_info(path, 'autodetect') result = None if guess['type'] == 'episode' and 'series' in guess and 'season' in guess and 'episodeNumber' in guess: title = None if 'title' in guess: title = guess['title'] result = Episode(path, guess['series'], guess['season'], guess['episodeNumber'], title, guess) if guess['type'] == 'movie' and 'title' in guess: year = None if 'year' in guess: year = guess['year'] result = Movie(path, guess['title'], year, guess) if not result: result = UnknownVideo(path, guess) if not isinstance(result, cls): raise ValueError('Video is not of requested type') return result @property def exists(self): """Whether the video exists or not""" if self._path: return os.path.exists(self._path) return False @property def path(self): """Path to the video""" return self._path @path.setter def path(self, value): if not os.path.exists(value): raise ValueError('Path does not exists') self._path = value self.size = os.path.getsize(self._path) self._compute_hashes() def _compute_hashes(self): """Compute different hashes""" self.hashes['OpenSubtitles'] = hash_opensubtitles(self.path) self.hashes['TheSubDB'] = hash_thesubdb(self.path) def scan(self): """Scan and return associated subtitles :return: associated subtitles :rtype: list of :class:`~subliminal.subtitles.Subtitle` """ if not self.exists: return [] basepath = os.path.splitext(self.path)[0] results = [] video_infos = None try: video_infos = 
enzyme.parse(self.path) logger.debug(u'Succeeded parsing %s with enzyme: %r' % (self.path, video_infos)) except: logger.debug(u'Failed parsing %s with enzyme' % self.path) if isinstance(video_infos, enzyme.core.AVContainer): results.extend([subtitles.EmbeddedSubtitle.from_enzyme(self.path, s) for s in video_infos.subtitles]) # cannot use glob here because it chokes if there are any square # brackets inside the filename, so we have to use basic string # startswith/endswith comparisons folder, basename = os.path.split(basepath) if folder == '': folder = '.' existing = [f for f in os.listdir(folder) if f.startswith(basename)] for path in existing: for ext in subtitles.EXTENSIONS: if path.endswith(ext): language = Language(path[len(basename) + 1:-len(ext)], strict=False) results.append(subtitles.ExternalSubtitle(path, language)) return results def __unicode__(self): return to_unicode(self.path or self.release) def __str__(self): return unicode(self).encode('utf-8') def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self) def __hash__(self): return hash(self.path or self.release) class Episode(Video): """Episode :class:`Video` :param string path: path :param string series: series :param int season: season number :param int episode: episode number :param string title: title :param guess: guessed informations :type guess: :class:`~guessit.guess.Guess` :param string tvdbid: tvdbid :param string imdbid: imdbid """ def __init__(self, path, series, season, episode, title=None, guess=None, tvdbid=None, imdbid=None): super(Episode, self).__init__(path, guess, imdbid) self.series = series self.title = title self.season = season self.episode = episode self.tvdbid = tvdbid class Movie(Video): """Movie :class:`Video` :param string path: path :param string title: title :param int year: year :param guess: guessed informations :type guess: :class:`~guessit.guess.Guess` :param string imdbid: imdbid """ def __init__(self, path, title, year=None, guess=None, imdbid=None): super(Movie, self).__init__(path, guess, imdbid) self.title = title self.year = year class UnknownVideo(Video): """Unknown video""" pass def scan(entry, max_depth=3, scan_filter=None, depth=0): """Scan a path for videos and subtitles :param string entry: path :param int max_depth: maximum folder depth :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``) :param int depth: starting depth :return: found videos and subtitles :rtype: list of (:class:`Video`, [:class:`~subliminal.subtitles.Subtitle`]) """ if depth > max_depth and max_depth != 0: # we do not want to search the whole file system except if max_depth = 0 return [] if os.path.isdir(entry): # a dir? 
recurse logger.debug(u'Scanning directory %s with depth %d/%d' % (entry, depth, max_depth)) result = [] for e in os.listdir(entry): result.extend(scan(os.path.join(entry, e), max_depth, scan_filter, depth + 1)) return result if os.path.isfile(entry) or depth == 0: logger.debug(u'Scanning file %s with depth %d/%d' % (entry, depth, max_depth)) if depth != 0: # trust the user: only check for valid format if recursing if mimetypes.guess_type(entry)[0] not in MIMETYPES and os.path.splitext(entry)[1] not in EXTENSIONS: return [] if scan_filter is not None and scan_filter(entry): return [] video = Video.from_path(entry) return [(video, video.scan())] logger.warning(u'Scanning entry %s failed with depth %d/%d' % (entry, depth, max_depth)) return [] # anything else def hash_opensubtitles(path): """Compute a hash using OpenSubtitles' algorithm :param string path: path :return: hash :rtype: string """ longlongformat = 'q' # long long bytesize = struct.calcsize(longlongformat) with open(path, 'rb') as f: filesize = os.path.getsize(path) filehash = filesize if filesize < 65536 * 2: return None for _ in range(65536 / bytesize): filebuffer = f.read(bytesize) (l_value,) = struct.unpack(longlongformat, filebuffer) filehash += l_value filehash = filehash & 0xFFFFFFFFFFFFFFFF # to remain as 64bit number f.seek(max(0, filesize - 65536), 0) for _ in range(65536 / bytesize): filebuffer = f.read(bytesize) (l_value,) = struct.unpack(longlongformat, filebuffer) filehash += l_value filehash = filehash & 0xFFFFFFFFFFFFFFFF returnedhash = '%016x' % filehash logger.debug(u'Computed OpenSubtitle hash %s for %s' % (returnedhash, path)) return returnedhash def hash_thesubdb(path): """Compute a hash using TheSubDB's algorithm :param string path: path :return: hash :rtype: string """ readsize = 64 * 1024 if os.path.getsize(path) < readsize: return None with open(path, 'rb') as f: data = f.read(readsize) f.seek(-readsize, os.SEEK_END) data += f.read(readsize) returnedhash = hashlib.md5(data).hexdigest() logger.debug(u'Computed TheSubDB hash %s for %s' % (returnedhash, path)) return returnedhash
9,971
Python
.py
242
33.797521
175
0.633867
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
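Video.from_path() above guesses the right subclass from the filename via guessit, and scan() walks a directory pairing each video with subtitles already sitting next to it. A short sketch; the paths are made up:

```python
from subliminal.videos import Video, Episode, scan

video = Video.from_path(u'/data/videos/Dexter.S04E05.720p.HDTV.x264-IMMERSE.mkv')
if isinstance(video, Episode):
    print video.series, video.season, video.episode   # 'Dexter' 4 5
print video.exists    # False unless the file is really on disk
print video.hashes    # OpenSubtitles / TheSubDB hashes, only computed when it exists

# scan() recurses up to max_depth and returns (video, [existing subtitles]) pairs
for v, existing in scan(u'/data/videos', max_depth=2):
    print v, existing
```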
7,543
utils.py
CouchPotato_CouchPotatoServer/libs/subliminal/utils.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. import re __all__ = ['get_keywords', 'split_keyword', 'to_unicode'] def get_keywords(guess): """Retrieve keywords from guessed informations :param guess: guessed informations :type guess: :class:`guessit.guess.Guess` :return: lower case alphanumeric keywords :rtype: set """ keywords = set() for k in ['releaseGroup', 'screenSize', 'videoCodec', 'format']: if k in guess: keywords = keywords | split_keyword(guess[k].lower()) return keywords def split_keyword(keyword): """Split a keyword in multiple ones on any non-alphanumeric character :param string keyword: keyword :return: keywords :rtype: set """ split = set(re.findall(r'\w+', keyword)) return split def to_unicode(data): """Convert a basestring to unicode :param basestring data: data to decode :return: data as unicode :rtype: unicode """ if not isinstance(data, basestring): raise ValueError('Basestring expected') if isinstance(data, unicode): return data for encoding in ('utf-8', 'latin-1'): try: return unicode(data, encoding) except UnicodeDecodeError: pass return unicode(data, 'utf-8', 'replace')
2,027
Python
.py
55
32.4
77
0.702247
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
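The helpers above are small but used throughout the services; a quick sketch of their behaviour (the plain dict stands in for a guessit Guess, which get_keywords only reads like a dict):

```python
from subliminal.utils import get_keywords, split_keyword, to_unicode

print split_keyword(u'720p.x264-immerse')    # set of u'720p', u'x264', u'immerse'
print get_keywords({'releaseGroup': u'IMMERSE', 'videoCodec': u'x264'})
                                             # set of u'immerse', u'x264'
print to_unicode('caf\xc3\xa9')              # tries utf-8, falls back to latin-1, then replacement chars
```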
7,544
cache.py
CouchPotato_CouchPotatoServer/libs/subliminal/cache.py
# -*- coding: utf-8 -*- # Copyright 2012 Nicolas Wack <wackou@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from collections import defaultdict from functools import wraps import logging import os.path import threading try: import cPickle as pickle except ImportError: import pickle __all__ = ['Cache', 'cachedmethod'] logger = logging.getLogger(__name__) class Cache(object): """A Cache object contains cached values for methods. It can have separate internal caches, one for each service """ def __init__(self, cache_dir): self.cache_dir = cache_dir self.cache = defaultdict(dict) self.lock = threading.RLock() def __del__(self): for service_name in self.cache: self.save(service_name) def cache_location(self, service_name): return os.path.join(self.cache_dir, 'subliminal_%s.cache' % service_name) def load(self, service_name): with self.lock: if service_name in self.cache: # already loaded return self.cache[service_name] = defaultdict(dict) filename = self.cache_location(service_name) logger.debug(u'Cache: loading cache from %s' % filename) try: self.cache[service_name] = pickle.load(open(filename, 'rb')) except IOError: logger.info('Cache: Cache file "%s" doesn\'t exist, creating it' % filename) except EOFError: logger.error('Cache: cache file "%s" is corrupted... Removing it.' % filename) os.remove(filename) def save(self, service_name): filename = self.cache_location(service_name) logger.debug(u'Cache: saving cache to %s' % filename) with self.lock: pickle.dump(self.cache[service_name], open(filename, 'wb')) def clear(self, service_name): try: os.remove(self.cache_location(service_name)) except OSError: pass self.cache[service_name] = defaultdict(dict) def cached_func_key(self, func, cls=None): try: cls = func.im_class except: pass return ('%s.%s' % (cls.__module__, cls.__name__), func.__name__) def function_cache(self, service_name, func): func_key = self.cached_func_key(func) return self.cache[service_name][func_key] def cache_for(self, service_name, func, args, result): # no need to lock here, dict ops are atomic self.function_cache(service_name, func)[args] = result def cached_value(self, service_name, func, args): """Raises KeyError if not found""" # no need to lock here, dict ops are atomic return self.function_cache(service_name, func)[args] def cachedmethod(function): """Decorator to make a method use the cache. .. 
note:: This can NOT be used with static functions, it has to be used on methods of some class """ @wraps(function) def cached(*args): c = args[0].config.cache service_name = args[0].__class__.__name__ func_key = c.cached_func_key(function, cls=args[0].__class__) func_cache = c.cache[service_name][func_key] # we need to remove the first element of args for the key, as it is the # instance pointer and we don't want the cache to know which instance # called it, it is shared among all instances of the same class key = args[1:] if key in func_cache: result = func_cache[key] logger.debug(u'Using cached value for %s(%s), returns: %s' % (func_key, key, result)) return result result = function(*args) # note: another thread could have already cached a value in the # meantime, but that's ok as we prefer to keep the latest value in # the cache func_cache[key] = result return result return cached
4,619
Python
.py
110
34.554545
97
0.646377
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
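The @cachedmethod decorator above reaches into args[0].config.cache, so it only works on methods of objects carrying a ServiceConfig-like attribute. A self-contained, hedged sketch; DummyConfig and DummyService are invented here purely to show the minimal surface the decorator needs:

```python
from subliminal.cache import Cache, cachedmethod

class DummyConfig(object):
    # stands in for subliminal.services.ServiceConfig: the decorator only needs .cache
    def __init__(self, cache_dir):
        self.cache = Cache(cache_dir)

class DummyService(object):
    def __init__(self, config):
        self.config = config
        # load() creates the per-service dict that cachedmethod reads and writes
        self.config.cache.load(self.__class__.__name__)

    @cachedmethod
    def get_series_id(self, name):
        print 'cache miss for %s' % name
        return hash(name) % 1000       # stand-in for an expensive remote lookup

service = DummyService(DummyConfig('/tmp'))
service.get_series_id(u'dexter')       # computes and stores the value
service.get_series_id(u'dexter')       # second call is served from the in-memory cache
```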
7,545
__init__.py
CouchPotato_CouchPotatoServer/libs/subliminal/__init__.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .api import list_subtitles, download_subtitles from .async import Pool from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE) from .infos import __version__ import logging try: from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass __all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE', 'list_subtitles', 'download_subtitles', 'Pool'] logging.getLogger(__name__).addHandler(NullHandler())
1,366
Python
.py
32
40.28125
81
0.756757
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
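Because the package installs a NullHandler (above), an application sees no log output from subliminal unless it configures logging itself; a minimal sketch:

```python
import logging

logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s %(message)s')
logging.getLogger('subliminal').setLevel(logging.DEBUG)
```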
7,546
async.py
CouchPotato_CouchPotatoServer/libs/subliminal/async.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .core import (consume_task, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE, SERVICES, create_list_tasks, create_download_tasks, group_by_video, key_subtitles) from .language import language_list, language_set, LANGUAGES from .tasks import StopTask import Queue import logging import threading __all__ = ['Worker', 'Pool'] logger = logging.getLogger(__name__) class Worker(threading.Thread): """Consume tasks and put the result in the queue""" def __init__(self, tasks, results): super(Worker, self).__init__() self.tasks = tasks self.results = results self.services = {} def run(self): while 1: result = [] try: task = self.tasks.get(block=True) if isinstance(task, StopTask): break result = consume_task(task, self.services) self.results.put((task.video, result)) except: logger.error(u'Exception raised in worker %s' % self.name, exc_info=True) finally: self.tasks.task_done() self.terminate() logger.debug(u'Thread %s terminated' % self.name) def terminate(self): """Terminate instantiated services""" for service_name, service in self.services.iteritems(): try: service.terminate() except: logger.error(u'Exception raised when terminating service %s' % service_name, exc_info=True) class Pool(object): """Pool of workers""" def __init__(self, size): self.tasks = Queue.Queue() self.results = Queue.Queue() self.workers = [] for _ in range(size): self.workers.append(Worker(self.tasks, self.results)) def __enter__(self): self.start() return self def __exit__(self, *args): self.stop() self.join() def start(self): """Start workers""" for worker in self.workers: worker.start() def stop(self): """Stop workers""" for _ in self.workers: self.tasks.put(StopTask()) def join(self): """Join the task queue""" self.tasks.join() def collect(self): """Collect available results :return: results of tasks :rtype: list of :class:`~subliminal.tasks.Task` """ results = [] while 1: try: result = self.results.get(block=False) results.append(result) except Queue.Empty: break return results def list_subtitles(self, paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None): """See :meth:`subliminal.list_subtitles`""" services = services or SERVICES languages = language_set(languages) if languages is not None else language_set(LANGUAGES) if isinstance(paths, basestring): paths = [paths] if any([not isinstance(p, unicode) for p in paths]): logger.warning(u'Not all entries are unicode') tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for task in tasks: self.tasks.put(task) self.join() results = self.collect() return group_by_video(results) def download_subtitles(self, paths, languages=None, services=None, force=True, multi=False, cache_dir=None, 
max_depth=3, scan_filter=None, order=None): """See :meth:`subliminal.download_subtitles`""" services = services or SERVICES languages = language_list(languages) if languages is not None else language_list(LANGUAGES) if isinstance(paths, basestring): paths = [paths] order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE] subtitles_by_video = self.list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter) for video, subtitles in subtitles_by_video.iteritems(): subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True) tasks = create_download_tasks(subtitles_by_video, languages, multi) for task in tasks: self.tasks.put(task) self.join() results = self.collect() return group_by_video(results)
5,253
Python
.py
124
34.120968
155
0.638623
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
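Pool above is the threaded counterpart of the api.py functions: Pool(size) creates `size` Worker threads, and the context-manager protocol starts them and later stops them with StopTask sentinels. A usage sketch with an illustrative path:

```python
from subliminal import Pool

with Pool(4) as pool:
    results = pool.download_subtitles([u'/data/videos'],
                                      languages=['en'],
                                      cache_dir='/tmp/subliminal',
                                      multi=False)

for video, subtitles in results.iteritems():
    print video, subtitles
```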
7,547
core.py
CouchPotato_CouchPotatoServer/libs/subliminal/core.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from .exceptions import DownloadFailedError from .services import ServiceConfig from .tasks import DownloadTask, ListTask from .utils import get_keywords from .videos import Episode, Movie, scan from .language import Language from collections import defaultdict from itertools import groupby import bs4 import guessit import logging __all__ = ['SERVICES', 'LANGUAGE_INDEX', 'SERVICE_INDEX', 'SERVICE_CONFIDENCE', 'MATCHING_CONFIDENCE', 'create_list_tasks', 'create_download_tasks', 'consume_task', 'matching_confidence', 'key_subtitles', 'group_by_video'] logger = logging.getLogger(__name__) SERVICES = ['opensubtitles', 'bierdopje', 'subswiki', 'subtitulos', 'thesubdb', 'addic7ed', 'tvsubtitles', 'subscenter', 'wizdom'] LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE = range(4) def create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter): """Create a list of :class:`~subliminal.tasks.ListTask` from one or more paths using the given criteria :param paths: path(s) to video file or folder :type paths: string or list :param set languages: languages to search for :param list services: services to use for the search :param bool force: force searching for subtitles even if some are detected :param bool multi: search multiple languages for the same video :param string cache_dir: path to the cache directory to use :param int max_depth: maximum depth for scanning entries :param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``) :return: the created tasks :rtype: list of :class:`~subliminal.tasks.ListTask` """ scan_result = [] for p in paths: scan_result.extend(scan(p, max_depth, scan_filter)) logger.debug(u'Found %d videos in %r with maximum depth %d' % (len(scan_result), paths, max_depth)) tasks = [] config = ServiceConfig(multi, cache_dir) services = filter_services(services) for video, detected_subtitles in scan_result: detected_languages = set(s.language for s in detected_subtitles) wanted_languages = languages.copy() if not force and multi: wanted_languages -= detected_languages if not wanted_languages: logger.debug(u'No need to list multi subtitles %r for %r because %r detected' % (languages, video, detected_languages)) continue if not force and not multi and Language('Undetermined') in detected_languages: logger.debug(u'No need to list single subtitles %r for %r because one detected' % (languages, video)) continue logger.debug(u'Listing subtitles %r for %r with services %r' % (wanted_languages, video, services)) for service_name in services: mod = __import__('services.' 
+ service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1) service = mod.Service if not service.check_validity(video, wanted_languages): continue task = ListTask(video, wanted_languages & service.languages, service_name, config) logger.debug(u'Created task %r' % task) tasks.append(task) return tasks def create_download_tasks(subtitles_by_video, languages, multi): """Create a list of :class:`~subliminal.tasks.DownloadTask` from a list results grouped by video :param subtitles_by_video: :class:`~subliminal.tasks.ListTask` results with ordered subtitles :type subtitles_by_video: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`] :param languages: languages in preferred order :type languages: :class:`~subliminal.language.language_list` :param bool multi: download multiple languages for the same video :return: the created tasks :rtype: list of :class:`~subliminal.tasks.DownloadTask` """ tasks = [] for video, subtitles in subtitles_by_video.iteritems(): if not subtitles: continue if not multi: task = DownloadTask(video, list(subtitles)) logger.debug(u'Created task %r' % task) tasks.append(task) continue for _, by_language in groupby(subtitles, lambda s: languages.index(s.language)): task = DownloadTask(video, list(by_language)) logger.debug(u'Created task %r' % task) tasks.append(task) return tasks def consume_task(task, services=None): """Consume a task. If the ``services`` parameter is given, the function will attempt to get the service from it. In case the service is not in ``services``, it will be initialized and put in ``services`` :param task: task to consume :type task: :class:`~subliminal.tasks.ListTask` or :class:`~subliminal.tasks.DownloadTask` :param dict services: mapping between the service name and an instance of this service :return: the result of the task :rtype: list of :class:`~subliminal.subtitles.ResultSubtitle` """ if services is None: services = {} logger.info(u'Consuming %r' % task) result = None if isinstance(task, ListTask): service = get_service(services, task.service, config=task.config) result = service.list(task.video, task.languages) elif isinstance(task, DownloadTask): for subtitle in task.subtitles: service = get_service(services, subtitle.service) try: service.download(subtitle) result = [subtitle] break except DownloadFailedError: logger.warning(u'Could not download subtitle %r, trying next' % subtitle) continue if result is None: logger.error(u'No subtitles could be downloaded for video %r' % task.video) return result def matching_confidence(video, subtitle): """Compute the probability (confidence) that the subtitle matches the video :param video: video to match :type video: :class:`~subliminal.videos.Video` :param subtitle: subtitle to match :type subtitle: :class:`~subliminal.subtitles.Subtitle` :return: the matching probability :rtype: float """ guess = guessit.guess_file_info(subtitle.release, 'autodetect') video_keywords = get_keywords(video.guess) subtitle_keywords = get_keywords(guess) | subtitle.keywords logger.debug(u'Video keywords %r - Subtitle keywords %r' % (video_keywords, subtitle_keywords)) replacement = {'keywords': len(video_keywords & subtitle_keywords)} if isinstance(video, Episode): replacement.update({'series': 0, 'season': 0, 'episode': 0}) matching_format = '{series:b}{season:b}{episode:b}{keywords:03b}' best = matching_format.format(series=1, season=1, episode=1, keywords=len(video_keywords)) if guess['type'] in ['episode', 'episodesubtitle']: if 'series' in guess and 
guess['series'].lower() == video.series.lower(): replacement['series'] = 1 if 'season' in guess and guess['season'] == video.season: replacement['season'] = 1 if 'episodeNumber' in guess and guess['episodeNumber'] == video.episode: replacement['episode'] = 1 elif isinstance(video, Movie): replacement.update({'title': 0, 'year': 0}) matching_format = '{title:b}{year:b}{keywords:03b}' best = matching_format.format(title=1, year=1, keywords=len(video_keywords)) if guess['type'] in ['movie', 'moviesubtitle']: if 'title' in guess and guess['title'].lower() == video.title.lower(): replacement['title'] = 1 if 'year' in guess and guess['year'] == video.year: replacement['year'] = 1 else: logger.debug(u'Not able to compute confidence for %r' % video) return 0.0 logger.debug(u'Found %r' % replacement) confidence = float(int(matching_format.format(**replacement), 2)) / float(int(best, 2)) logger.info(u'Computed confidence %.4f for %r and %r' % (confidence, video, subtitle)) return confidence def get_service(services, service_name, config=None): """Get a service from its name in the service dict with the specified config. If the service does not exist in the service dict, it is created and added to the dict. :param dict services: dict where to get existing services or put created ones :param string service_name: name of the service to get :param config: config to use for the service :type config: :class:`~subliminal.services.ServiceConfig` or None :return: the corresponding service :rtype: :class:`~subliminal.services.ServiceBase` """ if service_name not in services: mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1) services[service_name] = mod.Service() services[service_name].init() services[service_name].config = config return services[service_name] def key_subtitles(subtitle, video, languages, services, order): """Create a key to sort subtitle using the given order :param subtitle: subtitle to sort :type subtitle: :class:`~subliminal.subtitles.ResultSubtitle` :param video: video to match :type video: :class:`~subliminal.videos.Video` :param list languages: languages in preferred order :param list services: services in preferred order :param order: preferred order for subtitles sorting :type list: list of :data:`LANGUAGE_INDEX`, :data:`SERVICE_INDEX`, :data:`SERVICE_CONFIDENCE`, :data:`MATCHING_CONFIDENCE` :return: a key ready to use for subtitles sorting :rtype: int """ key = '' for sort_item in order: if sort_item == LANGUAGE_INDEX: key += '{0:03d}'.format(len(languages) - languages.index(subtitle.language) - 1) key += '{0:01d}'.format(subtitle.language == languages[languages.index(subtitle.language)]) elif sort_item == SERVICE_INDEX: key += '{0:02d}'.format(len(services) - services.index(subtitle.service) - 1) elif sort_item == SERVICE_CONFIDENCE: key += '{0:04d}'.format(int(subtitle.confidence * 1000)) elif sort_item == MATCHING_CONFIDENCE: confidence = 0 if subtitle.release: confidence = matching_confidence(video, subtitle) key += '{0:04d}'.format(int(confidence * 1000)) return int(key) def group_by_video(list_results): """Group the results of :class:`ListTasks <subliminal.tasks.ListTask>` into a dictionary of :class:`~subliminal.videos.Video` => :class:`~subliminal.subtitles.Subtitle` :param list_results: :type list_results: list of result of :class:`~subliminal.tasks.ListTask` :return: subtitles grouped by videos :rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.Subtitle`] """ result = 
defaultdict(list) for video, subtitles in list_results: result[video] += subtitles or [] return result def filter_services(services): """Filter out services that are not available because of a missing feature :param list services: service names to filter :return: a copy of the initial list of service names without unavailable ones :rtype: list """ filtered_services = services[:] for service_name in services: mod = __import__('services.' + service_name, globals=globals(), locals=locals(), fromlist=['Service'], level=-1) service = mod.Service if service.required_features is not None and bs4.builder_registry.lookup(*service.required_features) is None: logger.warning(u'Service %s not available: none of available features could be used. One of %r required' % (service_name, service.required_features)) filtered_services.remove(service_name) return filtered_services
12,876
Python
.py
242
46.157025
175
0.685714
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
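matching_confidence() above scores a subtitle against a video by packing the matched fields into a binary string and dividing by the best achievable value. A worked illustration of that arithmetic for an episode (the keyword counts are made up for the example):

```python
# Same format string matching_confidence() uses for an Episode.
matching_format = '{series:b}{season:b}{episode:b}{keywords:03b}'

best = matching_format.format(series=1, season=1, episode=1, keywords=5)  # video has 5 keywords
got = matching_format.format(series=1, season=1, episode=1, keywords=3)   # subtitle shares 3 of them

confidence = float(int(got, 2)) / float(int(best, 2))
print got, best, confidence   # 111011 111101 ~0.967
```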
7,548
exceptions.py
CouchPotato_CouchPotatoServer/libs/subliminal/exceptions.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. class Error(Exception): """Base class for exceptions in subliminal""" pass class ServiceError(Error): """"Exception raised by services""" pass class DownloadFailedError(Error): """"Exception raised when a download task has failed in service""" pass
1,050
Python
.py
26
38.230769
77
0.761297
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
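The hierarchy above lets callers distinguish download failures from other service problems while still catching everything through the base Error; a tiny self-contained sketch (flaky_download is invented for the example):

```python
from subliminal.exceptions import Error, DownloadFailedError

def flaky_download():
    # stand-in for a service download() that fails, as the real services may
    raise DownloadFailedError('provider returned HTTP 503')

try:
    flaky_download()
except DownloadFailedError as e:
    print 'download failed, trying next candidate: %s' % e   # what consume_task() does per subtitle
except Error:
    print 'some other subliminal error'
```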
7,549
addic7ed.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/addic7ed.py
# -*- coding: utf-8 -*- # Copyright 2012 Olivier Leveau <olifozzy@gmail.com> # Copyright 2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..cache import cachedmethod from ..exceptions import DownloadFailedError from ..language import Language, language_set from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords, split_keyword from ..videos import Episode from bs4 import BeautifulSoup import logging import os import re logger = logging.getLogger(__name__) class Addic7ed(ServiceBase): server_url = 'http://www.addic7ed.com' api_based = False #TODO: Complete this languages = language_set(['ar', 'ca', 'de', 'el', 'en', 'es', 'eu', 'fr', 'ga', 'gl', 'he', 'hr', 'hu', 'it', 'pl', 'pt', 'ro', 'ru', 'se', 'pt-br']) language_map = {'Portuguese (Brazilian)': Language('por-BR'), 'Greek': Language('gre'), 'Spanish (Latin America)': Language('spa'), 'Galego': Language('glg'), u'Català': Language('cat')} videos = [Episode] require_video = False required_features = ['permissive'] @cachedmethod def get_series_id(self, name): """Get the show page and cache every show found in it""" r = self.session.get('%s/shows.php' % self.server_url) soup = BeautifulSoup(r.content, self.required_features) for html_series in soup.select('h3 > a'): series_name = html_series.text.lower() match = re.search('show/([0-9]+)', html_series['href']) if match is None: continue series_id = int(match.group(1)) self.cache_for(self.get_series_id, args=(series_name,), result=series_id) return self.cached_value(self.get_series_id, args=(name,)) def list_checked(self, video, languages): return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode) def query(self, filepath, languages, keywords, series, season, episode): logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) self.init_cache() try: series_id = self.get_series_id(series.lower()) except KeyError: logger.debug(u'Could not find series id for %s' % series) return [] r = self.session.get('%s/show/%d&season=%d' % (self.server_url, series_id, season)) soup = BeautifulSoup(r.content, self.required_features) subtitles = [] for row in soup('tr', {'class': 'epeven completed'}): cells = row('td') if int(cells[0].text.strip()) != season or int(cells[1].text.strip()) != episode: continue if cells[6].text.strip(): logger.debug(u'Skipping hearing impaired') continue sub_status = cells[5].text.strip() if sub_status != 'Completed': logger.debug(u'Wrong subtitle status %s' % sub_status) continue sub_language = self.get_language(cells[3].text.strip()) if sub_language not in languages: logger.debug(u'Language %r not in wanted languages %r' % (sub_language, languages)) continue sub_keywords = split_keyword(cells[4].text.strip().lower()) 
#TODO: Maybe allow empty keywords here? (same in Subtitulos) if not keywords & sub_keywords: logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords)) continue sub_link = '%s/%s' % (self.server_url, cells[9].a['href']) sub_path = get_subtitle_path(filepath, sub_language, self.config.multi) subtitle = ResultSubtitle(sub_path, sub_language, self.__class__.__name__.lower(), sub_link, keywords=sub_keywords) subtitles.append(subtitle) return subtitles def download(self, subtitle): logger.info(u'Downloading %s in %s' % (subtitle.link, subtitle.path)) try: r = self.session.get(subtitle.link, headers={'Referer': subtitle.link, 'User-Agent': self.user_agent}) soup = BeautifulSoup(r.content, self.required_features) if soup.title is not None and u'Addic7ed.com' in soup.title.text.strip(): raise DownloadFailedError('Download limit exceeded') with open(subtitle.path, 'wb') as f: f.write(r.content) except Exception as e: logger.error(u'Download failed: %s' % e) if os.path.exists(subtitle.path): os.remove(subtitle.path) raise DownloadFailedError(str(e)) logger.debug(u'Download finished') return subtitle Service = Addic7ed
5,549
Python
.py
110
41.736364
135
0.634371
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
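Note on the record above: Addic7ed.get_series_id fetches the whole shows.php listing once and caches an id for every show it finds, so later lookups are served from the cache and a missing show surfaces as a KeyError. The snippet below is a minimal, self-contained illustration of that prefetch-and-cache shape; it uses a plain dict instead of subliminal's Cache/cachedmethod machinery, and fetch_all_series with its return values are invented stand-ins, not code from the repo.

# Minimal sketch of the prefetch-and-cache pattern used by Addic7ed.get_series_id.
# fetch_all_series() is a hypothetical stand-in for scraping shows.php.
_series_cache = {}

def fetch_all_series():
    # Pretend this came from parsing '<server_url>/shows.php'.
    return {'dexter': 79, 'the wire': 114}

def get_series_id(name):
    if not _series_cache:
        # First call: cache the id of *every* show on the page in one pass,
        # so subsequent lookups are plain dictionary hits.
        _series_cache.update(fetch_all_series())
    # Raises KeyError for unknown shows, mirroring cached_value() above.
    return _series_cache[name.lower()]

print(get_series_id('Dexter'))   # 79, served from the prefetched cache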
7,550
subscenter.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/subscenter.py
# -*- coding: utf-8 -*- # Copyright 2012 Ofir123 <ofirbrukner@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..exceptions import ServiceError from ..language import language_set from ..subtitles import get_subtitle_path, ResultSubtitle from ..videos import Episode, Movie from ..utils import to_unicode import bisect import logging from urllib import urlencode logger = logging.getLogger(__name__) class Subscenter(ServiceBase): server = 'http://www.cinemast.org/he/cinemast/api/' api_based = True languages = language_set(['he']) videos = [Episode, Movie] require_video = False default_username = 'subliminal@gmail.com' default_password = 'subliminal' def __init__(self, config=None): super(Subscenter, self).__init__(config) self.token = None self.user_id = None def init(self): super(Subscenter, self).init() logger.debug('Logging in') url = self.server_url + 'login/' # actual login data = {'username': self.default_username, 'password': self.default_password} r = self.session.post(url, data=urlencode(data), allow_redirects=False, timeout=10) if r.status_code != 200: raise ServiceError('Login failed') try: result = r.json() if 'token' not in result: raise ServiceError('Login failed') logger.info('Logged in') self.user_id = r.json().get('user') self.token = r.json().get('token') except ValueError: raise ServiceError('Login failed') def terminate(self): super(Subscenter, self).terminate() if self.token or self.user_id: logger.info('Logged out') self.token = None self.user_id = None def list_checked(self, video, languages): series = None season = None episode = None title = video.title year = video.year if isinstance(video, Episode): series = video.series season = video.season episode = video.episode return self.query(video.path or video.release, languages, series, season, episode, title, year) def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, year=None): logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format( series, season, episode, languages)) query = { 'user': self.user_id, 'token': self.token } # episode if season and episode: query['q'] = series query['type'] = 'series' query['season'] = season query['episode'] = episode elif title: query['q'] = title query['type'] = 'movies' if year: query['year_start'] = year - 1 query['year_end'] = year else: raise ServiceError('One or more parameters are missing') # get the list of subtitles logger.debug('Getting the list of subtitles') url = self.server_url + 'search/' r = self.session.post(url, data=urlencode(query)) r.raise_for_status() try: results = r.json() except ValueError: return {} # loop over results subtitles = {} for group_data in results.get('data', []): for language_code, subtitles_data in group_data.get('subtitles', {}).items(): language_object = self.get_language(language_code) for subtitle_item 
in subtitles_data: # read the item subtitle_id = subtitle_item['id'] subtitle_key = subtitle_item['key'] release = subtitle_item['version'] subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi) download_link = self.server_url + 'subtitle/download/{0}/?v={1}&key={2}&sub_id={3}'.format( language_code, release, subtitle_key, subtitle_id) # Add the release and increment downloaded count if we already have the subtitle. if subtitle_id in subtitles: logger.debug('Found additional release {0} for subtitle {1}'.format( release, subtitle_id)) bisect.insort_left(subtitles[subtitle_id].release, release) # Deterministic order. continue # Otherwise create it. subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(), download_link, release=to_unicode(release)) logger.debug('Found subtitle %r', subtitle) subtitles[subtitle_id] = subtitle return subtitles.values() def download(self, subtitle): data = { 'user': self.user_id, 'token': self.token } self.download_zip_file(subtitle.link, subtitle.path, data=urlencode(data)) return subtitle Service = Subscenter
5,932
Python
.py
137
33.10219
111
0.602184
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
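Note on the record above: subscenter.py logs in once to obtain a user id and token, then sends both with every search/ and download request. The sketch below condenses that handshake into plain requests calls; the endpoints, credentials, and response fields mirror the code above, but the cinemast.org API may no longer respond, so treat this purely as an illustration of the token flow.

# Sketch of the Subscenter login-then-search handshake (illustrative only).
import requests

SERVER = 'http://www.cinemast.org/he/cinemast/api/'

session = requests.Session()
login = session.post(SERVER + 'login/',
                     data={'username': 'subliminal@gmail.com', 'password': 'subliminal'},
                     allow_redirects=False, timeout=10)
login.raise_for_status()
auth = login.json()
token, user_id = auth['token'], auth['user']

# Every later call carries the token and user id, exactly as query() does above.
search = session.post(SERVER + 'search/',
                      data={'user': user_id, 'token': token,
                            'q': 'Dexter', 'type': 'series', 'season': 1, 'episode': 2})
search.raise_for_status()
print(search.json().get('data', []))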
7,551
subtitulos.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/subtitulos.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..language import language_set, Language from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords, split_keyword from ..videos import Episode from bs4 import BeautifulSoup import logging import re import unicodedata import urllib logger = logging.getLogger(__name__) class Subtitulos(ServiceBase): server_url = 'http://www.subtitulos.es' api_based = False languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat']) language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'), u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'), u'English (UK)': Language('eng-GB'), 'Galego': Language('glg')} language_code = 'name' videos = [Episode] require_video = False required_features = ['permissive'] # the '.+' in the pattern for Version allows us to match both '&oacute;' # and the 'ó' char directly. 
This is because now BS4 converts the html # code chars into their equivalent unicode char release_pattern = re.compile('Versi.+n (.+) ([0-9]+).([0-9])+ megabytes') def list_checked(self, video, languages): return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode) def query(self, filepath, languages, keywords, series, season, episode): request_series = series.lower().replace(' ', '_') if isinstance(request_series, unicode): request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore') logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) r = self.session.get('%s/%s/%sx%.2d' % (self.server_url, urllib.quote(request_series), season, episode)) if r.status_code == 404: logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) return [] if r.status_code != 200: logger.error(u'Request %s returned status code %d' % (r.url, r.status_code)) return [] soup = BeautifulSoup(r.content, self.required_features) subtitles = [] for sub in soup('div', {'id': 'version'}): sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower()) if not keywords & sub_keywords: logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords)) continue for html_language in sub.findAllNext('ul', {'class': 'sslist'}): language = self.get_language(html_language.findNext('li', {'class': 'li-idioma'}).find('strong').contents[0].string.strip()) if language not in languages: logger.debug(u'Language %r not in wanted languages %r' % (language, languages)) continue html_status = html_language.findNext('li', {'class': 'li-estado green'}) status = html_status.contents[0].string.strip() if status != 'Completado': logger.debug(u'Wrong subtitle status %s' % status) continue path = get_subtitle_path(filepath, language, self.config.multi) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), html_status.findNext('span', {'class': 'descargar green'}).find('a')['href'], keywords=sub_keywords) subtitles.append(subtitle) return subtitles Service = Subtitulos
4,640
Python
.py
80
49.7875
168
0.649945
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
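Note on the record above: Subtitulos (and SubsWiki further down) keep only the subtitle versions whose release keywords overlap with the keywords guessed from the video filename, via the set intersection `keywords & sub_keywords`. The snippet below is a small self-contained illustration of that filter; split_keyword here is a simplified stand-in for subliminal's helper, and the release names are made up for the example.

# Illustration of the keyword-intersection filter used by query() above.
# split_keyword here is a simplified stand-in for subliminal's helper.
def split_keyword(keyword):
    return set(keyword.lower().replace('.', ' ').replace('-', ' ').split())

video_keywords = split_keyword('720p.HDTV.x264-DIMENSION')   # guessed from the filename
releases = ['DIMENSION 720p', 'WEB-DL NTb']                  # scraped per subtitle version

for release in releases:
    sub_keywords = split_keyword(release)
    if not video_keywords & sub_keywords:
        print('skip: ' + release)   # nothing in common, same as the `continue` above
    else:
        print('keep: ' + release)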
7,552
wizdom.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/wizdom.py
# -*- coding: utf-8 -*- # Copyright 2017 Ofir123 <ofirbrukner@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..exceptions import ServiceError from ..language import language_set from ..subtitles import get_subtitle_path, ResultSubtitle from ..videos import Episode, Movie from ..utils import to_unicode import bisect import logging logger = logging.getLogger(__name__) class Wizdom(ServiceBase): server = 'http://wizdom.xyz' api_based = True languages = language_set(['he']) videos = [Episode, Movie] require_video = False _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3' def _search_imdb_id(self, title, year, is_movie): """Search the IMDB ID for the given `title` and `year`. :param str title: title to search for. :param int year: year to search for (or 0 if not relevant). :param bool is_movie: If True, IMDB ID will be searched for in TMDB instead of Wizdom. :return: the IMDB ID for the given title and year (or None if not found). :rtype: str """ # make the search logger.info('Searching IMDB ID for %r%r', title, '' if not year else ' ({})'.format(year)) category = 'movie' if is_movie else 'tv' title = title.replace('\'', '') # get TMDB ID first r = self.session.get('http://api.tmdb.org/3/search/{}?api_key={}&query={}{}&language=en'.format( category, self._tmdb_api_key, title, '' if not year else '&year={}'.format(year))) r.raise_for_status() tmdb_results = r.json().get('results') if tmdb_results: tmdb_id = tmdb_results[0].get('id') if tmdb_id: # get actual IMDB ID from TMDB r = self.session.get('http://api.tmdb.org/3/{}/{}{}?api_key={}&language=en'.format( category, tmdb_id, '' if is_movie else '/external_ids', self._tmdb_api_key)) r.raise_for_status() return str(r.json().get('imdb_id', '')) or None return None def list_checked(self, video, languages): series = None season = None episode = None title = video.title imdb_id = video.imdbid year = video.year if isinstance(video, Episode): series = video.series season = video.season episode = video.episode return self.query(video.path or video.release, languages, series, season, episode, title, imdb_id, year) def query(self, filepath, languages=None, series=None, season=None, episode=None, title=None, imdbid=None, year=None): logger.debug(u'Getting subtitles for {0} season {1} episode {2} with languages {3}'.format( series, season, episode, languages)) # search for the IMDB ID if needed is_movie = not (series and season and episode) if is_movie and not title: raise ServiceError('One or more parameters are missing') # for TV series, we need the series IMDB ID, and not the specific episode ID imdb_id = imdbid or self._search_imdb_id(title, year, is_movie) # search logger.debug(u'Using IMDB ID {0}'.format(imdb_id)) url = 'http://json.{}/{}.json'.format(self.server_url, imdb_id) # get the list of subtitles logger.debug('Getting the list of subtitles') r = self.session.get(url) 
r.raise_for_status() try: results = r.json() except ValueError: return {} # filter irrelevant results if not is_movie: results = results.get('subs', {}).get(str(season), {}).get(str(episode), []) else: results = results.get('subs', []) # loop over results subtitles = dict() for result in results: language_object = self.get_language('heb') subtitle_id = result['id'] release = result['version'] subtitle_path = get_subtitle_path(filepath, language_object, self.config.multi) download_link = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle_id) # add the release and increment downloaded count if we already have the subtitle if subtitle_id in subtitles: logger.debug(u'Found additional release {0} for subtitle {1}'.format(release, subtitle_id)) bisect.insort_left(subtitles[subtitle_id].releases, release) # deterministic order subtitles[subtitle_id].downloaded += 1 continue # otherwise create it subtitle = ResultSubtitle(subtitle_path, language_object, self.__class__.__name__.lower(), download_link, release=to_unicode(release)) logger.debug(u'Found subtitle {0}'.format(subtitle)) subtitles[subtitle_id] = subtitle return subtitles.values() def download(self, subtitle): self.download_zip_file(subtitle.link, subtitle.path) return subtitle Service = Wizdom
5,770
Python
.py
122
38.508197
110
0.628551
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
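Note on the record above: wizdom.py resolves a title to an IMDB ID by first hitting TMDB's search endpoint and then reading the external IDs of the first hit, and only afterwards queries Wizdom's JSON listing. The fragment below condenses those two TMDB calls into plain requests; the API key is the one hard-coded above, the title and year are arbitrary example values, and the endpoints may have changed since this code was written.

# Condensed sketch of the TMDB -> IMDB id lookup performed by _search_imdb_id above.
import requests

TMDB_KEY = 'a51ee051bcd762543373903de296e0a3'   # key hard-coded in wizdom.py above
title, year = 'Dexter', 2006                     # arbitrary example values

r = requests.get('http://api.tmdb.org/3/search/tv',
                 params={'api_key': TMDB_KEY, 'query': title, 'year': year, 'language': 'en'})
r.raise_for_status()
results = r.json().get('results') or []
if results:
    tmdb_id = results[0]['id']
    # For TV shows the IMDB id lives under /external_ids, as in the code above.
    r = requests.get('http://api.tmdb.org/3/tv/{}/external_ids'.format(tmdb_id),
                     params={'api_key': TMDB_KEY, 'language': 'en'})
    r.raise_for_status()
    print(r.json().get('imdb_id'))               # e.g. 'tt0773262'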
7,553
subswiki.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/subswiki.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..exceptions import ServiceError from ..language import language_set, Language from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords, split_keyword from ..videos import Episode, Movie from bs4 import BeautifulSoup import logging import urllib logger = logging.getLogger(__name__) class SubsWiki(ServiceBase): server_url = 'http://www.subswiki.com' api_based = False languages = language_set(['eng-US', 'eng-GB', 'eng', 'fre', 'por-BR', 'por', 'spa-ES', u'spa', u'ita', u'cat']) language_map = {u'Español': Language('spa'), u'Español (España)': Language('spa'), u'Español (Latinoamérica)': Language('spa'), u'Català': Language('cat'), u'Brazilian': Language('por-BR'), u'English (US)': Language('eng-US'), u'English (UK)': Language('eng-GB')} language_code = 'name' videos = [Episode, Movie] require_video = False required_features = ['permissive'] def list_checked(self, video, languages): results = [] if isinstance(video, Episode): results = self.query(video.path or video.release, languages, get_keywords(video.guess), series=video.series, season=video.season, episode=video.episode) elif isinstance(video, Movie) and video.year: results = self.query(video.path or video.release, languages, get_keywords(video.guess), movie=video.title, year=video.year) return results def query(self, filepath, languages, keywords=None, series=None, season=None, episode=None, movie=None, year=None): if series and season and episode: request_series = series.lower().replace(' ', '_') if isinstance(request_series, unicode): request_series = request_series.encode('utf-8') logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) r = self.session.get('%s/serie/%s/%s/%s/' % (self.server_url, urllib.quote(request_series), season, episode)) if r.status_code == 404: logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) return [] elif movie and year: request_movie = movie.title().replace(' ', '_') if isinstance(request_movie, unicode): request_movie = request_movie.encode('utf-8') logger.debug(u'Getting subtitles for %s (%d) with languages %r' % (movie, year, languages)) r = self.session.get('%s/film/%s_(%d)' % (self.server_url, urllib.quote(request_movie), year)) if r.status_code == 404: logger.debug(u'Could not find subtitles for %s (%d) with languages %r' % (movie, year, languages)) return [] else: raise ServiceError('One or more parameter missing') if r.status_code != 200: logger.error(u'Request %s returned status code %d' % (r.url, r.status_code)) return [] soup = BeautifulSoup(r.content, self.required_features) subtitles = [] for sub in soup('td', {'class': 'NewsTitle'}): 
sub_keywords = split_keyword(sub.b.string.lower()) if not keywords & sub_keywords: logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords)) continue for html_language in sub.parent.parent.find_all('td', {'class': 'language'}): language = self.get_language(html_language.string.strip()) if language not in languages: logger.debug(u'Language %r not in wanted languages %r' % (language, languages)) continue html_status = html_language.find_next_sibling('td') status = html_status.strong.string.strip() if status != 'Completado': logger.debug(u'Wrong subtitle status %s' % status) continue path = get_subtitle_path(filepath, language, self.config.multi) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s%s' % (self.server_url, html_status.find_next('td').find('a')['href'])) subtitles.append(subtitle) return subtitles Service = SubsWiki
5,182
Python
.py
91
47.791209
165
0.640536
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,554
tvsubtitles.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/tvsubtitles.py
# -*- coding: utf-8 -*- # Copyright 2012 Nicolas Wack <wackou@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..cache import cachedmethod from ..language import language_set, Language from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import get_keywords from ..videos import Episode from bs4 import BeautifulSoup import logging import re logger = logging.getLogger(__name__) def match(pattern, string): try: return re.search(pattern, string).group(1) except AttributeError: logger.debug(u'Could not match %r on %r' % (pattern, string)) return None class TvSubtitles(ServiceBase): server_url = 'http://www.tvsubtitles.net' api_based = False languages = language_set(['ar', 'bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja', 'ko', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'uk', 'zh', 'pt-br']) #TODO: Find more exceptions language_map = {'gr': Language('gre'), 'cz': Language('cze'), 'ua': Language('ukr'), 'cn': Language('chi')} videos = [Episode] require_video = False required_features = ['permissive'] @cachedmethod def get_likely_series_id(self, name): r = self.session.post('%s/search.php' % self.server_url, data={'q': name}) soup = BeautifulSoup(r.content, self.required_features) maindiv = soup.find('div', 'left') results = [] for elem in maindiv.find_all('li'): sid = int(match('tvshow-([0-9]+)\.html', elem.a['href'])) show_name = match('(.*) \(', elem.a.text) results.append((show_name, sid)) #TODO: pick up the best one in a smart way result = results[0] return result[1] @cachedmethod def get_episode_id(self, series_id, season, number): """Get the TvSubtitles id for the given episode. 
Raises KeyError if none could be found.""" # download the page of the season, contains ids for all episodes episode_id = None r = self.session.get('%s/tvshow-%d-%d.html' % (self.server_url, series_id, season)) soup = BeautifulSoup(r.content, self.required_features) table = soup.find('table', id='table5') for row in table.find_all('tr'): cells = row.find_all('td') if not cells: continue episode_number = match('x([0-9]+)', cells[0].text) if not episode_number: continue episode_number = int(episode_number) episode_id = int(match('episode-([0-9]+)', cells[1].a['href'])) # we could just return the id of the queried episode, but as we # already downloaded the whole page we might as well fill in the # information for all the episodes of the season self.cache_for(self.get_episode_id, args=(series_id, season, episode_number), result=episode_id) # raises KeyError if not found return self.cached_value(self.get_episode_id, args=(series_id, season, number)) # Do not cache this method in order to always check for the most recent # subtitles def get_sub_ids(self, episode_id): subids = [] r = self.session.get('%s/episode-%d.html' % (self.server_url, episode_id)) epsoup = BeautifulSoup(r.content, self.required_features) for subdiv in epsoup.find_all('a'): if 'href' not in subdiv.attrs or not subdiv['href'].startswith('/subtitle'): continue subid = int(match('([0-9]+)', subdiv['href'])) lang = self.get_language(match('flags/(.*).gif', subdiv.img['src'])) result = {'subid': subid, 'language': lang} for p in subdiv.find_all('p'): if 'alt' in p.attrs and p['alt'] == 'rip': result['rip'] = p.text.strip() if 'alt' in p.attrs and p['alt'] == 'release': result['release'] = p.text.strip() subids.append(result) return subids def list_checked(self, video, languages): return self.query(video.path or video.release, languages, get_keywords(video.guess), video.series, video.season, video.episode) def query(self, filepath, languages, keywords, series, season, episode): logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages)) self.init_cache() sid = self.get_likely_series_id(series.lower()) try: ep_id = self.get_episode_id(sid, season, episode) except KeyError: logger.debug(u'Could not find episode id for %s season %d episode %d' % (series, season, episode)) return [] subids = self.get_sub_ids(ep_id) # filter the subtitles with our queried languages subtitles = [] for subid in subids: language = subid['language'] if language not in languages: continue path = get_subtitle_path(filepath, language, self.config.multi) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s/download-%d.html' % (self.server_url, subid['subid']), keywords=[subid['rip'], subid['release']]) subtitles.append(subtitle) return subtitles def download(self, subtitle): self.download_zip_file(subtitle.link, subtitle.path) return subtitle Service = TvSubtitles
6,240
Python
.py
128
40.0625
145
0.618563
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,555
__init__.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/__init__.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from ..cache import Cache from ..exceptions import DownloadFailedError, ServiceError from ..language import language_set, Language from ..subtitles import EXTENSIONS import logging import os import requests import threading import zipfile __all__ = ['ServiceBase', 'ServiceConfig'] logger = logging.getLogger(__name__) class ServiceBase(object): """Service base class :param config: service configuration :type config: :class:`ServiceConfig` """ #: URL to the service server server_url = '' #: User Agent for any HTTP-based requests user_agent = 'subliminal v0.6' #: Whether based on an API or not api_based = False #: Timeout for web requests timeout = 5 #: :class:`~subliminal.language.language_set` of available languages languages = language_set() #: Map between language objects and language codes used in the service language_map = {} #: Default attribute of a :class:`~subliminal.language.Language` to get with :meth:`get_code` language_code = 'alpha2' #: Accepted video classes (:class:`~subliminal.videos.Episode`, :class:`~subliminal.videos.Movie`, :class:`~subliminal.videos.UnknownVideo`) videos = [] #: Whether the video has to exist or not require_video = False #: List of required features for BeautifulSoup required_features = None def __init__(self, config=None): self.config = config or ServiceConfig() self.session = None def __enter__(self): self.init() return self def __exit__(self, *args): self.terminate() def init(self): """Initialize connection""" logger.debug(u'Initializing %s' % self.__class__.__name__) self.session = requests.session() self.session.headers.update({'User-Agent': self.user_agent}) def init_cache(self): """Initialize cache, make sure it is loaded from disk""" if not self.config or not self.config.cache: raise ServiceError('Cache directory is required') self.config.cache.load(self.__class__.__name__) def save_cache(self): self.config.cache.save(self.__class__.__name__) def clear_cache(self): self.config.cache.clear(self.__class__.__name__) def cache_for(self, func, args, result): return self.config.cache.cache_for(self.__class__.__name__, func, args, result) def cached_value(self, func, args): return self.config.cache.cached_value(self.__class__.__name__, func, args) def terminate(self): """Terminate connection""" logger.debug(u'Terminating %s' % self.__class__.__name__) def get_code(self, language): """Get the service code for a :class:`~subliminal.language.Language` It uses the :data:`language_map` and if there's no match, falls back on the :data:`language_code` attribute of the given :class:`~subliminal.language.Language` """ if language in self.language_map: return self.language_map[language] if self.language_code is None: raise ValueError('%r has no matching code' % language) return getattr(language, 
self.language_code) def get_language(self, code): """Get a :class:`~subliminal.language.Language` from a service code It uses the :data:`language_map` and if there's no match, uses the given code as ``language`` parameter for the :class:`~subliminal.language.Language` constructor .. note:: A warning is emitted if the generated :class:`~subliminal.language.Language` is "Undetermined" """ if code in self.language_map: return self.language_map[code] language = Language(code, strict=False) if language == Language('Undetermined'): logger.warning(u'Code %s could not be identified as a language for %s' % (code, self.__class__.__name__)) return language def query(self, *args): """Make the actual query""" raise NotImplementedError() def list(self, video, languages): """List subtitles As a service writer, you can either override this method or implement :meth:`list_checked` instead to have the languages pre-filtered for you """ if not self.check_validity(video, languages): return [] return self.list_checked(video, languages) def list_checked(self, video, languages): """List subtitles without having to check parameters for validity""" raise NotImplementedError() def download(self, subtitle): """Download a subtitle""" self.download_file(subtitle.link, subtitle.path) return subtitle @classmethod def check_validity(cls, video, languages): """Check for video and languages validity in the Service :param video: the video to check :type video: :class:`~subliminal.videos.video` :param languages: languages to check :type languages: :class:`~subliminal.language.Language` :rtype: bool """ languages = (languages & cls.languages) - language_set(['Undetermined']) if not languages: logger.debug(u'No language available for service %s' % cls.__name__.lower()) return False if cls.require_video and not video.exists or not isinstance(video, tuple(cls.videos)): logger.debug(u'%r is not valid for service %s' % (video, cls.__name__.lower())) return False return True def download_file(self, url, filepath, data=None): """Attempt to download a file and remove it in case of failure :param string url: URL to download :param string filepath: destination path :param string data: data to add to the post request """ logger.info(u'Downloading %s in %s' % (url, filepath)) try: headers = {'Referer': url, 'User-Agent': self.user_agent} if data: r = self.session.post(url, data=data, timeout=10, headers=headers) else: r = self.session.get(url, timeout=10, headers=headers) with open(filepath, 'wb') as f: f.write(r.content) except Exception as e: logger.error(u'Download failed: %s' % e) if os.path.exists(filepath): os.remove(filepath) raise DownloadFailedError(str(e)) logger.debug(u'Download finished') def download_zip_file(self, url, filepath, data=None): """Attempt to download a zip file and extract any subtitle file from it, if any. This cleans up after itself if anything fails. 
:param string url: URL of the zip file to download :param string filepath: destination path for the subtitle :param string data: data to add to the post request """ logger.info(u'Downloading %s in %s' % (url, filepath)) try: zippath = filepath + '.zip' headers = {'Referer': url, 'User-Agent': self.user_agent} if data: r = self.session.post(url, data=data, timeout=10, headers=headers) else: r = self.session.get(url, timeout=10, headers=headers) with open(zippath, 'wb') as f: f.write(r.content) if not zipfile.is_zipfile(zippath): # TODO: could check if maybe we already have a text file and # download it directly raise DownloadFailedError('Downloaded file is not a zip file') with zipfile.ZipFile(zippath) as zipsub: for subfile in zipsub.namelist(): if os.path.splitext(subfile)[1] in EXTENSIONS: with open(filepath, 'w') as f: f.write(zipsub.open(subfile).read()) break else: raise DownloadFailedError('No subtitles found in zip file') os.remove(zippath) except Exception as e: logger.error(u'Download %s failed: %s' % (url, e)) if os.path.exists(zippath): os.remove(zippath) if os.path.exists(filepath): os.remove(filepath) raise DownloadFailedError(str(e)) logger.debug(u'Download finished') class ServiceConfig(object): """Configuration for any :class:`Service` :param bool multi: whether to download one subtitle per language or not :param string cache_dir: cache directory """ def __init__(self, multi=False, cache_dir=None): self.multi = multi self.cache_dir = cache_dir self.cache = None if cache_dir is not None: self.cache = Cache(cache_dir) def __repr__(self): return 'ServiceConfig(%r, %s)' % (self.multi, self.cache.cache_dir)
9,715
Python
.py
214
36.728972
144
0.637384
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,556
opensubtitles.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/opensubtitles.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..exceptions import ServiceError, DownloadFailedError from ..language import Language, language_set from ..subtitles import get_subtitle_path, ResultSubtitle from ..utils import to_unicode from ..videos import Episode, Movie import gzip import logging import os.path import xmlrpclib logger = logging.getLogger(__name__) class OpenSubtitles(ServiceBase): server_url = 'http://api.opensubtitles.org/xml-rpc' api_based = True # Source: http://www.opensubtitles.org/addons/export_languages.php languages = language_set(['aar', 'abk', 'ace', 'ach', 'ada', 'ady', 'afa', 'afh', 'afr', 'ain', 'aka', 'akk', 'alb', 'ale', 'alg', 'alt', 'amh', 'ang', 'apa', 'ara', 'arc', 'arg', 'arm', 'arn', 'arp', 'art', 'arw', 'asm', 'ast', 'ath', 'aus', 'ava', 'ave', 'awa', 'aym', 'aze', 'bad', 'bai', 'bak', 'bal', 'bam', 'ban', 'baq', 'bas', 'bat', 'bej', 'bel', 'bem', 'ben', 'ber', 'bho', 'bih', 'bik', 'bin', 'bis', 'bla', 'bnt', 'bos', 'bra', 'bre', 'btk', 'bua', 'bug', 'bul', 'bur', 'byn', 'cad', 'cai', 'car', 'cat', 'cau', 'ceb', 'cel', 'cha', 'chb', 'che', 'chg', 'chi', 'chk', 'chm', 'chn', 'cho', 'chp', 'chr', 'chu', 'chv', 'chy', 'cmc', 'cop', 'cor', 'cos', 'cpe', 'cpf', 'cpp', 'cre', 'crh', 'crp', 'csb', 'cus', 'cze', 'dak', 'dan', 'dar', 'day', 'del', 'den', 'dgr', 'din', 'div', 'doi', 'dra', 'dua', 'dum', 'dut', 'dyu', 'dzo', 'efi', 'egy', 'eka', 'ell', 'elx', 'eng', 'enm', 'epo', 'est', 'ewe', 'ewo', 'fan', 'fao', 'fat', 'fij', 'fil', 'fin', 'fiu', 'fon', 'fre', 'frm', 'fro', 'fry', 'ful', 'fur', 'gaa', 'gay', 'gba', 'gem', 'geo', 'ger', 'gez', 'gil', 'gla', 'gle', 'glg', 'glv', 'gmh', 'goh', 'gon', 'gor', 'got', 'grb', 'grc', 'grn', 'guj', 'gwi', 'hai', 'hat', 'hau', 'haw', 'heb', 'her', 'hil', 'him', 'hin', 'hit', 'hmn', 'hmo', 'hrv', 'hun', 'hup', 'iba', 'ibo', 'ice', 'ido', 'iii', 'ijo', 'iku', 'ile', 'ilo', 'ina', 'inc', 'ind', 'ine', 'inh', 'ipk', 'ira', 'iro', 'ita', 'jav', 'jpn', 'jpr', 'jrb', 'kaa', 'kab', 'kac', 'kal', 'kam', 'kan', 'kar', 'kas', 'kau', 'kaw', 'kaz', 'kbd', 'kha', 'khi', 'khm', 'kho', 'kik', 'kin', 'kir', 'kmb', 'kok', 'kom', 'kon', 'kor', 'kos', 'kpe', 'krc', 'kro', 'kru', 'kua', 'kum', 'kur', 'kut', 'lad', 'lah', 'lam', 'lao', 'lat', 'lav', 'lez', 'lim', 'lin', 'lit', 'lol', 'loz', 'ltz', 'lua', 'lub', 'lug', 'lui', 'lun', 'luo', 'lus', 'mac', 'mad', 'mag', 'mah', 'mai', 'mak', 'mal', 'man', 'mao', 'map', 'mar', 'mas', 'may', 'mdf', 'mdr', 'men', 'mga', 'mic', 'min', 'mkh', 'mlg', 'mlt', 'mnc', 'mni', 'mno', 'moh', 'mon', 'mos', 'mun', 'mus', 'mwl', 'mwr', 'myn', 'myv', 'nah', 'nai', 'nap', 'nau', 'nav', 'nbl', 'nde', 'ndo', 'nds', 'nep', 'new', 'nia', 'nic', 'niu', 'nno', 'nob', 'nog', 'non', 'nor', 'nso', 'nub', 'nwc', 'nya', 'nym', 'nyn', 'nyo', 'nzi', 'oci', 'oji', 'ori', 'orm', 'osa', 
'oss', 'ota', 'oto', 'paa', 'pag', 'pal', 'pam', 'pan', 'pap', 'pau', 'peo', 'per', 'phi', 'phn', 'pli', 'pol', 'pon', 'por', 'pra', 'pro', 'pus', 'que', 'raj', 'rap', 'rar', 'roa', 'roh', 'rom', 'rum', 'run', 'rup', 'rus', 'sad', 'sag', 'sah', 'sai', 'sal', 'sam', 'san', 'sas', 'sat', 'scn', 'sco', 'sel', 'sem', 'sga', 'sgn', 'shn', 'sid', 'sin', 'sio', 'sit', 'sla', 'slo', 'slv', 'sma', 'sme', 'smi', 'smj', 'smn', 'smo', 'sms', 'sna', 'snd', 'snk', 'sog', 'som', 'son', 'sot', 'spa', 'srd', 'srp', 'srr', 'ssa', 'ssw', 'suk', 'sun', 'sus', 'sux', 'swa', 'swe', 'syr', 'tah', 'tai', 'tam', 'tat', 'tel', 'tem', 'ter', 'tet', 'tgk', 'tgl', 'tha', 'tib', 'tig', 'tir', 'tiv', 'tkl', 'tlh', 'tli', 'tmh', 'tog', 'ton', 'tpi', 'tsi', 'tsn', 'tso', 'tuk', 'tum', 'tup', 'tur', 'tut', 'tvl', 'twi', 'tyv', 'udm', 'uga', 'uig', 'ukr', 'umb', 'urd', 'uzb', 'vai', 'ven', 'vie', 'vol', 'vot', 'wak', 'wal', 'war', 'was', 'wel', 'wen', 'wln', 'wol', 'xal', 'xho', 'yao', 'yap', 'yid', 'yor', 'ypk', 'zap', 'zen', 'zha', 'znd', 'zul', 'zun', 'por-BR', 'rum-MD']) language_map = {'mol': Language('rum-MD'), 'scc': Language('srp'), 'pob': Language('por-BR'), Language('rum-MD'): 'mol', Language('srp'): 'scc', Language('por-BR'): 'pob'} language_code = 'alpha3' videos = [Episode, Movie] require_video = False confidence_order = ['moviehash', 'imdbid', 'fulltext'] def __init__(self, config=None): super(OpenSubtitles, self).__init__(config) self.server = xmlrpclib.ServerProxy(self.server_url) self.token = None def init(self): super(OpenSubtitles, self).init() result = self.server.LogIn('', '', 'eng', self.user_agent) if result['status'] != '200 OK': raise ServiceError('Login failed') self.token = result['token'] def terminate(self): super(OpenSubtitles, self).terminate() if self.token: self.server.LogOut(self.token) def query(self, filepath, languages, moviehash=None, size=None, imdbid=None, query=None): searches = [] if moviehash and size: searches.append({'moviehash': moviehash, 'moviebytesize': size}) if imdbid: searches.append({'imdbid': imdbid}) if query: searches.append({'query': query}) if not searches: raise ServiceError('One or more parameter missing') for search in searches: search['sublanguageid'] = ','.join(self.get_code(l) for l in languages) logger.debug(u'Getting subtitles %r with token %s' % (searches, self.token)) results = self.server.SearchSubtitles(self.token, searches) if not results['data']: logger.debug(u'Could not find subtitles for %r with token %s' % (searches, self.token)) return [] subtitles = [] for result in results['data']: language = self.get_language(result['SubLanguageID']) path = get_subtitle_path(filepath, language, self.config.multi) confidence = 1 - float(self.confidence_order.index(result['MatchedBy'])) / float(len(self.confidence_order)) subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), result['SubDownloadLink'], release=to_unicode(result['SubFileName']), confidence=confidence) subtitles.append(subtitle) return subtitles def list_checked(self, video, languages): results = [] if video.exists: results = self.query(video.path or video.release, languages, moviehash=video.hashes['OpenSubtitles'], size=str(video.size)) elif video.imdbid: results = self.query(video.path or video.release, languages, imdbid=video.imdbid) elif isinstance(video, Episode): results = self.query(video.path or video.release, languages, query=video.series) elif isinstance(video, Movie): results = self.query(video.path or video.release, languages, query=video.title) return results def 
download(self, subtitle): #TODO: Use OpenSubtitles DownloadSubtitles method try: self.download_file(subtitle.link, subtitle.path + '.gz') with open(subtitle.path, 'wb') as dump: gz = gzip.open(subtitle.path + '.gz') dump.write(gz.read()) gz.close() except Exception as e: if os.path.exists(subtitle.path): os.remove(subtitle.path) raise DownloadFailedError(str(e)) finally: if os.path.exists(subtitle.path + '.gz'): os.remove(subtitle.path + '.gz') return subtitle Service = OpenSubtitles
9,503
Python
.py
146
50.993151
135
0.496415
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
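Note on the record above: opensubtitles.py drives the OpenSubtitles XML-RPC endpoint: LogIn() yields a token, SearchSubtitles() takes that token plus a list of search dicts (moviehash/size, imdbid, or free-text query), and LogOut() releases it. The fragment below strips that flow down to the bare xmlrpclib calls used above; the anonymous login and the imdbid value are examples only, and the public API may require registered credentials today.

# Bare-bones version of the XML-RPC round trip performed by the service above.
# Python 2 style (xmlrpclib), matching the bundled library.
import xmlrpclib

server = xmlrpclib.ServerProxy('http://api.opensubtitles.org/xml-rpc')
login = server.LogIn('', '', 'eng', 'subliminal v0.6')     # anonymous login, as above
if login['status'] != '200 OK':
    raise RuntimeError('Login failed')
token = login['token']

try:
    searches = [{'imdbid': '0773262', 'sublanguageid': 'eng'}]   # example search
    results = server.SearchSubtitles(token, searches)
    for item in results.get('data') or []:
        print(item['SubFileName'], item['SubDownloadLink'])
finally:
    server.LogOut(token)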
7,557
thesubdb.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/thesubdb.py
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from . import ServiceBase
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie, UnknownVideo
import logging


logger = logging.getLogger(__name__)


class TheSubDB(ServiceBase):
    server_url = 'http://api.thesubdb.com'
    user_agent = 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'
    api_based = True
    # Source: http://api.thesubdb.com/?action=languages
    languages = language_set(['af', 'cs', 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'id', 'it', 'la', 'nl', 'no', 'oc', 'pl', 'pt', 'ro', 'ru', 'sl', 'sr', 'sv', 'tr'])
    videos = [Movie, Episode, UnknownVideo]
    require_video = True

    def list_checked(self, video, languages):
        return self.query(video.path, video.hashes['TheSubDB'], languages)

    def query(self, filepath, moviehash, languages):
        r = self.session.get(self.server_url, params={'action': 'search', 'hash': moviehash})
        if r.status_code == 404:
            logger.debug(u'Could not find subtitles for hash %s' % moviehash)
            return []
        if r.status_code != 200:
            logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
            return []
        available_languages = language_set(r.content.split(','))
        languages &= available_languages
        if not languages:
            logger.debug(u'Could not find subtitles for hash %s with languages %r (only %r available)' % (moviehash, languages, available_languages))
            return []
        subtitles = []
        for language in languages:
            path = get_subtitle_path(filepath, language, self.config.multi)
            subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), '%s?action=download&hash=%s&language=%s' % (self.server_url, moviehash, language.alpha2))
            subtitles.append(subtitle)
        return subtitles


Service = TheSubDB
2,775
Python
.py
55
44.145455
176
0.666298
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
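Note on the record above: thesubdb.py is the simplest service in the package; a single GET with action=search and the file hash returns a comma-separated list of available languages, and a second GET with action=download fetches the subtitle itself. The sketch below reproduces that exchange with requests; the hash value is a placeholder, and the special User-Agent is copied from the user_agent attribute above since TheSubDB requires it.

# Sketch of the two-request TheSubDB protocol used by the service above.
import requests

SERVER = 'http://api.thesubdb.com'
HEADERS = {'User-Agent': 'SubDB/1.0 (subliminal/0.6; https://github.com/Diaoul/subliminal)'}
moviehash = 'ffd8d4aa68033dc03d1c8ef373b9028c'   # placeholder hash, not from a real file

r = requests.get(SERVER, params={'action': 'search', 'hash': moviehash}, headers=HEADERS)
if r.status_code == 200:
    available = set(r.text.split(','))           # e.g. {'en', 'pt'}
    if 'en' in available:
        sub = requests.get(SERVER, params={'action': 'download', 'hash': moviehash,
                                           'language': 'en'}, headers=HEADERS)
        with open('example.srt', 'wb') as f:
            f.write(sub.content)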
7,558
bierdopje.py
CouchPotato_CouchPotatoServer/libs/subliminal/services/bierdopje.py
# -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from . import ServiceBase from ..cache import cachedmethod from ..exceptions import ServiceError from ..language import language_set from ..subtitles import get_subtitle_path, ResultSubtitle, EXTENSIONS from ..utils import to_unicode from ..videos import Episode from bs4 import BeautifulSoup import logging import urllib try: import cPickle as pickle except ImportError: import pickle logger = logging.getLogger(__name__) class BierDopje(ServiceBase): server_url = 'http://api.bierdopje.com/A2B638AC5D804C2E/' user_agent = 'Subliminal/0.6' api_based = True languages = language_set(['eng', 'dut']) videos = [Episode] require_video = False required_features = ['xml'] @cachedmethod def get_show_id(self, series): r = self.session.get('%sGetShowByName/%s' % (self.server_url, urllib.quote(series.lower()))) if r.status_code != 200: logger.error(u'Request %s returned status code %d' % (r.url, r.status_code)) return None soup = BeautifulSoup(r.content, self.required_features) if soup.status.contents[0] == 'false': logger.debug(u'Could not find show %s' % series) return None return int(soup.showid.contents[0]) def load_cache(self): logger.debug(u'Loading showids from cache...') with self.lock: with open(self.showids_cache, 'r') as f: self.showids = pickle.load(f) def query(self, filepath, season, episode, languages, tvdbid=None, series=None): self.init_cache() if series: request_id = self.get_show_id(series.lower()) if request_id is None: return [] request_source = 'showid' request_is_tvdbid = 'false' elif tvdbid: request_id = tvdbid request_source = 'tvdbid' request_is_tvdbid = 'true' else: raise ServiceError('One or more parameter missing') subtitles = [] for language in languages: logger.debug(u'Getting subtitles for %s %d season %d episode %d with language %s' % (request_source, request_id, season, episode, language.alpha2)) r = self.session.get('%sGetAllSubsFor/%s/%s/%s/%s/%s' % (self.server_url, request_id, season, episode, language.alpha2, request_is_tvdbid)) if r.status_code != 200: logger.error(u'Request %s returned status code %d' % (r.url, r.status_code)) return [] soup = BeautifulSoup(r.content, self.required_features) if soup.status.contents[0] == 'false': logger.debug(u'Could not find subtitles for %s %d season %d episode %d with language %s' % (request_source, request_id, season, episode, language.alpha2)) continue path = get_subtitle_path(filepath, language, self.config.multi) for result in soup.results('result'): release = to_unicode(result.filename.contents[0]) if not release.endswith(tuple(EXTENSIONS)): release += '.srt' subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), result.downloadlink.contents[0], release=release) subtitles.append(subtitle) return subtitles def list_checked(self, video, languages): 
return self.query(video.path or video.release, video.season, video.episode, languages, video.tvdbid, video.series) Service = BierDopje
4,335
Python
.py
93
38.645161
170
0.654537
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,559
sanitizer.py
CouchPotato_CouchPotatoServer/libs/html5lib/sanitizer.py
from __future__ import absolute_import, division, unicode_literals import re from xml.sax.saxutils import escape, unescape from .tokenizer import HTMLTokenizer from .constants import tokenTypes class HTMLSanitizerMixin(object): """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video'] mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', 'munderover', 'none'] svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse', 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use'] acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis', 'background', 'balance', 'bgcolor', 'bgproperties', 'border', 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', 'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start', 'step', 'style', 'summary', 'suppress', 'tabindex', 'target', 'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', 'xml:lang'] mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', 'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth', 'display', 'displaystyle', 'equalcolumns', 
'equalrows', 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize', 'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', 'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', 'arabic-form', 'ascent', 'attributeName', 'attributeType', 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', 'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end', 'marker-mid', 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset', 'opacity', 'orient', 'origin', 'overline-position', 'overline-thickness', 'panose-1', 'path', 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity', 'strikethrough-position', 'strikethrough-thickness', 'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2', 'zoomAndPan'] attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc', 'poster', 'xlink:href', 'xml:base'] svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill', 'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end', 'mask', 'stroke'] svg_allow_local_href = ['altGlyph', 'animate', 'animateColor', 'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter', 'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref', 'set', 'use'] acceptable_css_properties = ['azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width'] acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 
'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow'] acceptable_svg_properties = ['fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity'] acceptable_protocols = ['ed2k', 'ftp', 'http', 'https', 'irc', 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', 'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', 'ssh', 'sftp', 'rtsp', 'afs'] # subclasses may define their own versions of these constants allowed_elements = acceptable_elements + mathml_elements + svg_elements allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes allowed_css_properties = acceptable_css_properties allowed_css_keywords = acceptable_css_keywords allowed_svg_properties = acceptable_svg_properties allowed_protocols = acceptable_protocols # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style # attributes are parsed, and a restricted set, # specified by # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified # in ALLOWED_PROTOCOLS are allowed. # # sanitize_html('<script> do_nasty_stuff() </script>') # => &lt;script> do_nasty_stuff() &lt;/script> # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') # => <a>Click here for $100</a> def sanitize_token(self, token): # accommodate filters which use token_type differently token_type = token["type"] if token_type in list(tokenTypes.keys()): token_type = tokenTypes[token_type] if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]): if token["name"] in self.allowed_elements: return self.allowed_token(token, token_type) else: return self.disallowed_token(token, token_type) elif token_type == tokenTypes["Comment"]: pass else: return token def allowed_token(self, token, token_type): if "data" in token: attrs = dict([(name, val) for name, val in token["data"][::-1] if name in self.allowed_attributes]) for attr in self.attr_val_is_uri: if attr not in attrs: continue val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '', unescape(attrs[attr])).lower() # remove replacement characters from unescaped characters val_unescaped = val_unescaped.replace("\ufffd", "") if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and (val_unescaped.split(':')[0] not in self.allowed_protocols)): del attrs[attr] for attr in self.svg_attr_val_allows_ref: if attr in attrs: attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(attrs[attr])) if (token["name"] in self.svg_allow_local_href and 'xlink:href' in attrs and re.search('^\s*[^#\s].*', attrs['xlink:href'])): del attrs['xlink:href'] if 'style' in attrs: attrs['style'] = self.sanitize_css(attrs['style']) token["data"] = [[name, val] for name, val in list(attrs.items())] return token def disallowed_token(self, token, token_type): if token_type == tokenTypes["EndTag"]: token["data"] = "</%s>" % token["name"] elif token["data"]: attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]]) token["data"] = "<%s%s>" % (token["name"], attrs) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" if token["type"] in list(tokenTypes.keys()): token["type"] = "Characters" else: token["type"] = tokenTypes["Characters"] del token["name"] return token def 
sanitize_css(self, style): # disallow urls style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style): if not value: continue if prop.lower() in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']: for keyword in value.split(): if not keyword in self.acceptable_css_keywords and \ not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): break else: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean) class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin): def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=False, lowercaseAttrName=False, parser=None): # Change case matching defaults as we only output lowercase html anyway # This solution doesn't seem ideal... HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet, lowercaseElementName, lowercaseAttrName, parser=parser) def __iter__(self): for token in HTMLTokenizer.__iter__(self): token = self.sanitize_token(token) if token: yield token
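The class above is used as a drop-in tokenizer; a minimal usage sketch (assuming the vendored libs/html5lib package is importable as html5lib, and using only the constructor and iteration defined above) is:

from html5lib.sanitizer import HTMLSanitizer

def sanitize_tokens(markup):
    # HTMLSanitizer subclasses HTMLTokenizer, so it accepts the same source
    # (a unicode string here) and is iterable; every token it yields has
    # already been filtered through sanitize_token() above.
    return list(HTMLSanitizer(markup))

if __name__ == '__main__':
    # Sample inputs taken from the sanitize_html examples in the comment above.
    for token in sanitize_tokens('<script> do_nasty_stuff() </script>'
                                 '<a href="javascript: sucker();">Click here for $100</a>'):
        print(token)

Disallowed elements such as script are not dropped but re-emitted as escaped Characters tokens, matching the behaviour described in the comment block before sanitize_token.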
size: 16,428
language: Python
extension: .py
total_lines: 239
avg_line_length: 49.811715
max_line_length: 146
alphanum_fraction: 0.4892
repo_name: CouchPotato/CouchPotatoServer
repo_stars: 3,869
repo_forks: 1,214
repo_open_issues: 1,266
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,560
file_name: inputstream.py
file_path: CouchPotato_CouchPotatoServer/libs/html5lib/inputstream.py
from __future__ import absolute_import, division, unicode_literals from six import text_type from six.moves import http_client import codecs import re from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase from .constants import encodings, ReparseException from . import utils from io import StringIO try: from io import BytesIO except ImportError: BytesIO = StringIO try: from io import BufferedIOBase except ImportError: class BufferedIOBase(object): pass # Non-unicode versions of constants for use in the pre-parser spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters]) asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters]) asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase]) spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"]) invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]") non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF]) ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]") # Cache for charsUntil() charsUntilRegEx = {} class BufferedStream(object): """Buffering for streams that do not have buffering of their own The buffer is implemented as a list of chunks on the assumption that joining many strings will be slow since it is O(n**2) """ def __init__(self, stream): self.stream = stream self.buffer = [] self.position = [-1, 0] # chunk number, offset def tell(self): pos = 0 for chunk in self.buffer[:self.position[0]]: pos += len(chunk) pos += self.position[1] return pos def seek(self, pos): assert pos <= self._bufferedBytes() offset = pos i = 0 while len(self.buffer[i]) < offset: offset -= len(self.buffer[i]) i += 1 self.position = [i, offset] def read(self, bytes): if not self.buffer: return self._readStream(bytes) elif (self.position[0] == len(self.buffer) and self.position[1] == len(self.buffer[-1])): return self._readStream(bytes) else: return self._readFromBuffer(bytes) def _bufferedBytes(self): return sum([len(item) for item in self.buffer]) def _readStream(self, bytes): data = self.stream.read(bytes) self.buffer.append(data) self.position[0] += 1 self.position[1] = len(data) return data def _readFromBuffer(self, bytes): remainingBytes = bytes rv = [] bufferIndex = self.position[0] bufferOffset = self.position[1] while bufferIndex < len(self.buffer) and remainingBytes != 0: assert remainingBytes > 0 bufferedData = self.buffer[bufferIndex] if remainingBytes <= len(bufferedData) - bufferOffset: bytesToRead = remainingBytes self.position = [bufferIndex, bufferOffset + bytesToRead] else: bytesToRead = len(bufferedData) - bufferOffset self.position = [bufferIndex, len(bufferedData)] bufferIndex += 1 rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead]) remainingBytes -= bytesToRead bufferOffset = 0 if remainingBytes: 
rv.append(self._readStream(remainingBytes)) return b"".join(rv) def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True): if isinstance(source, http_client.HTTPResponse): # Work around Python bug #20007: read(0) closes the connection. # http://bugs.python.org/issue20007 isUnicode = False elif hasattr(source, "read"): isUnicode = isinstance(source.read(0), text_type) else: isUnicode = isinstance(source, text_type) if isUnicode: if encoding is not None: raise TypeError("Cannot explicitly set an encoding with a unicode string") return HTMLUnicodeInputStream(source) else: return HTMLBinaryInputStream(source, encoding, parseMeta, chardet) class HTMLUnicodeInputStream(object): """Provides a unicode stream of characters to the HTMLTokenizer. This class takes care of character encoding and removing or replacing incorrect byte-sequences and also provides column and line tracking. """ _defaultChunkSize = 10240 def __init__(self, source): """Initialises the HTMLInputStream. HTMLInputStream(source, [encoding]) -> Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) parseMeta - Look for a <meta> element containing encoding information """ # Craziness if len("\U0010FFFF") == 1: self.reportCharacterErrors = self.characterErrorsUCS4 self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]") else: self.reportCharacterErrors = self.characterErrorsUCS2 self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])") # List of where new lines occur self.newLines = [0] self.charEncoding = ("utf-8", "certain") self.dataStream = self.openStream(source) self.reset() def reset(self): self.chunk = "" self.chunkSize = 0 self.chunkOffset = 0 self.errors = [] # number of (complete) lines in previous chunks self.prevNumLines = 0 # number of columns in the last line of the previous chunk self.prevNumCols = 0 # Deal with CR LF and surrogates split over chunk boundaries self._bufferedCharacter = None def openStream(self, source): """Produces a file object from source. source can be either a file object, local filename or a string. """ # Already a file object if hasattr(source, 'read'): stream = source else: stream = StringIO(source) return stream def _position(self, offset): chunk = self.chunk nLines = chunk.count('\n', 0, offset) positionLine = self.prevNumLines + nLines lastLinePos = chunk.rfind('\n', 0, offset) if lastLinePos == -1: positionColumn = self.prevNumCols + offset else: positionColumn = offset - (lastLinePos + 1) return (positionLine, positionColumn) def position(self): """Returns (line, col) of the current position in the stream.""" line, col = self._position(self.chunkOffset) return (line + 1, col) def char(self): """ Read one character from the stream or queue if available. Return EOF when EOF is reached. 
""" # Read a new chunk from the input stream if necessary if self.chunkOffset >= self.chunkSize: if not self.readChunk(): return EOF chunkOffset = self.chunkOffset char = self.chunk[chunkOffset] self.chunkOffset = chunkOffset + 1 return char def readChunk(self, chunkSize=None): if chunkSize is None: chunkSize = self._defaultChunkSize self.prevNumLines, self.prevNumCols = self._position(self.chunkSize) self.chunk = "" self.chunkSize = 0 self.chunkOffset = 0 data = self.dataStream.read(chunkSize) # Deal with CR LF and surrogates broken across chunks if self._bufferedCharacter: data = self._bufferedCharacter + data self._bufferedCharacter = None elif not data: # We have no more data, bye-bye stream return False if len(data) > 1: lastv = ord(data[-1]) if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF: self._bufferedCharacter = data[-1] data = data[:-1] self.reportCharacterErrors(data) # Replace invalid characters # Note U+0000 is dealt with in the tokenizer data = self.replaceCharactersRegexp.sub("\ufffd", data) data = data.replace("\r\n", "\n") data = data.replace("\r", "\n") self.chunk = data self.chunkSize = len(data) return True def characterErrorsUCS4(self, data): for i in range(len(invalid_unicode_re.findall(data))): self.errors.append("invalid-codepoint") def characterErrorsUCS2(self, data): # Someone picked the wrong compile option # You lose skip = False for match in invalid_unicode_re.finditer(data): if skip: continue codepoint = ord(match.group()) pos = match.start() # Pretty sure there should be endianness issues here if utils.isSurrogatePair(data[pos:pos + 2]): # We have a surrogate pair! char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2]) if char_val in non_bmp_invalid_codepoints: self.errors.append("invalid-codepoint") skip = True elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and pos == len(data) - 1): self.errors.append("invalid-codepoint") else: skip = False self.errors.append("invalid-codepoint") def charsUntil(self, characters, opposite=False): """ Returns a string of characters from the stream up to but not including any character in 'characters' or EOF. 'characters' must be a container that supports the 'in' method and iteration over its characters. """ # Use a cache of regexps to find the required characters try: chars = charsUntilRegEx[(characters, opposite)] except KeyError: if __debug__: for c in characters: assert(ord(c) < 128) regex = "".join(["\\x%02x" % ord(c) for c in characters]) if not opposite: regex = "^%s" % regex chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex) rv = [] while True: # Find the longest matching prefix m = chars.match(self.chunk, self.chunkOffset) if m is None: # If nothing matched, and it wasn't because we ran out of chunk, # then stop if self.chunkOffset != self.chunkSize: break else: end = m.end() # If not the whole chunk matched, return everything # up to the part that didn't match if end != self.chunkSize: rv.append(self.chunk[self.chunkOffset:end]) self.chunkOffset = end break # If the whole remainder of the chunk matched, # use it all and read the next chunk rv.append(self.chunk[self.chunkOffset:]) if not self.readChunk(): # Reached EOF break r = "".join(rv) return r def unget(self, char): # Only one character is allowed to be ungotten at once - it must # be consumed again before any further call to unget if char is not None: if self.chunkOffset == 0: # unget is called quite rarely, so it's a good idea to do # more work here if it saves a bit of work in the frequently # called char and charsUntil. 
# So, just prepend the ungotten character onto the current # chunk: self.chunk = char + self.chunk self.chunkSize += 1 else: self.chunkOffset -= 1 assert self.chunk[self.chunkOffset] == char class HTMLBinaryInputStream(HTMLUnicodeInputStream): """Provides a unicode stream of characters to the HTMLTokenizer. This class takes care of character encoding and removing or replacing incorrect byte-sequences and also provides column and line tracking. """ def __init__(self, source, encoding=None, parseMeta=True, chardet=True): """Initialises the HTMLInputStream. HTMLInputStream(source, [encoding]) -> Normalized stream from source for use by html5lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) parseMeta - Look for a <meta> element containing encoding information """ # Raw Stream - for unicode objects this will encode to utf-8 and set # self.charEncoding as appropriate self.rawStream = self.openStream(source) HTMLUnicodeInputStream.__init__(self, self.rawStream) self.charEncoding = (codecName(encoding), "certain") # Encoding Information # Number of bytes to use when looking for a meta element with # encoding information self.numBytesMeta = 512 # Number of bytes to use when using detecting encoding using chardet self.numBytesChardet = 100 # Encoding to use if no other information can be found self.defaultEncoding = "windows-1252" # Detect encoding iff no explicit "transport level" encoding is supplied if (self.charEncoding[0] is None): self.charEncoding = self.detectEncoding(parseMeta, chardet) # Call superclass self.reset() def reset(self): self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream, 'replace') HTMLUnicodeInputStream.reset(self) def openStream(self, source): """Produces a file object from source. source can be either a file object, local filename or a string. 
""" # Already a file object if hasattr(source, 'read'): stream = source else: stream = BytesIO(source) try: stream.seek(stream.tell()) except: stream = BufferedStream(stream) return stream def detectEncoding(self, parseMeta=True, chardet=True): # First look for a BOM # This will also read past the BOM if present encoding = self.detectBOM() confidence = "certain" # If there is no BOM need to look for meta elements with encoding # information if encoding is None and parseMeta: encoding = self.detectEncodingMeta() confidence = "tentative" # Guess with chardet, if avaliable if encoding is None and chardet: confidence = "tentative" try: try: from charade.universaldetector import UniversalDetector except ImportError: from chardet.universaldetector import UniversalDetector buffers = [] detector = UniversalDetector() while not detector.done: buffer = self.rawStream.read(self.numBytesChardet) assert isinstance(buffer, bytes) if not buffer: break buffers.append(buffer) detector.feed(buffer) detector.close() encoding = detector.result['encoding'] self.rawStream.seek(0) except ImportError: pass # If all else fails use the default encoding if encoding is None: confidence = "tentative" encoding = self.defaultEncoding # Substitute for equivalent encodings: encodingSub = {"iso-8859-1": "windows-1252"} if encoding.lower() in encodingSub: encoding = encodingSub[encoding.lower()] return encoding, confidence def changeEncoding(self, newEncoding): assert self.charEncoding[1] != "certain" newEncoding = codecName(newEncoding) if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"): newEncoding = "utf-8" if newEncoding is None: return elif newEncoding == self.charEncoding[0]: self.charEncoding = (self.charEncoding[0], "certain") else: self.rawStream.seek(0) self.reset() self.charEncoding = (newEncoding, "certain") raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding)) def detectBOM(self): """Attempts to detect at BOM at the start of the stream. 
If an encoding can be determined from the BOM return the name of the encoding otherwise return None""" bomDict = { codecs.BOM_UTF8: 'utf-8', codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be' } # Go to beginning of file and read in 4 bytes string = self.rawStream.read(4) assert isinstance(string, bytes) # Try detecting the BOM using bytes from the string encoding = bomDict.get(string[:3]) # UTF-8 seek = 3 if not encoding: # Need to detect UTF-32 before UTF-16 encoding = bomDict.get(string) # UTF-32 seek = 4 if not encoding: encoding = bomDict.get(string[:2]) # UTF-16 seek = 2 # Set the read position past the BOM if one was found, otherwise # set it to the start of the stream self.rawStream.seek(encoding and seek or 0) return encoding def detectEncodingMeta(self): """Report the encoding declared by the meta element """ buffer = self.rawStream.read(self.numBytesMeta) assert isinstance(buffer, bytes) parser = EncodingParser(buffer) self.rawStream.seek(0) encoding = parser.getEncoding() if encoding in ("utf-16", "utf-16-be", "utf-16-le"): encoding = "utf-8" return encoding class EncodingBytes(bytes): """String-like object with an associated position and various extra methods If the position is ever greater than the string length then an exception is raised""" def __new__(self, value): assert isinstance(value, bytes) return bytes.__new__(self, value.lower()) def __init__(self, value): self._position = -1 def __iter__(self): return self def __next__(self): p = self._position = self._position + 1 if p >= len(self): raise StopIteration elif p < 0: raise TypeError return self[p:p + 1] def next(self): # Py2 compat return self.__next__() def previous(self): p = self._position if p >= len(self): raise StopIteration elif p < 0: raise TypeError self._position = p = p - 1 return self[p:p + 1] def setPosition(self, position): if self._position >= len(self): raise StopIteration self._position = position def getPosition(self): if self._position >= len(self): raise StopIteration if self._position >= 0: return self._position else: return None position = property(getPosition, setPosition) def getCurrentByte(self): return self[self.position:self.position + 1] currentByte = property(getCurrentByte) def skip(self, chars=spaceCharactersBytes): """Skip past a list of characters""" p = self.position # use property for the error-checking while p < len(self): c = self[p:p + 1] if c not in chars: self._position = p return c p += 1 self._position = p return None def skipUntil(self, chars): p = self.position while p < len(self): c = self[p:p + 1] if c in chars: self._position = p return c p += 1 self._position = p return None def matchBytes(self, bytes): """Look for a sequence of bytes at the start of a string. If the bytes are found return True and advance the position to the byte after the match. Otherwise return False and leave the position alone""" p = self.position data = self[p:p + len(bytes)] rv = data.startswith(bytes) if rv: self.position += len(bytes) return rv def jumpTo(self, bytes): """Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match""" newPosition = self[self.position:].find(bytes) if newPosition > -1: # XXX: This is ugly, but I can't see a nicer way to fix this. 
if self._position == -1: self._position = 0 self._position += (newPosition + len(bytes) - 1) return True else: raise StopIteration class EncodingParser(object): """Mini parser for detecting character encoding from meta elements""" def __init__(self, data): """string - the data to work on for encoding detection""" self.data = EncodingBytes(data) self.encoding = None def getEncoding(self): methodDispatch = ( (b"<!--", self.handleComment), (b"<meta", self.handleMeta), (b"</", self.handlePossibleEndTag), (b"<!", self.handleOther), (b"<?", self.handleOther), (b"<", self.handlePossibleStartTag)) for byte in self.data: keepParsing = True for key, method in methodDispatch: if self.data.matchBytes(key): try: keepParsing = method() break except StopIteration: keepParsing = False break if not keepParsing: break return self.encoding def handleComment(self): """Skip over comments""" return self.data.jumpTo(b"-->") def handleMeta(self): if self.data.currentByte not in spaceCharactersBytes: # if we have <meta not followed by a space so just keep going return True # We have a valid meta element we want to search for attributes hasPragma = False pendingEncoding = None while True: # Try to find the next attribute after the current position attr = self.getAttribute() if attr is None: return True else: if attr[0] == b"http-equiv": hasPragma = attr[1] == b"content-type" if hasPragma and pendingEncoding is not None: self.encoding = pendingEncoding return False elif attr[0] == b"charset": tentativeEncoding = attr[1] codec = codecName(tentativeEncoding) if codec is not None: self.encoding = codec return False elif attr[0] == b"content": contentParser = ContentAttrParser(EncodingBytes(attr[1])) tentativeEncoding = contentParser.parse() if tentativeEncoding is not None: codec = codecName(tentativeEncoding) if codec is not None: if hasPragma: self.encoding = codec return False else: pendingEncoding = codec def handlePossibleStartTag(self): return self.handlePossibleTag(False) def handlePossibleEndTag(self): next(self.data) return self.handlePossibleTag(True) def handlePossibleTag(self, endTag): data = self.data if data.currentByte not in asciiLettersBytes: # If the next byte is not an ascii letter either ignore this # fragment (possible start tag case) or treat it according to # handleOther if endTag: data.previous() self.handleOther() return True c = data.skipUntil(spacesAngleBrackets) if c == b"<": # return to the first step in the overall "two step" algorithm # reprocessing the < byte data.previous() else: # Read all attributes attr = self.getAttribute() while attr is not None: attr = self.getAttribute() return True def handleOther(self): return self.data.jumpTo(b">") def getAttribute(self): """Return a name,value pair for the next attribute in the stream, if one is found, or None""" data = self.data # Step 1 (skip chars) c = data.skip(spaceCharactersBytes | frozenset([b"/"])) assert c is None or len(c) == 1 # Step 2 if c in (b">", None): return None # Step 3 attrName = [] attrValue = [] # Step 4 attribute name while True: if c == b"=" and attrName: break elif c in spaceCharactersBytes: # Step 6! 
c = data.skip() break elif c in (b"/", b">"): return b"".join(attrName), b"" elif c in asciiUppercaseBytes: attrName.append(c.lower()) elif c is None: return None else: attrName.append(c) # Step 5 c = next(data) # Step 7 if c != b"=": data.previous() return b"".join(attrName), b"" # Step 8 next(data) # Step 9 c = data.skip() # Step 10 if c in (b"'", b'"'): # 10.1 quoteChar = c while True: # 10.2 c = next(data) # 10.3 if c == quoteChar: next(data) return b"".join(attrName), b"".join(attrValue) # 10.4 elif c in asciiUppercaseBytes: attrValue.append(c.lower()) # 10.5 else: attrValue.append(c) elif c == b">": return b"".join(attrName), b"" elif c in asciiUppercaseBytes: attrValue.append(c.lower()) elif c is None: return None else: attrValue.append(c) # Step 11 while True: c = next(data) if c in spacesAngleBrackets: return b"".join(attrName), b"".join(attrValue) elif c in asciiUppercaseBytes: attrValue.append(c.lower()) elif c is None: return None else: attrValue.append(c) class ContentAttrParser(object): def __init__(self, data): assert isinstance(data, bytes) self.data = data def parse(self): try: # Check if the attr name is charset # otherwise return self.data.jumpTo(b"charset") self.data.position += 1 self.data.skip() if not self.data.currentByte == b"=": # If there is no = sign keep looking for attrs return None self.data.position += 1 self.data.skip() # Look for an encoding between matching quote marks if self.data.currentByte in (b'"', b"'"): quoteMark = self.data.currentByte self.data.position += 1 oldPosition = self.data.position if self.data.jumpTo(quoteMark): return self.data[oldPosition:self.data.position] else: return None else: # Unquoted value oldPosition = self.data.position try: self.data.skipUntil(spaceCharactersBytes) return self.data[oldPosition:self.data.position] except StopIteration: # Return the whole remaining value return self.data[oldPosition:] except StopIteration: return None def codecName(encoding): """Return the python codec name corresponding to an encoding or None if the string doesn't correspond to a valid encoding.""" if isinstance(encoding, bytes): try: encoding = encoding.decode("ascii") except UnicodeDecodeError: return None if encoding: canonicalName = ascii_punctuation_re.sub("", encoding).lower() return encodings.get(canonicalName, None) else: return None
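As a rough illustration of the encoding-detection path implemented above (a sketch only; the exact codec name comes from the encodings table in constants.py, so treat the values in the comments as indicative):

from html5lib.inputstream import HTMLInputStream

# Bytes in, with no transport-level encoding given: the factory returns an
# HTMLBinaryInputStream, which scans the first numBytesMeta bytes with
# EncodingParser and picks up the <meta charset> declaration before decoding.
stream = HTMLInputStream(b'<meta charset="windows-1252"><p>caf\xe9</p>')
print(stream.charEncoding)   # e.g. ('windows-1252', 'tentative')
print(stream.char())         # '<' (characters now come back decoded)

Passing a unicode string instead returns an HTMLUnicodeInputStream, and supplying an explicit encoding together with a unicode string raises TypeError, as enforced in HTMLInputStream.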
size: 30,855
language: Python
extension: .py
total_lines: 744
avg_line_length: 29.946237
max_line_length: 440
alphanum_fraction: 0.579399
repo_name: CouchPotato/CouchPotatoServer
repo_stars: 3,869
repo_forks: 1,214
repo_open_issues: 1,266
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,561
file_name: constants.py
file_path: CouchPotato_CouchPotatoServer/libs/html5lib/constants.py
from __future__ import absolute_import, division, unicode_literals import string import gettext _ = gettext.gettext EOF = None E = { "null-character": _("Null character in input stream, replaced with U+FFFD."), "invalid-codepoint": _("Invalid codepoint in stream."), "incorrectly-placed-solidus": _("Solidus (/) incorrectly placed in tag."), "incorrect-cr-newline-entity": _("Incorrect CR newline entity, replaced with LF."), "illegal-windows-1252-entity": _("Entity used with illegal number (windows-1252 reference)."), "cant-convert-numeric-entity": _("Numeric entity couldn't be converted to character " "(codepoint U+%(charAsInt)08x)."), "illegal-codepoint-for-numeric-entity": _("Numeric entity represents an illegal codepoint: " "U+%(charAsInt)08x."), "numeric-entity-without-semicolon": _("Numeric entity didn't end with ';'."), "expected-numeric-entity-but-got-eof": _("Numeric entity expected. Got end of file instead."), "expected-numeric-entity": _("Numeric entity expected but none found."), "named-entity-without-semicolon": _("Named entity didn't end with ';'."), "expected-named-entity": _("Named entity expected. Got none."), "attributes-in-end-tag": _("End tag contains unexpected attributes."), 'self-closing-flag-on-end-tag': _("End tag contains unexpected self-closing flag."), "expected-tag-name-but-got-right-bracket": _("Expected tag name. Got '>' instead."), "expected-tag-name-but-got-question-mark": _("Expected tag name. Got '?' instead. (HTML doesn't " "support processing instructions.)"), "expected-tag-name": _("Expected tag name. Got something else instead"), "expected-closing-tag-but-got-right-bracket": _("Expected closing tag. Got '>' instead. Ignoring '</>'."), "expected-closing-tag-but-got-eof": _("Expected closing tag. Unexpected end of file."), "expected-closing-tag-but-got-char": _("Expected closing tag. Unexpected character '%(data)s' found."), "eof-in-tag-name": _("Unexpected end of file in the tag name."), "expected-attribute-name-but-got-eof": _("Unexpected end of file. Expected attribute name instead."), "eof-in-attribute-name": _("Unexpected end of file in attribute name."), "invalid-character-in-attribute-name": _("Invalid character in attribute name"), "duplicate-attribute": _("Dropped duplicate attribute on tag."), "expected-end-of-tag-name-but-got-eof": _("Unexpected end of file. Expected = or end of tag."), "expected-attribute-value-but-got-eof": _("Unexpected end of file. Expected attribute value."), "expected-attribute-value-but-got-right-bracket": _("Expected attribute value. Got '>' instead."), 'equals-in-unquoted-attribute-value': _("Unexpected = in unquoted attribute"), 'unexpected-character-in-unquoted-attribute-value': _("Unexpected character in unquoted attribute"), "invalid-character-after-attribute-name": _("Unexpected character after attribute name."), "unexpected-character-after-attribute-value": _("Unexpected character after attribute value."), "eof-in-attribute-value-double-quote": _("Unexpected end of file in attribute value (\")."), "eof-in-attribute-value-single-quote": _("Unexpected end of file in attribute value (')."), "eof-in-attribute-value-no-quotes": _("Unexpected end of file in attribute value."), "unexpected-EOF-after-solidus-in-tag": _("Unexpected end of file in tag. Expected >"), "unexpected-character-after-solidus-in-tag": _("Unexpected character after / in tag. Expected >"), "expected-dashes-or-doctype": _("Expected '--' or 'DOCTYPE'. Not found."), "unexpected-bang-after-double-dash-in-comment": _("Unexpected ! 
after -- in comment"), "unexpected-space-after-double-dash-in-comment": _("Unexpected space after -- in comment"), "incorrect-comment": _("Incorrect comment."), "eof-in-comment": _("Unexpected end of file in comment."), "eof-in-comment-end-dash": _("Unexpected end of file in comment (-)"), "unexpected-dash-after-double-dash-in-comment": _("Unexpected '-' after '--' found in comment."), "eof-in-comment-double-dash": _("Unexpected end of file in comment (--)."), "eof-in-comment-end-space-state": _("Unexpected end of file in comment."), "eof-in-comment-end-bang-state": _("Unexpected end of file in comment."), "unexpected-char-in-comment": _("Unexpected character in comment found."), "need-space-after-doctype": _("No space after literal string 'DOCTYPE'."), "expected-doctype-name-but-got-right-bracket": _("Unexpected > character. Expected DOCTYPE name."), "expected-doctype-name-but-got-eof": _("Unexpected end of file. Expected DOCTYPE name."), "eof-in-doctype-name": _("Unexpected end of file in DOCTYPE name."), "eof-in-doctype": _("Unexpected end of file in DOCTYPE."), "expected-space-or-right-bracket-in-doctype": _("Expected space or '>'. Got '%(data)s'"), "unexpected-end-of-doctype": _("Unexpected end of DOCTYPE."), "unexpected-char-in-doctype": _("Unexpected character in DOCTYPE."), "eof-in-innerhtml": _("XXX innerHTML EOF"), "unexpected-doctype": _("Unexpected DOCTYPE. Ignored."), "non-html-root": _("html needs to be the first start tag."), "expected-doctype-but-got-eof": _("Unexpected End of file. Expected DOCTYPE."), "unknown-doctype": _("Erroneous DOCTYPE."), "expected-doctype-but-got-chars": _("Unexpected non-space characters. Expected DOCTYPE."), "expected-doctype-but-got-start-tag": _("Unexpected start tag (%(name)s). Expected DOCTYPE."), "expected-doctype-but-got-end-tag": _("Unexpected end tag (%(name)s). Expected DOCTYPE."), "end-tag-after-implied-root": _("Unexpected end tag (%(name)s) after the (implied) root element."), "expected-named-closing-tag-but-got-eof": _("Unexpected end of file. Expected end tag (%(name)s)."), "two-heads-are-not-better-than-one": _("Unexpected start tag head in existing head. Ignored."), "unexpected-end-tag": _("Unexpected end tag (%(name)s). Ignored."), "unexpected-start-tag-out-of-my-head": _("Unexpected start tag (%(name)s) that can be in head. Moved."), "unexpected-start-tag": _("Unexpected start tag (%(name)s)."), "missing-end-tag": _("Missing end tag (%(name)s)."), "missing-end-tags": _("Missing end tags (%(name)s)."), "unexpected-start-tag-implies-end-tag": _("Unexpected start tag (%(startName)s) " "implies end tag (%(endName)s)."), "unexpected-start-tag-treated-as": _("Unexpected start tag (%(originalName)s). Treated as %(newName)s."), "deprecated-tag": _("Unexpected start tag %(name)s. Don't use it!"), "unexpected-start-tag-ignored": _("Unexpected start tag %(name)s. Ignored."), "expected-one-end-tag-but-got-another": _("Unexpected end tag (%(gotName)s). " "Missing end tag (%(expectedName)s)."), "end-tag-too-early": _("End tag (%(name)s) seen too early. Expected other end tag."), "end-tag-too-early-named": _("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."), "end-tag-too-early-ignored": _("End tag (%(name)s) seen too early. 
Ignored."), "adoption-agency-1.1": _("End tag (%(name)s) violates step 1, " "paragraph 1 of the adoption agency algorithm."), "adoption-agency-1.2": _("End tag (%(name)s) violates step 1, " "paragraph 2 of the adoption agency algorithm."), "adoption-agency-1.3": _("End tag (%(name)s) violates step 1, " "paragraph 3 of the adoption agency algorithm."), "adoption-agency-4.4": _("End tag (%(name)s) violates step 4, " "paragraph 4 of the adoption agency algorithm."), "unexpected-end-tag-treated-as": _("Unexpected end tag (%(originalName)s). Treated as %(newName)s."), "no-end-tag": _("This element (%(name)s) has no end tag."), "unexpected-implied-end-tag-in-table": _("Unexpected implied end tag (%(name)s) in the table phase."), "unexpected-implied-end-tag-in-table-body": _("Unexpected implied end tag (%(name)s) in the table body phase."), "unexpected-char-implies-table-voodoo": _("Unexpected non-space characters in " "table context caused voodoo mode."), "unexpected-hidden-input-in-table": _("Unexpected input with type hidden in table context."), "unexpected-form-in-table": _("Unexpected form in table context."), "unexpected-start-tag-implies-table-voodoo": _("Unexpected start tag (%(name)s) in " "table context caused voodoo mode."), "unexpected-end-tag-implies-table-voodoo": _("Unexpected end tag (%(name)s) in " "table context caused voodoo mode."), "unexpected-cell-in-table-body": _("Unexpected table cell start tag (%(name)s) " "in the table body phase."), "unexpected-cell-end-tag": _("Got table cell end tag (%(name)s) " "while required end tags are missing."), "unexpected-end-tag-in-table-body": _("Unexpected end tag (%(name)s) in the table body phase. Ignored."), "unexpected-implied-end-tag-in-table-row": _("Unexpected implied end tag (%(name)s) in the table row phase."), "unexpected-end-tag-in-table-row": _("Unexpected end tag (%(name)s) in the table row phase. Ignored."), "unexpected-select-in-select": _("Unexpected select start tag in the select phase " "treated as select end tag."), "unexpected-input-in-select": _("Unexpected input start tag in the select phase."), "unexpected-start-tag-in-select": _("Unexpected start tag token (%(name)s in the select phase. " "Ignored."), "unexpected-end-tag-in-select": _("Unexpected end tag (%(name)s) in the select phase. Ignored."), "unexpected-table-element-start-tag-in-select-in-table": _("Unexpected table element start tag (%(name)s) in the select in table phase."), "unexpected-table-element-end-tag-in-select-in-table": _("Unexpected table element end tag (%(name)s) in the select in table phase."), "unexpected-char-after-body": _("Unexpected non-space characters in the after body phase."), "unexpected-start-tag-after-body": _("Unexpected start tag token (%(name)s)" " in the after body phase."), "unexpected-end-tag-after-body": _("Unexpected end tag token (%(name)s)" " in the after body phase."), "unexpected-char-in-frameset": _("Unexpected characters in the frameset phase. Characters ignored."), "unexpected-start-tag-in-frameset": _("Unexpected start tag token (%(name)s)" " in the frameset phase. Ignored."), "unexpected-frameset-in-frameset-innerhtml": _("Unexpected end tag token (frameset) " "in the frameset phase (innerHTML)."), "unexpected-end-tag-in-frameset": _("Unexpected end tag token (%(name)s)" " in the frameset phase. Ignored."), "unexpected-char-after-frameset": _("Unexpected non-space characters in the " "after frameset phase. 
Ignored."), "unexpected-start-tag-after-frameset": _("Unexpected start tag (%(name)s)" " in the after frameset phase. Ignored."), "unexpected-end-tag-after-frameset": _("Unexpected end tag (%(name)s)" " in the after frameset phase. Ignored."), "unexpected-end-tag-after-body-innerhtml": _("Unexpected end tag after body(innerHtml)"), "expected-eof-but-got-char": _("Unexpected non-space characters. Expected end of file."), "expected-eof-but-got-start-tag": _("Unexpected start tag (%(name)s)" ". Expected end of file."), "expected-eof-but-got-end-tag": _("Unexpected end tag (%(name)s)" ". Expected end of file."), "eof-in-table": _("Unexpected end of file. Expected table content."), "eof-in-select": _("Unexpected end of file. Expected select content."), "eof-in-frameset": _("Unexpected end of file. Expected frameset content."), "eof-in-script-in-script": _("Unexpected end of file. Expected script content."), "eof-in-foreign-lands": _("Unexpected end of file. Expected foreign content"), "non-void-element-with-trailing-solidus": _("Trailing solidus not allowed on element %(name)s"), "unexpected-html-element-in-foreign-content": _("Element %(name)s not allowed in a non-html context"), "unexpected-end-tag-before-html": _("Unexpected end tag (%(name)s) before html."), "XXX-undefined-error": _("Undefined error (this sucks and should be fixed)"), } namespaces = { "html": "http://www.w3.org/1999/xhtml", "mathml": "http://www.w3.org/1998/Math/MathML", "svg": "http://www.w3.org/2000/svg", "xlink": "http://www.w3.org/1999/xlink", "xml": "http://www.w3.org/XML/1998/namespace", "xmlns": "http://www.w3.org/2000/xmlns/" } scopingElements = frozenset(( (namespaces["html"], "applet"), (namespaces["html"], "caption"), (namespaces["html"], "html"), (namespaces["html"], "marquee"), (namespaces["html"], "object"), (namespaces["html"], "table"), (namespaces["html"], "td"), (namespaces["html"], "th"), (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext"), (namespaces["mathml"], "annotation-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title"), )) formattingElements = frozenset(( (namespaces["html"], "a"), (namespaces["html"], "b"), (namespaces["html"], "big"), (namespaces["html"], "code"), (namespaces["html"], "em"), (namespaces["html"], "font"), (namespaces["html"], "i"), (namespaces["html"], "nobr"), (namespaces["html"], "s"), (namespaces["html"], "small"), (namespaces["html"], "strike"), (namespaces["html"], "strong"), (namespaces["html"], "tt"), (namespaces["html"], "u") )) specialElements = frozenset(( (namespaces["html"], "address"), (namespaces["html"], "applet"), (namespaces["html"], "area"), (namespaces["html"], "article"), (namespaces["html"], "aside"), (namespaces["html"], "base"), (namespaces["html"], "basefont"), (namespaces["html"], "bgsound"), (namespaces["html"], "blockquote"), (namespaces["html"], "body"), (namespaces["html"], "br"), (namespaces["html"], "button"), (namespaces["html"], "caption"), (namespaces["html"], "center"), (namespaces["html"], "col"), (namespaces["html"], "colgroup"), (namespaces["html"], "command"), (namespaces["html"], "dd"), (namespaces["html"], "details"), (namespaces["html"], "dir"), (namespaces["html"], "div"), (namespaces["html"], "dl"), (namespaces["html"], "dt"), (namespaces["html"], "embed"), (namespaces["html"], "fieldset"), (namespaces["html"], "figure"), (namespaces["html"], "footer"), (namespaces["html"], "form"), 
(namespaces["html"], "frame"), (namespaces["html"], "frameset"), (namespaces["html"], "h1"), (namespaces["html"], "h2"), (namespaces["html"], "h3"), (namespaces["html"], "h4"), (namespaces["html"], "h5"), (namespaces["html"], "h6"), (namespaces["html"], "head"), (namespaces["html"], "header"), (namespaces["html"], "hr"), (namespaces["html"], "html"), (namespaces["html"], "iframe"), # Note that image is commented out in the spec as "this isn't an # element that can end up on the stack, so it doesn't matter," (namespaces["html"], "image"), (namespaces["html"], "img"), (namespaces["html"], "input"), (namespaces["html"], "isindex"), (namespaces["html"], "li"), (namespaces["html"], "link"), (namespaces["html"], "listing"), (namespaces["html"], "marquee"), (namespaces["html"], "menu"), (namespaces["html"], "meta"), (namespaces["html"], "nav"), (namespaces["html"], "noembed"), (namespaces["html"], "noframes"), (namespaces["html"], "noscript"), (namespaces["html"], "object"), (namespaces["html"], "ol"), (namespaces["html"], "p"), (namespaces["html"], "param"), (namespaces["html"], "plaintext"), (namespaces["html"], "pre"), (namespaces["html"], "script"), (namespaces["html"], "section"), (namespaces["html"], "select"), (namespaces["html"], "style"), (namespaces["html"], "table"), (namespaces["html"], "tbody"), (namespaces["html"], "td"), (namespaces["html"], "textarea"), (namespaces["html"], "tfoot"), (namespaces["html"], "th"), (namespaces["html"], "thead"), (namespaces["html"], "title"), (namespaces["html"], "tr"), (namespaces["html"], "ul"), (namespaces["html"], "wbr"), (namespaces["html"], "xmp"), (namespaces["svg"], "foreignObject") )) htmlIntegrationPointElements = frozenset(( (namespaces["mathml"], "annotaion-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title") )) mathmlTextIntegrationPointElements = frozenset(( (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext") )) adjustForeignAttributes = { "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), "xlink:href": ("xlink", "href", namespaces["xlink"]), "xlink:role": ("xlink", "role", namespaces["xlink"]), "xlink:show": ("xlink", "show", namespaces["xlink"]), "xlink:title": ("xlink", "title", namespaces["xlink"]), "xlink:type": ("xlink", "type", namespaces["xlink"]), "xml:base": ("xml", "base", namespaces["xml"]), "xml:lang": ("xml", "lang", namespaces["xml"]), "xml:space": ("xml", "space", namespaces["xml"]), "xmlns": (None, "xmlns", namespaces["xmlns"]), "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) } unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in adjustForeignAttributes.items()]) spaceCharacters = frozenset(( "\t", "\n", "\u000C", " ", "\r" )) tableInsertModeElements = frozenset(( "table", "tbody", "tfoot", "thead", "tr" )) asciiLowercase = frozenset(string.ascii_lowercase) asciiUppercase = frozenset(string.ascii_uppercase) asciiLetters = frozenset(string.ascii_letters) digits = frozenset(string.digits) hexDigits = frozenset(string.hexdigits) asciiUpper2Lower = dict([(ord(c), ord(c.lower())) for c in string.ascii_uppercase]) # Heading elements need to be ordered headingElements = ( "h1", "h2", "h3", "h4", "h5", "h6" ) voidElements = frozenset(( "base", "command", "event-source", "link", "meta", "hr", "br", "img", "embed", "param", "area", "col", "input", "source", "track" )) 
cdataElements = frozenset(('title', 'textarea')) rcdataElements = frozenset(( 'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript' )) booleanAttributes = { "": frozenset(("irrelevant",)), "style": frozenset(("scoped",)), "img": frozenset(("ismap",)), "audio": frozenset(("autoplay", "controls")), "video": frozenset(("autoplay", "controls")), "script": frozenset(("defer", "async")), "details": frozenset(("open",)), "datagrid": frozenset(("multiple", "disabled")), "command": frozenset(("hidden", "disabled", "checked", "default")), "hr": frozenset(("noshade")), "menu": frozenset(("autosubmit",)), "fieldset": frozenset(("disabled", "readonly")), "option": frozenset(("disabled", "readonly", "selected")), "optgroup": frozenset(("disabled", "readonly")), "button": frozenset(("disabled", "autofocus")), "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")), "select": frozenset(("disabled", "readonly", "autofocus", "multiple")), "output": frozenset(("disabled", "readonly")), } # entitiesWindows1252 has to be _ordered_ and needs to have an index. It # therefore can't be a frozenset. entitiesWindows1252 = ( 8364, # 0x80 0x20AC EURO SIGN 65533, # 0x81 UNDEFINED 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS 8224, # 0x86 0x2020 DAGGER 8225, # 0x87 0x2021 DOUBLE DAGGER 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT 8240, # 0x89 0x2030 PER MILLE SIGN 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE 65533, # 0x8D UNDEFINED 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON 65533, # 0x8F UNDEFINED 65533, # 0x90 UNDEFINED 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK 8226, # 0x95 0x2022 BULLET 8211, # 0x96 0x2013 EN DASH 8212, # 0x97 0x2014 EM DASH 732, # 0x98 0x02DC SMALL TILDE 8482, # 0x99 0x2122 TRADE MARK SIGN 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE 65533, # 0x9D UNDEFINED 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS ) xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;')) entities = { "AElig": "\xc6", "AElig;": "\xc6", "AMP": "&", "AMP;": "&", "Aacute": "\xc1", "Aacute;": "\xc1", "Abreve;": "\u0102", "Acirc": "\xc2", "Acirc;": "\xc2", "Acy;": "\u0410", "Afr;": "\U0001d504", "Agrave": "\xc0", "Agrave;": "\xc0", "Alpha;": "\u0391", "Amacr;": "\u0100", "And;": "\u2a53", "Aogon;": "\u0104", "Aopf;": "\U0001d538", "ApplyFunction;": "\u2061", "Aring": "\xc5", "Aring;": "\xc5", "Ascr;": "\U0001d49c", "Assign;": "\u2254", "Atilde": "\xc3", "Atilde;": "\xc3", "Auml": "\xc4", "Auml;": "\xc4", "Backslash;": "\u2216", "Barv;": "\u2ae7", "Barwed;": "\u2306", "Bcy;": "\u0411", "Because;": "\u2235", "Bernoullis;": "\u212c", "Beta;": "\u0392", "Bfr;": "\U0001d505", "Bopf;": "\U0001d539", "Breve;": "\u02d8", "Bscr;": "\u212c", "Bumpeq;": "\u224e", "CHcy;": "\u0427", "COPY": "\xa9", "COPY;": "\xa9", "Cacute;": "\u0106", "Cap;": "\u22d2", "CapitalDifferentialD;": "\u2145", "Cayleys;": "\u212d", "Ccaron;": "\u010c", "Ccedil": "\xc7", "Ccedil;": "\xc7", "Ccirc;": "\u0108", "Cconint;": 
"\u2230", "Cdot;": "\u010a", "Cedilla;": "\xb8", "CenterDot;": "\xb7", "Cfr;": "\u212d", "Chi;": "\u03a7", "CircleDot;": "\u2299", "CircleMinus;": "\u2296", "CirclePlus;": "\u2295", "CircleTimes;": "\u2297", "ClockwiseContourIntegral;": "\u2232", "CloseCurlyDoubleQuote;": "\u201d", "CloseCurlyQuote;": "\u2019", "Colon;": "\u2237", "Colone;": "\u2a74", "Congruent;": "\u2261", "Conint;": "\u222f", "ContourIntegral;": "\u222e", "Copf;": "\u2102", "Coproduct;": "\u2210", "CounterClockwiseContourIntegral;": "\u2233", "Cross;": "\u2a2f", "Cscr;": "\U0001d49e", "Cup;": "\u22d3", "CupCap;": "\u224d", "DD;": "\u2145", "DDotrahd;": "\u2911", "DJcy;": "\u0402", "DScy;": "\u0405", "DZcy;": "\u040f", "Dagger;": "\u2021", "Darr;": "\u21a1", "Dashv;": "\u2ae4", "Dcaron;": "\u010e", "Dcy;": "\u0414", "Del;": "\u2207", "Delta;": "\u0394", "Dfr;": "\U0001d507", "DiacriticalAcute;": "\xb4", "DiacriticalDot;": "\u02d9", "DiacriticalDoubleAcute;": "\u02dd", "DiacriticalGrave;": "`", "DiacriticalTilde;": "\u02dc", "Diamond;": "\u22c4", "DifferentialD;": "\u2146", "Dopf;": "\U0001d53b", "Dot;": "\xa8", "DotDot;": "\u20dc", "DotEqual;": "\u2250", "DoubleContourIntegral;": "\u222f", "DoubleDot;": "\xa8", "DoubleDownArrow;": "\u21d3", "DoubleLeftArrow;": "\u21d0", "DoubleLeftRightArrow;": "\u21d4", "DoubleLeftTee;": "\u2ae4", "DoubleLongLeftArrow;": "\u27f8", "DoubleLongLeftRightArrow;": "\u27fa", "DoubleLongRightArrow;": "\u27f9", "DoubleRightArrow;": "\u21d2", "DoubleRightTee;": "\u22a8", "DoubleUpArrow;": "\u21d1", "DoubleUpDownArrow;": "\u21d5", "DoubleVerticalBar;": "\u2225", "DownArrow;": "\u2193", "DownArrowBar;": "\u2913", "DownArrowUpArrow;": "\u21f5", "DownBreve;": "\u0311", "DownLeftRightVector;": "\u2950", "DownLeftTeeVector;": "\u295e", "DownLeftVector;": "\u21bd", "DownLeftVectorBar;": "\u2956", "DownRightTeeVector;": "\u295f", "DownRightVector;": "\u21c1", "DownRightVectorBar;": "\u2957", "DownTee;": "\u22a4", "DownTeeArrow;": "\u21a7", "Downarrow;": "\u21d3", "Dscr;": "\U0001d49f", "Dstrok;": "\u0110", "ENG;": "\u014a", "ETH": "\xd0", "ETH;": "\xd0", "Eacute": "\xc9", "Eacute;": "\xc9", "Ecaron;": "\u011a", "Ecirc": "\xca", "Ecirc;": "\xca", "Ecy;": "\u042d", "Edot;": "\u0116", "Efr;": "\U0001d508", "Egrave": "\xc8", "Egrave;": "\xc8", "Element;": "\u2208", "Emacr;": "\u0112", "EmptySmallSquare;": "\u25fb", "EmptyVerySmallSquare;": "\u25ab", "Eogon;": "\u0118", "Eopf;": "\U0001d53c", "Epsilon;": "\u0395", "Equal;": "\u2a75", "EqualTilde;": "\u2242", "Equilibrium;": "\u21cc", "Escr;": "\u2130", "Esim;": "\u2a73", "Eta;": "\u0397", "Euml": "\xcb", "Euml;": "\xcb", "Exists;": "\u2203", "ExponentialE;": "\u2147", "Fcy;": "\u0424", "Ffr;": "\U0001d509", "FilledSmallSquare;": "\u25fc", "FilledVerySmallSquare;": "\u25aa", "Fopf;": "\U0001d53d", "ForAll;": "\u2200", "Fouriertrf;": "\u2131", "Fscr;": "\u2131", "GJcy;": "\u0403", "GT": ">", "GT;": ">", "Gamma;": "\u0393", "Gammad;": "\u03dc", "Gbreve;": "\u011e", "Gcedil;": "\u0122", "Gcirc;": "\u011c", "Gcy;": "\u0413", "Gdot;": "\u0120", "Gfr;": "\U0001d50a", "Gg;": "\u22d9", "Gopf;": "\U0001d53e", "GreaterEqual;": "\u2265", "GreaterEqualLess;": "\u22db", "GreaterFullEqual;": "\u2267", "GreaterGreater;": "\u2aa2", "GreaterLess;": "\u2277", "GreaterSlantEqual;": "\u2a7e", "GreaterTilde;": "\u2273", "Gscr;": "\U0001d4a2", "Gt;": "\u226b", "HARDcy;": "\u042a", "Hacek;": "\u02c7", "Hat;": "^", "Hcirc;": "\u0124", "Hfr;": "\u210c", "HilbertSpace;": "\u210b", "Hopf;": "\u210d", "HorizontalLine;": "\u2500", "Hscr;": "\u210b", "Hstrok;": "\u0126", "HumpDownHump;": 
"\u224e", "HumpEqual;": "\u224f", "IEcy;": "\u0415", "IJlig;": "\u0132", "IOcy;": "\u0401", "Iacute": "\xcd", "Iacute;": "\xcd", "Icirc": "\xce", "Icirc;": "\xce", "Icy;": "\u0418", "Idot;": "\u0130", "Ifr;": "\u2111", "Igrave": "\xcc", "Igrave;": "\xcc", "Im;": "\u2111", "Imacr;": "\u012a", "ImaginaryI;": "\u2148", "Implies;": "\u21d2", "Int;": "\u222c", "Integral;": "\u222b", "Intersection;": "\u22c2", "InvisibleComma;": "\u2063", "InvisibleTimes;": "\u2062", "Iogon;": "\u012e", "Iopf;": "\U0001d540", "Iota;": "\u0399", "Iscr;": "\u2110", "Itilde;": "\u0128", "Iukcy;": "\u0406", "Iuml": "\xcf", "Iuml;": "\xcf", "Jcirc;": "\u0134", "Jcy;": "\u0419", "Jfr;": "\U0001d50d", "Jopf;": "\U0001d541", "Jscr;": "\U0001d4a5", "Jsercy;": "\u0408", "Jukcy;": "\u0404", "KHcy;": "\u0425", "KJcy;": "\u040c", "Kappa;": "\u039a", "Kcedil;": "\u0136", "Kcy;": "\u041a", "Kfr;": "\U0001d50e", "Kopf;": "\U0001d542", "Kscr;": "\U0001d4a6", "LJcy;": "\u0409", "LT": "<", "LT;": "<", "Lacute;": "\u0139", "Lambda;": "\u039b", "Lang;": "\u27ea", "Laplacetrf;": "\u2112", "Larr;": "\u219e", "Lcaron;": "\u013d", "Lcedil;": "\u013b", "Lcy;": "\u041b", "LeftAngleBracket;": "\u27e8", "LeftArrow;": "\u2190", "LeftArrowBar;": "\u21e4", "LeftArrowRightArrow;": "\u21c6", "LeftCeiling;": "\u2308", "LeftDoubleBracket;": "\u27e6", "LeftDownTeeVector;": "\u2961", "LeftDownVector;": "\u21c3", "LeftDownVectorBar;": "\u2959", "LeftFloor;": "\u230a", "LeftRightArrow;": "\u2194", "LeftRightVector;": "\u294e", "LeftTee;": "\u22a3", "LeftTeeArrow;": "\u21a4", "LeftTeeVector;": "\u295a", "LeftTriangle;": "\u22b2", "LeftTriangleBar;": "\u29cf", "LeftTriangleEqual;": "\u22b4", "LeftUpDownVector;": "\u2951", "LeftUpTeeVector;": "\u2960", "LeftUpVector;": "\u21bf", "LeftUpVectorBar;": "\u2958", "LeftVector;": "\u21bc", "LeftVectorBar;": "\u2952", "Leftarrow;": "\u21d0", "Leftrightarrow;": "\u21d4", "LessEqualGreater;": "\u22da", "LessFullEqual;": "\u2266", "LessGreater;": "\u2276", "LessLess;": "\u2aa1", "LessSlantEqual;": "\u2a7d", "LessTilde;": "\u2272", "Lfr;": "\U0001d50f", "Ll;": "\u22d8", "Lleftarrow;": "\u21da", "Lmidot;": "\u013f", "LongLeftArrow;": "\u27f5", "LongLeftRightArrow;": "\u27f7", "LongRightArrow;": "\u27f6", "Longleftarrow;": "\u27f8", "Longleftrightarrow;": "\u27fa", "Longrightarrow;": "\u27f9", "Lopf;": "\U0001d543", "LowerLeftArrow;": "\u2199", "LowerRightArrow;": "\u2198", "Lscr;": "\u2112", "Lsh;": "\u21b0", "Lstrok;": "\u0141", "Lt;": "\u226a", "Map;": "\u2905", "Mcy;": "\u041c", "MediumSpace;": "\u205f", "Mellintrf;": "\u2133", "Mfr;": "\U0001d510", "MinusPlus;": "\u2213", "Mopf;": "\U0001d544", "Mscr;": "\u2133", "Mu;": "\u039c", "NJcy;": "\u040a", "Nacute;": "\u0143", "Ncaron;": "\u0147", "Ncedil;": "\u0145", "Ncy;": "\u041d", "NegativeMediumSpace;": "\u200b", "NegativeThickSpace;": "\u200b", "NegativeThinSpace;": "\u200b", "NegativeVeryThinSpace;": "\u200b", "NestedGreaterGreater;": "\u226b", "NestedLessLess;": "\u226a", "NewLine;": "\n", "Nfr;": "\U0001d511", "NoBreak;": "\u2060", "NonBreakingSpace;": "\xa0", "Nopf;": "\u2115", "Not;": "\u2aec", "NotCongruent;": "\u2262", "NotCupCap;": "\u226d", "NotDoubleVerticalBar;": "\u2226", "NotElement;": "\u2209", "NotEqual;": "\u2260", "NotEqualTilde;": "\u2242\u0338", "NotExists;": "\u2204", "NotGreater;": "\u226f", "NotGreaterEqual;": "\u2271", "NotGreaterFullEqual;": "\u2267\u0338", "NotGreaterGreater;": "\u226b\u0338", "NotGreaterLess;": "\u2279", "NotGreaterSlantEqual;": "\u2a7e\u0338", "NotGreaterTilde;": "\u2275", "NotHumpDownHump;": "\u224e\u0338", 
"NotHumpEqual;": "\u224f\u0338", "NotLeftTriangle;": "\u22ea", "NotLeftTriangleBar;": "\u29cf\u0338", "NotLeftTriangleEqual;": "\u22ec", "NotLess;": "\u226e", "NotLessEqual;": "\u2270", "NotLessGreater;": "\u2278", "NotLessLess;": "\u226a\u0338", "NotLessSlantEqual;": "\u2a7d\u0338", "NotLessTilde;": "\u2274", "NotNestedGreaterGreater;": "\u2aa2\u0338", "NotNestedLessLess;": "\u2aa1\u0338", "NotPrecedes;": "\u2280", "NotPrecedesEqual;": "\u2aaf\u0338", "NotPrecedesSlantEqual;": "\u22e0", "NotReverseElement;": "\u220c", "NotRightTriangle;": "\u22eb", "NotRightTriangleBar;": "\u29d0\u0338", "NotRightTriangleEqual;": "\u22ed", "NotSquareSubset;": "\u228f\u0338", "NotSquareSubsetEqual;": "\u22e2", "NotSquareSuperset;": "\u2290\u0338", "NotSquareSupersetEqual;": "\u22e3", "NotSubset;": "\u2282\u20d2", "NotSubsetEqual;": "\u2288", "NotSucceeds;": "\u2281", "NotSucceedsEqual;": "\u2ab0\u0338", "NotSucceedsSlantEqual;": "\u22e1", "NotSucceedsTilde;": "\u227f\u0338", "NotSuperset;": "\u2283\u20d2", "NotSupersetEqual;": "\u2289", "NotTilde;": "\u2241", "NotTildeEqual;": "\u2244", "NotTildeFullEqual;": "\u2247", "NotTildeTilde;": "\u2249", "NotVerticalBar;": "\u2224", "Nscr;": "\U0001d4a9", "Ntilde": "\xd1", "Ntilde;": "\xd1", "Nu;": "\u039d", "OElig;": "\u0152", "Oacute": "\xd3", "Oacute;": "\xd3", "Ocirc": "\xd4", "Ocirc;": "\xd4", "Ocy;": "\u041e", "Odblac;": "\u0150", "Ofr;": "\U0001d512", "Ograve": "\xd2", "Ograve;": "\xd2", "Omacr;": "\u014c", "Omega;": "\u03a9", "Omicron;": "\u039f", "Oopf;": "\U0001d546", "OpenCurlyDoubleQuote;": "\u201c", "OpenCurlyQuote;": "\u2018", "Or;": "\u2a54", "Oscr;": "\U0001d4aa", "Oslash": "\xd8", "Oslash;": "\xd8", "Otilde": "\xd5", "Otilde;": "\xd5", "Otimes;": "\u2a37", "Ouml": "\xd6", "Ouml;": "\xd6", "OverBar;": "\u203e", "OverBrace;": "\u23de", "OverBracket;": "\u23b4", "OverParenthesis;": "\u23dc", "PartialD;": "\u2202", "Pcy;": "\u041f", "Pfr;": "\U0001d513", "Phi;": "\u03a6", "Pi;": "\u03a0", "PlusMinus;": "\xb1", "Poincareplane;": "\u210c", "Popf;": "\u2119", "Pr;": "\u2abb", "Precedes;": "\u227a", "PrecedesEqual;": "\u2aaf", "PrecedesSlantEqual;": "\u227c", "PrecedesTilde;": "\u227e", "Prime;": "\u2033", "Product;": "\u220f", "Proportion;": "\u2237", "Proportional;": "\u221d", "Pscr;": "\U0001d4ab", "Psi;": "\u03a8", "QUOT": "\"", "QUOT;": "\"", "Qfr;": "\U0001d514", "Qopf;": "\u211a", "Qscr;": "\U0001d4ac", "RBarr;": "\u2910", "REG": "\xae", "REG;": "\xae", "Racute;": "\u0154", "Rang;": "\u27eb", "Rarr;": "\u21a0", "Rarrtl;": "\u2916", "Rcaron;": "\u0158", "Rcedil;": "\u0156", "Rcy;": "\u0420", "Re;": "\u211c", "ReverseElement;": "\u220b", "ReverseEquilibrium;": "\u21cb", "ReverseUpEquilibrium;": "\u296f", "Rfr;": "\u211c", "Rho;": "\u03a1", "RightAngleBracket;": "\u27e9", "RightArrow;": "\u2192", "RightArrowBar;": "\u21e5", "RightArrowLeftArrow;": "\u21c4", "RightCeiling;": "\u2309", "RightDoubleBracket;": "\u27e7", "RightDownTeeVector;": "\u295d", "RightDownVector;": "\u21c2", "RightDownVectorBar;": "\u2955", "RightFloor;": "\u230b", "RightTee;": "\u22a2", "RightTeeArrow;": "\u21a6", "RightTeeVector;": "\u295b", "RightTriangle;": "\u22b3", "RightTriangleBar;": "\u29d0", "RightTriangleEqual;": "\u22b5", "RightUpDownVector;": "\u294f", "RightUpTeeVector;": "\u295c", "RightUpVector;": "\u21be", "RightUpVectorBar;": "\u2954", "RightVector;": "\u21c0", "RightVectorBar;": "\u2953", "Rightarrow;": "\u21d2", "Ropf;": "\u211d", "RoundImplies;": "\u2970", "Rrightarrow;": "\u21db", "Rscr;": "\u211b", "Rsh;": "\u21b1", "RuleDelayed;": "\u29f4", "SHCHcy;": 
"\u0429", "SHcy;": "\u0428", "SOFTcy;": "\u042c", "Sacute;": "\u015a", "Sc;": "\u2abc", "Scaron;": "\u0160", "Scedil;": "\u015e", "Scirc;": "\u015c", "Scy;": "\u0421", "Sfr;": "\U0001d516", "ShortDownArrow;": "\u2193", "ShortLeftArrow;": "\u2190", "ShortRightArrow;": "\u2192", "ShortUpArrow;": "\u2191", "Sigma;": "\u03a3", "SmallCircle;": "\u2218", "Sopf;": "\U0001d54a", "Sqrt;": "\u221a", "Square;": "\u25a1", "SquareIntersection;": "\u2293", "SquareSubset;": "\u228f", "SquareSubsetEqual;": "\u2291", "SquareSuperset;": "\u2290", "SquareSupersetEqual;": "\u2292", "SquareUnion;": "\u2294", "Sscr;": "\U0001d4ae", "Star;": "\u22c6", "Sub;": "\u22d0", "Subset;": "\u22d0", "SubsetEqual;": "\u2286", "Succeeds;": "\u227b", "SucceedsEqual;": "\u2ab0", "SucceedsSlantEqual;": "\u227d", "SucceedsTilde;": "\u227f", "SuchThat;": "\u220b", "Sum;": "\u2211", "Sup;": "\u22d1", "Superset;": "\u2283", "SupersetEqual;": "\u2287", "Supset;": "\u22d1", "THORN": "\xde", "THORN;": "\xde", "TRADE;": "\u2122", "TSHcy;": "\u040b", "TScy;": "\u0426", "Tab;": "\t", "Tau;": "\u03a4", "Tcaron;": "\u0164", "Tcedil;": "\u0162", "Tcy;": "\u0422", "Tfr;": "\U0001d517", "Therefore;": "\u2234", "Theta;": "\u0398", "ThickSpace;": "\u205f\u200a", "ThinSpace;": "\u2009", "Tilde;": "\u223c", "TildeEqual;": "\u2243", "TildeFullEqual;": "\u2245", "TildeTilde;": "\u2248", "Topf;": "\U0001d54b", "TripleDot;": "\u20db", "Tscr;": "\U0001d4af", "Tstrok;": "\u0166", "Uacute": "\xda", "Uacute;": "\xda", "Uarr;": "\u219f", "Uarrocir;": "\u2949", "Ubrcy;": "\u040e", "Ubreve;": "\u016c", "Ucirc": "\xdb", "Ucirc;": "\xdb", "Ucy;": "\u0423", "Udblac;": "\u0170", "Ufr;": "\U0001d518", "Ugrave": "\xd9", "Ugrave;": "\xd9", "Umacr;": "\u016a", "UnderBar;": "_", "UnderBrace;": "\u23df", "UnderBracket;": "\u23b5", "UnderParenthesis;": "\u23dd", "Union;": "\u22c3", "UnionPlus;": "\u228e", "Uogon;": "\u0172", "Uopf;": "\U0001d54c", "UpArrow;": "\u2191", "UpArrowBar;": "\u2912", "UpArrowDownArrow;": "\u21c5", "UpDownArrow;": "\u2195", "UpEquilibrium;": "\u296e", "UpTee;": "\u22a5", "UpTeeArrow;": "\u21a5", "Uparrow;": "\u21d1", "Updownarrow;": "\u21d5", "UpperLeftArrow;": "\u2196", "UpperRightArrow;": "\u2197", "Upsi;": "\u03d2", "Upsilon;": "\u03a5", "Uring;": "\u016e", "Uscr;": "\U0001d4b0", "Utilde;": "\u0168", "Uuml": "\xdc", "Uuml;": "\xdc", "VDash;": "\u22ab", "Vbar;": "\u2aeb", "Vcy;": "\u0412", "Vdash;": "\u22a9", "Vdashl;": "\u2ae6", "Vee;": "\u22c1", "Verbar;": "\u2016", "Vert;": "\u2016", "VerticalBar;": "\u2223", "VerticalLine;": "|", "VerticalSeparator;": "\u2758", "VerticalTilde;": "\u2240", "VeryThinSpace;": "\u200a", "Vfr;": "\U0001d519", "Vopf;": "\U0001d54d", "Vscr;": "\U0001d4b1", "Vvdash;": "\u22aa", "Wcirc;": "\u0174", "Wedge;": "\u22c0", "Wfr;": "\U0001d51a", "Wopf;": "\U0001d54e", "Wscr;": "\U0001d4b2", "Xfr;": "\U0001d51b", "Xi;": "\u039e", "Xopf;": "\U0001d54f", "Xscr;": "\U0001d4b3", "YAcy;": "\u042f", "YIcy;": "\u0407", "YUcy;": "\u042e", "Yacute": "\xdd", "Yacute;": "\xdd", "Ycirc;": "\u0176", "Ycy;": "\u042b", "Yfr;": "\U0001d51c", "Yopf;": "\U0001d550", "Yscr;": "\U0001d4b4", "Yuml;": "\u0178", "ZHcy;": "\u0416", "Zacute;": "\u0179", "Zcaron;": "\u017d", "Zcy;": "\u0417", "Zdot;": "\u017b", "ZeroWidthSpace;": "\u200b", "Zeta;": "\u0396", "Zfr;": "\u2128", "Zopf;": "\u2124", "Zscr;": "\U0001d4b5", "aacute": "\xe1", "aacute;": "\xe1", "abreve;": "\u0103", "ac;": "\u223e", "acE;": "\u223e\u0333", "acd;": "\u223f", "acirc": "\xe2", "acirc;": "\xe2", "acute": "\xb4", "acute;": "\xb4", "acy;": "\u0430", "aelig": "\xe6", 
"aelig;": "\xe6", "af;": "\u2061", "afr;": "\U0001d51e", "agrave": "\xe0", "agrave;": "\xe0", "alefsym;": "\u2135", "aleph;": "\u2135", "alpha;": "\u03b1", "amacr;": "\u0101", "amalg;": "\u2a3f", "amp": "&", "amp;": "&", "and;": "\u2227", "andand;": "\u2a55", "andd;": "\u2a5c", "andslope;": "\u2a58", "andv;": "\u2a5a", "ang;": "\u2220", "ange;": "\u29a4", "angle;": "\u2220", "angmsd;": "\u2221", "angmsdaa;": "\u29a8", "angmsdab;": "\u29a9", "angmsdac;": "\u29aa", "angmsdad;": "\u29ab", "angmsdae;": "\u29ac", "angmsdaf;": "\u29ad", "angmsdag;": "\u29ae", "angmsdah;": "\u29af", "angrt;": "\u221f", "angrtvb;": "\u22be", "angrtvbd;": "\u299d", "angsph;": "\u2222", "angst;": "\xc5", "angzarr;": "\u237c", "aogon;": "\u0105", "aopf;": "\U0001d552", "ap;": "\u2248", "apE;": "\u2a70", "apacir;": "\u2a6f", "ape;": "\u224a", "apid;": "\u224b", "apos;": "'", "approx;": "\u2248", "approxeq;": "\u224a", "aring": "\xe5", "aring;": "\xe5", "ascr;": "\U0001d4b6", "ast;": "*", "asymp;": "\u2248", "asympeq;": "\u224d", "atilde": "\xe3", "atilde;": "\xe3", "auml": "\xe4", "auml;": "\xe4", "awconint;": "\u2233", "awint;": "\u2a11", "bNot;": "\u2aed", "backcong;": "\u224c", "backepsilon;": "\u03f6", "backprime;": "\u2035", "backsim;": "\u223d", "backsimeq;": "\u22cd", "barvee;": "\u22bd", "barwed;": "\u2305", "barwedge;": "\u2305", "bbrk;": "\u23b5", "bbrktbrk;": "\u23b6", "bcong;": "\u224c", "bcy;": "\u0431", "bdquo;": "\u201e", "becaus;": "\u2235", "because;": "\u2235", "bemptyv;": "\u29b0", "bepsi;": "\u03f6", "bernou;": "\u212c", "beta;": "\u03b2", "beth;": "\u2136", "between;": "\u226c", "bfr;": "\U0001d51f", "bigcap;": "\u22c2", "bigcirc;": "\u25ef", "bigcup;": "\u22c3", "bigodot;": "\u2a00", "bigoplus;": "\u2a01", "bigotimes;": "\u2a02", "bigsqcup;": "\u2a06", "bigstar;": "\u2605", "bigtriangledown;": "\u25bd", "bigtriangleup;": "\u25b3", "biguplus;": "\u2a04", "bigvee;": "\u22c1", "bigwedge;": "\u22c0", "bkarow;": "\u290d", "blacklozenge;": "\u29eb", "blacksquare;": "\u25aa", "blacktriangle;": "\u25b4", "blacktriangledown;": "\u25be", "blacktriangleleft;": "\u25c2", "blacktriangleright;": "\u25b8", "blank;": "\u2423", "blk12;": "\u2592", "blk14;": "\u2591", "blk34;": "\u2593", "block;": "\u2588", "bne;": "=\u20e5", "bnequiv;": "\u2261\u20e5", "bnot;": "\u2310", "bopf;": "\U0001d553", "bot;": "\u22a5", "bottom;": "\u22a5", "bowtie;": "\u22c8", "boxDL;": "\u2557", "boxDR;": "\u2554", "boxDl;": "\u2556", "boxDr;": "\u2553", "boxH;": "\u2550", "boxHD;": "\u2566", "boxHU;": "\u2569", "boxHd;": "\u2564", "boxHu;": "\u2567", "boxUL;": "\u255d", "boxUR;": "\u255a", "boxUl;": "\u255c", "boxUr;": "\u2559", "boxV;": "\u2551", "boxVH;": "\u256c", "boxVL;": "\u2563", "boxVR;": "\u2560", "boxVh;": "\u256b", "boxVl;": "\u2562", "boxVr;": "\u255f", "boxbox;": "\u29c9", "boxdL;": "\u2555", "boxdR;": "\u2552", "boxdl;": "\u2510", "boxdr;": "\u250c", "boxh;": "\u2500", "boxhD;": "\u2565", "boxhU;": "\u2568", "boxhd;": "\u252c", "boxhu;": "\u2534", "boxminus;": "\u229f", "boxplus;": "\u229e", "boxtimes;": "\u22a0", "boxuL;": "\u255b", "boxuR;": "\u2558", "boxul;": "\u2518", "boxur;": "\u2514", "boxv;": "\u2502", "boxvH;": "\u256a", "boxvL;": "\u2561", "boxvR;": "\u255e", "boxvh;": "\u253c", "boxvl;": "\u2524", "boxvr;": "\u251c", "bprime;": "\u2035", "breve;": "\u02d8", "brvbar": "\xa6", "brvbar;": "\xa6", "bscr;": "\U0001d4b7", "bsemi;": "\u204f", "bsim;": "\u223d", "bsime;": "\u22cd", "bsol;": "\\", "bsolb;": "\u29c5", "bsolhsub;": "\u27c8", "bull;": "\u2022", "bullet;": "\u2022", "bump;": "\u224e", "bumpE;": "\u2aae", 
"bumpe;": "\u224f", "bumpeq;": "\u224f", "cacute;": "\u0107", "cap;": "\u2229", "capand;": "\u2a44", "capbrcup;": "\u2a49", "capcap;": "\u2a4b", "capcup;": "\u2a47", "capdot;": "\u2a40", "caps;": "\u2229\ufe00", "caret;": "\u2041", "caron;": "\u02c7", "ccaps;": "\u2a4d", "ccaron;": "\u010d", "ccedil": "\xe7", "ccedil;": "\xe7", "ccirc;": "\u0109", "ccups;": "\u2a4c", "ccupssm;": "\u2a50", "cdot;": "\u010b", "cedil": "\xb8", "cedil;": "\xb8", "cemptyv;": "\u29b2", "cent": "\xa2", "cent;": "\xa2", "centerdot;": "\xb7", "cfr;": "\U0001d520", "chcy;": "\u0447", "check;": "\u2713", "checkmark;": "\u2713", "chi;": "\u03c7", "cir;": "\u25cb", "cirE;": "\u29c3", "circ;": "\u02c6", "circeq;": "\u2257", "circlearrowleft;": "\u21ba", "circlearrowright;": "\u21bb", "circledR;": "\xae", "circledS;": "\u24c8", "circledast;": "\u229b", "circledcirc;": "\u229a", "circleddash;": "\u229d", "cire;": "\u2257", "cirfnint;": "\u2a10", "cirmid;": "\u2aef", "cirscir;": "\u29c2", "clubs;": "\u2663", "clubsuit;": "\u2663", "colon;": ":", "colone;": "\u2254", "coloneq;": "\u2254", "comma;": ",", "commat;": "@", "comp;": "\u2201", "compfn;": "\u2218", "complement;": "\u2201", "complexes;": "\u2102", "cong;": "\u2245", "congdot;": "\u2a6d", "conint;": "\u222e", "copf;": "\U0001d554", "coprod;": "\u2210", "copy": "\xa9", "copy;": "\xa9", "copysr;": "\u2117", "crarr;": "\u21b5", "cross;": "\u2717", "cscr;": "\U0001d4b8", "csub;": "\u2acf", "csube;": "\u2ad1", "csup;": "\u2ad0", "csupe;": "\u2ad2", "ctdot;": "\u22ef", "cudarrl;": "\u2938", "cudarrr;": "\u2935", "cuepr;": "\u22de", "cuesc;": "\u22df", "cularr;": "\u21b6", "cularrp;": "\u293d", "cup;": "\u222a", "cupbrcap;": "\u2a48", "cupcap;": "\u2a46", "cupcup;": "\u2a4a", "cupdot;": "\u228d", "cupor;": "\u2a45", "cups;": "\u222a\ufe00", "curarr;": "\u21b7", "curarrm;": "\u293c", "curlyeqprec;": "\u22de", "curlyeqsucc;": "\u22df", "curlyvee;": "\u22ce", "curlywedge;": "\u22cf", "curren": "\xa4", "curren;": "\xa4", "curvearrowleft;": "\u21b6", "curvearrowright;": "\u21b7", "cuvee;": "\u22ce", "cuwed;": "\u22cf", "cwconint;": "\u2232", "cwint;": "\u2231", "cylcty;": "\u232d", "dArr;": "\u21d3", "dHar;": "\u2965", "dagger;": "\u2020", "daleth;": "\u2138", "darr;": "\u2193", "dash;": "\u2010", "dashv;": "\u22a3", "dbkarow;": "\u290f", "dblac;": "\u02dd", "dcaron;": "\u010f", "dcy;": "\u0434", "dd;": "\u2146", "ddagger;": "\u2021", "ddarr;": "\u21ca", "ddotseq;": "\u2a77", "deg": "\xb0", "deg;": "\xb0", "delta;": "\u03b4", "demptyv;": "\u29b1", "dfisht;": "\u297f", "dfr;": "\U0001d521", "dharl;": "\u21c3", "dharr;": "\u21c2", "diam;": "\u22c4", "diamond;": "\u22c4", "diamondsuit;": "\u2666", "diams;": "\u2666", "die;": "\xa8", "digamma;": "\u03dd", "disin;": "\u22f2", "div;": "\xf7", "divide": "\xf7", "divide;": "\xf7", "divideontimes;": "\u22c7", "divonx;": "\u22c7", "djcy;": "\u0452", "dlcorn;": "\u231e", "dlcrop;": "\u230d", "dollar;": "$", "dopf;": "\U0001d555", "dot;": "\u02d9", "doteq;": "\u2250", "doteqdot;": "\u2251", "dotminus;": "\u2238", "dotplus;": "\u2214", "dotsquare;": "\u22a1", "doublebarwedge;": "\u2306", "downarrow;": "\u2193", "downdownarrows;": "\u21ca", "downharpoonleft;": "\u21c3", "downharpoonright;": "\u21c2", "drbkarow;": "\u2910", "drcorn;": "\u231f", "drcrop;": "\u230c", "dscr;": "\U0001d4b9", "dscy;": "\u0455", "dsol;": "\u29f6", "dstrok;": "\u0111", "dtdot;": "\u22f1", "dtri;": "\u25bf", "dtrif;": "\u25be", "duarr;": "\u21f5", "duhar;": "\u296f", "dwangle;": "\u29a6", "dzcy;": "\u045f", "dzigrarr;": "\u27ff", "eDDot;": "\u2a77", "eDot;": 
"\u2251", "eacute": "\xe9", "eacute;": "\xe9", "easter;": "\u2a6e", "ecaron;": "\u011b", "ecir;": "\u2256", "ecirc": "\xea", "ecirc;": "\xea", "ecolon;": "\u2255", "ecy;": "\u044d", "edot;": "\u0117", "ee;": "\u2147", "efDot;": "\u2252", "efr;": "\U0001d522", "eg;": "\u2a9a", "egrave": "\xe8", "egrave;": "\xe8", "egs;": "\u2a96", "egsdot;": "\u2a98", "el;": "\u2a99", "elinters;": "\u23e7", "ell;": "\u2113", "els;": "\u2a95", "elsdot;": "\u2a97", "emacr;": "\u0113", "empty;": "\u2205", "emptyset;": "\u2205", "emptyv;": "\u2205", "emsp13;": "\u2004", "emsp14;": "\u2005", "emsp;": "\u2003", "eng;": "\u014b", "ensp;": "\u2002", "eogon;": "\u0119", "eopf;": "\U0001d556", "epar;": "\u22d5", "eparsl;": "\u29e3", "eplus;": "\u2a71", "epsi;": "\u03b5", "epsilon;": "\u03b5", "epsiv;": "\u03f5", "eqcirc;": "\u2256", "eqcolon;": "\u2255", "eqsim;": "\u2242", "eqslantgtr;": "\u2a96", "eqslantless;": "\u2a95", "equals;": "=", "equest;": "\u225f", "equiv;": "\u2261", "equivDD;": "\u2a78", "eqvparsl;": "\u29e5", "erDot;": "\u2253", "erarr;": "\u2971", "escr;": "\u212f", "esdot;": "\u2250", "esim;": "\u2242", "eta;": "\u03b7", "eth": "\xf0", "eth;": "\xf0", "euml": "\xeb", "euml;": "\xeb", "euro;": "\u20ac", "excl;": "!", "exist;": "\u2203", "expectation;": "\u2130", "exponentiale;": "\u2147", "fallingdotseq;": "\u2252", "fcy;": "\u0444", "female;": "\u2640", "ffilig;": "\ufb03", "fflig;": "\ufb00", "ffllig;": "\ufb04", "ffr;": "\U0001d523", "filig;": "\ufb01", "fjlig;": "fj", "flat;": "\u266d", "fllig;": "\ufb02", "fltns;": "\u25b1", "fnof;": "\u0192", "fopf;": "\U0001d557", "forall;": "\u2200", "fork;": "\u22d4", "forkv;": "\u2ad9", "fpartint;": "\u2a0d", "frac12": "\xbd", "frac12;": "\xbd", "frac13;": "\u2153", "frac14": "\xbc", "frac14;": "\xbc", "frac15;": "\u2155", "frac16;": "\u2159", "frac18;": "\u215b", "frac23;": "\u2154", "frac25;": "\u2156", "frac34": "\xbe", "frac34;": "\xbe", "frac35;": "\u2157", "frac38;": "\u215c", "frac45;": "\u2158", "frac56;": "\u215a", "frac58;": "\u215d", "frac78;": "\u215e", "frasl;": "\u2044", "frown;": "\u2322", "fscr;": "\U0001d4bb", "gE;": "\u2267", "gEl;": "\u2a8c", "gacute;": "\u01f5", "gamma;": "\u03b3", "gammad;": "\u03dd", "gap;": "\u2a86", "gbreve;": "\u011f", "gcirc;": "\u011d", "gcy;": "\u0433", "gdot;": "\u0121", "ge;": "\u2265", "gel;": "\u22db", "geq;": "\u2265", "geqq;": "\u2267", "geqslant;": "\u2a7e", "ges;": "\u2a7e", "gescc;": "\u2aa9", "gesdot;": "\u2a80", "gesdoto;": "\u2a82", "gesdotol;": "\u2a84", "gesl;": "\u22db\ufe00", "gesles;": "\u2a94", "gfr;": "\U0001d524", "gg;": "\u226b", "ggg;": "\u22d9", "gimel;": "\u2137", "gjcy;": "\u0453", "gl;": "\u2277", "glE;": "\u2a92", "gla;": "\u2aa5", "glj;": "\u2aa4", "gnE;": "\u2269", "gnap;": "\u2a8a", "gnapprox;": "\u2a8a", "gne;": "\u2a88", "gneq;": "\u2a88", "gneqq;": "\u2269", "gnsim;": "\u22e7", "gopf;": "\U0001d558", "grave;": "`", "gscr;": "\u210a", "gsim;": "\u2273", "gsime;": "\u2a8e", "gsiml;": "\u2a90", "gt": ">", "gt;": ">", "gtcc;": "\u2aa7", "gtcir;": "\u2a7a", "gtdot;": "\u22d7", "gtlPar;": "\u2995", "gtquest;": "\u2a7c", "gtrapprox;": "\u2a86", "gtrarr;": "\u2978", "gtrdot;": "\u22d7", "gtreqless;": "\u22db", "gtreqqless;": "\u2a8c", "gtrless;": "\u2277", "gtrsim;": "\u2273", "gvertneqq;": "\u2269\ufe00", "gvnE;": "\u2269\ufe00", "hArr;": "\u21d4", "hairsp;": "\u200a", "half;": "\xbd", "hamilt;": "\u210b", "hardcy;": "\u044a", "harr;": "\u2194", "harrcir;": "\u2948", "harrw;": "\u21ad", "hbar;": "\u210f", "hcirc;": "\u0125", "hearts;": "\u2665", "heartsuit;": "\u2665", "hellip;": 
"\u2026", "hercon;": "\u22b9", "hfr;": "\U0001d525", "hksearow;": "\u2925", "hkswarow;": "\u2926", "hoarr;": "\u21ff", "homtht;": "\u223b", "hookleftarrow;": "\u21a9", "hookrightarrow;": "\u21aa", "hopf;": "\U0001d559", "horbar;": "\u2015", "hscr;": "\U0001d4bd", "hslash;": "\u210f", "hstrok;": "\u0127", "hybull;": "\u2043", "hyphen;": "\u2010", "iacute": "\xed", "iacute;": "\xed", "ic;": "\u2063", "icirc": "\xee", "icirc;": "\xee", "icy;": "\u0438", "iecy;": "\u0435", "iexcl": "\xa1", "iexcl;": "\xa1", "iff;": "\u21d4", "ifr;": "\U0001d526", "igrave": "\xec", "igrave;": "\xec", "ii;": "\u2148", "iiiint;": "\u2a0c", "iiint;": "\u222d", "iinfin;": "\u29dc", "iiota;": "\u2129", "ijlig;": "\u0133", "imacr;": "\u012b", "image;": "\u2111", "imagline;": "\u2110", "imagpart;": "\u2111", "imath;": "\u0131", "imof;": "\u22b7", "imped;": "\u01b5", "in;": "\u2208", "incare;": "\u2105", "infin;": "\u221e", "infintie;": "\u29dd", "inodot;": "\u0131", "int;": "\u222b", "intcal;": "\u22ba", "integers;": "\u2124", "intercal;": "\u22ba", "intlarhk;": "\u2a17", "intprod;": "\u2a3c", "iocy;": "\u0451", "iogon;": "\u012f", "iopf;": "\U0001d55a", "iota;": "\u03b9", "iprod;": "\u2a3c", "iquest": "\xbf", "iquest;": "\xbf", "iscr;": "\U0001d4be", "isin;": "\u2208", "isinE;": "\u22f9", "isindot;": "\u22f5", "isins;": "\u22f4", "isinsv;": "\u22f3", "isinv;": "\u2208", "it;": "\u2062", "itilde;": "\u0129", "iukcy;": "\u0456", "iuml": "\xef", "iuml;": "\xef", "jcirc;": "\u0135", "jcy;": "\u0439", "jfr;": "\U0001d527", "jmath;": "\u0237", "jopf;": "\U0001d55b", "jscr;": "\U0001d4bf", "jsercy;": "\u0458", "jukcy;": "\u0454", "kappa;": "\u03ba", "kappav;": "\u03f0", "kcedil;": "\u0137", "kcy;": "\u043a", "kfr;": "\U0001d528", "kgreen;": "\u0138", "khcy;": "\u0445", "kjcy;": "\u045c", "kopf;": "\U0001d55c", "kscr;": "\U0001d4c0", "lAarr;": "\u21da", "lArr;": "\u21d0", "lAtail;": "\u291b", "lBarr;": "\u290e", "lE;": "\u2266", "lEg;": "\u2a8b", "lHar;": "\u2962", "lacute;": "\u013a", "laemptyv;": "\u29b4", "lagran;": "\u2112", "lambda;": "\u03bb", "lang;": "\u27e8", "langd;": "\u2991", "langle;": "\u27e8", "lap;": "\u2a85", "laquo": "\xab", "laquo;": "\xab", "larr;": "\u2190", "larrb;": "\u21e4", "larrbfs;": "\u291f", "larrfs;": "\u291d", "larrhk;": "\u21a9", "larrlp;": "\u21ab", "larrpl;": "\u2939", "larrsim;": "\u2973", "larrtl;": "\u21a2", "lat;": "\u2aab", "latail;": "\u2919", "late;": "\u2aad", "lates;": "\u2aad\ufe00", "lbarr;": "\u290c", "lbbrk;": "\u2772", "lbrace;": "{", "lbrack;": "[", "lbrke;": "\u298b", "lbrksld;": "\u298f", "lbrkslu;": "\u298d", "lcaron;": "\u013e", "lcedil;": "\u013c", "lceil;": "\u2308", "lcub;": "{", "lcy;": "\u043b", "ldca;": "\u2936", "ldquo;": "\u201c", "ldquor;": "\u201e", "ldrdhar;": "\u2967", "ldrushar;": "\u294b", "ldsh;": "\u21b2", "le;": "\u2264", "leftarrow;": "\u2190", "leftarrowtail;": "\u21a2", "leftharpoondown;": "\u21bd", "leftharpoonup;": "\u21bc", "leftleftarrows;": "\u21c7", "leftrightarrow;": "\u2194", "leftrightarrows;": "\u21c6", "leftrightharpoons;": "\u21cb", "leftrightsquigarrow;": "\u21ad", "leftthreetimes;": "\u22cb", "leg;": "\u22da", "leq;": "\u2264", "leqq;": "\u2266", "leqslant;": "\u2a7d", "les;": "\u2a7d", "lescc;": "\u2aa8", "lesdot;": "\u2a7f", "lesdoto;": "\u2a81", "lesdotor;": "\u2a83", "lesg;": "\u22da\ufe00", "lesges;": "\u2a93", "lessapprox;": "\u2a85", "lessdot;": "\u22d6", "lesseqgtr;": "\u22da", "lesseqqgtr;": "\u2a8b", "lessgtr;": "\u2276", "lesssim;": "\u2272", "lfisht;": "\u297c", "lfloor;": "\u230a", "lfr;": "\U0001d529", "lg;": "\u2276", 
"lgE;": "\u2a91", "lhard;": "\u21bd", "lharu;": "\u21bc", "lharul;": "\u296a", "lhblk;": "\u2584", "ljcy;": "\u0459", "ll;": "\u226a", "llarr;": "\u21c7", "llcorner;": "\u231e", "llhard;": "\u296b", "lltri;": "\u25fa", "lmidot;": "\u0140", "lmoust;": "\u23b0", "lmoustache;": "\u23b0", "lnE;": "\u2268", "lnap;": "\u2a89", "lnapprox;": "\u2a89", "lne;": "\u2a87", "lneq;": "\u2a87", "lneqq;": "\u2268", "lnsim;": "\u22e6", "loang;": "\u27ec", "loarr;": "\u21fd", "lobrk;": "\u27e6", "longleftarrow;": "\u27f5", "longleftrightarrow;": "\u27f7", "longmapsto;": "\u27fc", "longrightarrow;": "\u27f6", "looparrowleft;": "\u21ab", "looparrowright;": "\u21ac", "lopar;": "\u2985", "lopf;": "\U0001d55d", "loplus;": "\u2a2d", "lotimes;": "\u2a34", "lowast;": "\u2217", "lowbar;": "_", "loz;": "\u25ca", "lozenge;": "\u25ca", "lozf;": "\u29eb", "lpar;": "(", "lparlt;": "\u2993", "lrarr;": "\u21c6", "lrcorner;": "\u231f", "lrhar;": "\u21cb", "lrhard;": "\u296d", "lrm;": "\u200e", "lrtri;": "\u22bf", "lsaquo;": "\u2039", "lscr;": "\U0001d4c1", "lsh;": "\u21b0", "lsim;": "\u2272", "lsime;": "\u2a8d", "lsimg;": "\u2a8f", "lsqb;": "[", "lsquo;": "\u2018", "lsquor;": "\u201a", "lstrok;": "\u0142", "lt": "<", "lt;": "<", "ltcc;": "\u2aa6", "ltcir;": "\u2a79", "ltdot;": "\u22d6", "lthree;": "\u22cb", "ltimes;": "\u22c9", "ltlarr;": "\u2976", "ltquest;": "\u2a7b", "ltrPar;": "\u2996", "ltri;": "\u25c3", "ltrie;": "\u22b4", "ltrif;": "\u25c2", "lurdshar;": "\u294a", "luruhar;": "\u2966", "lvertneqq;": "\u2268\ufe00", "lvnE;": "\u2268\ufe00", "mDDot;": "\u223a", "macr": "\xaf", "macr;": "\xaf", "male;": "\u2642", "malt;": "\u2720", "maltese;": "\u2720", "map;": "\u21a6", "mapsto;": "\u21a6", "mapstodown;": "\u21a7", "mapstoleft;": "\u21a4", "mapstoup;": "\u21a5", "marker;": "\u25ae", "mcomma;": "\u2a29", "mcy;": "\u043c", "mdash;": "\u2014", "measuredangle;": "\u2221", "mfr;": "\U0001d52a", "mho;": "\u2127", "micro": "\xb5", "micro;": "\xb5", "mid;": "\u2223", "midast;": "*", "midcir;": "\u2af0", "middot": "\xb7", "middot;": "\xb7", "minus;": "\u2212", "minusb;": "\u229f", "minusd;": "\u2238", "minusdu;": "\u2a2a", "mlcp;": "\u2adb", "mldr;": "\u2026", "mnplus;": "\u2213", "models;": "\u22a7", "mopf;": "\U0001d55e", "mp;": "\u2213", "mscr;": "\U0001d4c2", "mstpos;": "\u223e", "mu;": "\u03bc", "multimap;": "\u22b8", "mumap;": "\u22b8", "nGg;": "\u22d9\u0338", "nGt;": "\u226b\u20d2", "nGtv;": "\u226b\u0338", "nLeftarrow;": "\u21cd", "nLeftrightarrow;": "\u21ce", "nLl;": "\u22d8\u0338", "nLt;": "\u226a\u20d2", "nLtv;": "\u226a\u0338", "nRightarrow;": "\u21cf", "nVDash;": "\u22af", "nVdash;": "\u22ae", "nabla;": "\u2207", "nacute;": "\u0144", "nang;": "\u2220\u20d2", "nap;": "\u2249", "napE;": "\u2a70\u0338", "napid;": "\u224b\u0338", "napos;": "\u0149", "napprox;": "\u2249", "natur;": "\u266e", "natural;": "\u266e", "naturals;": "\u2115", "nbsp": "\xa0", "nbsp;": "\xa0", "nbump;": "\u224e\u0338", "nbumpe;": "\u224f\u0338", "ncap;": "\u2a43", "ncaron;": "\u0148", "ncedil;": "\u0146", "ncong;": "\u2247", "ncongdot;": "\u2a6d\u0338", "ncup;": "\u2a42", "ncy;": "\u043d", "ndash;": "\u2013", "ne;": "\u2260", "neArr;": "\u21d7", "nearhk;": "\u2924", "nearr;": "\u2197", "nearrow;": "\u2197", "nedot;": "\u2250\u0338", "nequiv;": "\u2262", "nesear;": "\u2928", "nesim;": "\u2242\u0338", "nexist;": "\u2204", "nexists;": "\u2204", "nfr;": "\U0001d52b", "ngE;": "\u2267\u0338", "nge;": "\u2271", "ngeq;": "\u2271", "ngeqq;": "\u2267\u0338", "ngeqslant;": "\u2a7e\u0338", "nges;": "\u2a7e\u0338", "ngsim;": "\u2275", "ngt;": "\u226f", 
"ngtr;": "\u226f", "nhArr;": "\u21ce", "nharr;": "\u21ae", "nhpar;": "\u2af2", "ni;": "\u220b", "nis;": "\u22fc", "nisd;": "\u22fa", "niv;": "\u220b", "njcy;": "\u045a", "nlArr;": "\u21cd", "nlE;": "\u2266\u0338", "nlarr;": "\u219a", "nldr;": "\u2025", "nle;": "\u2270", "nleftarrow;": "\u219a", "nleftrightarrow;": "\u21ae", "nleq;": "\u2270", "nleqq;": "\u2266\u0338", "nleqslant;": "\u2a7d\u0338", "nles;": "\u2a7d\u0338", "nless;": "\u226e", "nlsim;": "\u2274", "nlt;": "\u226e", "nltri;": "\u22ea", "nltrie;": "\u22ec", "nmid;": "\u2224", "nopf;": "\U0001d55f", "not": "\xac", "not;": "\xac", "notin;": "\u2209", "notinE;": "\u22f9\u0338", "notindot;": "\u22f5\u0338", "notinva;": "\u2209", "notinvb;": "\u22f7", "notinvc;": "\u22f6", "notni;": "\u220c", "notniva;": "\u220c", "notnivb;": "\u22fe", "notnivc;": "\u22fd", "npar;": "\u2226", "nparallel;": "\u2226", "nparsl;": "\u2afd\u20e5", "npart;": "\u2202\u0338", "npolint;": "\u2a14", "npr;": "\u2280", "nprcue;": "\u22e0", "npre;": "\u2aaf\u0338", "nprec;": "\u2280", "npreceq;": "\u2aaf\u0338", "nrArr;": "\u21cf", "nrarr;": "\u219b", "nrarrc;": "\u2933\u0338", "nrarrw;": "\u219d\u0338", "nrightarrow;": "\u219b", "nrtri;": "\u22eb", "nrtrie;": "\u22ed", "nsc;": "\u2281", "nsccue;": "\u22e1", "nsce;": "\u2ab0\u0338", "nscr;": "\U0001d4c3", "nshortmid;": "\u2224", "nshortparallel;": "\u2226", "nsim;": "\u2241", "nsime;": "\u2244", "nsimeq;": "\u2244", "nsmid;": "\u2224", "nspar;": "\u2226", "nsqsube;": "\u22e2", "nsqsupe;": "\u22e3", "nsub;": "\u2284", "nsubE;": "\u2ac5\u0338", "nsube;": "\u2288", "nsubset;": "\u2282\u20d2", "nsubseteq;": "\u2288", "nsubseteqq;": "\u2ac5\u0338", "nsucc;": "\u2281", "nsucceq;": "\u2ab0\u0338", "nsup;": "\u2285", "nsupE;": "\u2ac6\u0338", "nsupe;": "\u2289", "nsupset;": "\u2283\u20d2", "nsupseteq;": "\u2289", "nsupseteqq;": "\u2ac6\u0338", "ntgl;": "\u2279", "ntilde": "\xf1", "ntilde;": "\xf1", "ntlg;": "\u2278", "ntriangleleft;": "\u22ea", "ntrianglelefteq;": "\u22ec", "ntriangleright;": "\u22eb", "ntrianglerighteq;": "\u22ed", "nu;": "\u03bd", "num;": "#", "numero;": "\u2116", "numsp;": "\u2007", "nvDash;": "\u22ad", "nvHarr;": "\u2904", "nvap;": "\u224d\u20d2", "nvdash;": "\u22ac", "nvge;": "\u2265\u20d2", "nvgt;": ">\u20d2", "nvinfin;": "\u29de", "nvlArr;": "\u2902", "nvle;": "\u2264\u20d2", "nvlt;": "<\u20d2", "nvltrie;": "\u22b4\u20d2", "nvrArr;": "\u2903", "nvrtrie;": "\u22b5\u20d2", "nvsim;": "\u223c\u20d2", "nwArr;": "\u21d6", "nwarhk;": "\u2923", "nwarr;": "\u2196", "nwarrow;": "\u2196", "nwnear;": "\u2927", "oS;": "\u24c8", "oacute": "\xf3", "oacute;": "\xf3", "oast;": "\u229b", "ocir;": "\u229a", "ocirc": "\xf4", "ocirc;": "\xf4", "ocy;": "\u043e", "odash;": "\u229d", "odblac;": "\u0151", "odiv;": "\u2a38", "odot;": "\u2299", "odsold;": "\u29bc", "oelig;": "\u0153", "ofcir;": "\u29bf", "ofr;": "\U0001d52c", "ogon;": "\u02db", "ograve": "\xf2", "ograve;": "\xf2", "ogt;": "\u29c1", "ohbar;": "\u29b5", "ohm;": "\u03a9", "oint;": "\u222e", "olarr;": "\u21ba", "olcir;": "\u29be", "olcross;": "\u29bb", "oline;": "\u203e", "olt;": "\u29c0", "omacr;": "\u014d", "omega;": "\u03c9", "omicron;": "\u03bf", "omid;": "\u29b6", "ominus;": "\u2296", "oopf;": "\U0001d560", "opar;": "\u29b7", "operp;": "\u29b9", "oplus;": "\u2295", "or;": "\u2228", "orarr;": "\u21bb", "ord;": "\u2a5d", "order;": "\u2134", "orderof;": "\u2134", "ordf": "\xaa", "ordf;": "\xaa", "ordm": "\xba", "ordm;": "\xba", "origof;": "\u22b6", "oror;": "\u2a56", "orslope;": "\u2a57", "orv;": "\u2a5b", "oscr;": "\u2134", "oslash": "\xf8", "oslash;": 
"\xf8", "osol;": "\u2298", "otilde": "\xf5", "otilde;": "\xf5", "otimes;": "\u2297", "otimesas;": "\u2a36", "ouml": "\xf6", "ouml;": "\xf6", "ovbar;": "\u233d", "par;": "\u2225", "para": "\xb6", "para;": "\xb6", "parallel;": "\u2225", "parsim;": "\u2af3", "parsl;": "\u2afd", "part;": "\u2202", "pcy;": "\u043f", "percnt;": "%", "period;": ".", "permil;": "\u2030", "perp;": "\u22a5", "pertenk;": "\u2031", "pfr;": "\U0001d52d", "phi;": "\u03c6", "phiv;": "\u03d5", "phmmat;": "\u2133", "phone;": "\u260e", "pi;": "\u03c0", "pitchfork;": "\u22d4", "piv;": "\u03d6", "planck;": "\u210f", "planckh;": "\u210e", "plankv;": "\u210f", "plus;": "+", "plusacir;": "\u2a23", "plusb;": "\u229e", "pluscir;": "\u2a22", "plusdo;": "\u2214", "plusdu;": "\u2a25", "pluse;": "\u2a72", "plusmn": "\xb1", "plusmn;": "\xb1", "plussim;": "\u2a26", "plustwo;": "\u2a27", "pm;": "\xb1", "pointint;": "\u2a15", "popf;": "\U0001d561", "pound": "\xa3", "pound;": "\xa3", "pr;": "\u227a", "prE;": "\u2ab3", "prap;": "\u2ab7", "prcue;": "\u227c", "pre;": "\u2aaf", "prec;": "\u227a", "precapprox;": "\u2ab7", "preccurlyeq;": "\u227c", "preceq;": "\u2aaf", "precnapprox;": "\u2ab9", "precneqq;": "\u2ab5", "precnsim;": "\u22e8", "precsim;": "\u227e", "prime;": "\u2032", "primes;": "\u2119", "prnE;": "\u2ab5", "prnap;": "\u2ab9", "prnsim;": "\u22e8", "prod;": "\u220f", "profalar;": "\u232e", "profline;": "\u2312", "profsurf;": "\u2313", "prop;": "\u221d", "propto;": "\u221d", "prsim;": "\u227e", "prurel;": "\u22b0", "pscr;": "\U0001d4c5", "psi;": "\u03c8", "puncsp;": "\u2008", "qfr;": "\U0001d52e", "qint;": "\u2a0c", "qopf;": "\U0001d562", "qprime;": "\u2057", "qscr;": "\U0001d4c6", "quaternions;": "\u210d", "quatint;": "\u2a16", "quest;": "?", "questeq;": "\u225f", "quot": "\"", "quot;": "\"", "rAarr;": "\u21db", "rArr;": "\u21d2", "rAtail;": "\u291c", "rBarr;": "\u290f", "rHar;": "\u2964", "race;": "\u223d\u0331", "racute;": "\u0155", "radic;": "\u221a", "raemptyv;": "\u29b3", "rang;": "\u27e9", "rangd;": "\u2992", "range;": "\u29a5", "rangle;": "\u27e9", "raquo": "\xbb", "raquo;": "\xbb", "rarr;": "\u2192", "rarrap;": "\u2975", "rarrb;": "\u21e5", "rarrbfs;": "\u2920", "rarrc;": "\u2933", "rarrfs;": "\u291e", "rarrhk;": "\u21aa", "rarrlp;": "\u21ac", "rarrpl;": "\u2945", "rarrsim;": "\u2974", "rarrtl;": "\u21a3", "rarrw;": "\u219d", "ratail;": "\u291a", "ratio;": "\u2236", "rationals;": "\u211a", "rbarr;": "\u290d", "rbbrk;": "\u2773", "rbrace;": "}", "rbrack;": "]", "rbrke;": "\u298c", "rbrksld;": "\u298e", "rbrkslu;": "\u2990", "rcaron;": "\u0159", "rcedil;": "\u0157", "rceil;": "\u2309", "rcub;": "}", "rcy;": "\u0440", "rdca;": "\u2937", "rdldhar;": "\u2969", "rdquo;": "\u201d", "rdquor;": "\u201d", "rdsh;": "\u21b3", "real;": "\u211c", "realine;": "\u211b", "realpart;": "\u211c", "reals;": "\u211d", "rect;": "\u25ad", "reg": "\xae", "reg;": "\xae", "rfisht;": "\u297d", "rfloor;": "\u230b", "rfr;": "\U0001d52f", "rhard;": "\u21c1", "rharu;": "\u21c0", "rharul;": "\u296c", "rho;": "\u03c1", "rhov;": "\u03f1", "rightarrow;": "\u2192", "rightarrowtail;": "\u21a3", "rightharpoondown;": "\u21c1", "rightharpoonup;": "\u21c0", "rightleftarrows;": "\u21c4", "rightleftharpoons;": "\u21cc", "rightrightarrows;": "\u21c9", "rightsquigarrow;": "\u219d", "rightthreetimes;": "\u22cc", "ring;": "\u02da", "risingdotseq;": "\u2253", "rlarr;": "\u21c4", "rlhar;": "\u21cc", "rlm;": "\u200f", "rmoust;": "\u23b1", "rmoustache;": "\u23b1", "rnmid;": "\u2aee", "roang;": "\u27ed", "roarr;": "\u21fe", "robrk;": "\u27e7", "ropar;": "\u2986", "ropf;": 
"\U0001d563", "roplus;": "\u2a2e", "rotimes;": "\u2a35", "rpar;": ")", "rpargt;": "\u2994", "rppolint;": "\u2a12", "rrarr;": "\u21c9", "rsaquo;": "\u203a", "rscr;": "\U0001d4c7", "rsh;": "\u21b1", "rsqb;": "]", "rsquo;": "\u2019", "rsquor;": "\u2019", "rthree;": "\u22cc", "rtimes;": "\u22ca", "rtri;": "\u25b9", "rtrie;": "\u22b5", "rtrif;": "\u25b8", "rtriltri;": "\u29ce", "ruluhar;": "\u2968", "rx;": "\u211e", "sacute;": "\u015b", "sbquo;": "\u201a", "sc;": "\u227b", "scE;": "\u2ab4", "scap;": "\u2ab8", "scaron;": "\u0161", "sccue;": "\u227d", "sce;": "\u2ab0", "scedil;": "\u015f", "scirc;": "\u015d", "scnE;": "\u2ab6", "scnap;": "\u2aba", "scnsim;": "\u22e9", "scpolint;": "\u2a13", "scsim;": "\u227f", "scy;": "\u0441", "sdot;": "\u22c5", "sdotb;": "\u22a1", "sdote;": "\u2a66", "seArr;": "\u21d8", "searhk;": "\u2925", "searr;": "\u2198", "searrow;": "\u2198", "sect": "\xa7", "sect;": "\xa7", "semi;": ";", "seswar;": "\u2929", "setminus;": "\u2216", "setmn;": "\u2216", "sext;": "\u2736", "sfr;": "\U0001d530", "sfrown;": "\u2322", "sharp;": "\u266f", "shchcy;": "\u0449", "shcy;": "\u0448", "shortmid;": "\u2223", "shortparallel;": "\u2225", "shy": "\xad", "shy;": "\xad", "sigma;": "\u03c3", "sigmaf;": "\u03c2", "sigmav;": "\u03c2", "sim;": "\u223c", "simdot;": "\u2a6a", "sime;": "\u2243", "simeq;": "\u2243", "simg;": "\u2a9e", "simgE;": "\u2aa0", "siml;": "\u2a9d", "simlE;": "\u2a9f", "simne;": "\u2246", "simplus;": "\u2a24", "simrarr;": "\u2972", "slarr;": "\u2190", "smallsetminus;": "\u2216", "smashp;": "\u2a33", "smeparsl;": "\u29e4", "smid;": "\u2223", "smile;": "\u2323", "smt;": "\u2aaa", "smte;": "\u2aac", "smtes;": "\u2aac\ufe00", "softcy;": "\u044c", "sol;": "/", "solb;": "\u29c4", "solbar;": "\u233f", "sopf;": "\U0001d564", "spades;": "\u2660", "spadesuit;": "\u2660", "spar;": "\u2225", "sqcap;": "\u2293", "sqcaps;": "\u2293\ufe00", "sqcup;": "\u2294", "sqcups;": "\u2294\ufe00", "sqsub;": "\u228f", "sqsube;": "\u2291", "sqsubset;": "\u228f", "sqsubseteq;": "\u2291", "sqsup;": "\u2290", "sqsupe;": "\u2292", "sqsupset;": "\u2290", "sqsupseteq;": "\u2292", "squ;": "\u25a1", "square;": "\u25a1", "squarf;": "\u25aa", "squf;": "\u25aa", "srarr;": "\u2192", "sscr;": "\U0001d4c8", "ssetmn;": "\u2216", "ssmile;": "\u2323", "sstarf;": "\u22c6", "star;": "\u2606", "starf;": "\u2605", "straightepsilon;": "\u03f5", "straightphi;": "\u03d5", "strns;": "\xaf", "sub;": "\u2282", "subE;": "\u2ac5", "subdot;": "\u2abd", "sube;": "\u2286", "subedot;": "\u2ac3", "submult;": "\u2ac1", "subnE;": "\u2acb", "subne;": "\u228a", "subplus;": "\u2abf", "subrarr;": "\u2979", "subset;": "\u2282", "subseteq;": "\u2286", "subseteqq;": "\u2ac5", "subsetneq;": "\u228a", "subsetneqq;": "\u2acb", "subsim;": "\u2ac7", "subsub;": "\u2ad5", "subsup;": "\u2ad3", "succ;": "\u227b", "succapprox;": "\u2ab8", "succcurlyeq;": "\u227d", "succeq;": "\u2ab0", "succnapprox;": "\u2aba", "succneqq;": "\u2ab6", "succnsim;": "\u22e9", "succsim;": "\u227f", "sum;": "\u2211", "sung;": "\u266a", "sup1": "\xb9", "sup1;": "\xb9", "sup2": "\xb2", "sup2;": "\xb2", "sup3": "\xb3", "sup3;": "\xb3", "sup;": "\u2283", "supE;": "\u2ac6", "supdot;": "\u2abe", "supdsub;": "\u2ad8", "supe;": "\u2287", "supedot;": "\u2ac4", "suphsol;": "\u27c9", "suphsub;": "\u2ad7", "suplarr;": "\u297b", "supmult;": "\u2ac2", "supnE;": "\u2acc", "supne;": "\u228b", "supplus;": "\u2ac0", "supset;": "\u2283", "supseteq;": "\u2287", "supseteqq;": "\u2ac6", "supsetneq;": "\u228b", "supsetneqq;": "\u2acc", "supsim;": "\u2ac8", "supsub;": "\u2ad4", "supsup;": "\u2ad6", 
"swArr;": "\u21d9", "swarhk;": "\u2926", "swarr;": "\u2199", "swarrow;": "\u2199", "swnwar;": "\u292a", "szlig": "\xdf", "szlig;": "\xdf", "target;": "\u2316", "tau;": "\u03c4", "tbrk;": "\u23b4", "tcaron;": "\u0165", "tcedil;": "\u0163", "tcy;": "\u0442", "tdot;": "\u20db", "telrec;": "\u2315", "tfr;": "\U0001d531", "there4;": "\u2234", "therefore;": "\u2234", "theta;": "\u03b8", "thetasym;": "\u03d1", "thetav;": "\u03d1", "thickapprox;": "\u2248", "thicksim;": "\u223c", "thinsp;": "\u2009", "thkap;": "\u2248", "thksim;": "\u223c", "thorn": "\xfe", "thorn;": "\xfe", "tilde;": "\u02dc", "times": "\xd7", "times;": "\xd7", "timesb;": "\u22a0", "timesbar;": "\u2a31", "timesd;": "\u2a30", "tint;": "\u222d", "toea;": "\u2928", "top;": "\u22a4", "topbot;": "\u2336", "topcir;": "\u2af1", "topf;": "\U0001d565", "topfork;": "\u2ada", "tosa;": "\u2929", "tprime;": "\u2034", "trade;": "\u2122", "triangle;": "\u25b5", "triangledown;": "\u25bf", "triangleleft;": "\u25c3", "trianglelefteq;": "\u22b4", "triangleq;": "\u225c", "triangleright;": "\u25b9", "trianglerighteq;": "\u22b5", "tridot;": "\u25ec", "trie;": "\u225c", "triminus;": "\u2a3a", "triplus;": "\u2a39", "trisb;": "\u29cd", "tritime;": "\u2a3b", "trpezium;": "\u23e2", "tscr;": "\U0001d4c9", "tscy;": "\u0446", "tshcy;": "\u045b", "tstrok;": "\u0167", "twixt;": "\u226c", "twoheadleftarrow;": "\u219e", "twoheadrightarrow;": "\u21a0", "uArr;": "\u21d1", "uHar;": "\u2963", "uacute": "\xfa", "uacute;": "\xfa", "uarr;": "\u2191", "ubrcy;": "\u045e", "ubreve;": "\u016d", "ucirc": "\xfb", "ucirc;": "\xfb", "ucy;": "\u0443", "udarr;": "\u21c5", "udblac;": "\u0171", "udhar;": "\u296e", "ufisht;": "\u297e", "ufr;": "\U0001d532", "ugrave": "\xf9", "ugrave;": "\xf9", "uharl;": "\u21bf", "uharr;": "\u21be", "uhblk;": "\u2580", "ulcorn;": "\u231c", "ulcorner;": "\u231c", "ulcrop;": "\u230f", "ultri;": "\u25f8", "umacr;": "\u016b", "uml": "\xa8", "uml;": "\xa8", "uogon;": "\u0173", "uopf;": "\U0001d566", "uparrow;": "\u2191", "updownarrow;": "\u2195", "upharpoonleft;": "\u21bf", "upharpoonright;": "\u21be", "uplus;": "\u228e", "upsi;": "\u03c5", "upsih;": "\u03d2", "upsilon;": "\u03c5", "upuparrows;": "\u21c8", "urcorn;": "\u231d", "urcorner;": "\u231d", "urcrop;": "\u230e", "uring;": "\u016f", "urtri;": "\u25f9", "uscr;": "\U0001d4ca", "utdot;": "\u22f0", "utilde;": "\u0169", "utri;": "\u25b5", "utrif;": "\u25b4", "uuarr;": "\u21c8", "uuml": "\xfc", "uuml;": "\xfc", "uwangle;": "\u29a7", "vArr;": "\u21d5", "vBar;": "\u2ae8", "vBarv;": "\u2ae9", "vDash;": "\u22a8", "vangrt;": "\u299c", "varepsilon;": "\u03f5", "varkappa;": "\u03f0", "varnothing;": "\u2205", "varphi;": "\u03d5", "varpi;": "\u03d6", "varpropto;": "\u221d", "varr;": "\u2195", "varrho;": "\u03f1", "varsigma;": "\u03c2", "varsubsetneq;": "\u228a\ufe00", "varsubsetneqq;": "\u2acb\ufe00", "varsupsetneq;": "\u228b\ufe00", "varsupsetneqq;": "\u2acc\ufe00", "vartheta;": "\u03d1", "vartriangleleft;": "\u22b2", "vartriangleright;": "\u22b3", "vcy;": "\u0432", "vdash;": "\u22a2", "vee;": "\u2228", "veebar;": "\u22bb", "veeeq;": "\u225a", "vellip;": "\u22ee", "verbar;": "|", "vert;": "|", "vfr;": "\U0001d533", "vltri;": "\u22b2", "vnsub;": "\u2282\u20d2", "vnsup;": "\u2283\u20d2", "vopf;": "\U0001d567", "vprop;": "\u221d", "vrtri;": "\u22b3", "vscr;": "\U0001d4cb", "vsubnE;": "\u2acb\ufe00", "vsubne;": "\u228a\ufe00", "vsupnE;": "\u2acc\ufe00", "vsupne;": "\u228b\ufe00", "vzigzag;": "\u299a", "wcirc;": "\u0175", "wedbar;": "\u2a5f", "wedge;": "\u2227", "wedgeq;": "\u2259", "weierp;": "\u2118", "wfr;": 
"\U0001d534", "wopf;": "\U0001d568", "wp;": "\u2118", "wr;": "\u2240", "wreath;": "\u2240", "wscr;": "\U0001d4cc", "xcap;": "\u22c2", "xcirc;": "\u25ef", "xcup;": "\u22c3", "xdtri;": "\u25bd", "xfr;": "\U0001d535", "xhArr;": "\u27fa", "xharr;": "\u27f7", "xi;": "\u03be", "xlArr;": "\u27f8", "xlarr;": "\u27f5", "xmap;": "\u27fc", "xnis;": "\u22fb", "xodot;": "\u2a00", "xopf;": "\U0001d569", "xoplus;": "\u2a01", "xotime;": "\u2a02", "xrArr;": "\u27f9", "xrarr;": "\u27f6", "xscr;": "\U0001d4cd", "xsqcup;": "\u2a06", "xuplus;": "\u2a04", "xutri;": "\u25b3", "xvee;": "\u22c1", "xwedge;": "\u22c0", "yacute": "\xfd", "yacute;": "\xfd", "yacy;": "\u044f", "ycirc;": "\u0177", "ycy;": "\u044b", "yen": "\xa5", "yen;": "\xa5", "yfr;": "\U0001d536", "yicy;": "\u0457", "yopf;": "\U0001d56a", "yscr;": "\U0001d4ce", "yucy;": "\u044e", "yuml": "\xff", "yuml;": "\xff", "zacute;": "\u017a", "zcaron;": "\u017e", "zcy;": "\u0437", "zdot;": "\u017c", "zeetrf;": "\u2128", "zeta;": "\u03b6", "zfr;": "\U0001d537", "zhcy;": "\u0436", "zigrarr;": "\u21dd", "zopf;": "\U0001d56b", "zscr;": "\U0001d4cf", "zwj;": "\u200d", "zwnj;": "\u200c", } replacementCharacters = { 0x0: "\uFFFD", 0x0d: "\u000D", 0x80: "\u20AC", 0x81: "\u0081", 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", 0x85: "\u2026", 0x86: "\u2020", 0x87: "\u2021", 0x88: "\u02C6", 0x89: "\u2030", 0x8A: "\u0160", 0x8B: "\u2039", 0x8C: "\u0152", 0x8D: "\u008D", 0x8E: "\u017D", 0x8F: "\u008F", 0x90: "\u0090", 0x91: "\u2018", 0x92: "\u2019", 0x93: "\u201C", 0x94: "\u201D", 0x95: "\u2022", 0x96: "\u2013", 0x97: "\u2014", 0x98: "\u02DC", 0x99: "\u2122", 0x9A: "\u0161", 0x9B: "\u203A", 0x9C: "\u0153", 0x9D: "\u009D", 0x9E: "\u017E", 0x9F: "\u0178", } encodings = { '437': 'cp437', '850': 'cp850', '852': 'cp852', '855': 'cp855', '857': 'cp857', '860': 'cp860', '861': 'cp861', '862': 'cp862', '863': 'cp863', '865': 'cp865', '866': 'cp866', '869': 'cp869', 'ansix341968': 'ascii', 'ansix341986': 'ascii', 'arabic': 'iso8859-6', 'ascii': 'ascii', 'asmo708': 'iso8859-6', 'big5': 'big5', 'big5hkscs': 'big5hkscs', 'chinese': 'gbk', 'cp037': 'cp037', 'cp1026': 'cp1026', 'cp154': 'ptcp154', 'cp367': 'ascii', 'cp424': 'cp424', 'cp437': 'cp437', 'cp500': 'cp500', 'cp775': 'cp775', 'cp819': 'windows-1252', 'cp850': 'cp850', 'cp852': 'cp852', 'cp855': 'cp855', 'cp857': 'cp857', 'cp860': 'cp860', 'cp861': 'cp861', 'cp862': 'cp862', 'cp863': 'cp863', 'cp864': 'cp864', 'cp865': 'cp865', 'cp866': 'cp866', 'cp869': 'cp869', 'cp936': 'gbk', 'cpgr': 'cp869', 'cpis': 'cp861', 'csascii': 'ascii', 'csbig5': 'big5', 'cseuckr': 'cp949', 'cseucpkdfmtjapanese': 'euc_jp', 'csgb2312': 'gbk', 'cshproman8': 'hp-roman8', 'csibm037': 'cp037', 'csibm1026': 'cp1026', 'csibm424': 'cp424', 'csibm500': 'cp500', 'csibm855': 'cp855', 'csibm857': 'cp857', 'csibm860': 'cp860', 'csibm861': 'cp861', 'csibm863': 'cp863', 'csibm864': 'cp864', 'csibm865': 'cp865', 'csibm866': 'cp866', 'csibm869': 'cp869', 'csiso2022jp': 'iso2022_jp', 'csiso2022jp2': 'iso2022_jp_2', 'csiso2022kr': 'iso2022_kr', 'csiso58gb231280': 'gbk', 'csisolatin1': 'windows-1252', 'csisolatin2': 'iso8859-2', 'csisolatin3': 'iso8859-3', 'csisolatin4': 'iso8859-4', 'csisolatin5': 'windows-1254', 'csisolatin6': 'iso8859-10', 'csisolatinarabic': 'iso8859-6', 'csisolatincyrillic': 'iso8859-5', 'csisolatingreek': 'iso8859-7', 'csisolatinhebrew': 'iso8859-8', 'cskoi8r': 'koi8-r', 'csksc56011987': 'cp949', 'cspc775baltic': 'cp775', 'cspc850multilingual': 'cp850', 'cspc862latinhebrew': 'cp862', 'cspc8codepage437': 'cp437', 'cspcp852': 
    'cspcp852': 'cp852', 'csptcp154': 'ptcp154', 'csshiftjis': 'shift_jis',
    'csunicode11utf7': 'utf-7', 'cyrillic': 'iso8859-5',
    'cyrillicasian': 'ptcp154', 'ebcdiccpbe': 'cp500', 'ebcdiccpca': 'cp037',
    'ebcdiccpch': 'cp500', 'ebcdiccphe': 'cp424', 'ebcdiccpnl': 'cp037',
    'ebcdiccpus': 'cp037', 'ebcdiccpwt': 'cp037', 'ecma114': 'iso8859-6',
    'ecma118': 'iso8859-7', 'elot928': 'iso8859-7', 'eucjp': 'euc_jp',
    'euckr': 'cp949', 'extendedunixcodepackedformatforjapanese': 'euc_jp',
    'gb18030': 'gb18030', 'gb2312': 'gbk', 'gb231280': 'gbk', 'gbk': 'gbk',
    'greek': 'iso8859-7', 'greek8': 'iso8859-7', 'hebrew': 'iso8859-8',
    'hproman8': 'hp-roman8', 'hzgb2312': 'hz', 'ibm037': 'cp037',
    'ibm1026': 'cp1026', 'ibm367': 'ascii', 'ibm424': 'cp424',
    'ibm437': 'cp437', 'ibm500': 'cp500', 'ibm775': 'cp775',
    'ibm819': 'windows-1252', 'ibm850': 'cp850', 'ibm852': 'cp852',
    'ibm855': 'cp855', 'ibm857': 'cp857', 'ibm860': 'cp860',
    'ibm861': 'cp861', 'ibm862': 'cp862', 'ibm863': 'cp863',
    'ibm864': 'cp864', 'ibm865': 'cp865', 'ibm866': 'cp866',
    'ibm869': 'cp869', 'iso2022jp': 'iso2022_jp',
    'iso2022jp2': 'iso2022_jp_2', 'iso2022kr': 'iso2022_kr',
    'iso646irv1991': 'ascii', 'iso646us': 'ascii',
    'iso88591': 'windows-1252', 'iso885910': 'iso8859-10',
    'iso8859101992': 'iso8859-10', 'iso885911987': 'windows-1252',
    'iso885913': 'iso8859-13', 'iso885914': 'iso8859-14',
    'iso8859141998': 'iso8859-14', 'iso885915': 'iso8859-15',
    'iso885916': 'iso8859-16', 'iso8859162001': 'iso8859-16',
    'iso88592': 'iso8859-2', 'iso885921987': 'iso8859-2',
    'iso88593': 'iso8859-3', 'iso885931988': 'iso8859-3',
    'iso88594': 'iso8859-4', 'iso885941988': 'iso8859-4',
    'iso88595': 'iso8859-5', 'iso885951988': 'iso8859-5',
    'iso88596': 'iso8859-6', 'iso885961987': 'iso8859-6',
    'iso88597': 'iso8859-7', 'iso885971987': 'iso8859-7',
    'iso88598': 'iso8859-8', 'iso885981988': 'iso8859-8',
    'iso88599': 'windows-1254', 'iso885991989': 'windows-1254',
    'isoceltic': 'iso8859-14', 'isoir100': 'windows-1252',
    'isoir101': 'iso8859-2', 'isoir109': 'iso8859-3',
    'isoir110': 'iso8859-4', 'isoir126': 'iso8859-7',
    'isoir127': 'iso8859-6', 'isoir138': 'iso8859-8',
    'isoir144': 'iso8859-5', 'isoir148': 'windows-1254',
    'isoir149': 'cp949', 'isoir157': 'iso8859-10',
    'isoir199': 'iso8859-14', 'isoir226': 'iso8859-16', 'isoir58': 'gbk',
    'isoir6': 'ascii', 'koi8r': 'koi8-r', 'koi8u': 'koi8-u',
    'korean': 'cp949', 'ksc5601': 'cp949', 'ksc56011987': 'cp949',
    'ksc56011989': 'cp949', 'l1': 'windows-1252', 'l10': 'iso8859-16',
    'l2': 'iso8859-2', 'l3': 'iso8859-3', 'l4': 'iso8859-4',
    'l5': 'windows-1254', 'l6': 'iso8859-10', 'l8': 'iso8859-14',
    'latin1': 'windows-1252', 'latin10': 'iso8859-16', 'latin2': 'iso8859-2',
    'latin3': 'iso8859-3', 'latin4': 'iso8859-4', 'latin5': 'windows-1254',
    'latin6': 'iso8859-10', 'latin8': 'iso8859-14', 'latin9': 'iso8859-15',
    'ms936': 'gbk', 'mskanji': 'shift_jis', 'pt154': 'ptcp154',
    'ptcp154': 'ptcp154', 'r8': 'hp-roman8', 'roman8': 'hp-roman8',
    'shiftjis': 'shift_jis', 'tis620': 'cp874', 'unicode11utf7': 'utf-7',
    'us': 'ascii', 'usascii': 'ascii', 'utf16': 'utf-16',
    'utf16be': 'utf-16-be', 'utf16le': 'utf-16-le', 'utf8': 'utf-8',
    'windows1250': 'cp1250', 'windows1251': 'cp1251',
    'windows1252': 'cp1252', 'windows1253': 'cp1253',
    'windows1254': 'cp1254', 'windows1255': 'cp1255',
    'windows1256': 'cp1256', 'windows1257': 'cp1257',
    'windows1258': 'cp1258', 'windows936': 'gbk', 'x-x-big5': 'big5'}

tokenTypes = {
    "Doctype": 0,
    "Characters": 1,
    "SpaceCharacters": 2,
    "StartTag": 3,
    "EndTag": 4,
    "EmptyTag": 5,
    "Comment": 6,
    "ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]))

prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"


class DataLossWarning(UserWarning):
    pass


class ReparseException(Exception):
    pass
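
# A few hedged sanity checks showing how the tables above are meant to be
# read (illustrative only; these assertions are not part of the original
# module):
#
#     assert entities["amp;"] == "&" and entities["amp"] == "&"  # legacy form
#     assert replacementCharacters[0x80] == "\u20AC"  # windows-1252 C1 remap
#     assert encodings["latin1"] == "windows-1252"    # spec-mandated alias
#     assert tokenTypes["StartTag"] in tagTokenTypes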
size: 87,346
language: Python
extension: .py
total_lines: 3,071
avg_line_length: 23.267991
max_line_length: 94
alphanum_fraction: 0.516975
repo_name: CouchPotato/CouchPotatoServer
repo_stars: 3,869
repo_forks: 1,214
repo_open_issues: 1,266
repo_license: GPL-3.0
repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)

id: 7,562
file_name: html5parser.py
file_path: CouchPotato_CouchPotatoServer/libs/html5lib/html5parser.py
from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass

import types

from . import inputstream
from . import tokenizer

from . import treebuilders
from .treebuilders._base import Marker

from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap


def parse(doc, treebuilder="etree", encoding=None,
          namespaceHTMLElements=True):
    """Parse a string or file-like object into a tree"""
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parse(doc, encoding=encoding)


def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
                  namespaceHTMLElements=True):
    tb = treebuilders.getTreeBuilder(treebuilder)
    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
    return p.parseFragment(doc, container=container, encoding=encoding)


def method_decorator_metaclass(function):
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            for attributeName, attribute in classDict.items():
                if isinstance(attribute, types.FunctionType):
                    attribute = function(attribute)

                classDict[attributeName] = attribute
            return type.__new__(meta, classname, bases, classDict)
    return Decorated

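
# A minimal usage sketch of the two convenience entry points above (assuming
# this package is importable as ``html5lib``; the snippet is illustrative and
# not part of the original module):
#
#     import html5lib
#     tree = html5lib.parse("<p>Hello <b>world")          # whole document
#     frag = html5lib.parseFragment("<b>bold", container="div")
#
# Both tolerate malformed input: the unclosed tags above are recovered via
# the HTML5 tree-construction algorithm instead of raising an error.
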
class HTMLParser(object):
    """HTML parser. Generates a tree structure from a stream of (possibly
    malformed) HTML"""

    def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
                 strict=False, namespaceHTMLElements=True, debug=False):
        """
        strict - raise an exception when a parse error is encountered

        tree - a treebuilder class controlling the type of tree that will be
        returned. Built in treebuilders can be accessed through
        html5lib.treebuilders.getTreeBuilder(treeType)

        tokenizer - a class that provides a stream of tokens to the
        treebuilder. This may be replaced for e.g. a sanitizer which
        converts some tags to text
        """

        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.tokenizer_class = tokenizer
        self.errors = []

        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])

    def _parse(self, stream, innerHTML=False, container="div",
               encoding=None, parseMeta=True, useChardet=True, **kwargs):

        self.innerHTMLMode = innerHTML
        self.container = container
        self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
                                              parseMeta=parseMeta,
                                              useChardet=useChardet,
                                              parser=self, **kwargs)
        self.reset()

        while True:
            try:
                self.mainLoop()
                break
            except ReparseException:
                self.reset()

    def reset(self):
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass

            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True

    def isHTMLIntegrationPoint(self, element):
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements
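
    # Orientation note (added; behavior as in the code itself): mainLoop
    # below decides, per token, between the current insertion-mode phase and
    # the "inForeignContent" phase. The rough intuition is that tokens inside
    # <svg>/<math> subtrees are routed to foreign content unless the current
    # node is an HTML or text integration point (checked by the two
    # predicates above), e.g. characters inside <math><mtext> are handled by
    # the normal HTML phase.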

    def mainLoop(self):
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            new_token = token
            while new_token is not None:
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken,
                                  SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            if (type == StartTagToken and token["selfClosing"] and
                    not token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                assert self.phase not in phases

    def normalizedTokens(self):
        for token in self.tokenizer:
            yield self.normalizeToken(token)
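
    # normalizedTokens() is the only place tokens enter tree construction,
    # so normalizeToken() further below is guaranteed to have run on every
    # token the phases see; in particular, duplicated start-tag attributes
    # are already collapsed (first occurrence wins) by then. (Comment added
    # for orientation.)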

    def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
        """Parse a HTML document into a well-formed tree

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, innerHTML=False, encoding=encoding,
                    parseMeta=parseMeta, useChardet=useChardet)
        return self.tree.getDocument()

    def parseFragment(self, stream, container="div", encoding=None,
                      parseMeta=False, useChardet=True):
        """Parse a HTML fragment into a well-formed tree fragment

        container - name of the element we're setting the innerHTML property
        if set to None, default to 'div'

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, True, container=container, encoding=encoding)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars={}):
        # XXX The idea is to make errorcode mandatory.
        self.errors.append((self.tokenizer.stream.position(), errorcode,
                            datavars))
        if self.strict:
            raise ParseError

    def normalizeToken(self, token):
        """ HTML5 specific normalizations to the token stream """

        if token["type"] == tokenTypes["StartTag"]:
            token["data"] = dict(token["data"][::-1])

        return token

    def adjustMathMLAttributes(self, token):
        replacements = {"definitionurl": "definitionURL"}
        for k, v in replacements.items():
            if k in token["data"]:
                token["data"][v] = token["data"][k]
                del token["data"][k]

    def adjustSVGAttributes(self, token):
        replacements = {
            "attributename": "attributeName", "attributetype": "attributeType",
            "basefrequency": "baseFrequency", "baseprofile": "baseProfile",
            "calcmode": "calcMode", "clippathunits": "clipPathUnits",
            "contentscripttype": "contentScriptType",
            "contentstyletype": "contentStyleType",
            "diffuseconstant": "diffuseConstant", "edgemode": "edgeMode",
            "externalresourcesrequired": "externalResourcesRequired",
            "filterres": "filterRes", "filterunits": "filterUnits",
            "glyphref": "glyphRef", "gradienttransform": "gradientTransform",
            "gradientunits": "gradientUnits", "kernelmatrix": "kernelMatrix",
            "kernelunitlength": "kernelUnitLength", "keypoints": "keyPoints",
            "keysplines": "keySplines", "keytimes": "keyTimes",
            "lengthadjust": "lengthAdjust",
            "limitingconeangle": "limitingConeAngle",
            "markerheight": "markerHeight", "markerunits": "markerUnits",
            "markerwidth": "markerWidth",
            "maskcontentunits": "maskContentUnits", "maskunits": "maskUnits",
            "numoctaves": "numOctaves", "pathlength": "pathLength",
            "patterncontentunits": "patternContentUnits",
            "patterntransform": "patternTransform",
            "patternunits": "patternUnits", "pointsatx": "pointsAtX",
            "pointsaty": "pointsAtY", "pointsatz": "pointsAtZ",
            "preservealpha": "preserveAlpha",
            "preserveaspectratio": "preserveAspectRatio",
            "primitiveunits": "primitiveUnits", "refx": "refX", "refy": "refY",
            "repeatcount": "repeatCount", "repeatdur": "repeatDur",
            "requiredextensions": "requiredExtensions",
            "requiredfeatures": "requiredFeatures",
            "specularconstant": "specularConstant",
            "specularexponent": "specularExponent",
            "spreadmethod": "spreadMethod", "startoffset": "startOffset",
            "stddeviation": "stdDeviation", "stitchtiles": "stitchTiles",
            "surfacescale": "surfaceScale", "systemlanguage": "systemLanguage",
            "tablevalues": "tableValues", "targetx": "targetX",
            "targety": "targetY", "textlength": "textLength",
            "viewbox": "viewBox", "viewtarget": "viewTarget",
            "xchannelselector": "xChannelSelector",
            "ychannelselector": "yChannelSelector", "zoomandpan": "zoomAndPan"
        }
        for originalName in list(token["data"].keys()):
            if originalName in replacements:
                svgName = replacements[originalName]
                token["data"][svgName] = token["data"][originalName]
                del token["data"][originalName]

    def adjustForeignAttributes(self, token):
        replacements = adjustForeignAttributesMap

        for originalName in token["data"].keys():
            if originalName in replacements:
                foreignName = replacements[originalName]
                token["data"][foreignName] = token["data"][originalName]
                del token["data"][originalName]

    def reparseTokenNormal(self, token):
        self.parser.phase()
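
    # Hedged example of what the adjust* helpers above do to a start-tag
    # token once its attributes are a dict (the token values here are
    # illustrative, not from the original file):
    #
    #     token = {"type": tokenTypes["StartTag"], "name": "svg",
    #              "data": {"viewbox": "0 0 10 10"}}
    #     parser.adjustSVGAttributes(token)
    #     assert token["data"] == {"viewBox": "0 0 10 10"}
    #
    # i.e. the tokenizer lowercases attribute names, and these tables restore
    # the mixed-case names that SVG/MathML require.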

    def resetInsertionMode(self):
        # The name of this method is mostly historical.  (It's also used in
        # the specification.)
        last = False
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }

        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML

            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML

            if not last and node.namespace != self.tree.defaultNamespace:
                continue

            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                new_phase = self.phases["inBody"]
                break

        self.phase = new_phase

    def parseRCDataRawtext(self, token, contentType):
        """Generic RCDATA/RAWTEXT Parsing algorithm
        contentType - RCDATA or RAWTEXT
        """
        assert contentType in ("RAWTEXT", "RCDATA")

        self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        self.originalPhase = self.phase

        self.phase = self.phases["text"]

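
# getPhases() below builds one helper class per insertion mode of the HTML5
# algorithm. A rough sketch of how the parser consumes the resulting table
# (names as defined in this module; the flow itself is simplified):
#
#     phases = getPhases(debug=False)      # {"initial": InitialPhase, ...}
#     parser.phase = parser.phases["initial"]
#     new_token = parser.phase.processStartTag(token)
#     # A non-None return hands the token back so mainLoop re-dispatches it
#     # in whatever phase the handler just switched to.
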
def getPhases(debug):
    def log(function):
        """Logger that records which phase processes each token"""
        type_names = dict((value, key) for key, value in
                          constants.tokenTypes.items())

        def wrapped(self, *args, **kwargs):
            if function.__name__.startswith("process") and len(args) > 0:
                token = args[0]
                try:
                    info = {"type": type_names[token['type']]}
                except:
                    raise
                if token['type'] in constants.tagTokenTypes:
                    info["name"] = token['name']

                self.parser.log.append((self.parser.tokenizer.state.__name__,
                                        self.parser.phase.__class__.__name__,
                                        self.__class__.__name__,
                                        function.__name__,
                                        info))
                return function(self, *args, **kwargs)
            else:
                return function(self, *args, **kwargs)
        return wrapped

    def getMetaclass(use_metaclass, metaclass_func):
        if use_metaclass:
            return method_decorator_metaclass(metaclass_func)
        else:
            return type

    class Phase(with_metaclass(getMetaclass(debug, log))):
        """Base class for helper object that implements each phase of
        processing
        """

        def __init__(self, parser, tree):
            self.parser = parser
            self.tree = tree

        def processEOF(self):
            raise NotImplementedError

        def processComment(self, token):
            # For most phases the following is correct. Where it's not it
            # will be overridden.
            self.tree.insertComment(token, self.tree.openElements[-1])

        def processDoctype(self, token):
            self.parser.parseError("unexpected-doctype")

        def processCharacters(self, token):
            self.tree.insertText(token["data"])

        def processSpaceCharacters(self, token):
            self.tree.insertText(token["data"])

        def processStartTag(self, token):
            return self.startTagHandler[token["name"]](token)

        def startTagHtml(self, token):
            if not self.parser.firstStartTag and token["name"] == "html":
                self.parser.parseError("non-html-root")
            # XXX Need a check here to see if the first start tag token
            # emitted is this token... If it's not, invoke
            # self.parser.parseError().
            for attr, value in token["data"].items():
                if attr not in self.tree.openElements[0].attributes:
                    self.tree.openElements[0].attributes[attr] = value
            self.parser.firstStartTag = False

        def processEndTag(self, token):
            return self.endTagHandler[token["name"]](token)

    class InitialPhase(Phase):
        def processSpaceCharacters(self, token):
            pass

        def processComment(self, token):
            self.tree.insertComment(token, self.tree.document)

        def processDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]
            correct = token["correct"]

            if (name != "html" or publicId is not None or
                    systemId is not None and systemId != "about:legacy-compat"):
                self.parser.parseError("unknown-doctype")

            if publicId is None:
                publicId = ""

            self.tree.insertDoctype(token)

            if publicId != "":
                publicId = publicId.translate(asciiUpper2Lower)
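
            # The membership tests below implement the spec's quirks-mode
            # sniffing: the long list of legacy public identifiers selects
            # "quirks", and a narrower set selects "limited quirks". For
            # instance, <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
            # leaves the parser with compatMode == "quirks". (Comment added
            # for orientation.)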
            if (not correct or token["name"] != "html" or
                    publicId.startswith(
                        ("+//silmaril//dtd html pro v0r11 19970101//",
                         "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
                         "-//as//dtd html 3.0 aswedit + extensions//",
                         "-//ietf//dtd html 2.0 level 1//",
                         "-//ietf//dtd html 2.0 level 2//",
                         "-//ietf//dtd html 2.0 strict level 1//",
                         "-//ietf//dtd html 2.0 strict level 2//",
                         "-//ietf//dtd html 2.0 strict//",
                         "-//ietf//dtd html 2.0//",
                         "-//ietf//dtd html 2.1e//",
                         "-//ietf//dtd html 3.0//",
                         "-//ietf//dtd html 3.2 final//",
                         "-//ietf//dtd html 3.2//",
                         "-//ietf//dtd html 3//",
                         "-//ietf//dtd html level 0//",
                         "-//ietf//dtd html level 1//",
                         "-//ietf//dtd html level 2//",
                         "-//ietf//dtd html level 3//",
                         "-//ietf//dtd html strict level 0//",
                         "-//ietf//dtd html strict level 1//",
                         "-//ietf//dtd html strict level 2//",
                         "-//ietf//dtd html strict level 3//",
                         "-//ietf//dtd html strict//",
                         "-//ietf//dtd html//",
                         "-//metrius//dtd metrius presentational//",
                         "-//microsoft//dtd internet explorer 2.0 html strict//",
                         "-//microsoft//dtd internet explorer 2.0 html//",
                         "-//microsoft//dtd internet explorer 2.0 tables//",
                         "-//microsoft//dtd internet explorer 3.0 html strict//",
                         "-//microsoft//dtd internet explorer 3.0 html//",
                         "-//microsoft//dtd internet explorer 3.0 tables//",
                         "-//netscape comm. corp.//dtd html//",
                         "-//netscape comm. corp.//dtd strict html//",
                         "-//o'reilly and associates//dtd html 2.0//",
                         "-//o'reilly and associates//dtd html extended 1.0//",
                         "-//o'reilly and associates//dtd html extended relaxed 1.0//",
                         "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
                         "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
                         "-//spyglass//dtd html 2.0 extended//",
                         "-//sq//dtd html 2.0 hotmetal + extensions//",
                         "-//sun microsystems corp.//dtd hotjava html//",
                         "-//sun microsystems corp.//dtd hotjava strict html//",
                         "-//w3c//dtd html 3 1995-03-24//",
                         "-//w3c//dtd html 3.2 draft//",
                         "-//w3c//dtd html 3.2 final//",
                         "-//w3c//dtd html 3.2//",
                         "-//w3c//dtd html 3.2s draft//",
                         "-//w3c//dtd html 4.0 frameset//",
                         "-//w3c//dtd html 4.0 transitional//",
                         "-//w3c//dtd html experimental 19960712//",
                         "-//w3c//dtd html experimental 970421//",
                         "-//w3c//dtd w3 html//",
                         "-//w3o//dtd w3 html 3.0//",
                         "-//webtechs//dtd mozilla html 2.0//",
                         "-//webtechs//dtd mozilla html//")) or
                    publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
                                 "-/w3c/dtd html 4.0 transitional/en",
                                 "html") or
                    publicId.startswith(
                        ("-//w3c//dtd html 4.01 frameset//",
                         "-//w3c//dtd html 4.01 transitional//")) and
                    systemId is None or
                    systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
                self.parser.compatMode = "quirks"
            elif (publicId.startswith(
                    ("-//w3c//dtd xhtml 1.0 frameset//",
                     "-//w3c//dtd xhtml 1.0 transitional//")) or
                    publicId.startswith(
                        ("-//w3c//dtd html 4.01 frameset//",
                         "-//w3c//dtd html 4.01 transitional//")) and
                    systemId is not None):
                self.parser.compatMode = "limited quirks"

            self.parser.phase = self.parser.phases["beforeHtml"]

        def anythingElse(self):
            self.parser.compatMode = "quirks"
            self.parser.phase = self.parser.phases["beforeHtml"]

        def processCharacters(self, token):
            self.parser.parseError("expected-doctype-but-got-chars")
            self.anythingElse()
            return token

        def processStartTag(self, token):
            self.parser.parseError("expected-doctype-but-got-start-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token

        def processEndTag(self, token):
            self.parser.parseError("expected-doctype-but-got-end-tag",
                                   {"name": token["name"]})
            self.anythingElse()
            return token

        def processEOF(self):
            self.parser.parseError("expected-doctype-but-got-eof")
            self.anythingElse()
            return True

    class BeforeHtmlPhase(Phase):
        # helper methods
        def insertHtmlElement(self):
            self.tree.insertRoot(impliedTagToken("html", "StartTag"))
            self.parser.phase = self.parser.phases["beforeHead"]

        # other
        def processEOF(self):
            self.insertHtmlElement()
            return True

        def processComment(self, token):
            self.tree.insertComment(token, self.tree.document)

        def processSpaceCharacters(self, token):
            pass

        def processCharacters(self, token):
            self.insertHtmlElement()
            return token

        def processStartTag(self, token):
            if token["name"] == "html":
                self.parser.firstStartTag = True
            self.insertHtmlElement()
            return token

        def processEndTag(self, token):
            if token["name"] not in ("head", "body", "html", "br"):
                self.parser.parseError("unexpected-end-tag-before-html",
                                       {"name": token["name"]})
            else:
                self.insertHtmlElement()
                return token
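
    # impliedTagToken (presumably defined later in this file; it is used
    # throughout the phases) synthesizes a token for a tag the markup never
    # contained. That is how the phases below conjure the implied
    # <html>/<head>/<body> elements, e.g.
    # impliedTagToken("head", "StartTag") feeds startTagHead exactly as a
    # literal "<head>" would. (Comment added for orientation.)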
        def processEOF(self):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return True

        def processSpaceCharacters(self, token):
            pass

        def processCharacters(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagHead(self, token):
            self.tree.insertElement(token)
            self.tree.headPointer = self.tree.openElements[-1]
            self.parser.phase = self.parser.phases["inHead"]

        def startTagOther(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def endTagImplyHead(self, token):
            self.startTagHead(impliedTagToken("head", "StartTag"))
            return token

        def endTagOther(self, token):
            self.parser.parseError("end-tag-after-implied-root",
                                   {"name": token["name"]})

    class InHeadPhase(Phase):
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("title", self.startTagTitle),
                (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
                ("script", self.startTagScript),
                (("base", "basefont", "bgsound", "command", "link"),
                 self.startTagBaseLinkCommand),
                ("meta", self.startTagMeta),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                ("head", self.endTagHead),
                (("br", "html", "body"), self.endTagHtmlBodyBr)
            ])
            self.endTagHandler.default = self.endTagOther

        # the real thing
        def processEOF(self):
            self.anythingElse()
            return True

        def processCharacters(self, token):
            self.anythingElse()
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagHead(self, token):
            self.parser.parseError("two-heads-are-not-better-than-one")

        def startTagBaseLinkCommand(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

        def startTagMeta(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

            attributes = token["data"]
            if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
                if "charset" in attributes:
                    self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
                elif ("content" in attributes and
                      "http-equiv" in attributes and
                      attributes["http-equiv"].lower() == "content-type"):
                    # Encoding it as UTF-8 here is a hack, as really we should
                    # pass the abstract Unicode string, and just use the
                    # ContentAttrParser on that, but using UTF-8 allows all
                    # chars to be encoded and as an ASCII-superset works.
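                    # Illustrative example (added, not from the original
                    # comments): a tentatively-decoded stream containing
                    #     <meta http-equiv="Content-Type"
                    #           content="text/html; charset=ISO-8859-2">
                    # has "ISO-8859-2" extracted by ContentAttrParser below,
                    # and changeEncoding() restarts parsing if the first
                    # encoding guess turns out to be wrong.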
                    data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                    parser = inputstream.ContentAttrParser(data)
                    codec = parser.parse()
                    self.parser.tokenizer.stream.changeEncoding(codec)

        def startTagTitle(self, token):
            self.parser.parseRCDataRawtext(token, "RCDATA")

        def startTagNoScriptNoFramesStyle(self, token):
            # Need to decide whether to implement the scripting-disabled case
            self.parser.parseRCDataRawtext(token, "RAWTEXT")

        def startTagScript(self, token):
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
            self.parser.originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["text"]

        def startTagOther(self, token):
            self.anythingElse()
            return token

        def endTagHead(self, token):
            node = self.parser.tree.openElements.pop()
            assert node.name == "head", "Expected head got %s" % node.name
            self.parser.phase = self.parser.phases["afterHead"]

        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def anythingElse(self):
            self.endTagHead(impliedTagToken("head"))

    # XXX If we implement a parser for which scripting is disabled we need to
    # implement this phase.
    #
    # class InHeadNoScriptPhase(Phase):

    class AfterHeadPhase(Phase):
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("body", self.startTagBody),
                ("frameset", self.startTagFrameset),
                (("base", "basefont", "bgsound", "link", "meta", "noframes",
                  "script", "style", "title"),
                 self.startTagFromHead),
                ("head", self.startTagHead)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                (("body", "html", "br"), self.endTagHtmlBodyBr)
            ])
            self.endTagHandler.default = self.endTagOther

        def processEOF(self):
            self.anythingElse()
            return True

        def processCharacters(self, token):
            self.anythingElse()
            return token

        def startTagHtml(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def startTagBody(self, token):
            self.parser.framesetOK = False
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inBody"]

        def startTagFrameset(self, token):
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inFrameset"]

        def startTagFromHead(self, token):
            self.parser.parseError("unexpected-start-tag-out-of-my-head",
                                   {"name": token["name"]})
            self.tree.openElements.append(self.tree.headPointer)
            self.parser.phases["inHead"].processStartTag(token)
            for node in self.tree.openElements[::-1]:
                if node.name == "head":
                    self.tree.openElements.remove(node)
                    break

        def startTagHead(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})

        def startTagOther(self, token):
            self.anythingElse()
            return token

        def endTagHtmlBodyBr(self, token):
            self.anythingElse()
            return token

        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def anythingElse(self):
            self.tree.insertElement(impliedTagToken("body", "StartTag"))
            self.parser.phase = self.parser.phases["inBody"]
            self.parser.framesetOK = True

    class InBodyPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
        # the really-really-really-very crazy mode
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

            # Keep a ref to this for special handling of whitespace in <pre>
            self.processSpaceCharactersNonPre = self.processSpaceCharacters
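            # Illustrative example (an added note): because the non-<pre>
            # handler is saved here, "<pre>\nfoo</pre>" parses the same as
            # "<pre>foo</pre>"; processSpaceCharactersDropNewline below
            # swallows one leading newline in <pre>/<listing>/<textarea>.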
("html", self.startTagHtml), (("base", "basefont", "bgsound", "command", "link", "meta", "noframes", "script", "style", "title"), self.startTagProcessInHead), ("body", self.startTagBody), ("frameset", self.startTagFrameset), (("address", "article", "aside", "blockquote", "center", "details", "details", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p", "section", "summary", "ul"), self.startTagCloseP), (headingElements, self.startTagHeading), (("pre", "listing"), self.startTagPreListing), ("form", self.startTagForm), (("li", "dd", "dt"), self.startTagListItem), ("plaintext", self.startTagPlaintext), ("a", self.startTagA), (("b", "big", "code", "em", "font", "i", "s", "small", "strike", "strong", "tt", "u"), self.startTagFormatting), ("nobr", self.startTagNobr), ("button", self.startTagButton), (("applet", "marquee", "object"), self.startTagAppletMarqueeObject), ("xmp", self.startTagXmp), ("table", self.startTagTable), (("area", "br", "embed", "img", "keygen", "wbr"), self.startTagVoidFormatting), (("param", "source", "track"), self.startTagParamSource), ("input", self.startTagInput), ("hr", self.startTagHr), ("image", self.startTagImage), ("isindex", self.startTagIsIndex), ("textarea", self.startTagTextarea), ("iframe", self.startTagIFrame), (("noembed", "noframes", "noscript"), self.startTagRawtext), ("select", self.startTagSelect), (("rp", "rt"), self.startTagRpRt), (("option", "optgroup"), self.startTagOpt), (("math"), self.startTagMath), (("svg"), self.startTagSvg), (("caption", "col", "colgroup", "frame", "head", "tbody", "td", "tfoot", "th", "thead", "tr"), self.startTagMisplaced) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("body", self.endTagBody), ("html", self.endTagHtml), (("address", "article", "aside", "blockquote", "button", "center", "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure", "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre", "section", "summary", "ul"), self.endTagBlock), ("form", self.endTagForm), ("p", self.endTagP), (("dd", "dt", "li"), self.endTagListItem), (headingElements, self.endTagHeading), (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small", "strike", "strong", "tt", "u"), self.endTagFormatting), (("applet", "marquee", "object"), self.endTagAppletMarqueeObject), ("br", self.endTagBr), ]) self.endTagHandler.default = self.endTagOther def isMatchingFormattingElement(self, node1, node2): if node1.name != node2.name or node1.namespace != node2.namespace: return False elif len(node1.attributes) != len(node2.attributes): return False else: attributes1 = sorted(node1.attributes.items()) attributes2 = sorted(node2.attributes.items()) for attr1, attr2 in zip(attributes1, attributes2): if attr1 != attr2: return False return True # helper def addFormattingElement(self, token): self.tree.insertElement(token) element = self.tree.openElements[-1] matchingElements = [] for node in self.tree.activeFormattingElements[::-1]: if node is Marker: break elif self.isMatchingFormattingElement(node, element): matchingElements.append(node) assert len(matchingElements) <= 3 if len(matchingElements) == 3: self.tree.activeFormattingElements.remove(matchingElements[-1]) self.tree.activeFormattingElements.append(element) # the real deal def processEOF(self): allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td", "tfoot", "th", "thead", "tr", "body", "html")) for node in 
        # the real deal
        def processEOF(self):
            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
                                          "tfoot", "th", "thead", "tr", "body",
                                          "html"))
            for node in self.tree.openElements[::-1]:
                if node.name not in allowed_elements:
                    self.parser.parseError("expected-closing-tag-but-got-eof")
                    break
            # Stop parsing

        def processSpaceCharactersDropNewline(self, token):
            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
            # want to drop leading newlines
            data = token["data"]
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
            if (data.startswith("\n") and
                self.tree.openElements[-1].name in ("pre", "listing", "textarea")
                    and not self.tree.openElements[-1].hasContent()):
                data = data[1:]
            if data:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertText(data)

        def processCharacters(self, token):
            if token["data"] == "\u0000":
                # The tokenizer should always emit null on its own
                return
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])
            # This must be bad for performance
            if (self.parser.framesetOK and
                    any([char not in spaceCharacters
                         for char in token["data"]])):
                self.parser.framesetOK = False

        def processSpaceCharacters(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertText(token["data"])

        def startTagProcessInHead(self, token):
            return self.parser.phases["inHead"].processStartTag(token)

        def startTagBody(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": "body"})
            if (len(self.tree.openElements) == 1
                    or self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            else:
                self.parser.framesetOK = False
                for attr, value in token["data"].items():
                    if attr not in self.tree.openElements[1].attributes:
                        self.tree.openElements[1].attributes[attr] = value

        def startTagFrameset(self, token):
            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
            if (len(self.tree.openElements) == 1 or
                    self.tree.openElements[1].name != "body"):
                assert self.parser.innerHTML
            elif not self.parser.framesetOK:
                pass
            else:
                if self.tree.openElements[1].parent:
                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
                while self.tree.openElements[-1].name != "html":
                    self.tree.openElements.pop()
                self.tree.insertElement(token)
                self.parser.phase = self.parser.phases["inFrameset"]

        def startTagCloseP(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)

        def startTagPreListing(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.processSpaceCharacters = self.processSpaceCharactersDropNewline

        def startTagForm(self, token):
            if self.tree.formPointer:
                self.parser.parseError("unexpected-start-tag", {"name": "form"})
            else:
                if self.tree.elementInScope("p", variant="button"):
                    self.endTagP(impliedTagToken("p"))
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]

        def startTagListItem(self, token):
            self.parser.framesetOK = False

            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    break

            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))

            self.tree.insertElement(token)
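        # Illustrative example (added): startTagListItem gives
        #     <ul><li>one<li>two</ul>
        # two sibling <li> elements, because the second <li> first ends the
        # still-open one via an implied </li>.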
        def startTagPlaintext(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState

        def startTagHeading(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            if self.tree.openElements[-1].name in headingElements:
                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
                self.tree.openElements.pop()
            self.tree.insertElement(token)

        def startTagA(self, token):
            afeAElement = self.tree.elementInActiveFormattingElements("a")
            if afeAElement:
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "a", "endName": "a"})
                self.endTagFormatting(impliedTagToken("a"))
                if afeAElement in self.tree.openElements:
                    self.tree.openElements.remove(afeAElement)
                if afeAElement in self.tree.activeFormattingElements:
                    self.tree.activeFormattingElements.remove(afeAElement)
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)

        def startTagFormatting(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)

        def startTagNobr(self, token):
            self.tree.reconstructActiveFormattingElements()
            if self.tree.elementInScope("nobr"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "nobr", "endName": "nobr"})
                self.processEndTag(impliedTagToken("nobr"))
                # XXX Need tests that trigger the following
                self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)

        def startTagButton(self, token):
            if self.tree.elementInScope("button"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "button", "endName": "button"})
                self.processEndTag(impliedTagToken("button"))
                return token
            else:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertElement(token)
                self.parser.framesetOK = False

        def startTagAppletMarqueeObject(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.activeFormattingElements.append(Marker)
            self.parser.framesetOK = False

        def startTagXmp(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.reconstructActiveFormattingElements()
            self.parser.framesetOK = False
            self.parser.parseRCDataRawtext(token, "RAWTEXT")

        def startTagTable(self, token):
            if self.parser.compatMode != "quirks":
                if self.tree.elementInScope("p", variant="button"):
                    self.processEndTag(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            self.parser.phase = self.parser.phases["inTable"]

        def startTagVoidFormatting(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False

        def startTagInput(self, token):
            framesetOK = self.parser.framesetOK
            self.startTagVoidFormatting(token)
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # input type=hidden doesn't change framesetOK
                self.parser.framesetOK = framesetOK

        def startTagParamSource(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

        def startTagHr(self, token):
            if self.tree.elementInScope("p", variant="button"):
                self.endTagP(impliedTagToken("p"))
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
            self.parser.framesetOK = False
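        # Illustrative note (added): the void elements handled above (<br>,
        # <hr>, <img>, ...) are popped immediately after insertion; marking
        # selfClosingAcknowledged keeps the parser from flagging "<br/>" with
        # a spurious self-closing-tag parse error.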
self.parser.parseError("unexpected-start-tag-treated-as", {"originalName": "image", "newName": "img"}) self.processStartTag(impliedTagToken("img", "StartTag", attributes=token["data"], selfClosing=token["selfClosing"])) def startTagIsIndex(self, token): self.parser.parseError("deprecated-tag", {"name": "isindex"}) if self.tree.formPointer: return form_attrs = {} if "action" in token["data"]: form_attrs["action"] = token["data"]["action"] self.processStartTag(impliedTagToken("form", "StartTag", attributes=form_attrs)) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processStartTag(impliedTagToken("label", "StartTag")) # XXX Localization ... if "prompt" in token["data"]: prompt = token["data"]["prompt"] else: prompt = "This is a searchable index. Enter search keywords: " self.processCharacters( {"type": tokenTypes["Characters"], "data": prompt}) attributes = token["data"].copy() if "action" in attributes: del attributes["action"] if "prompt" in attributes: del attributes["prompt"] attributes["name"] = "isindex" self.processStartTag(impliedTagToken("input", "StartTag", attributes=attributes, selfClosing= token["selfClosing"])) self.processEndTag(impliedTagToken("label")) self.processStartTag(impliedTagToken("hr", "StartTag")) self.processEndTag(impliedTagToken("form")) def startTagTextarea(self, token): self.tree.insertElement(token) self.parser.tokenizer.state = self.parser.tokenizer.rcdataState self.processSpaceCharacters = self.processSpaceCharactersDropNewline self.parser.framesetOK = False def startTagIFrame(self, token): self.parser.framesetOK = False self.startTagRawtext(token) def startTagRawtext(self, token): """iframe, noembed noframes, noscript(if scripting enabled)""" self.parser.parseRCDataRawtext(token, "RAWTEXT") def startTagOpt(self, token): if self.tree.openElements[-1].name == "option": self.parser.phase.processEndTag(impliedTagToken("option")) self.tree.reconstructActiveFormattingElements() self.parser.tree.insertElement(token) def startTagSelect(self, token): self.tree.reconstructActiveFormattingElements() self.tree.insertElement(token) self.parser.framesetOK = False if self.parser.phase in (self.parser.phases["inTable"], self.parser.phases["inCaption"], self.parser.phases["inColumnGroup"], self.parser.phases["inTableBody"], self.parser.phases["inRow"], self.parser.phases["inCell"]): self.parser.phase = self.parser.phases["inSelectInTable"] else: self.parser.phase = self.parser.phases["inSelect"] def startTagRpRt(self, token): if self.tree.elementInScope("ruby"): self.tree.generateImpliedEndTags() if self.tree.openElements[-1].name != "ruby": self.parser.parseError() self.tree.insertElement(token) def startTagMath(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustMathMLAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["mathml"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace not equal to the xmlns attribute if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] = True def startTagSvg(self, token): self.tree.reconstructActiveFormattingElements() self.parser.adjustSVGAttributes(token) self.parser.adjustForeignAttributes(token) token["namespace"] = namespaces["svg"] self.tree.insertElement(token) # Need to get the parse error right for the case where the token # has a namespace not equal to the xmlns attribute if token["selfClosing"]: self.tree.openElements.pop() token["selfClosingAcknowledged"] 
        def startTagMisplaced(self, token):
            """ Elements that should be children of other elements that have a
            different insertion mode; here they are ignored
            "caption", "col", "colgroup", "frame", "frameset", "head",
            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
            "tr", "noscript"
            """
            self.parser.parseError("unexpected-start-tag-ignored",
                                   {"name": token["name"]})

        def startTagOther(self, token):
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)

        def endTagP(self, token):
            if not self.tree.elementInScope("p", variant="button"):
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()

        def endTagBody(self, token):
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"expectedName": "body", "gotName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]

        def endTagHtml(self, token):
            # We repeat the test for the body end tag token being ignored here
            if self.tree.elementInScope("body"):
                self.endTagBody(impliedTagToken("body"))
                return token

        def endTagBlock(self, token):
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()

        def endTagForm(self, token):
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag", {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                self.tree.openElements.remove(node)

        def endTagListItem(self, token):
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
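        # Illustrative note (added): the form pointer logic above means nested
        # <form> tags are dropped, and endTagForm removes the open form node
        # in place rather than popping every element above it.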
        def endTagHeading(self, token):
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})

            for item in headingElements:
                if self.tree.elementInScope(item):
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break

        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm"""
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.

            # Step 1
            outerLoopCounter = 0

            # Step 2
            while outerLoopCounter < 8:

                # Step 3
                outerLoopCounter += 1

                # Step 4:

                # Let the formatting element be the last element in
                # the list of active formatting elements that:

                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and

                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return

                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2",
                                           {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return

                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4",
                                           {"name": token["name"]})
                    return

                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3",
                                               {"name": token["name"]})

                # Step 5:

                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break

                # Step 6:

                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return

                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]

                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element.
                # Note the bookmark can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)

                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0

                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1

                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node

                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)

                if commonAncestor.name in frozenset(("table", "tbody", "tfoot",
                                                     "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)

                # Step 11
                clone = formattingElement.cloneNode()

                # Step 12
                furthestBlock.reparentChildren(clone)

                # Step 13
                furthestBlock.appendChild(clone)

                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)

                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
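        # Illustrative worked example (added): for the classic misnesting
        #     <p>1<b>2<i>3</b>4</i>5</p>
        # the algorithm clones the <i> so formatting survives the mis-closed
        # <b>, yielding <p>1<b>2<i>3</i></b><i>4</i>5</p>.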
        def endTagAppletMarqueeObject(self, token):
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})

            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()

        def endTagBr(self, token):
            self.parser.parseError("unexpected-end-tag-treated-as",
                                   {"originalName": "br", "newName": "br element"})
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(impliedTagToken("br", "StartTag"))
            self.tree.openElements.pop()

        def endTagOther(self, token):
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag",
                                               {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag",
                                               {"name": token["name"]})
                        break

    class TextPhase(Phase):
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther

        def processCharacters(self, token):
            self.tree.insertText(token["data"])

        def processEOF(self):
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True

        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']

        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works

        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase

    class InTablePhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther

        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #                        {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case

        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing

        def processSpaceCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)

        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)

        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
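            # Illustrative example (added): with insertFromTable set, the tree
            # builder foster-parents misplaced content, so the "x" in
            # "<table>x<tr><td>y" ends up before the <table> rather than
            # inside it.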
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False

        def startTagCaption(self, token):
            self.clearStackToTableContext()
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]

        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]

        def startTagCol(self, token):
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token

        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]

        def startTagImplyTbody(self, token):
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token

        def startTagTable(self, token):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token

        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)

        def startTagInput(self, token):
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)

        def startTagForm(self, token):
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()

        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo",
                                   {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False

        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()

        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
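        # Illustrative note (added): after a well-formed "</table>" above,
        # resetInsertionMode() re-derives the current phase from the stack of
        # open elements (e.g. back to "inBody" for a table directly in <body>).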
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo",
                                   {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False

    class InTableTextPhase(Phase):
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.originalPhase = None
            self.characterTokens = []

        def flushCharacters(self):
            data = "".join([item["data"] for item in self.characterTokens])
            if any([item not in spaceCharacters for item in data]):
                token = {"type": tokenTypes["Characters"], "data": data}
                self.parser.phases["inTable"].insertText(token)
            elif data:
                self.tree.insertText(data)
            self.characterTokens = []

        def processComment(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token

        def processEOF(self):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return True

        def processCharacters(self, token):
            if token["data"] == "\u0000":
                return
            self.characterTokens.append(token)

        def processSpaceCharacters(self, token):
            # pretty sure we should never reach here
            self.characterTokens.append(token)
            # assert False

        def processStartTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token

        def processEndTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token

    class InCaptionPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther

        def ignoreEndTagCaption(self):
            return not self.tree.elementInScope("caption", variant="table")

        def processEOF(self):
            self.parser.phases["inBody"].processEOF()

        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)

        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token

        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()

        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token

        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
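    # Illustrative note (added): InTableTextPhase above buffers character
    # tokens so a run of pure whitespace such as "<table>  <tr>" stays inside
    # the table, while any non-space run is flushed through insertText() and
    # foster-parented out of it.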
    class InColumnGroupPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther

        def ignoreEndTagColgroup(self):
            return self.tree.openElements[-1].name == "html"

        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True

        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token

        def startTagCol(self, token):
            self.tree.insertElement(token)
            self.tree.openElements.pop()

        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token

        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]

        def endTagCol(self, token):
            self.parser.parseError("no-end-tag", {"name": "col"})

        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token

    class InTableBodyPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("tr", self.startTagTr),
                (("td", "th"), self.startTagTableCell),
                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
                 self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "td", "th",
                  "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther

        # helper methods
        def clearStackToTableBodyContext(self):
            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                          "thead", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #                        {"name": self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML

        # the rest
        def processEOF(self):
            self.parser.phases["inTable"].processEOF()

        def processSpaceCharacters(self, token):
            return self.parser.phases["inTable"].processSpaceCharacters(token)

        def processCharacters(self, token):
            return self.parser.phases["inTable"].processCharacters(token)

        def startTagTr(self, token):
            self.clearStackToTableBodyContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inRow"]

        def startTagTableCell(self, token):
            self.parser.parseError("unexpected-cell-in-table-body",
                                   {"name": token["name"]})
            self.startTagTr(impliedTagToken("tr", "StartTag"))
            return token

        def startTagTableOther(self, token):
            # XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def startTagOther(self, token): return self.parser.phases["inTable"].processStartTag(token) def endTagTableRowGroup(self, token): if self.tree.elementInScope(token["name"], variant="table"): self.clearStackToTableBodyContext() self.tree.openElements.pop() self.parser.phase = self.parser.phases["inTable"] else: self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagTable(self, token): if (self.tree.elementInScope("tbody", variant="table") or self.tree.elementInScope("thead", variant="table") or self.tree.elementInScope("tfoot", variant="table")): self.clearStackToTableBodyContext() self.endTagTableRowGroup( impliedTagToken(self.tree.openElements[-1].name)) return token else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagIgnore(self, token): self.parser.parseError("unexpected-end-tag-in-table-body", {"name": token["name"]}) def endTagOther(self, token): return self.parser.phases["inTable"].processEndTag(token) class InRowPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-row def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), (("td", "th"), self.startTagTableCell), (("caption", "col", "colgroup", "tbody", "tfoot", "thead", "tr"), self.startTagTableOther) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("tr", self.endTagTr), ("table", self.endTagTable), (("tbody", "tfoot", "thead"), self.endTagTableRowGroup), (("body", "caption", "col", "colgroup", "html", "td", "th"), self.endTagIgnore) ]) self.endTagHandler.default = self.endTagOther # helper methods (XXX unify this with other table helper methods) def clearStackToTableRowContext(self): while self.tree.openElements[-1].name not in ("tr", "html"): self.parser.parseError("unexpected-implied-end-tag-in-table-row", {"name": self.tree.openElements[-1].name}) self.tree.openElements.pop() def ignoreEndTagTr(self): return not self.tree.elementInScope("tr", variant="table") # the rest def processEOF(self): self.parser.phases["inTable"].processEOF() def processSpaceCharacters(self, token): return self.parser.phases["inTable"].processSpaceCharacters(token) def processCharacters(self, token): return self.parser.phases["inTable"].processCharacters(token) def startTagTableCell(self, token): self.clearStackToTableRowContext() self.tree.insertElement(token) self.parser.phase = self.parser.phases["inCell"] self.tree.activeFormattingElements.append(Marker) def startTagTableOther(self, token): ignoreEndTag = self.ignoreEndTagTr() self.endTagTr(impliedTagToken("tr")) # XXX how are we sure it's always ignored in the innerHTML case? 
            if not ignoreEndTag:
                return token

        def startTagOther(self, token):
            return self.parser.phases["inTable"].processStartTag(token)

        def endTagTr(self, token):
            if not self.ignoreEndTagTr():
                self.clearStackToTableRowContext()
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTableBody"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()

        def endTagTable(self, token):
            ignoreEndTag = self.ignoreEndTagTr()
            self.endTagTr(impliedTagToken("tr"))
            # Reprocess the current tag if the tr end tag was not ignored
            # XXX how are we sure it's always ignored in the innerHTML case?
            if not ignoreEndTag:
                return token

        def endTagTableRowGroup(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.endTagTr(impliedTagToken("tr"))
                return token
            else:
                self.parser.parseError()

        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag-in-table-row",
                                   {"name": token["name"]})

        def endTagOther(self, token):
            return self.parser.phases["inTable"].processEndTag(token)

    class InCellPhase(Phase):
        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableOther)
            ])
            self.startTagHandler.default = self.startTagOther

            self.endTagHandler = utils.MethodDispatcher([
                (("td", "th"), self.endTagTableCell),
                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
            ])
            self.endTagHandler.default = self.endTagOther

        # helper
        def closeCell(self):
            if self.tree.elementInScope("td", variant="table"):
                self.endTagTableCell(impliedTagToken("td"))
            elif self.tree.elementInScope("th", variant="table"):
                self.endTagTableCell(impliedTagToken("th"))

        # the rest
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()

        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)

        def startTagTableOther(self, token):
            if (self.tree.elementInScope("td", variant="table") or
                    self.tree.elementInScope("th", variant="table")):
                self.closeCell()
                return token
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()

        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)

        def endTagTableCell(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.tree.generateImpliedEndTags(token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError("unexpected-cell-end-tag",
                                           {"name": token["name"]})
                    while True:
                        node = self.tree.openElements.pop()
                        if node.name == token["name"]:
                            break
                else:
                    self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inRow"]
            else:
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

        def endTagImply(self, token):
            if self.tree.elementInScope(token["name"], variant="table"):
                self.closeCell()
                return token
            else:
                # sometimes innerHTML case
                self.parser.parseError()

        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
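    # Illustrative example (added): "<table><tr><td>a<td>b" relies on
    # closeCell() above; the second <td> first ends whichever of td/th is in
    # table scope, then InRowPhase opens the new cell.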
("optgroup", self.startTagOptgroup), ("select", self.startTagSelect), (("input", "keygen", "textarea"), self.startTagInput), ("script", self.startTagScript) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("option", self.endTagOption), ("optgroup", self.endTagOptgroup), ("select", self.endTagSelect) ]) self.endTagHandler.default = self.endTagOther # http://www.whatwg.org/specs/web-apps/current-work/#in-select def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-select") else: assert self.parser.innerHTML def processCharacters(self, token): if token["data"] == "\u0000": return self.tree.insertText(token["data"]) def startTagOption(self, token): # We need to imply </option> if <option> is the current node. if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() self.tree.insertElement(token) def startTagOptgroup(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() self.tree.insertElement(token) def startTagSelect(self, token): self.parser.parseError("unexpected-select-in-select") self.endTagSelect(impliedTagToken("select")) def startTagInput(self, token): self.parser.parseError("unexpected-input-in-select") if self.tree.elementInScope("select", variant="select"): self.endTagSelect(impliedTagToken("select")) return token else: assert self.parser.innerHTML def startTagScript(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-select", {"name": token["name"]}) def endTagOption(self, token): if self.tree.openElements[-1].name == "option": self.tree.openElements.pop() else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "option"}) def endTagOptgroup(self, token): # </optgroup> implicitly closes <option> if (self.tree.openElements[-1].name == "option" and self.tree.openElements[-2].name == "optgroup"): self.tree.openElements.pop() # It also closes </optgroup> if self.tree.openElements[-1].name == "optgroup": self.tree.openElements.pop() # But nothing else else: self.parser.parseError("unexpected-end-tag-in-select", {"name": "optgroup"}) def endTagSelect(self, token): if self.tree.elementInScope("select", variant="select"): node = self.tree.openElements.pop() while node.name != "select": node = self.tree.openElements.pop() self.parser.resetInsertionMode() else: # innerHTML case assert self.parser.innerHTML self.parser.parseError() def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-select", {"name": token["name"]}) class InSelectInTablePhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), self.startTagTable) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"), self.endTagTable) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): self.parser.phases["inSelect"].processEOF() def processCharacters(self, token): return self.parser.phases["inSelect"].processCharacters(token) def startTagTable(self, token): self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]}) self.endTagOther(impliedTagToken("select")) return 
        def startTagOther(self, token):
            return self.parser.phases["inSelect"].processStartTag(token)

        def endTagTable(self, token):
            self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table",
                                   {"name": token["name"]})
            if self.tree.elementInScope(token["name"], variant="table"):
                self.endTagOther(impliedTagToken("select"))
                return token

        def endTagOther(self, token):
            return self.parser.phases["inSelect"].processEndTag(token)

    class InForeignContentPhase(Phase):
        breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                      "center", "code", "dd", "div", "dl", "dt",
                                      "em", "embed", "h1", "h2", "h3",
                                      "h4", "h5", "h6", "head", "hr", "i", "img",
                                      "li", "listing", "menu", "meta", "nobr",
                                      "ol", "p", "pre", "ruby", "s", "small",
                                      "span", "strong", "strike", "sub", "sup",
                                      "table", "tt", "u", "ul", "var"])

        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)

        def adjustSVGTagNames(self, token):
            replacements = {"altglyph": "altGlyph",
                            "altglyphdef": "altGlyphDef",
                            "altglyphitem": "altGlyphItem",
                            "animatecolor": "animateColor",
                            "animatemotion": "animateMotion",
                            "animatetransform": "animateTransform",
                            "clippath": "clipPath",
                            "feblend": "feBlend",
                            "fecolormatrix": "feColorMatrix",
                            "fecomponenttransfer": "feComponentTransfer",
                            "fecomposite": "feComposite",
                            "feconvolvematrix": "feConvolveMatrix",
                            "fediffuselighting": "feDiffuseLighting",
                            "fedisplacementmap": "feDisplacementMap",
                            "fedistantlight": "feDistantLight",
                            "feflood": "feFlood",
                            "fefunca": "feFuncA",
                            "fefuncb": "feFuncB",
                            "fefuncg": "feFuncG",
                            "fefuncr": "feFuncR",
                            "fegaussianblur": "feGaussianBlur",
                            "feimage": "feImage",
                            "femerge": "feMerge",
                            "femergenode": "feMergeNode",
                            "femorphology": "feMorphology",
                            "feoffset": "feOffset",
                            "fepointlight": "fePointLight",
                            "fespecularlighting": "feSpecularLighting",
                            "fespotlight": "feSpotLight",
                            "fetile": "feTile",
                            "feturbulence": "feTurbulence",
                            "foreignobject": "foreignObject",
                            "glyphref": "glyphRef",
                            "lineargradient": "linearGradient",
                            "radialgradient": "radialGradient",
                            "textpath": "textPath"}

            if token["name"] in replacements:
                token["name"] = replacements[token["name"]]

        def processCharacters(self, token):
            if token["data"] == "\u0000":
                token["data"] = "\uFFFD"
            elif (self.parser.framesetOK and
                  any(char not in spaceCharacters for char in token["data"])):
                self.parser.framesetOK = False
            Phase.processCharacters(self, token)

        def processStartTag(self, token):
            currentNode = self.tree.openElements[-1]
            if (token["name"] in self.breakoutElements or
                (token["name"] == "font" and
                 set(token["data"].keys()) & set(["color", "face", "size"]))):
                self.parser.parseError("unexpected-html-element-in-foreign-content",
                                       {"name": token["name"]})
                while (self.tree.openElements[-1].namespace !=
                       self.tree.defaultNamespace and
                       not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                       not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                    self.tree.openElements.pop()
                return token
            else:
                if currentNode.namespace == namespaces["mathml"]:
                    self.parser.adjustMathMLAttributes(token)
                elif currentNode.namespace == namespaces["svg"]:
                    self.adjustSVGTagNames(token)
                    self.parser.adjustSVGAttributes(token)
                self.parser.adjustForeignAttributes(token)
                token["namespace"] = currentNode.namespace
                self.tree.insertElement(token)
                if token["selfClosing"]:
                    self.tree.openElements.pop()
                    token["selfClosingAcknowledged"] = True

        def processEndTag(self, token):
            nodeIndex = len(self.tree.openElements) - 1
            node = self.tree.openElements[-1]
            if node.name != token["name"]:
                self.parser.parseError("unexpected-end-tag",
                                       {"name": token["name"]})
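            # Illustrative note (added): the case-folded comparison below lets
            # "</foreignobject>" close a node whose adjusted name is
            # "foreignObject".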
{"name": token["name"]}) while True: if node.name.translate(asciiUpper2Lower) == token["name"]: # XXX this isn't in the spec but it seems necessary if self.parser.phase == self.parser.phases["inTableText"]: self.parser.phase.flushCharacters() self.parser.phase = self.parser.phase.originalPhase while self.tree.openElements.pop() != node: assert self.tree.openElements new_token = None break nodeIndex -= 1 node = self.tree.openElements[nodeIndex] if node.namespace != self.tree.defaultNamespace: continue else: new_token = self.parser.phase.processEndTag(token) break return new_token class AfterBodyPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)]) self.endTagHandler.default = self.endTagOther def processEOF(self): # Stop parsing pass def processComment(self, token): # This is needed because data is to be appended to the <html> element # here and not to whatever is currently open. self.tree.insertComment(token, self.tree.openElements[0]) def processCharacters(self, token): self.parser.parseError("unexpected-char-after-body") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def endTagHtml(self, name): if self.parser.innerHTML: self.parser.parseError("unexpected-end-tag-after-body-innerhtml") else: self.parser.phase = self.parser.phases["afterAfterBody"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-body", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token class InFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("frameset", self.startTagFrameset), ("frame", self.startTagFrame), ("noframes", self.startTagNoframes) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("frameset", self.endTagFrameset) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): if self.tree.openElements[-1].name != "html": self.parser.parseError("eof-in-frameset") else: assert self.parser.innerHTML def processCharacters(self, token): self.parser.parseError("unexpected-char-in-frameset") def startTagFrameset(self, token): self.tree.insertElement(token) def startTagFrame(self, token): self.tree.insertElement(token) self.tree.openElements.pop() def startTagNoframes(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-in-frameset", {"name": token["name"]}) def endTagFrameset(self, token): if self.tree.openElements[-1].name == "html": # innerHTML case self.parser.parseError("unexpected-frameset-in-frameset-innerhtml") else: self.tree.openElements.pop() if (not self.parser.innerHTML and self.tree.openElements[-1].name != "frameset"): # If we're not in innerHTML mode and the the current node is not a # "frameset" element (anymore) then switch. 
self.parser.phase = self.parser.phases["afterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-in-frameset", {"name": token["name"]}) class AfterFramesetPhase(Phase): # http://www.whatwg.org/specs/web-apps/current-work/#after3 def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("noframes", self.startTagNoframes) ]) self.startTagHandler.default = self.startTagOther self.endTagHandler = utils.MethodDispatcher([ ("html", self.endTagHtml) ]) self.endTagHandler.default = self.endTagOther def processEOF(self): # Stop parsing pass def processCharacters(self, token): self.parser.parseError("unexpected-char-after-frameset") def startTagNoframes(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("unexpected-start-tag-after-frameset", {"name": token["name"]}) def endTagHtml(self, token): self.parser.phase = self.parser.phases["afterAfterFrameset"] def endTagOther(self, token): self.parser.parseError("unexpected-end-tag-after-frameset", {"name": token["name"]}) class AfterAfterBodyPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml) ]) self.startTagHandler.default = self.startTagOther def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") self.parser.phase = self.parser.phases["inBody"] return token def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) self.parser.phase = self.parser.phases["inBody"] return token class AfterAfterFramesetPhase(Phase): def __init__(self, parser, tree): Phase.__init__(self, parser, tree) self.startTagHandler = utils.MethodDispatcher([ ("html", self.startTagHtml), ("noframes", self.startTagNoFrames) ]) self.startTagHandler.default = self.startTagOther def processEOF(self): pass def processComment(self, token): self.tree.insertComment(token, self.tree.document) def processSpaceCharacters(self, token): return self.parser.phases["inBody"].processSpaceCharacters(token) def processCharacters(self, token): self.parser.parseError("expected-eof-but-got-char") def startTagHtml(self, token): return self.parser.phases["inBody"].processStartTag(token) def startTagNoFrames(self, token): return self.parser.phases["inHead"].processStartTag(token) def startTagOther(self, token): self.parser.parseError("expected-eof-but-got-start-tag", {"name": token["name"]}) def processEndTag(self, token): self.parser.parseError("expected-eof-but-got-end-tag", {"name": token["name"]}) return { "initial": InitialPhase, "beforeHtml": BeforeHtmlPhase, "beforeHead": BeforeHeadPhase, "inHead": InHeadPhase, # XXX "inHeadNoscript": InHeadNoScriptPhase, "afterHead": AfterHeadPhase, "inBody": InBodyPhase, "text": TextPhase, "inTable": InTablePhase, "inTableText": InTableTextPhase, "inCaption": InCaptionPhase, "inColumnGroup": 
InColumnGroupPhase, "inTableBody": InTableBodyPhase, "inRow": InRowPhase, "inCell": InCellPhase, "inSelect": InSelectPhase, "inSelectInTable": InSelectInTablePhase, "inForeignContent": InForeignContentPhase, "afterBody": AfterBodyPhase, "inFrameset": InFramesetPhase, "afterFrameset": AfterFramesetPhase, "afterAfterBody": AfterAfterBodyPhase, "afterAfterFrameset": AfterAfterFramesetPhase, # XXX after after frameset } def impliedTagToken(name, type="EndTag", attributes=None, selfClosing=False): if attributes is None: attributes = {} return {"type": tokenTypes[type], "name": name, "data": attributes, "selfClosing": selfClosing} class ParseError(Exception): """Error in parsed document""" pass
117,017
Python
.py
2,303
35.473296
118
0.55705
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,563
utils.py
CouchPotato_CouchPotatoServer/libs/html5lib/utils.py
from __future__ import absolute_import, division, unicode_literals

from types import ModuleType

try:
    import xml.etree.cElementTree as default_etree
except ImportError:
    import xml.etree.ElementTree as default_etree


__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
           "surrogatePairToCodepoint", "moduleFactoryFactory"]


class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"): "baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        # Using _dictEntries instead of directly assigning to self is about
        # twice as fast. Please do careful performance testing before changing
        # anything here.
        _dictEntries = []
        for name, value in items:
            if type(name) in (list, tuple, frozenset, set):
                for item in name:
                    _dictEntries.append((item, value))
            else:
                _dictEntries.append((name, value))
        dict.__init__(self, _dictEntries)
        self.default = None

    def __getitem__(self, key):
        return dict.get(self, key, self.default)


# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds

def isSurrogatePair(data):
    return (len(data) == 2 and
            ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
            ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)


def surrogatePairToCodepoint(data):
    char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
                (ord(data[1]) - 0xDC00))
    return char_val


# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.

def moduleFactoryFactory(factory):
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__

        if name in moduleCache:
            return moduleCache[name]
        else:
            mod = ModuleType(name)
            objs = factory(baseModule, *args, **kwargs)
            mod.__dict__.update(objs)
            moduleCache[name] = mod
            return mod

    return moduleFactory
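utils.py is consumed mainly through MethodDispatcher, which the parser phases above instantiate with lists of (name, handler) pairs and whose .default attribute supplies the startTagOther/endTagOther fallbacks. A short sketch of that behaviour, based on the docstring and on how the phases call it (the names and values here are illustrative only):

from html5lib.utils import (MethodDispatcher, isSurrogatePair,
                            surrogatePairToCodepoint)

# Tuple (or list/set/frozenset) keys fan out into one entry per element.
md = MethodDispatcher([(("foo", "bar"), "baz"), ("qux", "quux")])
assert md["foo"] == "baz" and md["bar"] == "baz" and md["qux"] == "quux"

# Unknown keys fall back to the .default attribute instead of raising.
md.default = "fallback"
assert md["missing"] == "fallback"

# The surrogate helpers reassemble astral code points from UTF-16 pairs.
pair = "\ud83d\ude00"  # surrogate pair for U+1F600
if isSurrogatePair(pair):
    assert surrogatePairToCodepoint(pair) == 0x1F600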
2,545
Python
.py
60
34.533333
78
0.637434
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,564
__init__.py
CouchPotato_CouchPotatoServer/libs/html5lib/__init__.py
""" HTML parsing library based on the WHATWG "HTML5" specification. The parser is designed to be compatible with existing HTML found in the wild and implements well-defined error recovery that is largely compatible with modern desktop web browsers. Example usage: import html5lib f = open("my_document.html") tree = html5lib.parse(f) """ from __future__ import absolute_import, division, unicode_literals from .html5parser import HTMLParser, parse, parseFragment from .treebuilders import getTreeBuilder from .treewalkers import getTreeWalker from .serializer import serialize __all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder", "getTreeWalker", "serialize"] __version__ = "0.999"
714
Python
.py
18
37.777778
70
0.788712
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,565
ihatexml.py
CouchPotato_CouchPotatoServer/libs/html5lib/ihatexml.py
from __future__ import absolute_import, division, unicode_literals import re import warnings from .constants import DataLossWarning baseChar = """ [#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]""" ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]""" combiningCharacter = """ [#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | 
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A""" digit = """ [#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]""" extender = """ #x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | #[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]""" letter = " | ".join([baseChar, ideographic]) # Without the name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter, extender]) nameFirst = " | ".join([letter, "_"]) reChar = re.compile(r"#x([\d|A-F]{4,4})") reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]") def charStringToList(chars): charRanges = [item.strip() for item in chars.split(" | ")] rv = [] for item in charRanges: foundMatch = False for regexp in (reChar, reCharRange): match = regexp.match(item) if match is not None: rv.append([hexToInt(item) for item in match.groups()]) if len(rv[-1]) == 1: rv[-1] = rv[-1] * 2 foundMatch = True break if not foundMatch: assert len(item) == 1 rv.append([ord(item)] * 2) rv = normaliseCharList(rv) return rv def normaliseCharList(charList): charList = sorted(charList) for item in charList: assert item[1] >= item[0] rv = [] i = 0 while i < len(charList): j = 1 rv.append(charList[i]) while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1: rv[-1][1] = charList[i + j][1] j += 1 i += j return rv # We don't really support characters above the BMP :( max_unicode = int("FFFF", 16) def missingRanges(charList): rv = [] if charList[0] != 0: rv.append([0, charList[0][0] - 1]) for i, item in enumerate(charList[:-1]): rv.append([item[1] + 1, charList[i + 1][0] - 1]) if charList[-1][1] != max_unicode: rv.append([charList[-1][1] + 1, max_unicode]) return rv def listToRegexpStr(charList): rv = [] for item in charList: if item[0] == item[1]: rv.append(escapeRegexp(chr(item[0]))) else: rv.append(escapeRegexp(chr(item[0])) + "-" + escapeRegexp(chr(item[1]))) return "[%s]" % "".join(rv) def hexToInt(hex_str): return int(hex_str, 16) def escapeRegexp(string): specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}", "[", "]", "|", "(", ")", "-") for char in specialCharacters: 
string = string.replace(char, "\\" + char) return string # output from the above nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') nonXmlNameFirstBMPRegexp = 
re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # Simpler things nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]") class InfosetFilter(object): replacementRegexp = re.compile(r"U[\dA-F]{5,5}") def __init__(self, replaceChars=None, dropXmlnsLocalName=False, dropXmlnsAttrNs=False, preventDoubleDashComments=False, preventDashAtCommentEnd=False, replaceFormFeedCharacters=True, preventSingleQuotePubid=False): self.dropXmlnsLocalName = dropXmlnsLocalName self.dropXmlnsAttrNs = dropXmlnsAttrNs self.preventDoubleDashComments = preventDoubleDashComments self.preventDashAtCommentEnd = preventDashAtCommentEnd self.replaceFormFeedCharacters = replaceFormFeedCharacters self.preventSingleQuotePubid = preventSingleQuotePubid self.replaceCache = {} def coerceAttribute(self, name, namespace=None): if self.dropXmlnsLocalName and name.startswith("xmlns:"): warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) return None elif (self.dropXmlnsAttrNs and namespace == "http://www.w3.org/2000/xmlns/"): warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) return None else: return self.toXmlName(name) def coerceElement(self, name, namespace=None): return self.toXmlName(name) def coerceComment(self, data): if self.preventDoubleDashComments: while "--" in data: warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) data = data.replace("--", "- -") return data def coerceCharacters(self, data): if self.replaceFormFeedCharacters: for i in range(data.count("\x0C")): 
warnings.warn("Text cannot contain U+000C", DataLossWarning) data = data.replace("\x0C", " ") # Other non-xml characters return data def coercePubid(self, data): dataOutput = data for char in nonPubidCharRegexp.findall(data): warnings.warn("Coercing non-XML pubid", DataLossWarning) replacement = self.getReplacementCharacter(char) dataOutput = dataOutput.replace(char, replacement) if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: warnings.warn("Pubid cannot contain single quote", DataLossWarning) dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'")) return dataOutput def toXmlName(self, name): nameFirst = name[0] nameRest = name[1:] m = nonXmlNameFirstBMPRegexp.match(nameFirst) if m: warnings.warn("Coercing non-XML name", DataLossWarning) nameFirstOutput = self.getReplacementCharacter(nameFirst) else: nameFirstOutput = nameFirst nameRestOutput = nameRest replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) for char in replaceChars: warnings.warn("Coercing non-XML name", DataLossWarning) replacement = self.getReplacementCharacter(char) nameRestOutput = nameRestOutput.replace(char, replacement) return nameFirstOutput + nameRestOutput def getReplacementCharacter(self, char): if char in self.replaceCache: replacement = self.replaceCache[char] else: replacement = self.escapeChar(char) return replacement def fromXmlName(self, name): for item in set(self.replacementRegexp.findall(name)): name = name.replace(item, self.unescapeChar(item)) return name def escapeChar(self, char): replacement = "U%05X" % ord(char) self.replaceCache[char] = replacement return replacement def unescapeChar(self, charcode): return chr(int(charcode[1:], 16))
16,581
Python
.py
238
63.302521
2,861
0.647337
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,566
tokenizer.py
CouchPotato_CouchPotatoServer/libs/html5lib/tokenizer.py
from __future__ import absolute_import, division, unicode_literals try: chr = unichr # flake8: noqa except NameError: pass from collections import deque from .constants import spaceCharacters from .constants import entities from .constants import asciiLetters, asciiUpper2Lower from .constants import digits, hexDigits, EOF from .constants import tokenTypes, tagTokenTypes from .constants import replacementCharacters from .inputstream import HTMLInputStream from .trie import Trie entitiesTrie = Trie(entities) class HTMLTokenizer(object): """ This class takes care of tokenizing HTML. * self.currentToken Holds the token that is currently being processed. * self.state Holds a reference to the method to be invoked... XXX * self.stream Points to HTMLInputStream object. """ def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True, lowercaseElementName=True, lowercaseAttrName=True, parser=None): self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet) self.parser = parser # Perform case conversions? self.lowercaseElementName = lowercaseElementName self.lowercaseAttrName = lowercaseAttrName # Setup the initial tokenizer state self.escapeFlag = False self.lastFourChars = [] self.state = self.dataState self.escape = False # The current token being created self.currentToken = None super(HTMLTokenizer, self).__init__() def __iter__(self): """ This is where the magic happens. We do our usually processing through the states and when we have a token to return we yield the token which pauses processing until the next token is requested. """ self.tokenQueue = deque([]) # Start processing. When EOF is reached self.state will return False # instead of True and the loop will terminate. while self.state(): while self.stream.errors: yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)} while self.tokenQueue: yield self.tokenQueue.popleft() def consumeNumberEntity(self, isHex): """This function returns either U+FFFD or the character based on the decimal or hexadecimal representation. It also discards ";" if present. If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked. """ allowed = digits radix = 10 if isHex: allowed = hexDigits radix = 16 charStack = [] # Consume all the characters that are in range while making sure we # don't hit an EOF. c = self.stream.char() while c in allowed and c is not EOF: charStack.append(c) c = self.stream.char() # Convert the set of characters consumed to an int. charAsInt = int("".join(charStack), radix) # Certain characters get replaced with others if charAsInt in replacementCharacters: char = replacementCharacters[charAsInt] self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) elif ((0xD800 <= charAsInt <= 0xDFFF) or (charAsInt > 0x10FFFF)): char = "\uFFFD" self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) else: # Should speed up this check somehow (e.g. 
move the set to a constant) if ((0x0001 <= charAsInt <= 0x0008) or (0x000E <= charAsInt <= 0x001F) or (0x007F <= charAsInt <= 0x009F) or (0xFDD0 <= charAsInt <= 0xFDEF) or charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE, 0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE, 0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF, 0x10FFFE, 0x10FFFF])): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "illegal-codepoint-for-numeric-entity", "datavars": {"charAsInt": charAsInt}}) try: # Try/except needed as UCS-2 Python builds' unichar only works # within the BMP. char = chr(charAsInt) except ValueError: v = charAsInt - 0x10000 char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF)) # Discard the ; if present. Otherwise, put it back on the queue and # invoke parseError on parser. if c != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "numeric-entity-without-semicolon"}) self.stream.unget(c) return char def consumeEntity(self, allowedChar=None, fromAttribute=False): # Initialise to the default output for when no entity is matched output = "&" charStack = [self.stream.char()] if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or (allowedChar is not None and allowedChar == charStack[0])): self.stream.unget(charStack[0]) elif charStack[0] == "#": # Read the next character to see if it's hex or decimal hex = False charStack.append(self.stream.char()) if charStack[-1] in ("x", "X"): hex = True charStack.append(self.stream.char()) # charStack[-1] should be the first digit if (hex and charStack[-1] in hexDigits) \ or (not hex and charStack[-1] in digits): # At least one digit found, so consume the whole number self.stream.unget(charStack[-1]) output = self.consumeNumberEntity(hex) else: # No digits found self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-numeric-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: # At this point in the process might have named entity. Entities # are stored in the global variable "entities". # # Consume characters and compare to these to a substring of the # entity names in the list until the substring no longer matches. while (charStack[-1] is not EOF): if not entitiesTrie.has_keys_with_prefix("".join(charStack)): break charStack.append(self.stream.char()) # At this point we have a string that starts with some characters # that may match an entity # Try to find the longest entity the string will match to take care # of &noti for instance. 
try: entityName = entitiesTrie.longest_prefix("".join(charStack[:-1])) entityLength = len(entityName) except KeyError: entityName = None if entityName is not None: if entityName[-1] != ";": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "named-entity-without-semicolon"}) if (entityName[-1] != ";" and fromAttribute and (charStack[entityLength] in asciiLetters or charStack[entityLength] in digits or charStack[entityLength] == "=")): self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) else: output = entities[entityName] self.stream.unget(charStack.pop()) output += "".join(charStack[entityLength:]) else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-named-entity"}) self.stream.unget(charStack.pop()) output = "&" + "".join(charStack) if fromAttribute: self.currentToken["data"][-1][1] += output else: if output in spaceCharacters: tokenType = "SpaceCharacters" else: tokenType = "Characters" self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output}) def processEntityInAttribute(self, allowedChar): """This method replaces the need for "entityInAttributeValueState". """ self.consumeEntity(allowedChar=allowedChar, fromAttribute=True) def emitCurrentToken(self): """This method is a generic handler for emitting the tags. It also sets the state to "data" because that's what's needed after a token has been emitted. """ token = self.currentToken # Add token to the queue to be yielded if (token["type"] in tagTokenTypes): if self.lowercaseElementName: token["name"] = token["name"].translate(asciiUpper2Lower) if token["type"] == tokenTypes["EndTag"]: if token["data"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "attributes-in-end-tag"}) if token["selfClosing"]: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "self-closing-flag-on-end-tag"}) self.tokenQueue.append(token) self.state = self.dataState # Below are the various tokenizer states worked out. def dataState(self): data = self.stream.char() if data == "&": self.state = self.entityDataState elif data == "<": self.state = self.tagOpenState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\u0000"}) elif data is EOF: # Tokenization ends. return False elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def entityDataState(self): self.consumeEntity() self.state = self.dataState return True def rcdataState(self): data = self.stream.char() if data == "&": self.state = self.characterReferenceInRcdata elif data == "<": self.state = self.rcdataLessThanSignState elif data == EOF: # Tokenization ends. 
return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data in spaceCharacters: # Directly after emitting a token you switch back to the "data # state". At that point spaceCharacters are important so they are # emitted separately. self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data": data + self.stream.charsUntil(spaceCharacters, True)}) # No need to update lastFourChars here, since the first space will # have already been appended to lastFourChars and will have broken # any <!-- or --> sequences else: chars = self.stream.charsUntil(("&", "<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def characterReferenceInRcdata(self): self.consumeEntity() self.state = self.rcdataState return True def rawtextState(self): data = self.stream.char() if data == "<": self.state = self.rawtextLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataState(self): data = self.stream.char() if data == "<": self.state = self.scriptDataLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: # Tokenization ends. return False else: chars = self.stream.charsUntil(("<", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def plaintextState(self): data = self.stream.char() if data == EOF: # Tokenization ends. return False elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + self.stream.charsUntil("\u0000")}) return True def tagOpenState(self): data = self.stream.char() if data == "!": self.state = self.markupDeclarationOpenState elif data == "/": self.state = self.closeTagOpenState elif data in asciiLetters: self.currentToken = {"type": tokenTypes["StartTag"], "name": data, "data": [], "selfClosing": False, "selfClosingAcknowledged": False} self.state = self.tagNameState elif data == ">": # XXX In theory it could be something besides a tag name. But # do we really care? self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-right-bracket"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"}) self.state = self.dataState elif data == "?": # XXX In theory it could be something besides a tag name. But # do we really care? 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name-but-got-question-mark"}) self.stream.unget(data) self.state = self.bogusCommentState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-tag-name"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.dataState return True def closeTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.currentToken = {"type": tokenTypes["EndTag"], "name": data, "data": [], "selfClosing": False} self.state = self.tagNameState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-right-bracket"}) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-eof"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.state = self.dataState else: # XXX data can be _'_... self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-closing-tag-but-got-char", "datavars": {"data": data}}) self.stream.unget(data) self.state = self.bogusCommentState return True def tagNameState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-tag-name"}) self.state = self.dataState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" else: self.currentToken["name"] += data # (Don't use charsUntil here, because tag names are # very short and it's faster to not do anything fancy) return True def rcdataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.rcdataEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rcdataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rcdataState return True def rcdataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rcdataState return True def rawtextLessThanSignState(self): data = self.stream.char() if data == "/": 
self.temporaryBuffer = "" self.state = self.rawtextEndTagOpenState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.rawtextEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.rawtextState return True def rawtextEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.rawtextState return True def scriptDataLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEndTagOpenState elif data == "!": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"}) self.state = self.scriptDataEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer += data self.state = self.scriptDataEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapeStartDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapeStartDashState(self): data = 
self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState else: self.stream.unget(data) self.state = self.scriptDataState return True def scriptDataEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.state = self.dataState else: chars = self.stream.charsUntil(("<", "-", "\u0000")) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data + chars}) return True def scriptDataEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataEscapedDashDashState elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.state = self.scriptDataEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataEscapedState elif data == EOF: self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataEscapedState return True def scriptDataEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.temporaryBuffer = "" self.state = self.scriptDataEscapedEndTagOpenState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data}) self.temporaryBuffer = data self.state = self.scriptDataDoubleEscapeStartState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagOpenState(self): data = self.stream.char() if data in asciiLetters: self.temporaryBuffer = data self.state = self.scriptDataEscapedEndTagNameState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataEscapedEndTagNameState(self): appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower() data = self.stream.char() if data in spaceCharacters and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.beforeAttributeNameState elif data == "/" and 
appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.state = self.selfClosingStartTagState elif data == ">" and appropriate: self.currentToken = {"type": tokenTypes["EndTag"], "name": self.temporaryBuffer, "data": [], "selfClosing": False} self.emitCurrentToken() self.state = self.dataState elif data in asciiLetters: self.temporaryBuffer += data else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</" + self.temporaryBuffer}) self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapeStartState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataDoubleEscapedState else: self.state = self.scriptDataEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataEscapedState return True def scriptDataDoubleEscapedState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) return True def scriptDataDoubleEscapedDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) self.state = self.scriptDataDoubleEscapedDashDashState elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedDashDashState(self): data = self.stream.char() if data == "-": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"}) elif data == "<": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"}) self.state = self.scriptDataDoubleEscapedLessThanSignState elif data == ">": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"}) self.state = self.scriptDataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "\uFFFD"}) self.state = self.scriptDataDoubleEscapedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "eof-in-script-in-script"}) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapedLessThanSignState(self): data = self.stream.char() if data == "/": self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"}) self.temporaryBuffer = "" self.state = self.scriptDataDoubleEscapeEndState else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def scriptDataDoubleEscapeEndState(self): data = self.stream.char() if data in (spaceCharacters | frozenset(("/", ">"))): self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) if self.temporaryBuffer.lower() == "script": self.state = self.scriptDataEscapedState else: self.state = self.scriptDataDoubleEscapedState elif data in asciiLetters: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.temporaryBuffer += data else: self.stream.unget(data) self.state = self.scriptDataDoubleEscapedState return True def beforeAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data in ("'", '"', "=", "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-name-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def attributeNameState(self): data = self.stream.char() leavingThisState = True emitToken = False if data == "=": self.state = self.beforeAttributeValueState elif data in asciiLetters: self.currentToken["data"][-1][0] += data +\ self.stream.charsUntil(asciiLetters, True) leavingThisState = False elif data == ">": # XXX If we emit here the attributes are converted to a dict # without being checked and when the code below runs we error # because data is a dict not a list emitToken = True elif data in spaceCharacters: self.state = self.afterAttributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][0] += "\uFFFD" leavingThisState = False elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-in-attribute-name"}) self.currentToken["data"][-1][0] += data leavingThisState = False elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-name"}) self.state = self.dataState else: self.currentToken["data"][-1][0] += data leavingThisState = False if leavingThisState: # Attributes are not dropped at this stage. That happens when the # start tag token is emitted so values can still be safely appended # to attributes, but we do want to report the parse error in time. 
if self.lowercaseAttrName: self.currentToken["data"][-1][0] = ( self.currentToken["data"][-1][0].translate(asciiUpper2Lower)) for name, value in self.currentToken["data"][:-1]: if self.currentToken["data"][-1][0] == name: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "duplicate-attribute"}) break # XXX Fix for above XXX if emitToken: self.emitCurrentToken() return True def afterAttributeNameState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "=": self.state = self.beforeAttributeValueState elif data == ">": self.emitCurrentToken() elif data in asciiLetters: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data == "/": self.state = self.selfClosingStartTagState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"].append(["\uFFFD", ""]) self.state = self.attributeNameState elif data in ("'", '"', "<"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-character-after-attribute-name"}) self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-end-of-tag-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"].append([data, ""]) self.state = self.attributeNameState return True def beforeAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.stream.charsUntil(spaceCharacters, True) elif data == "\"": self.state = self.attributeValueDoubleQuotedState elif data == "&": self.state = self.attributeValueUnQuotedState self.stream.unget(data) elif data == "'": self.state = self.attributeValueSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-right-bracket"}) self.emitCurrentToken() elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" self.state = self.attributeValueUnQuotedState elif data in ("=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "equals-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-attribute-value-but-got-eof"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data self.state = self.attributeValueUnQuotedState return True def attributeValueDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute('"') elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-double-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("\"", "&", "\u0000")) return True def attributeValueSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterAttributeValueState elif data == "&": self.processEntityInAttribute("'") elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-single-quote"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data +\ self.stream.charsUntil(("'", "&", "\u0000")) return True def attributeValueUnQuotedState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == "&": self.processEntityInAttribute(">") elif data == ">": self.emitCurrentToken() elif data in ('"', "'", "=", "<", "`"): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-in-unquoted-attribute-value"}) self.currentToken["data"][-1][1] += data elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"][-1][1] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-attribute-value-no-quotes"}) self.state = self.dataState else: self.currentToken["data"][-1][1] += data + self.stream.charsUntil( frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters) return True def afterAttributeValueState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeAttributeNameState elif data == ">": self.emitCurrentToken() elif data == "/": self.state = self.selfClosingStartTagState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-attribute-value"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-attribute-value"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def selfClosingStartTagState(self): data = self.stream.char() if data == ">": self.currentToken["selfClosing"] = True self.emitCurrentToken() elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-EOF-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-character-after-solidus-in-tag"}) self.stream.unget(data) self.state = self.beforeAttributeNameState return True def bogusCommentState(self): # Make a new comment token and give it as value all the characters # until the first > or EOF (charsUntil checks for EOF automatically) # and emit it. data = self.stream.charsUntil(">") data = data.replace("\u0000", "\uFFFD") self.tokenQueue.append( {"type": tokenTypes["Comment"], "data": data}) # Eat the character directly after the bogus comment which is either a # ">" or an EOF. 
self.stream.char() self.state = self.dataState return True def markupDeclarationOpenState(self): charStack = [self.stream.char()] if charStack[-1] == "-": charStack.append(self.stream.char()) if charStack[-1] == "-": self.currentToken = {"type": tokenTypes["Comment"], "data": ""} self.state = self.commentStartState return True elif charStack[-1] in ('d', 'D'): matched = True for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'), ('y', 'Y'), ('p', 'P'), ('e', 'E')): charStack.append(self.stream.char()) if charStack[-1] not in expected: matched = False break if matched: self.currentToken = {"type": tokenTypes["Doctype"], "name": "", "publicId": None, "systemId": None, "correct": True} self.state = self.doctypeState return True elif (charStack[-1] == "[" and self.parser is not None and self.parser.tree.openElements and self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace): matched = True for expected in ["C", "D", "A", "T", "A", "["]: charStack.append(self.stream.char()) if charStack[-1] != expected: matched = False break if matched: self.state = self.cdataSectionState return True self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-dashes-or-doctype"}) while charStack: self.stream.unget(charStack.pop()) self.state = self.bogusCommentState return True def commentStartState(self): data = self.stream.char() if data == "-": self.state = self.commentStartDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data self.state = self.commentState return True def commentStartDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "incorrect-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentState(self): data = self.stream.char() if data == "-": self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "\uFFFD" elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += data + \ self.stream.charsUntil(("-", "\u0000")) return True def commentEndDashState(self): data = self.stream.char() if data == "-": self.state = self.commentEndState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "-\uFFFD" self.state = self.commentState elif data is EOF: 
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "-" + data self.state = self.commentState return True def commentEndState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--\uFFFD" self.state = self.commentState elif data == "!": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-bang-after-double-dash-in-comment"}) self.state = self.commentEndBangState elif data == "-": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-dash-after-double-dash-in-comment"}) self.currentToken["data"] += data elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-double-dash"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: # XXX self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-comment"}) self.currentToken["data"] += "--" + data self.state = self.commentState return True def commentEndBangState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "-": self.currentToken["data"] += "--!" self.state = self.commentEndDashState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["data"] += "--!\uFFFD" self.state = self.commentState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-comment-end-bang-state"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["data"] += "--!" 
+ data self.state = self.commentState return True def doctypeState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "need-space-after-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeNameState return True def beforeDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-right-bracket"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] = "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "expected-doctype-name-but-got-eof"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] = data self.state = self.doctypeNameState return True def doctypeNameState(self): data = self.stream.char() if data in spaceCharacters: self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.state = self.afterDoctypeNameState elif data == ">": self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["name"] += "\uFFFD" self.state = self.doctypeNameState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype-name"}) self.currentToken["correct"] = False self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["name"] += data return True def afterDoctypeNameState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.currentToken["correct"] = False self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: if data in ("p", "P"): matched = True for expected in (("u", "U"), ("b", "B"), ("l", "L"), ("i", "I"), ("c", "C")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypePublicKeywordState return True elif data in ("s", "S"): matched = True for expected in (("y", "Y"), ("s", "S"), ("t", "T"), ("e", "E"), ("m", "M")): data = self.stream.char() if data not in expected: matched = False break if matched: self.state = self.afterDoctypeSystemKeywordState return True # All the characters read before the current 'data' will be # [a-zA-Z], so they're garbage in the bogus doctype and can be # discarded; only the latest character might be '>' or EOF # and needs to be ungetted self.stream.unget(data) self.tokenQueue.append({"type": tokenTypes["ParseError"], 
"data": "expected-space-or-right-bracket-in-doctype", "datavars": {"data": data}}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypePublicKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypePublicIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypePublicIdentifierState return True def beforeDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierDoubleQuotedState elif data == "'": self.currentToken["publicId"] = "" self.state = self.doctypePublicIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypePublicIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def doctypePublicIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypePublicIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["publicId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["publicId"] += data return True def afterDoctypePublicIdentifierState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.betweenDoctypePublicAndSystemIdentifiersState elif data == ">": 
self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def betweenDoctypePublicAndSystemIdentifiersState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data == '"': self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def afterDoctypeSystemKeywordState(self): data = self.stream.char() if data in spaceCharacters: self.state = self.beforeDoctypeSystemIdentifierState elif data in ("'", '"'): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.stream.unget(data) self.state = self.beforeDoctypeSystemIdentifierState return True def beforeDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == "\"": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierDoubleQuotedState elif data == "'": self.currentToken["systemId"] = "" self.state = self.doctypeSystemIdentifierSingleQuotedState elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.currentToken["correct"] = False self.state = self.bogusDoctypeState return True def doctypeSystemIdentifierDoubleQuotedState(self): data = self.stream.char() if data == "\"": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": 
"invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def doctypeSystemIdentifierSingleQuotedState(self): data = self.stream.char() if data == "'": self.state = self.afterDoctypeSystemIdentifierState elif data == "\u0000": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) self.currentToken["systemId"] += "\uFFFD" elif data == ">": self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-end-of-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.currentToken["systemId"] += data return True def afterDoctypeSystemIdentifierState(self): data = self.stream.char() if data in spaceCharacters: pass elif data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "eof-in-doctype"}) self.currentToken["correct"] = False self.tokenQueue.append(self.currentToken) self.state = self.dataState else: self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "unexpected-char-in-doctype"}) self.state = self.bogusDoctypeState return True def bogusDoctypeState(self): data = self.stream.char() if data == ">": self.tokenQueue.append(self.currentToken) self.state = self.dataState elif data is EOF: # XXX EMIT self.stream.unget(data) self.tokenQueue.append(self.currentToken) self.state = self.dataState else: pass return True def cdataSectionState(self): data = [] while True: data.append(self.stream.charsUntil("]")) data.append(self.stream.charsUntil(">")) char = self.stream.char() if char == EOF: break else: assert char == ">" if data[-1][-2:] == "]]": data[-1] = data[-1][:-2] break else: data.append(char) data = "".join(data) # Deal with null here rather than in the parser nullCount = data.count("\u0000") if nullCount > 0: for i in range(nullCount): self.tokenQueue.append({"type": tokenTypes["ParseError"], "data": "invalid-codepoint"}) data = data.replace("\u0000", "\uFFFD") if data: self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data}) self.state = self.dataState return True
76,929
Python
.py
1,621
33.02591
109
0.542115
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
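The tokenizer state methods above implement the HTML5 tokenization algorithm: malformed markup is reported as ParseError tokens, U+0000 is replaced with U+FFFD, and tokenization keeps going rather than raising. Below is a minimal, illustrative sketch of that behaviour through the library's public entry points; it assumes the vendored package is importable as html5lib, and the input strings are invented for the example.

import html5lib

# Duplicate attributes produce a "duplicate-attribute" parse error in
# attributeNameState, but the document still parses in non-strict mode.
doc = html5lib.parse('<p id="a" id="b">text</p>')

# A NUL character inside a quoted attribute value is replaced with U+FFFD,
# as done in the attributeValue*QuotedState methods.
frag = html5lib.parseFragment('<span title="a\x00b"></span>')

# An unterminated comment is only an "eof-in-comment" parse error; the
# comment token collected so far is still emitted.
doc2 = html5lib.parse("<!-- never closed")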
7,567
etree_lxml.py
CouchPotato_CouchPotatoServer/libs/html5lib/treebuilders/etree_lxml.py
"""Module for supporting the lxml.etree library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning """ from __future__ import absolute_import, division, unicode_literals import warnings import re import sys from . import _base from ..constants import DataLossWarning from .. import constants from . import etree as etree_builders from .. import ihatexml import lxml.etree as etree fullTree = True tag_regexp = re.compile("{([^}]*)}(.*)") comment_type = etree.Comment("asd").tag class DocumentType(object): def __init__(self, name, publicId, systemId): self.name = name self.publicId = publicId self.systemId = systemId class Document(object): def __init__(self): self._elementTree = None self._childNodes = [] def appendChild(self, element): self._elementTree.getroot().addnext(element._element) def _getChildNodes(self): return self._childNodes childNodes = property(_getChildNodes) def testSerializer(element): rv = [] finalText = None infosetFilter = ihatexml.InfosetFilter() def serializeElement(element, indent=0): if not hasattr(element, "tag"): if hasattr(element, "getroot"): # Full tree case rv.append("#document") if element.docinfo.internalDTD: if not (element.docinfo.public_id or element.docinfo.system_url): dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name else: dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( element.docinfo.root_name, element.docinfo.public_id, element.docinfo.system_url) rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) next_element = element.getroot() while next_element.getprevious() is not None: next_element = next_element.getprevious() while next_element is not None: serializeElement(next_element, indent + 2) next_element = next_element.getnext() elif isinstance(element, str) or isinstance(element, bytes): # Text in a fragment assert isinstance(element, str) or sys.version_info.major == 2 rv.append("|%s\"%s\"" % (' ' * indent, element)) else: # Fragment case rv.append("#document-fragment") for next_element in element: serializeElement(next_element, indent + 2) elif element.tag == comment_type: rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) else: assert isinstance(element, etree._Element) nsmatch = etree_builders.tag_regexp.match(element.tag) if nsmatch is not None: ns = nsmatch.group(1) tag = nsmatch.group(2) prefix = constants.prefixes[ns] rv.append("|%s<%s %s>" % (' ' * indent, prefix, infosetFilter.fromXmlName(tag))) else: rv.append("|%s<%s>" % (' ' * indent, infosetFilter.fromXmlName(element.tag))) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() name = infosetFilter.fromXmlName(name) prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = infosetFilter.fromXmlName(name) attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if 
hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) if finalText is not None: rv.append("|%s\"%s\"" % (' ' * 2, finalText)) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] finalText = None def serializeElement(element): if not hasattr(element, "tag"): if element.docinfo.internalDTD: if element.docinfo.doctype: dtd_str = element.docinfo.doctype else: dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name rv.append(dtd_str) serializeElement(element.getroot()) elif element.tag == comment_type: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (element.tag,)) else: attr = " ".join(["%s=\"%s\"" % (name, value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if hasattr(element, "tail") and element.tail: rv.append(element.tail) serializeElement(element) if finalText is not None: rv.append("%s\"" % (' ' * 2, finalText)) return "".join(rv) class TreeBuilder(_base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = None commentClass = None fragmentClass = Document implementation = etree def __init__(self, namespaceHTMLElements, fullTree=False): builder = etree_builders.getETreeModule(etree, fullTree=fullTree) infosetFilter = self.infosetFilter = ihatexml.InfosetFilter() self.namespaceHTMLElements = namespaceHTMLElements class Attributes(dict): def __init__(self, element, value={}): self._element = element dict.__init__(self, value) for key, value in self.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value def __setitem__(self, key, value): dict.__setitem__(self, key, value) if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value class Element(builder.Element): def __init__(self, name, namespace): name = infosetFilter.coerceElement(name) builder.Element.__init__(self, name, namespace=namespace) self._attributes = Attributes(self) def _setName(self, name): self._name = infosetFilter.coerceElement(name) self._element.tag = self._getETreeTag( self._name, self._namespace) def _getName(self): return infosetFilter.fromXmlName(self._name) name = property(_getName, _setName) def _getAttributes(self): return self._attributes def _setAttributes(self, attributes): self._attributes = Attributes(self, attributes) attributes = property(_getAttributes, _setAttributes) def insertText(self, data, insertBefore=None): data = infosetFilter.coerceCharacters(data) builder.Element.insertText(self, data, insertBefore) def appendChild(self, child): builder.Element.appendChild(self, child) class Comment(builder.Comment): def __init__(self, data): data = infosetFilter.coerceComment(data) builder.Comment.__init__(self, data) def _setData(self, data): data = infosetFilter.coerceComment(data) self._element.text = data def _getData(self): return self._element.text data = property(_getData, _setData) self.elementClass = Element self.commentClass = builder.Comment # self.fragmentClass = builder.DocumentFragment _base.TreeBuilder.__init__(self, 
namespaceHTMLElements) def reset(self): _base.TreeBuilder.reset(self) self.insertComment = self.insertCommentInitial self.initial_comments = [] self.doctype = None def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._elementTree else: return self.document._elementTree.getroot() def getFragment(self): fragment = [] element = self.openElements[0]._element if element.text: fragment.append(element.text) fragment.extend(list(element)) if element.tail: fragment.append(element.tail) return fragment def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] if not name: warnings.warn("lxml cannot represent empty doctype", DataLossWarning) self.doctype = None else: coercedName = self.infosetFilter.coerceElement(name) if coercedName != name: warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) doctype = self.doctypeClass(coercedName, publicId, systemId) self.doctype = doctype def insertCommentInitial(self, data, parent=None): self.initial_comments.append(data) def insertCommentMain(self, data, parent=None): if (parent == self.document and self.document._elementTree.getroot()[-1].tag == comment_type): warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) super(TreeBuilder, self).insertComment(data, parent) def insertRoot(self, token): """Create the document root""" # Because of the way libxml2 works, it doesn't seem to be possible to # alter information like the doctype after the tree has been parsed. # Therefore we need to use the built-in parser to create our iniial # tree, after which we can add elements like normal docStr = "" if self.doctype: assert self.doctype.name docStr += "<!DOCTYPE %s" % self.doctype.name if (self.doctype.publicId is not None or self.doctype.systemId is not None): docStr += (' PUBLIC "%s" ' % (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) if self.doctype.systemId: sysid = self.doctype.systemId if sysid.find("'") >= 0 and sysid.find('"') >= 0: warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) sysid = sysid.replace("'", 'U00027') if sysid.find("'") >= 0: docStr += '"%s"' % sysid else: docStr += "'%s'" % sysid else: docStr += "''" docStr += ">" if self.doctype.name != token["name"]: warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" root = etree.fromstring(docStr) # Append the initial comments: for comment_token in self.initial_comments: root.addprevious(etree.Comment(comment_token["data"])) # Create the root document and add the ElementTree to it self.document = self.documentClass() self.document._elementTree = root.getroottree() # Give the root element the right name name = token["name"] namespace = token.get("namespace", self.defaultNamespace) if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) root.tag = etree_tag # Add the root element to the internal child/open data structures root_element = self.elementClass(name, namespace) root_element._element = root self.document._childNodes.append(root_element) self.openElements.append(root_element) # Reset to the default insert comment function self.insertComment = self.insertCommentMain
14,031
Python
.py
300
33.91
121
0.568511
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
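The module docstring above lists the tree shapes lxml cannot represent; in those cases the builder degrades gracefully and emits DataLossWarning instead of failing. The sketch below is illustrative only: it assumes lxml is installed and the vendored package is importable as html5lib, and it uses a made-up document with a nameless doctype, one of the documented loss cases.

import warnings
import html5lib
from html5lib.constants import DataLossWarning

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always", DataLossWarning)
    # insertDoctype() above warns that lxml cannot represent an empty doctype.
    doc = html5lib.parse("<!DOCTYPE><p>hi</p>", treebuilder="lxml")

for w in caught:
    print(w.category.__name__, w.message)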
7,568
dom.py
CouchPotato_CouchPotatoServer/libs/html5lib/treebuilders/dom.py
from __future__ import absolute_import, division, unicode_literals from xml.dom import minidom, Node import weakref from . import _base from .. import constants from ..constants import namespaces from ..utils import moduleFactoryFactory def getDomBuilder(DomImplementation): Dom = DomImplementation class AttrList(object): def __init__(self, element): self.element = element def __iter__(self): return list(self.element.attributes.items()).__iter__() def __setitem__(self, name, value): self.element.setAttribute(name, value) def __len__(self): return len(list(self.element.attributes.items())) def items(self): return [(item[0], item[1]) for item in list(self.element.attributes.items())] def keys(self): return list(self.element.attributes.keys()) def __getitem__(self, name): return self.element.getAttribute(name) def __contains__(self, name): if isinstance(name, tuple): raise NotImplementedError else: return self.element.hasAttribute(name) class NodeBuilder(_base.Node): def __init__(self, element): _base.Node.__init__(self, element.nodeName) self.element = element namespace = property(lambda self: hasattr(self.element, "namespaceURI") and self.element.namespaceURI or None) def appendChild(self, node): node.parent = self self.element.appendChild(node.element) def insertText(self, data, insertBefore=None): text = self.element.ownerDocument.createTextNode(data) if insertBefore: self.element.insertBefore(text, insertBefore.element) else: self.element.appendChild(text) def insertBefore(self, node, refNode): self.element.insertBefore(node.element, refNode.element) node.parent = self def removeChild(self, node): if node.element.parentNode == self.element: self.element.removeChild(node.element) node.parent = None def reparentChildren(self, newParent): while self.element.hasChildNodes(): child = self.element.firstChild self.element.removeChild(child) newParent.element.appendChild(child) self.childNodes = [] def getAttributes(self): return AttrList(self.element) def setAttributes(self, attributes): if attributes: for name, value in list(attributes.items()): if isinstance(name, tuple): if name[0] is not None: qualifiedName = (name[0] + ":" + name[1]) else: qualifiedName = name[1] self.element.setAttributeNS(name[2], qualifiedName, value) else: self.element.setAttribute( name, value) attributes = property(getAttributes, setAttributes) def cloneNode(self): return NodeBuilder(self.element.cloneNode(False)) def hasContent(self): return self.element.hasChildNodes() def getNameTuple(self): if self.namespace is None: return namespaces["html"], self.name else: return self.namespace, self.name nameTuple = property(getNameTuple) class TreeBuilder(_base.TreeBuilder): def documentClass(self): self.dom = Dom.getDOMImplementation().createDocument(None, None, None) return weakref.proxy(self) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] domimpl = Dom.getDOMImplementation() doctype = domimpl.createDocumentType(name, publicId, systemId) self.document.appendChild(NodeBuilder(doctype)) if Dom == minidom: doctype.ownerDocument = self.dom def elementClass(self, name, namespace=None): if namespace is None and self.defaultNamespace is None: node = self.dom.createElement(name) else: node = self.dom.createElementNS(namespace, name) return NodeBuilder(node) def commentClass(self, data): return NodeBuilder(self.dom.createComment(data)) def fragmentClass(self): return NodeBuilder(self.dom.createDocumentFragment()) def appendChild(self, node): 
self.dom.appendChild(node.element) def testSerializer(self, element): return testSerializer(element) def getDocument(self): return self.dom def getFragment(self): return _base.TreeBuilder.getFragment(self).element def insertText(self, data, parent=None): data = data if parent != self: _base.TreeBuilder.insertText(self, data, parent) else: # HACK: allow text nodes as children of the document node if hasattr(self.dom, '_child_node_types'): if not Node.TEXT_NODE in self.dom._child_node_types: self.dom._child_node_types = list(self.dom._child_node_types) self.dom._child_node_types.append(Node.TEXT_NODE) self.dom.appendChild(self.dom.createTextNode(data)) implementation = DomImplementation name = None def testSerializer(element): element.normalize() rv = [] def serializeElement(element, indent=0): if element.nodeType == Node.DOCUMENT_TYPE_NODE: if element.name: if element.publicId or element.systemId: publicId = element.publicId or "" systemId = element.systemId or "" rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" % (' ' * indent, element.name, publicId, systemId)) else: rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name)) else: rv.append("|%s<!DOCTYPE >" % (' ' * indent,)) elif element.nodeType == Node.DOCUMENT_NODE: rv.append("#document") elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE: rv.append("#document-fragment") elif element.nodeType == Node.COMMENT_NODE: rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue)) elif element.nodeType == Node.TEXT_NODE: rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue)) else: if (hasattr(element, "namespaceURI") and element.namespaceURI is not None): name = "%s %s" % (constants.prefixes[element.namespaceURI], element.nodeName) else: name = element.nodeName rv.append("|%s<%s>" % (' ' * indent, name)) if element.hasAttributes(): attributes = [] for i in range(len(element.attributes)): attr = element.attributes.item(i) name = attr.nodeName value = attr.value ns = attr.namespaceURI if ns: name = "%s %s" % (constants.prefixes[ns], attr.localName) else: name = attr.nodeName attributes.append((name, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) indent += 2 for child in element.childNodes: serializeElement(child, indent) serializeElement(element, 0) return "\n".join(rv) return locals() # The actual means to get a module! getDomModule = moduleFactoryFactory(getDomBuilder)
8,469
Python
.py
181
31.872928
85
0.546469
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
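The DOM treebuilder above wraps xml.dom.minidom (or any compatible DOM implementation) behind the common TreeBuilder interface. A short usage sketch follows, assuming the vendored package is importable as html5lib; the markup is invented for the example.

import html5lib

dom_tree = html5lib.parse("<p class='x'>hello <b>world</b></p>",
                          treebuilder="dom")
# The result is a minidom Document, so the ordinary stdlib DOM API applies.
paragraph = dom_tree.getElementsByTagName("p")[0]
print(paragraph.getAttribute("class"))   # -> "x"
print(dom_tree.toxml()[:60])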
7,569
etree.py
CouchPotato_CouchPotatoServer/libs/html5lib/treebuilders/etree.py
from __future__ import absolute_import, division, unicode_literals from six import text_type import re from . import _base from .. import ihatexml from .. import constants from ..constants import namespaces from ..utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") def getETreeBuilder(ElementTreeImplementation, fullTree=False): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag class Element(_base.Node): def __init__(self, name, namespace=None): self._name = name self._namespace = namespace self._element = ElementTree.Element(self._getETreeTag(name, namespace)) if namespace is None: self.nameTuple = namespaces["html"], self._name else: self.nameTuple = self._namespace, self._name self.parent = None self._childNodes = [] self._flags = [] def _getETreeTag(self, name, namespace): if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) return etree_tag def _setName(self, name): self._name = name self._element.tag = self._getETreeTag(self._name, self._namespace) def _getName(self): return self._name name = property(_getName, _setName) def _setNamespace(self, namespace): self._namespace = namespace self._element.tag = self._getETreeTag(self._name, self._namespace) def _getNamespace(self): return self._namespace namespace = property(_getNamespace, _setNamespace) def _getAttributes(self): return self._element.attrib def _setAttributes(self, attributes): # Delete existing attributes first # XXX - there may be a better way to do this... for key in list(self._element.attrib.keys()): del self._element.attrib[key] for key, value in attributes.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], key[1]) else: name = key self._element.set(name, value) attributes = property(_getAttributes, _setAttributes) def _getChildNodes(self): return self._childNodes def _setChildNodes(self, value): del self._element[:] self._childNodes = [] for element in value: self.insertChild(element) childNodes = property(_getChildNodes, _setChildNodes) def hasContent(self): """Return true if the node has children or text""" return bool(self._element.text or len(self._element)) def appendChild(self, node): self._childNodes.append(node) self._element.append(node._element) node.parent = self def insertBefore(self, node, refNode): index = list(self._element).index(refNode._element) self._element.insert(index, node._element) node.parent = self def removeChild(self, node): self._element.remove(node._element) node.parent = None def insertText(self, data, insertBefore=None): if not(len(self._element)): if not self._element.text: self._element.text = "" self._element.text += data elif insertBefore is None: # Insert the text as the tail of the last child element if not self._element[-1].tail: self._element[-1].tail = "" self._element[-1].tail += data else: # Insert the text before the specified node children = list(self._element) index = children.index(insertBefore._element) if index > 0: if not self._element[index - 1].tail: self._element[index - 1].tail = "" self._element[index - 1].tail += data else: if not self._element.text: self._element.text = "" self._element.text += data def cloneNode(self): element = type(self)(self.name, self.namespace) for name, value in self.attributes.items(): element.attributes[name] = value return element def reparentChildren(self, newParent): if newParent.childNodes: newParent.childNodes[-1]._element.tail += self._element.text else: if not newParent._element.text: newParent._element.text = "" if 
self._element.text is not None: newParent._element.text += self._element.text self._element.text = "" _base.Node.reparentChildren(self, newParent) class Comment(Element): def __init__(self, data): # Use the superclass constructor to set all properties on the # wrapper element self._element = ElementTree.Comment(data) self.parent = None self._childNodes = [] self._flags = [] def _getData(self): return self._element.text def _setData(self, value): self._element.text = value data = property(_getData, _setData) class DocumentType(Element): def __init__(self, name, publicId, systemId): Element.__init__(self, "<!DOCTYPE>") self._element.text = name self.publicId = publicId self.systemId = systemId def _getPublicId(self): return self._element.get("publicId", "") def _setPublicId(self, value): if value is not None: self._element.set("publicId", value) publicId = property(_getPublicId, _setPublicId) def _getSystemId(self): return self._element.get("systemId", "") def _setSystemId(self, value): if value is not None: self._element.set("systemId", value) systemId = property(_getSystemId, _setSystemId) class Document(Element): def __init__(self): Element.__init__(self, "DOCUMENT_ROOT") class DocumentFragment(Element): def __init__(self): Element.__init__(self, "DOCUMENT_FRAGMENT") def testSerializer(element): rv = [] def serializeElement(element, indent=0): if not(hasattr(element, "tag")): element = element.getroot() if element.tag == "<!DOCTYPE>": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" systemId = element.get("systemId") or "" rv.append("""<!DOCTYPE %s "%s" "%s">""" % (element.text, publicId, systemId)) else: rv.append("<!DOCTYPE %s>" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": rv.append("#document") if element.text is not None: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise TypeError("Document node cannot have attributes") elif element.tag == ElementTreeCommentType: rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) else: assert isinstance(element.tag, text_type), \ "Expected unicode, got %s, %s" % (type(element.tag), element.tag) nsmatch = tag_regexp.match(element.tag) if nsmatch is None: name = element.tag else: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] name = "%s %s" % (prefix, name) rv.append("|%s<%s>" % (' ' * indent, name)) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = name attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] filter = ihatexml.InfosetFilter() def serializeElement(element): if isinstance(element, ElementTree.ElementTree): element = element.getroot() if element.tag == "<!DOCTYPE>": if element.get("publicId") or element.get("systemId"): publicId = element.get("publicId") or "" systemId 
= element.get("systemId") or "" rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" % (element.text, publicId, systemId)) else: rv.append("<!DOCTYPE %s>" % (element.text,)) elif element.tag == "DOCUMENT_ROOT": if element.text is not None: rv.append(element.text) if element.tail is not None: raise TypeError("Document node cannot have tail") if hasattr(element, "attrib") and len(element.attrib): raise TypeError("Document node cannot have attributes") for child in element: serializeElement(child) elif element.tag == ElementTreeCommentType: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (filter.fromXmlName(element.tag),)) else: attr = " ".join(["%s=\"%s\"" % ( filter.fromXmlName(name), value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv) class TreeBuilder(_base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = Element commentClass = Comment fragmentClass = DocumentFragment implementation = ElementTreeImplementation def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._element else: if self.defaultNamespace is not None: return self.document._element.find( "{%s}html" % self.defaultNamespace) else: return self.document._element.find("html") def getFragment(self): return _base.TreeBuilder.getFragment(self)._element return locals() getETreeModule = moduleFactoryFactory(getETreeBuilder)
12,609
Python
.py
275
31.134545
85
0.52355
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
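The generic etree builder above works with any ElementTree-compatible module. The sketch below (illustrative, not part of the library) drives it with the stdlib xml.etree.ElementTree via getTreeBuilder's implementation argument, and disables the XHTML namespace so plain tag names can be queried; it assumes the vendored package is importable as html5lib.

import xml.etree.ElementTree as ElementTree
import html5lib

parser = html5lib.HTMLParser(
    tree=html5lib.getTreeBuilder("etree", implementation=ElementTree),
    namespaceHTMLElements=False)
root = parser.parse("<title>demo</title><p>one<p>two")

# With fullTree=False (the default), getDocument() returns the <html> element.
print(root.find("head/title").text)        # -> "demo"
print([p.text for p in root.iter("p")])    # -> ['one', 'two']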
7,570
__init__.py
CouchPotato_CouchPotatoServer/libs/html5lib/treebuilders/__init__.py
"""A collection of modules for building different kinds of tree from HTML documents. To create a treebuilder for a new type of tree, you need to do implement several things: 1) A set of classes for various types of elements: Document, Doctype, Comment, Element. These must implement the interface of _base.treebuilders.Node (although comment nodes have a different signature for their constructor, see treebuilders.etree.Comment) Textual content may also be implemented as another node type, or not, as your tree implementation requires. 2) A treebuilder object (called TreeBuilder by convention) that inherits from treebuilders._base.TreeBuilder. This has 4 required attributes: documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes It also has one required method: getDocument - Returns the root node of the complete document tree 3) If you wish to run the unit tests, you must also create a testSerializer method on your treebuilder which accepts a node and returns a string containing Node and its children serialized according to the format used in the unittests """ from __future__ import absolute_import, division, unicode_literals from ..utils import default_etree treeBuilderCache = {} def getTreeBuilder(treeType, implementation=None, **kwargs): """Get a TreeBuilder class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - A generic builder for DOM implementations, defaulting to a xml.dom.minidom based implementation. "etree" - A generic builder for tree implementations exposing an ElementTree-like interface, defaulting to xml.etree.cElementTree if available and xml.etree.ElementTree if not. "lxml" - A etree-based builder for lxml.etree, handling limitations of lxml's implementation. implementation - (Currently applies to the "etree" and "dom" tree types). A module implementing the tree type e.g. xml.etree.ElementTree or xml.etree.cElementTree.""" treeType = treeType.lower() if treeType not in treeBuilderCache: if treeType == "dom": from . import dom # Come up with a sane default (pref. from the stdlib) if implementation is None: from xml.dom import minidom implementation = minidom # NEVER cache here, caching is done in the dom submodule return dom.getDomModule(implementation, **kwargs).TreeBuilder elif treeType == "lxml": from . import etree_lxml treeBuilderCache[treeType] = etree_lxml.TreeBuilder elif treeType == "etree": from . import etree if implementation is None: implementation = default_etree # NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeBuilder else: raise ValueError("""Unrecognised treebuilder "%s" """ % treeType) return treeBuilderCache.get(treeType)
3,405
Python
.py
63
45.31746
79
0.697507
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
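getTreeBuilder above is the dispatch point documented in the module docstring: each supported name yields a TreeBuilder class suitable for HTMLParser's tree argument, and anything else raises ValueError. A brief, hypothetical usage sketch, assuming the vendored package is importable as html5lib:

import html5lib
from html5lib import treebuilders

DomBuilder = treebuilders.getTreeBuilder("dom")      # defaults to xml.dom.minidom
EtreeBuilder = treebuilders.getTreeBuilder("ETREE")  # names are case-insensitive

parser = html5lib.HTMLParser(tree=EtreeBuilder)
document = parser.parse("<body><em>hi</em>")

try:
    treebuilders.getTreeBuilder("no-such-tree")
except ValueError as err:
    print(err)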
7,571
_base.py
CouchPotato_CouchPotatoServer/libs/html5lib/treebuilders/_base.py
from __future__ import absolute_import, division, unicode_literals from six import text_type from ..constants import scopingElements, tableInsertModeElements, namespaces # The scope markers are inserted when entering object elements, # marquees, table cells, and table captions, and are used to prevent formatting # from "leaking" into tables, object elements, and marquees. Marker = None listElementsMap = { None: (frozenset(scopingElements), False), "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False), "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"), (namespaces["html"], "ul")])), False), "table": (frozenset([(namespaces["html"], "html"), (namespaces["html"], "table")]), False), "select": (frozenset([(namespaces["html"], "optgroup"), (namespaces["html"], "option")]), True) } class Node(object): def __init__(self, name): """Node representing an item in the tree. name - The tag name associated with the node parent - The parent of the current node (or None for the document node) value - The value of the current node (applies to text nodes and comments attributes - a dict holding name, value pairs for attributes of the node childNodes - a list of child nodes of the current node. This must include all elements but not necessarily other node types _flags - A list of miscellaneous flags that can be set on the node """ self.name = name self.parent = None self.value = None self.attributes = {} self.childNodes = [] self._flags = [] def __str__(self): attributesStr = " ".join(["%s=\"%s\"" % (name, value) for name, value in self.attributes.items()]) if attributesStr: return "<%s %s>" % (self.name, attributesStr) else: return "<%s>" % (self.name) def __repr__(self): return "<%s>" % (self.name) def appendChild(self, node): """Insert node as a child of the current node """ raise NotImplementedError def insertText(self, data, insertBefore=None): """Insert data as text in the current node, positioned before the start of node insertBefore or to the end of the node's text. """ raise NotImplementedError def insertBefore(self, node, refNode): """Insert node as a child of the current node, before refNode in the list of child nodes. Raises ValueError if refNode is not a child of the current node""" raise NotImplementedError def removeChild(self, node): """Remove node from the children of the current node """ raise NotImplementedError def reparentChildren(self, newParent): """Move all the children of the current node to newParent. This is needed so that trees that don't store text as nodes move the text in the correct way """ # XXX - should this method be made more general? for child in self.childNodes: newParent.appendChild(child) self.childNodes = [] def cloneNode(self): """Return a shallow copy of the current node i.e. 
a node with the same name and attributes but with no parent or child nodes """ raise NotImplementedError def hasContent(self): """Return true if the node has children or text, false otherwise """ raise NotImplementedError class ActiveFormattingElements(list): def append(self, node): equalCount = 0 if node != Marker: for element in self[::-1]: if element == Marker: break if self.nodesEqual(element, node): equalCount += 1 if equalCount == 3: self.remove(element) break list.append(self, node) def nodesEqual(self, node1, node2): if not node1.nameTuple == node2.nameTuple: return False if not node1.attributes == node2.attributes: return False return True class TreeBuilder(object): """Base treebuilder implementation documentClass - the class to use for the bottommost node of a document elementClass - the class to use for HTML Elements commentClass - the class to use for comments doctypeClass - the class to use for doctypes """ # Document class documentClass = None # The class to use for creating a node elementClass = None # The class to use for creating comments commentClass = None # The class to use for creating doctypes doctypeClass = None # Fragment class fragmentClass = None def __init__(self, namespaceHTMLElements): if namespaceHTMLElements: self.defaultNamespace = "http://www.w3.org/1999/xhtml" else: self.defaultNamespace = None self.reset() def reset(self): self.openElements = [] self.activeFormattingElements = ActiveFormattingElements() # XXX - rename these to headElement, formElement self.headPointer = None self.formPointer = None self.insertFromTable = False self.document = self.documentClass() def elementInScope(self, target, variant=None): # If we pass a node in we match that. if we pass a string # match any node with that name exactNode = hasattr(target, "nameTuple") listElements, invert = listElementsMap[variant] for node in reversed(self.openElements): if (node.name == target and not exactNode or node == target and exactNode): return True elif (invert ^ (node.nameTuple in listElements)): return False assert False # We should never reach this point def reconstructActiveFormattingElements(self): # Within this algorithm the order of steps described in the # specification is not quite the same as the order of steps in the # code. It should still do the same though. # Step 1: stop the algorithm when there's nothing to do. if not self.activeFormattingElements: return # Step 2 and step 3: we start with the last element. So i is -1. i = len(self.activeFormattingElements) - 1 entry = self.activeFormattingElements[i] if entry == Marker or entry in self.openElements: return # Step 6 while entry != Marker and entry not in self.openElements: if i == 0: # This will be reset to 0 below i = -1 break i -= 1 # Step 5: let entry be one earlier in the list. 
entry = self.activeFormattingElements[i] while True: # Step 7 i += 1 # Step 8 entry = self.activeFormattingElements[i] clone = entry.cloneNode() # Mainly to get a new copy of the attributes # Step 9 element = self.insertElement({"type": "StartTag", "name": clone.name, "namespace": clone.namespace, "data": clone.attributes}) # Step 10 self.activeFormattingElements[i] = element # Step 11 if element == self.activeFormattingElements[-1]: break def clearActiveFormattingElements(self): entry = self.activeFormattingElements.pop() while self.activeFormattingElements and entry != Marker: entry = self.activeFormattingElements.pop() def elementInActiveFormattingElements(self, name): """Check if an element exists between the end of the active formatting elements and the last marker. If it does, return it, else return false""" for item in self.activeFormattingElements[::-1]: # Check for Marker first because if it's a Marker it doesn't have a # name attribute. if item == Marker: break elif item.name == name: return item return False def insertRoot(self, token): element = self.createElement(token) self.openElements.append(element) self.document.appendChild(element) def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] doctype = self.doctypeClass(name, publicId, systemId) self.document.appendChild(doctype) def insertComment(self, token, parent=None): if parent is None: parent = self.openElements[-1] parent.appendChild(self.commentClass(token["data"])) def createElement(self, token): """Create an element but don't insert it anywhere""" name = token["name"] namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] return element def _getInsertFromTable(self): return self._insertFromTable def _setInsertFromTable(self, value): """Switch the function used to insert an element from the normal one to the misnested table one and back again""" self._insertFromTable = value if value: self.insertElement = self.insertElementTable else: self.insertElement = self.insertElementNormal insertFromTable = property(_getInsertFromTable, _setInsertFromTable) def insertElementNormal(self, token): name = token["name"] assert isinstance(name, text_type), "Element %s not unicode" % name namespace = token.get("namespace", self.defaultNamespace) element = self.elementClass(name, namespace) element.attributes = token["data"] self.openElements[-1].appendChild(element) self.openElements.append(element) return element def insertElementTable(self, token): """Create an element and insert it into the tree""" element = self.createElement(token) if self.openElements[-1].name not in tableInsertModeElements: return self.insertElementNormal(token) else: # We should be in the InTable mode. This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() if insertBefore is None: parent.appendChild(element) else: parent.insertBefore(element, insertBefore) self.openElements.append(element) return element def insertText(self, data, parent=None): """Insert text data.""" if parent is None: parent = self.openElements[-1] if (not self.insertFromTable or (self.insertFromTable and self.openElements[-1].name not in tableInsertModeElements)): parent.insertText(data) else: # We should be in the InTable mode. 
This means we want to do # special magic element rearranging parent, insertBefore = self.getTableMisnestedNodePosition() parent.insertText(data, insertBefore) def getTableMisnestedNodePosition(self): """Get the foster parent element, and sibling to insert before (or None) when inserting a misnested table node""" # The foster parent element is the one which comes before the most # recently opened table element # XXX - this is really inelegant lastTable = None fosterParent = None insertBefore = None for elm in self.openElements[::-1]: if elm.name == "table": lastTable = elm break if lastTable: # XXX - we should really check that this parent is actually a # node here if lastTable.parent: fosterParent = lastTable.parent insertBefore = lastTable else: fosterParent = self.openElements[ self.openElements.index(lastTable) - 1] else: fosterParent = self.openElements[0] return fosterParent, insertBefore def generateImpliedEndTags(self, exclude=None): name = self.openElements[-1].name # XXX td, th and tr are not actually needed if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and name != exclude): self.openElements.pop() # XXX This is not entirely what the specification says. We should # investigate it more closely. self.generateImpliedEndTags(exclude) def getDocument(self): "Return the final tree" return self.document def getFragment(self): "Return the final fragment" # assert self.innerHTML fragment = self.fragmentClass() self.openElements[0].reparentChildren(fragment) return fragment def testSerializer(self, node): """Serialize the subtree of node in the format required by unit tests node - the node from which to start serializing""" raise NotImplementedError
13,699
Python
.py
312
33.298077
90
0.615523
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
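The treebuilder base class above is normally driven through the parser front end rather than instantiated directly. A minimal sketch, assuming the vendored libs directory is on sys.path so the package imports as html5lib:

# Sketch: exercising the TreeBuilder machinery through the public parser API.
# Assumes the vendored library is importable as "html5lib"; adjust for this repo's layout.
import html5lib

# parse() picks a concrete TreeBuilder subclass ("etree" by default) and returns
# the document object produced by its getDocument() method.
dom_document = html5lib.parse("<p>Hello <b>world", treebuilder="dom")

# The same selection can be made explicitly via getTreeBuilder(), which is what
# parse() does internally before running the tokenizer against the builder.
TreeBuilderClass = html5lib.getTreeBuilder("etree")
parser = html5lib.HTMLParser(tree=TreeBuilderClass)
etree_document = parser.parse("<p>Hello <b>world")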
7,572
dom.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/dom.py
from __future__ import absolute_import, division, unicode_literals

from xml.dom import Node

import gettext
_ = gettext.gettext

from . import _base


class TreeWalker(_base.NonRecursiveTreeWalker):
    def getNodeDetails(self, node):
        if node.nodeType == Node.DOCUMENT_TYPE_NODE:
            return _base.DOCTYPE, node.name, node.publicId, node.systemId

        elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
            return _base.TEXT, node.nodeValue

        elif node.nodeType == Node.ELEMENT_NODE:
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                if attr.namespaceURI:
                    attrs[(attr.namespaceURI, attr.localName)] = attr.value
                else:
                    attrs[(None, attr.name)] = attr.value
            return (_base.ELEMENT, node.namespaceURI, node.nodeName,
                    attrs, node.hasChildNodes())

        elif node.nodeType == Node.COMMENT_NODE:
            return _base.COMMENT, node.nodeValue

        elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
            return (_base.DOCUMENT,)

        else:
            return _base.UNKNOWN, node.nodeType

    def getFirstChild(self, node):
        return node.firstChild

    def getNextSibling(self, node):
        return node.nextSibling

    def getParentNode(self, node):
        return node.parentNode
1,457
Python
.py
33
33.909091
80
0.635719
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,573
pulldom.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/pulldom.py
from __future__ import absolute_import, division, unicode_literals

from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
    COMMENT, IGNORABLE_WHITESPACE, CHARACTERS

from . import _base

from ..constants import voidElements


class TreeWalker(_base.TreeWalker):
    def __iter__(self):
        ignore_until = None
        previous = None
        for event in self.tree:
            if previous is not None and \
                    (ignore_until is None or previous[1] is ignore_until):
                if previous[1] is ignore_until:
                    ignore_until = None
                for token in self.tokens(previous, event):
                    yield token
                    if token["type"] == "EmptyTag":
                        ignore_until = previous[1]
            previous = event
        if ignore_until is None or previous[1] is ignore_until:
            for token in self.tokens(previous, None):
                yield token
        elif ignore_until is not None:
            raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")

    def tokens(self, event, next):
        type, node = event
        if type == START_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            attrs = {}
            for attr in list(node.attributes.keys()):
                attr = node.getAttributeNode(attr)
                attrs[(attr.namespaceURI, attr.localName)] = attr.value
            if name in voidElements:
                for token in self.emptyTag(namespace,
                                           name,
                                           attrs,
                                           not next or next[1] is not node):
                    yield token
            else:
                yield self.startTag(namespace, name, attrs)

        elif type == END_ELEMENT:
            name = node.nodeName
            namespace = node.namespaceURI
            if name not in voidElements:
                yield self.endTag(namespace, name)

        elif type == COMMENT:
            yield self.comment(node.nodeValue)

        elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
            for token in self.text(node.nodeValue):
                yield token

        else:
            yield self.unknown(type)
2,302
Python
.py
53
29.245283
92
0.548013
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,574
etree.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/etree.py
from __future__ import absolute_import, division, unicode_literals try: from collections import OrderedDict except ImportError: try: from ordereddict import OrderedDict except ImportError: OrderedDict = dict import gettext _ = gettext.gettext import re from six import text_type from . import _base from ..utils import moduleFactoryFactory tag_regexp = re.compile("{([^}]*)}(.*)") def getETreeBuilder(ElementTreeImplementation): ElementTree = ElementTreeImplementation ElementTreeCommentType = ElementTree.Comment("asd").tag class TreeWalker(_base.NonRecursiveTreeWalker): """Given the particular ElementTree representation, this implementation, to avoid using recursion, returns "nodes" as tuples with the following content: 1. The current element 2. The index of the element relative to its parent 3. A stack of ancestor elements 4. A flag "text", "tail" or None to indicate if the current node is a text node; either the text or tail of the current element (1) """ def getNodeDetails(self, node): if isinstance(node, tuple): # It might be the root Element elt, key, parents, flag = node if flag in ("text", "tail"): return _base.TEXT, getattr(elt, flag) else: node = elt if not(hasattr(node, "tag")): node = node.getroot() if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"): return (_base.DOCUMENT,) elif node.tag == "<!DOCTYPE>": return (_base.DOCTYPE, node.text, node.get("publicId"), node.get("systemId")) elif node.tag == ElementTreeCommentType: return _base.COMMENT, node.text else: assert type(node.tag) == text_type, type(node.tag) # This is assumed to be an ordinary element match = tag_regexp.match(node.tag) if match: namespace, tag = match.groups() else: namespace = None tag = node.tag attrs = OrderedDict() for name, value in list(node.attrib.items()): match = tag_regexp.match(name) if match: attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value return (_base.ELEMENT, namespace, tag, attrs, len(node) or node.text) def getFirstChild(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: element, key, parents, flag = node, None, [], None if flag in ("text", "tail"): return None else: if element.text: return element, key, parents, "text" elif len(element): parents.append(element) return element[0], 0, parents, None else: return None def getNextSibling(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: return None if flag == "text": if len(element): parents.append(element) return element[0], 0, parents, None else: return None else: if element.tail and flag != "tail": return element, key, parents, "tail" elif key < len(parents[-1]) - 1: return parents[-1][key + 1], key + 1, parents, None else: return None def getParentNode(self, node): if isinstance(node, tuple): element, key, parents, flag = node else: return None if flag == "text": if not parents: return element else: return element, key, parents, None else: parent = parents.pop() if not parents: return parent else: return parent, list(parents[-1]).index(parent), parents, None return locals() getETreeModule = moduleFactoryFactory(getETreeBuilder)
4,613
Python
.py
113
26.654867
81
0.517765
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,575
genshistream.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/genshistream.py
from __future__ import absolute_import, division, unicode_literals from genshi.core import QName from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT from . import _base from ..constants import voidElements, namespaces class TreeWalker(_base.TreeWalker): def __iter__(self): # Buffer the events so we can pass in the following one previous = None for event in self.tree: if previous is not None: for token in self.tokens(previous, event): yield token previous = event # Don't forget the final event! if previous is not None: for token in self.tokens(previous, None): yield token def tokens(self, event, next): kind, data, pos = event if kind == START: tag, attribs = data name = tag.localname namespace = tag.namespace converted_attribs = {} for k, v in attribs: if isinstance(k, QName): converted_attribs[(k.namespace, k.localname)] = v else: converted_attribs[(None, k)] = v if namespace == namespaces["html"] and name in voidElements: for token in self.emptyTag(namespace, name, converted_attribs, not next or next[0] != END or next[1] != tag): yield token else: yield self.startTag(namespace, name, converted_attribs) elif kind == END: name = data.localname namespace = data.namespace if name not in voidElements: yield self.endTag(namespace, name) elif kind == COMMENT: yield self.comment(data) elif kind == TEXT: for token in self.text(data): yield token elif kind == DOCTYPE: yield self.doctype(*data) elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, START_CDATA, END_CDATA, PI): pass else: yield self.unknown(kind)
2,278
Python
.py
55
28.454545
78
0.54957
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,576
__init__.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/__init__.py
"""A collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. """ from __future__ import absolute_import, division, unicode_literals import sys from ..utils import default_etree treeWalkerCache = {} def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - The xml.dom.minidom DOM implementation "pulldom" - The xml.dom.pulldom event stream "etree" - A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). "lxml" - Optimized walker for lxml.etree "genshi" - a Genshi stream implementation - (Currently applies to the "etree" tree type only). A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree.""" treeType = treeType.lower() if treeType not in treeWalkerCache: if treeType in ("dom", "pulldom"): name = "%s.%s" % (__name__, treeType) __import__(name) mod = sys.modules[name] treeWalkerCache[treeType] = mod.TreeWalker elif treeType == "genshi": from . import genshistream treeWalkerCache[treeType] = genshistream.TreeWalker elif treeType == "lxml": from . import lxmletree treeWalkerCache[treeType] = lxmletree.TreeWalker elif treeType == "etree": from . import etree if implementation is None: implementation = default_etree # XXX: NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeWalker return treeWalkerCache.get(treeType)
2,323
Python
.py
46
40.413043
80
0.655781
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
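getTreeWalker() above is the factory that turns a finished tree back into a token stream. A short sketch, again assuming the vendored package imports as html5lib:

# Sketch: turning a parsed tree back into a token stream with getTreeWalker().
import html5lib
from html5lib import getTreeWalker

document = html5lib.parse("<p>Some <span>text</span></p>", treebuilder="dom")

walker = getTreeWalker("dom")  # walker class for xml.dom.minidom trees
for token in walker(document):
    # Tokens are plain dicts such as {"type": "StartTag", "name": "p", ...};
    # Characters/SpaceCharacters tokens carry "data" instead of a "name".
    print(token["type"], token.get("name"))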
7,577
lxmletree.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/lxmletree.py
from __future__ import absolute_import, division, unicode_literals from six import text_type from lxml import etree from ..treebuilders.etree import tag_regexp from gettext import gettext _ = gettext from . import _base from .. import ihatexml def ensure_str(s): if s is None: return None elif isinstance(s, text_type): return s else: return s.decode("utf-8", "strict") class Root(object): def __init__(self, et): self.elementtree = et self.children = [] if et.docinfo.internalDTD: self.children.append(Doctype(self, ensure_str(et.docinfo.root_name), ensure_str(et.docinfo.public_id), ensure_str(et.docinfo.system_url))) root = et.getroot() node = root while node.getprevious() is not None: node = node.getprevious() while node is not None: self.children.append(node) node = node.getnext() self.text = None self.tail = None def __getitem__(self, key): return self.children[key] def getnext(self): return None def __len__(self): return 1 class Doctype(object): def __init__(self, root_node, name, public_id, system_id): self.root_node = root_node self.name = name self.public_id = public_id self.system_id = system_id self.text = None self.tail = None def getnext(self): return self.root_node.children[1] class FragmentRoot(Root): def __init__(self, children): self.children = [FragmentWrapper(self, child) for child in children] self.text = self.tail = None def getnext(self): return None class FragmentWrapper(object): def __init__(self, fragment_root, obj): self.root_node = fragment_root self.obj = obj if hasattr(self.obj, 'text'): self.text = ensure_str(self.obj.text) else: self.text = None if hasattr(self.obj, 'tail'): self.tail = ensure_str(self.obj.tail) else: self.tail = None def __getattr__(self, name): return getattr(self.obj, name) def getnext(self): siblings = self.root_node.children idx = siblings.index(self) if idx < len(siblings) - 1: return siblings[idx + 1] else: return None def __getitem__(self, key): return self.obj[key] def __bool__(self): return bool(self.obj) def getparent(self): return None def __str__(self): return str(self.obj) def __unicode__(self): return str(self.obj) def __len__(self): return len(self.obj) class TreeWalker(_base.NonRecursiveTreeWalker): def __init__(self, tree): if hasattr(tree, "getroot"): tree = Root(tree) elif isinstance(tree, list): tree = FragmentRoot(tree) _base.NonRecursiveTreeWalker.__init__(self, tree) self.filter = ihatexml.InfosetFilter() def getNodeDetails(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key return _base.TEXT, ensure_str(getattr(node, key)) elif isinstance(node, Root): return (_base.DOCUMENT,) elif isinstance(node, Doctype): return _base.DOCTYPE, node.name, node.public_id, node.system_id elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): return _base.TEXT, node.obj elif node.tag == etree.Comment: return _base.COMMENT, ensure_str(node.text) elif node.tag == etree.Entity: return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &; else: # This is assumed to be an ordinary element match = tag_regexp.match(ensure_str(node.tag)) if match: namespace, tag = match.groups() else: namespace = None tag = ensure_str(node.tag) attrs = {} for name, value in list(node.attrib.items()): name = ensure_str(name) value = ensure_str(value) match = tag_regexp.match(name) if match: attrs[(match.group(1), match.group(2))] = value else: attrs[(None, name)] = value return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag), attrs, len(node) > 0 or 
node.text) def getFirstChild(self, node): assert not isinstance(node, tuple), _("Text nodes have no children") assert len(node) or node.text, "Node has no children" if node.text: return (node, "text") else: return node[0] def getNextSibling(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key if key == "text": # XXX: we cannot use a "bool(node) and node[0] or None" construct here # because node[0] might evaluate to False if it has no child element if len(node): return node[0] else: return None else: # tail return node.getnext() return (node, "tail") if node.tail else node.getnext() def getParentNode(self, node): if isinstance(node, tuple): # Text node node, key = node assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key if key == "text": return node # else: fallback to "normal" processing return node.getparent()
6,033
Python
.py
159
27.358491
92
0.5579
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,578
_base.py
CouchPotato_CouchPotatoServer/libs/html5lib/treewalkers/_base.py
from __future__ import absolute_import, division, unicode_literals from six import text_type, string_types import gettext _ = gettext.gettext from xml.dom import Node DOCUMENT = Node.DOCUMENT_NODE DOCTYPE = Node.DOCUMENT_TYPE_NODE TEXT = Node.TEXT_NODE ELEMENT = Node.ELEMENT_NODE COMMENT = Node.COMMENT_NODE ENTITY = Node.ENTITY_NODE UNKNOWN = "<#UNKNOWN#>" from ..constants import voidElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) def to_text(s, blank_if_none=True): """Wrapper around six.text_type to convert None to empty string""" if s is None: if blank_if_none: return "" else: return None elif isinstance(s, text_type): return s else: return text_type(s) def is_text_or_none(string): """Wrapper around isinstance(string_types) or is None""" return string is None or isinstance(string, string_types) class TreeWalker(object): def __init__(self, tree): self.tree = tree def __iter__(self): raise NotImplementedError def error(self, msg): return {"type": "SerializeError", "data": msg} def emptyTag(self, namespace, name, attrs, hasChildren=False): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(name) assert all((namespace is None or isinstance(namespace, string_types)) and isinstance(name, string_types) and isinstance(value, string_types) for (namespace, name), value in attrs.items()) yield {"type": "EmptyTag", "name": to_text(name, False), "namespace": to_text(namespace), "data": attrs} if hasChildren: yield self.error(_("Void element has children")) def startTag(self, namespace, name, attrs): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(name) assert all((namespace is None or isinstance(namespace, string_types)) and isinstance(name, string_types) and isinstance(value, string_types) for (namespace, name), value in attrs.items()) return {"type": "StartTag", "name": text_type(name), "namespace": to_text(namespace), "data": dict(((to_text(namespace, False), to_text(name)), to_text(value, False)) for (namespace, name), value in attrs.items())} def endTag(self, namespace, name): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(namespace) return {"type": "EndTag", "name": to_text(name, False), "namespace": to_text(namespace), "data": {}} def text(self, data): assert isinstance(data, string_types), type(data) data = to_text(data) middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: yield {"type": "SpaceCharacters", "data": left} data = middle middle = data.rstrip(spaceCharacters) right = data[len(middle):] if middle: yield {"type": "Characters", "data": middle} if right: yield {"type": "SpaceCharacters", "data": right} def comment(self, data): assert isinstance(data, string_types), type(data) return {"type": "Comment", "data": text_type(data)} def doctype(self, name, publicId=None, systemId=None, correct=True): assert is_text_or_none(name), type(name) assert is_text_or_none(publicId), type(publicId) assert is_text_or_none(systemId), type(systemId) return {"type": "Doctype", "name": to_text(name), "publicId": to_text(publicId), "systemId": to_text(systemId), "correct": to_text(correct)} def entity(self, name): assert isinstance(name, string_types), type(name) return {"type": "Entity", "name": text_type(name)} def unknown(self, nodeType): return self.error(_("Unknown node type: ") + nodeType) class 
NonRecursiveTreeWalker(TreeWalker): def getNodeDetails(self, node): raise NotImplementedError def getFirstChild(self, node): raise NotImplementedError def getNextSibling(self, node): raise NotImplementedError def getParentNode(self, node): raise NotImplementedError def __iter__(self): currentNode = self.tree while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] hasChildren = False if type == DOCTYPE: yield self.doctype(*details) elif type == TEXT: for token in self.text(*details): yield token elif type == ELEMENT: namespace, name, attributes, hasChildren = details if name in voidElements: for token in self.emptyTag(namespace, name, attributes, hasChildren): yield token hasChildren = False else: yield self.startTag(namespace, name, attributes) elif type == COMMENT: yield self.comment(details[0]) elif type == ENTITY: yield self.entity(details[0]) elif type == DOCUMENT: hasChildren = True else: yield self.unknown(details[0]) if hasChildren: firstChild = self.getFirstChild(currentNode) else: firstChild = None if firstChild is not None: currentNode = firstChild else: while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] if type == ELEMENT: namespace, name, attributes, hasChildren = details if name not in voidElements: yield self.endTag(namespace, name) if self.tree is currentNode: currentNode = None break nextSibling = self.getNextSibling(currentNode) if nextSibling is not None: currentNode = nextSibling break else: currentNode = self.getParentNode(currentNode)
6,907
Python
.py
158
31.449367
88
0.576264
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,579
sax.py
CouchPotato_CouchPotatoServer/libs/html5lib/treeadapters/sax.py
from __future__ import absolute_import, division, unicode_literals

from xml.sax.xmlreader import AttributesNSImpl

from ..constants import adjustForeignAttributes, unadjustForeignAttributes

prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
    if prefix is not None:
        prefix_mapping[prefix] = namespace


def to_sax(walker, handler):
    """Call SAX-like content handler based on treewalker walker"""
    handler.startDocument()
    for prefix, namespace in prefix_mapping.items():
        handler.startPrefixMapping(prefix, namespace)

    for token in walker:
        type = token["type"]
        if type == "Doctype":
            continue
        elif type in ("StartTag", "EmptyTag"):
            attrs = AttributesNSImpl(token["data"],
                                     unadjustForeignAttributes)
            handler.startElementNS((token["namespace"], token["name"]),
                                   token["name"],
                                   attrs)
            if type == "EmptyTag":
                handler.endElementNS((token["namespace"], token["name"]),
                                     token["name"])
        elif type == "EndTag":
            handler.endElementNS((token["namespace"], token["name"]),
                                 token["name"])
        elif type in ("Characters", "SpaceCharacters"):
            handler.characters(token["data"])
        elif type == "Comment":
            pass
        else:
            assert False, "Unknown token type"

    for prefix, namespace in prefix_mapping.items():
        handler.endPrefixMapping(prefix)
    handler.endDocument()
1,661
Python
.py
37
32.972973
74
0.593074
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
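to_sax() above replays a walker's token stream into any SAX ContentHandler. A hedged sketch; the TagCounter handler and markup are illustrative, and it assumes the treeadapters package is importable as in upstream html5lib:

# Sketch: feeding a tree walker's tokens to a SAX handler via to_sax().
import html5lib
from xml.sax.handler import ContentHandler
from html5lib.treeadapters.sax import to_sax


class TagCounter(ContentHandler):
    """Illustrative handler that just counts start-tag events."""
    def __init__(self):
        ContentHandler.__init__(self)
        self.count = 0

    def startElementNS(self, name, qname, attrs):
        self.count += 1


tree = html5lib.parse("<p>one</p><p>two</p>", treebuilder="dom")
walker = html5lib.getTreeWalker("dom")

handler = TagCounter()
to_sax(walker(tree), handler)
print(handler.count)  # counts every start tag, including html/head/body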
7,580
datrie.py
CouchPotato_CouchPotatoServer/libs/html5lib/trie/datrie.py
from __future__ import absolute_import, division, unicode_literals

from datrie import Trie as DATrie
from six import text_type

from ._base import Trie as ABCTrie


class Trie(ABCTrie):
    def __init__(self, data):
        chars = set()
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")
            for char in key:
                chars.add(char)

        self._data = DATrie("".join(chars))
        for key, value in data.items():
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        raise NotImplementedError()

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        return self._data.keys(prefix)

    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)

    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)

    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
1,166
Python
.py
31
29.741935
66
0.623886
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,581
__init__.py
CouchPotato_CouchPotatoServer/libs/html5lib/trie/__init__.py
from __future__ import absolute_import, division, unicode_literals

from .py import Trie as PyTrie

Trie = PyTrie

try:
    from .datrie import Trie as DATrie
except ImportError:
    pass
else:
    Trie = DATrie
212
Python
.py
9
20.888889
66
0.765
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,582
py.py
CouchPotato_CouchPotatoServer/libs/html5lib/trie/py.py
from __future__ import absolute_import, division, unicode_literals
from six import text_type

from bisect import bisect_left

from ._base import Trie as ABCTrie


class Trie(ABCTrie):
    def __init__(self, data):
        if not all(isinstance(x, text_type) for x in data.keys()):
            raise TypeError("All keys must be strings")

        self._data = data
        self._keys = sorted(data.keys())
        self._cachestr = ""
        self._cachepoints = (0, len(data))

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        return iter(self._data)

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        if prefix is None or prefix == "" or not self._keys:
            return set(self._keys)

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            start = i = bisect_left(self._keys, prefix, lo, hi)
        else:
            start = i = bisect_left(self._keys, prefix)

        keys = set()
        if start == len(self._keys):
            return keys

        while self._keys[i].startswith(prefix):
            keys.add(self._keys[i])
            i += 1

        self._cachestr = prefix
        self._cachepoints = (start, i)

        return keys

    def has_keys_with_prefix(self, prefix):
        if prefix in self._data:
            return True

        if prefix.startswith(self._cachestr):
            lo, hi = self._cachepoints
            i = bisect_left(self._keys, prefix, lo, hi)
        else:
            i = bisect_left(self._keys, prefix)

        if i == len(self._keys):
            return False

        return self._keys[i].startswith(prefix)
1,763
Python
.py
48
27.666667
66
0.573113
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
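The pure-Python Trie above keeps a sorted key list and caches the last bisect window between calls. A small usage sketch with made-up data:

# Sketch: prefix lookups with the pure-Python Trie (keys must be text strings).
# The entity-like data below is illustrative only.
from __future__ import unicode_literals
from html5lib.trie.py import Trie  # the pure-Python implementation from this record

entities = {"amp;": "&", "ampersand": "&", "apos;": "'"}
trie = Trie(entities)

print(trie.keys("amp"))                   # {'amp;', 'ampersand'}
print(trie.has_keys_with_prefix("ap"))    # True
print(trie.longest_prefix("ampersands"))  # 'ampersand' (longest key that prefixes the input)
print(trie["amp;"])                       # '&' -- Mapping-style item access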
7,583
_base.py
CouchPotato_CouchPotatoServer/libs/html5lib/trie/_base.py
from __future__ import absolute_import, division, unicode_literals

from collections import Mapping


class Trie(Mapping):
    """Abstract base class for tries"""

    def keys(self, prefix=None):
        # super() needs explicit arguments so this also works on Python 2
        keys = super(Trie, self).keys()

        if prefix is None:
            return set(keys)

        # Python 2.6: no set comprehensions
        return set([x for x in keys if x.startswith(prefix)])

    def has_keys_with_prefix(self, prefix):
        for key in self.keys():
            if key.startswith(prefix):
                return True

        return False

    def longest_prefix(self, prefix):
        if prefix in self:
            return prefix

        for i in range(1, len(prefix) + 1):
            if prefix[:-i] in self:
                return prefix[:-i]

        raise KeyError(prefix)

    def longest_prefix_item(self, prefix):
        lprefix = self.longest_prefix(prefix)
        return (lprefix, self[lprefix])
927
Python
.py
25
28.08
66
0.608989
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,584
__init__.py
CouchPotato_CouchPotatoServer/libs/html5lib/serializer/__init__.py
from __future__ import absolute_import, division, unicode_literals

from .. import treewalkers

from .htmlserializer import HTMLSerializer


def serialize(input, tree="etree", format="html", encoding=None,
              **serializer_opts):
    # XXX: Should we cache this?
    walker = treewalkers.getTreeWalker(tree)
    if format == "html":
        s = HTMLSerializer(**serializer_opts)
    else:
        raise ValueError("type must be html")
    return s.render(walker(input), encoding)
490
Python
.py
12
35.333333
66
0.704641
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
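serialize() above is a convenience wrapper that pairs a tree walker with an HTMLSerializer. A sketch of a parse/serialize round trip:

# Sketch: round-tripping markup through parse() and serialize().
import html5lib
from html5lib import serialize

document = html5lib.parse("<p class=test>Hi<br>there", treebuilder="etree")

# serialize() looks up the matching tree walker, builds an HTMLSerializer from
# the keyword options, and returns text (or bytes when an encoding is given).
print(serialize(document, tree="etree", omit_optional_tags=False))
print(serialize(document, tree="etree", encoding="utf-8"))  # bytes output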
7,585
htmlserializer.py
CouchPotato_CouchPotatoServer/libs/html5lib/serializer/htmlserializer.py
from __future__ import absolute_import, division, unicode_literals from six import text_type import gettext _ = gettext.gettext try: from functools import reduce except ImportError: pass from ..constants import voidElements, booleanAttributes, spaceCharacters from ..constants import rcdataElements, entities, xmlEntities from .. import utils from xml.sax.saxutils import escape spaceCharacters = "".join(spaceCharacters) try: from codecs import register_error, xmlcharrefreplace_errors except ImportError: unicode_encode_errors = "strict" else: unicode_encode_errors = "htmlentityreplace" encode_entity_map = {} is_ucs4 = len("\U0010FFFF") == 1 for k, v in list(entities.items()): # skip multi-character entities if ((is_ucs4 and len(v) > 1) or (not is_ucs4 and len(v) > 2)): continue if v != "&": if len(v) == 2: v = utils.surrogatePairToCodepoint(v) else: v = ord(v) if not v in encode_entity_map or k.islower(): # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc. encode_entity_map[v] = k def htmlentityreplace_errors(exc): if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)): res = [] codepoints = [] skip = False for i, c in enumerate(exc.object[exc.start:exc.end]): if skip: skip = False continue index = i + exc.start if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]): codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2]) skip = True else: codepoint = ord(c) codepoints.append(codepoint) for cp in codepoints: e = encode_entity_map.get(cp) if e: res.append("&") res.append(e) if not e.endswith(";"): res.append(";") else: res.append("&#x%s;" % (hex(cp)[2:])) return ("".join(res), exc.end) else: return xmlcharrefreplace_errors(exc) register_error(unicode_encode_errors, htmlentityreplace_errors) del register_error class HTMLSerializer(object): # attribute quoting options quote_attr_values = False quote_char = '"' use_best_quote_char = True # tag syntax options omit_optional_tags = True minimize_boolean_attributes = True use_trailing_solidus = False space_before_trailing_solidus = True # escaping options escape_lt_in_attrs = False escape_rcdata = False resolve_entities = True # miscellaneous options alphabetical_attributes = False inject_meta_charset = True strip_whitespace = False sanitize = False options = ("quote_attr_values", "quote_char", "use_best_quote_char", "omit_optional_tags", "minimize_boolean_attributes", "use_trailing_solidus", "space_before_trailing_solidus", "escape_lt_in_attrs", "escape_rcdata", "resolve_entities", "alphabetical_attributes", "inject_meta_charset", "strip_whitespace", "sanitize") def __init__(self, **kwargs): """Initialize HTMLSerializer. Keyword options (default given first unless specified) include: inject_meta_charset=True|False Whether it insert a meta element to define the character set of the document. quote_attr_values=True|False Whether to quote attribute values that don't require quoting per HTML5 parsing rules. quote_char=u'"'|u"'" Use given quote character for attribute quoting. Default is to use double quote unless attribute value contains a double quote, in which case single quotes are used instead. escape_lt_in_attrs=False|True Whether to escape < in attribute values. escape_rcdata=False|True Whether to escape characters that need to be escaped within normal elements within rcdata elements such as style. resolve_entities=True|False Whether to resolve named character entities that appear in the source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos; are unaffected by this setting. 
strip_whitespace=False|True Whether to remove semantically meaningless whitespace. (This compresses all whitespace to a single space except within pre.) minimize_boolean_attributes=True|False Shortens boolean attributes to give just the attribute value, for example <input disabled="disabled"> becomes <input disabled>. use_trailing_solidus=False|True Includes a close-tag slash at the end of the start tag of void elements (empty elements whose end tag is forbidden). E.g. <hr/>. space_before_trailing_solidus=True|False Places a space immediately before the closing slash in a tag using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus. sanitize=False|True Strip all unsafe or unknown constructs from output. See `html5lib user documentation`_ omit_optional_tags=True|False Omit start/end tags that are optional. alphabetical_attributes=False|True Reorder attributes to be in alphabetical order. .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation """ if 'quote_char' in kwargs: self.use_best_quote_char = False for attr in self.options: setattr(self, attr, kwargs.get(attr, getattr(self, attr))) self.errors = [] self.strict = False def encode(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, unicode_encode_errors) else: return string def encodeStrict(self, string): assert(isinstance(string, text_type)) if self.encoding: return string.encode(self.encoding, "strict") else: return string def serialize(self, treewalker, encoding=None): self.encoding = encoding in_cdata = False self.errors = [] if encoding and self.inject_meta_charset: from ..filters.inject_meta_charset import Filter treewalker = Filter(treewalker, encoding) # WhitespaceFilter should be used before OptionalTagFilter # for maximum efficiently of this latter filter if self.strip_whitespace: from ..filters.whitespace import Filter treewalker = Filter(treewalker) if self.sanitize: from ..filters.sanitizer import Filter treewalker = Filter(treewalker) if self.omit_optional_tags: from ..filters.optionaltags import Filter treewalker = Filter(treewalker) # Alphabetical attributes must be last, as other filters # could add attributes and alter the order if self.alphabetical_attributes: from ..filters.alphabeticalattributes import Filter treewalker = Filter(treewalker) for token in treewalker: type = token["type"] if type == "Doctype": doctype = "<!DOCTYPE %s" % token["name"] if token["publicId"]: doctype += ' PUBLIC "%s"' % token["publicId"] elif token["systemId"]: doctype += " SYSTEM" if token["systemId"]: if token["systemId"].find('"') >= 0: if token["systemId"].find("'") >= 0: self.serializeError(_("System identifer contains both single and double quote characters")) quote_char = "'" else: quote_char = '"' doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char) doctype += ">" yield self.encodeStrict(doctype) elif type in ("Characters", "SpaceCharacters"): if type == "SpaceCharacters" or in_cdata: if in_cdata and token["data"].find("</") >= 0: self.serializeError(_("Unexpected </ in CDATA")) yield self.encode(token["data"]) else: yield self.encode(escape(token["data"])) elif type in ("StartTag", "EmptyTag"): name = token["name"] yield self.encodeStrict("<%s" % name) if name in rcdataElements and not self.escape_rcdata: in_cdata = True elif in_cdata: self.serializeError(_("Unexpected child element of a CDATA element")) for (attr_namespace, attr_name), attr_value in token["data"].items(): # TODO: Add namespace support here k = 
attr_name v = attr_value yield self.encodeStrict(' ') yield self.encodeStrict(k) if not self.minimize_boolean_attributes or \ (k not in booleanAttributes.get(name, tuple()) and k not in booleanAttributes.get("", tuple())): yield self.encodeStrict("=") if self.quote_attr_values or not v: quote_attr = True else: quote_attr = reduce(lambda x, y: x or (y in v), spaceCharacters + ">\"'=", False) v = v.replace("&", "&amp;") if self.escape_lt_in_attrs: v = v.replace("<", "&lt;") if quote_attr: quote_char = self.quote_char if self.use_best_quote_char: if "'" in v and '"' not in v: quote_char = '"' elif '"' in v and "'" not in v: quote_char = "'" if quote_char == "'": v = v.replace("'", "&#39;") else: v = v.replace('"', "&quot;") yield self.encodeStrict(quote_char) yield self.encode(v) yield self.encodeStrict(quote_char) else: yield self.encode(v) if name in voidElements and self.use_trailing_solidus: if self.space_before_trailing_solidus: yield self.encodeStrict(" /") else: yield self.encodeStrict("/") yield self.encode(">") elif type == "EndTag": name = token["name"] if name in rcdataElements: in_cdata = False elif in_cdata: self.serializeError(_("Unexpected child element of a CDATA element")) yield self.encodeStrict("</%s>" % name) elif type == "Comment": data = token["data"] if data.find("--") >= 0: self.serializeError(_("Comment contains --")) yield self.encodeStrict("<!--%s-->" % token["data"]) elif type == "Entity": name = token["name"] key = name + ";" if not key in entities: self.serializeError(_("Entity %s not recognized" % name)) if self.resolve_entities and key not in xmlEntities: data = entities[key] else: data = "&%s;" % name yield self.encodeStrict(data) else: self.serializeError(token["data"]) def render(self, treewalker, encoding=None): if encoding: return b"".join(list(self.serialize(treewalker, encoding))) else: return "".join(list(self.serialize(treewalker))) def serializeError(self, data="XXX ERROR MESSAGE NEEDED"): # XXX The idea is to make data mandatory. self.errors.append(data) if self.strict: raise SerializeError def SerializeError(Exception): """Error in serialized tree""" pass
12,897
Python
.py
282
31.712766
119
0.547348
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
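HTMLSerializer above is configured entirely through the class-level options listed in its docstring. A sketch showing a few of them driven through the walker API directly:

# Sketch: configuring HTMLSerializer directly instead of using serialize().
import html5lib
from html5lib.serializer.htmlserializer import HTMLSerializer

document = html5lib.parse("<p id=a>Some   text<br>", treebuilder="dom")
walker = html5lib.getTreeWalker("dom")

serializer = HTMLSerializer(quote_attr_values=True,    # always quote attribute values
                            omit_optional_tags=False,  # keep <html>, <head>, <body>, </p>
                            strip_whitespace=True)     # collapse runs of whitespace

# render() concatenates the generator output; passing an encoding returns bytes
# and also activates the inject_meta_charset filter described above.
print(serializer.render(walker(document)))
print(serializer.render(walker(document), encoding="utf-8"))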
7,586
whitespace.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/whitespace.py
from __future__ import absolute_import, division, unicode_literals

import re

from . import _base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)

SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)


class Filter(_base.Filter):

    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        preserve = 0
        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type == "StartTag" \
                    and (preserve or token["name"] in self.spacePreserveElements):
                preserve += 1

            elif type == "EndTag" and preserve:
                preserve -= 1

            elif not preserve and type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were not
                token["data"] = " "

            elif not preserve and type == "Characters":
                token["data"] = collapse_spaces(token["data"])

            yield token


def collapse_spaces(text):
    return SPACES_REGEX.sub(' ', text)
1,142
Python
.py
25
36.16
90
0.610507
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
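The whitespace Filter above wraps any iterable of token dicts, so it can be exercised without a full parse. A tiny illustrative sketch with a hand-made token list:

# Sketch: running the whitespace filter over a hand-made token stream.
from html5lib.filters.whitespace import Filter

tokens = [
    {"type": "StartTag", "name": "p", "data": {}},
    {"type": "Characters", "data": "lots   of    space"},
    {"type": "SpaceCharacters", "data": "   \n\t"},
    {"type": "EndTag", "name": "p", "data": {}},
]

for token in Filter(tokens):
    print(token)
# The Characters data collapses to "lots of space" and the SpaceCharacters run to " ".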
7,587
sanitizer.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/sanitizer.py
from __future__ import absolute_import, division, unicode_literals

from . import _base
from ..sanitizer import HTMLSanitizerMixin


class Filter(_base.Filter, HTMLSanitizerMixin):
    def __iter__(self):
        for token in _base.Filter.__iter__(self):
            token = self.sanitize_token(token)
            if token:
                yield token
352
Python
.py
9
32
66
0.664706
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,588
lint.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/lint.py
from __future__ import absolute_import, division, unicode_literals from gettext import gettext _ = gettext from . import _base from ..constants import cdataElements, rcdataElements, voidElements from ..constants import spaceCharacters spaceCharacters = "".join(spaceCharacters) class LintError(Exception): pass class Filter(_base.Filter): def __iter__(self): open_elements = [] contentModelFlag = "PCDATA" for token in _base.Filter.__iter__(self): type = token["type"] if type in ("StartTag", "EmptyTag"): name = token["name"] if contentModelFlag != "PCDATA": raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name}) if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) if not name: raise LintError(_("Empty tag name")) if type == "StartTag" and name in voidElements: raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name}) elif type == "EmptyTag" and name not in voidElements: raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]}) if type == "StartTag": open_elements.append(name) for name, value in token["data"]: if not isinstance(name, str): raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name}) if not name: raise LintError(_("Empty attribute name")) if not isinstance(value, str): raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value}) if name in cdataElements: contentModelFlag = "CDATA" elif name in rcdataElements: contentModelFlag = "RCDATA" elif name == "plaintext": contentModelFlag = "PLAINTEXT" elif type == "EndTag": name = token["name"] if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) if not name: raise LintError(_("Empty tag name")) if name in voidElements: raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name}) start_name = open_elements.pop() if start_name != name: raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name}) contentModelFlag = "PCDATA" elif type == "Comment": if contentModelFlag != "PCDATA": raise LintError(_("Comment not in PCDATA content model flag")) elif type in ("Characters", "SpaceCharacters"): data = token["data"] if not isinstance(data, str): raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data}) if not data: raise LintError(_("%(type)s token with empty data") % {"type": type}) if type == "SpaceCharacters": data = data.strip(spaceCharacters) if data: raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data}) elif type == "Doctype": name = token["name"] if contentModelFlag != "PCDATA": raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name}) if not isinstance(name, str): raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name}) # XXX: what to do with token["data"] ? elif type in ("ParseError", "SerializeError"): pass else: raise LintError(_("Unknown token type: %(type)s") % {"type": type}) yield token
4,306
Python
.py
79
38.64557
131
0.520057
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,589
inject_meta_charset.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/inject_meta_charset.py
from __future__ import absolute_import, division, unicode_literals from . import _base class Filter(_base.Filter): def __init__(self, source, encoding): _base.Filter.__init__(self, source) self.encoding = encoding def __iter__(self): state = "pre_head" meta_found = (self.encoding is None) pending = [] for token in _base.Filter.__iter__(self): type = token["type"] if type == "StartTag": if token["name"].lower() == "head": state = "in_head" elif type == "EmptyTag": if token["name"].lower() == "meta": # replace charset with actual encoding has_http_equiv_content_type = False for (namespace, name), value in token["data"].items(): if namespace is not None: continue elif name.lower() == 'charset': token["data"][(namespace, name)] = self.encoding meta_found = True break elif name == 'http-equiv' and value.lower() == 'content-type': has_http_equiv_content_type = True else: if has_http_equiv_content_type and (None, "content") in token["data"]: token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding meta_found = True elif token["name"].lower() == "head" and not meta_found: # insert meta into empty head yield {"type": "StartTag", "name": "head", "data": token["data"]} yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} yield {"type": "EndTag", "name": "head"} meta_found = True continue elif type == "EndTag": if token["name"].lower() == "head" and pending: # insert meta into head (if necessary) and flush pending queue yield pending.pop(0) if not meta_found: yield {"type": "EmptyTag", "name": "meta", "data": {(None, "charset"): self.encoding}} while pending: yield pending.pop(0) meta_found = True state = "post_head" if state == "in_head": pending.append(token) else: yield token
2,746
Python
.py
56
30.214286
102
0.441626
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,590
alphabeticalattributes.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/alphabeticalattributes.py
from __future__ import absolute_import, division, unicode_literals

from . import _base

try:
    from collections import OrderedDict
except ImportError:
    from ordereddict import OrderedDict


class Filter(_base.Filter):
    def __iter__(self):
        for token in _base.Filter.__iter__(self):
            if token["type"] in ("StartTag", "EmptyTag"):
                attrs = OrderedDict()
                for name, value in sorted(token["data"].items(),
                                          key=lambda x: x[0]):
                    attrs[name] = value
                token["data"] = attrs
            yield token
624
Python
.py
16
28.125
66
0.562914
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,591
optionaltags.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/optionaltags.py
from __future__ import absolute_import, division, unicode_literals from . import _base class Filter(_base.Filter): def slider(self): previous1 = previous2 = None for token in self.source: if previous1 is not None: yield previous2, previous1, token previous2 = previous1 previous1 = token yield previous2, previous1, None def __iter__(self): for previous, token, next in self.slider(): type = token["type"] if type == "StartTag": if (token["data"] or not self.is_optional_start(token["name"], previous, next)): yield token elif type == "EndTag": if not self.is_optional_end(token["name"], next): yield token else: yield token def is_optional_start(self, tagname, previous, next): type = next and next["type"] or None if tagname in 'html': # An html element's start tag may be omitted if the first thing # inside the html element is not a space character or a comment. return type not in ("Comment", "SpaceCharacters") elif tagname == 'head': # A head element's start tag may be omitted if the first thing # inside the head element is an element. # XXX: we also omit the start tag if the head element is empty if type in ("StartTag", "EmptyTag"): return True elif type == "EndTag": return next["name"] == "head" elif tagname == 'body': # A body element's start tag may be omitted if the first thing # inside the body element is not a space character or a comment, # except if the first thing inside the body element is a script # or style element and the node immediately preceding the body # element is a head element whose end tag has been omitted. if type in ("Comment", "SpaceCharacters"): return False elif type == "StartTag": # XXX: we do not look at the preceding event, so we never omit # the body element's start tag if it's followed by a script or # a style element. return next["name"] not in ('script', 'style') else: return True elif tagname == 'colgroup': # A colgroup element's start tag may be omitted if the first thing # inside the colgroup element is a col element, and if the element # is not immediately preceeded by another colgroup element whose # end tag has been omitted. if type in ("StartTag", "EmptyTag"): # XXX: we do not look at the preceding event, so instead we never # omit the colgroup element's end tag when it is immediately # followed by another colgroup element. See is_optional_end. return next["name"] == "col" else: return False elif tagname == 'tbody': # A tbody element's start tag may be omitted if the first thing # inside the tbody element is a tr element, and if the element is # not immediately preceeded by a tbody, thead, or tfoot element # whose end tag has been omitted. if type == "StartTag": # omit the thead and tfoot elements' end tag when they are # immediately followed by a tbody element. See is_optional_end. if previous and previous['type'] == 'EndTag' and \ previous['name'] in ('tbody', 'thead', 'tfoot'): return False return next["name"] == 'tr' else: return False return False def is_optional_end(self, tagname, next): type = next and next["type"] or None if tagname in ('html', 'head', 'body'): # An html element's end tag may be omitted if the html element # is not immediately followed by a space character or a comment. return type not in ("Comment", "SpaceCharacters") elif tagname in ('li', 'optgroup', 'tr'): # A li element's end tag may be omitted if the li element is # immediately followed by another li element or if there is # no more content in the parent element. 
# An optgroup element's end tag may be omitted if the optgroup # element is immediately followed by another optgroup element, # or if there is no more content in the parent element. # A tr element's end tag may be omitted if the tr element is # immediately followed by another tr element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] == tagname else: return type == "EndTag" or type is None elif tagname in ('dt', 'dd'): # A dt element's end tag may be omitted if the dt element is # immediately followed by another dt element or a dd element. # A dd element's end tag may be omitted if the dd element is # immediately followed by another dd element or a dt element, # or if there is no more content in the parent element. if type == "StartTag": return next["name"] in ('dt', 'dd') elif tagname == 'dd': return type == "EndTag" or type is None else: return False elif tagname == 'p': # A p element's end tag may be omitted if the p element is # immediately followed by an address, article, aside, # blockquote, datagrid, dialog, dir, div, dl, fieldset, # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu, # nav, ol, p, pre, section, table, or ul, element, or if # there is no more content in the parent element. if type in ("StartTag", "EmptyTag"): return next["name"] in ('address', 'article', 'aside', 'blockquote', 'datagrid', 'dialog', 'dir', 'div', 'dl', 'fieldset', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hr', 'menu', 'nav', 'ol', 'p', 'pre', 'section', 'table', 'ul') else: return type == "EndTag" or type is None elif tagname == 'option': # An option element's end tag may be omitted if the option # element is immediately followed by another option element, # or if it is immediately followed by an <code>optgroup</code> # element, or if there is no more content in the parent # element. if type == "StartTag": return next["name"] in ('option', 'optgroup') else: return type == "EndTag" or type is None elif tagname in ('rt', 'rp'): # An rt element's end tag may be omitted if the rt element is # immediately followed by an rt or rp element, or if there is # no more content in the parent element. # An rp element's end tag may be omitted if the rp element is # immediately followed by an rt or rp element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] in ('rt', 'rp') else: return type == "EndTag" or type is None elif tagname == 'colgroup': # A colgroup element's end tag may be omitted if the colgroup # element is not immediately followed by a space character or # a comment. if type in ("Comment", "SpaceCharacters"): return False elif type == "StartTag": # XXX: we also look for an immediately following colgroup # element. See is_optional_start. return next["name"] != 'colgroup' else: return True elif tagname in ('thead', 'tbody'): # A thead element's end tag may be omitted if the thead element # is immediately followed by a tbody or tfoot element. # A tbody element's end tag may be omitted if the tbody element # is immediately followed by a tbody or tfoot element, or if # there is no more content in the parent element. # A tfoot element's end tag may be omitted if the tfoot element # is immediately followed by a tbody element, or if there is no # more content in the parent element. # XXX: we never omit the end tag when the following element is # a tbody. See is_optional_start. 
if type == "StartTag": return next["name"] in ['tbody', 'tfoot'] elif tagname == 'tbody': return type == "EndTag" or type is None else: return False elif tagname == 'tfoot': # A tfoot element's end tag may be omitted if the tfoot element # is immediately followed by a tbody element, or if there is no # more content in the parent element. # XXX: we never omit the end tag when the following element is # a tbody. See is_optional_start. if type == "StartTag": return next["name"] == 'tbody' else: return type == "EndTag" or type is None elif tagname in ('td', 'th'): # A td element's end tag may be omitted if the td element is # immediately followed by a td or th element, or if there is # no more content in the parent element. # A th element's end tag may be omitted if the th element is # immediately followed by a td or th element, or if there is # no more content in the parent element. if type == "StartTag": return next["name"] in ('td', 'th') else: return type == "EndTag" or type is None return False
10,500
Python
.py
199
38.688442
83
0.553667
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
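The rules above are what the serializer's omit_optional_tags option applies. A brief sketch contrasting the two settings (the output shown in comments is approximate):

# Sketch: the effect of the optionaltags filter via the omit_optional_tags option.
import html5lib
from html5lib import serialize

document = html5lib.parse("<table><tr><td>1<td>2</table>", treebuilder="etree")

print(serialize(document, tree="etree", omit_optional_tags=True))
# roughly "<table><tr><td>1<td>2</table>" -- optional start/end tags are dropped

print(serialize(document, tree="etree", omit_optional_tags=False))
# keeps every start and end tag, including the <html><head></head><body> wrappers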
7,592
_base.py
CouchPotato_CouchPotatoServer/libs/html5lib/filters/_base.py
from __future__ import absolute_import, division, unicode_literals


class Filter(object):
    def __init__(self, source):
        self.source = source

    def __iter__(self):
        return iter(self.source)

    def __getattr__(self, name):
        return getattr(self.source, name)
286
Python
.py
8
29.75
66
0.653285
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,593
axel.py
CouchPotato_CouchPotatoServer/libs/axl/axel.py
# axel.py # # Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom # Edits by Ruud Burger # # Based on an idea by Peter Thatcher, found on # http://www.valuedlessons.com/2008/04/events-in-python.html # # This module is part of Axel and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php # # Source: http://pypi.python.org/pypi/axel # Docs: http://packages.python.org/axel from Queue import Empty, Queue import hashlib import sys import threading from couchpotato.core.helpers.variable import natsortKey class Event(object): """ Event object inspired by C# events. Handlers can be registered and unregistered using += and -= operators. Execution and result are influenced by the arguments passed to the constructor and += method. from axel import Event event = Event() def on_event(*args, **kwargs): return (args, kwargs) event += on_event # handler registration print(event(10, 20, y=30)) >> ((True, ((10, 20), {'y': 30}), <function on_event at 0x00BAA270>),) event -= on_event # handler is unregistered print(event(10, 20, y=30)) >> None class Mouse(object): def __init__(self): self.click = Event(self) self.click += self.on_click # handler registration def on_click(self, sender, *args, **kwargs): assert isinstance(sender, Mouse), 'Wrong sender' return (args, kwargs) mouse = Mouse() print(mouse.click(10, 20)) >> ((True, ((10, 20), {}), >> <bound method Mouse.on_click of <__main__.Mouse object at 0x00B6F470>>),) mouse.click -= mouse.on_click # handler is unregistered print(mouse.click(10, 20)) >> None """ def __init__(self, name = None, sender = None, asynch = False, exc_info = False, lock = None, threads = 3, traceback = False): """ Creates an event asynch if True handler's are executes asynchronous exc_info if True, result will contain sys.exc_info()[:2] on error lock threading.RLock used to synchronize execution sender event's sender. The sender is passed as the first argument to the handler, only if is not None. For this case the handler must have a placeholder in the arguments to receive the sender threads maximum number of threads that will be started traceback if True, the execution result will contain sys.exc_info() on error. exc_info must be also True to get the traceback hash = self.hash(handler) Handlers are stored in a dictionary that has as keys the handler's hash handlers = { hash : (handler, memoize, timeout), hash : (handler, memoize, timeout), ... } The execution result is cached using the following structure memoize = { hash : ((args, kwargs, result), (args, kwargs, result), ...), hash : ((args, kwargs, result), ...), ... } The execution result is returned as a tuple having this structure exec_result = ( (True, result, handler), # on success (False, error_info, handler), # on error (None, None, handler), ... # asynchronous execution ) """ self.name = name self.asynchronous = asynch self.exc_info = exc_info self.lock = lock self.sender = sender self.threads = threads self.traceback = traceback self.handlers = {} self.memoize = {} def hash(self, handler): return hashlib.md5(str(handler)).hexdigest() def handle(self, handler, priority = 0): """ Registers a handler. The handler can be transmitted together with two arguments as a list or dictionary. 
The arguments are: memoize if True, the execution result will be cached in self.memoize timeout will allocate a predefined time interval for the execution If arguments are provided as a list, they are considered to have this sequence: (handler, memoize, timeout) Examples: event += handler event += (handler, True, 1.5) event += {'handler':handler, 'memoize':True, 'timeout':1.5} """ handler_, memoize, timeout = self._extract(handler) self.handlers['%s.%s' % (priority, self.hash(handler_))] = (handler_, memoize, timeout) return self def unhandle(self, handler): """ Unregisters a handler """ handler_, memoize, timeout = self._extract(handler) key = self.hash(handler_) if not key in self.handlers: raise ValueError('Handler "%s" was not found' % str(handler_)) del self.handlers[key] return self def fire(self, *args, **kwargs): """ Stores all registered handlers in a queue for processing """ self.queue = Queue() result = {} if self.handlers: max_threads = 1 if kwargs.get('event_order_lock') else self._threads() # Set global result def add_to(key, value): result[key] = value kwargs['event_add_to_result'] = add_to for i in range(max_threads): t = threading.Thread(target = self._execute, args = args, kwargs = kwargs) t.daemon = True t.start() handler_keys = self.handlers.keys() handler_keys.sort(key = natsortKey) for handler in handler_keys: self.queue.put(handler) if self.asynchronous: handler_, memoize, timeout = self.handlers[handler] result[handler] = (None, None, handler_) if not self.asynchronous: self.queue.join() return result def count(self): """ Returns the count of registered handlers """ return len(self.handlers) def clear(self): """ Discards all registered handlers and cached results """ self.handlers.clear() self.memoize.clear() def _execute(self, *args, **kwargs): # Remove get and set from kwargs add_to_result = kwargs.get('event_add_to_result') del kwargs['event_add_to_result'] # Get and remove order lock order_lock = kwargs.get('event_order_lock') try: del kwargs['event_order_lock'] except: pass # Get and remove return on first return_on_result = kwargs.get('event_return_on_result') try: del kwargs['event_return_on_result'] except: pass got_results = False """ Executes all handlers stored in the queue """ while True: try: h_ = self.queue.get(timeout = 2) handler, memoize, timeout = self.handlers[h_] if return_on_result and got_results: if not self.asynchronous: self.queue.task_done() continue if order_lock: order_lock.acquire() try: r = self._memoize(memoize, timeout, handler, *args, **kwargs) if not self.asynchronous: if not return_on_result or (return_on_result and r[1] is not None): add_to_result(h_, tuple(r)) got_results = True except Exception: if not self.asynchronous: add_to_result(h_, (False, self._error(sys.exc_info()), handler)) else: self.error_handler(sys.exc_info()) finally: if order_lock: order_lock.release() if not self.asynchronous: self.queue.task_done() if self.queue.empty(): raise Empty except Empty: break def _extract(self, queue_item): """ Extracts a handler and handler's arguments that can be provided as list or dictionary. 
If arguments are provided as list, they are considered to have this sequence: (handler, memoize, timeout) Examples: event += handler event += (handler, True, 1.5) event += {'handler':handler, 'memoize':True, 'timeout':1.5} """ assert queue_item, 'Invalid list of arguments' handler = None memoize = False timeout = 0 if not isinstance(queue_item, (list, tuple, dict)): handler = queue_item elif isinstance(queue_item, (list, tuple)): if len(queue_item) == 3: handler, memoize, timeout = queue_item elif len(queue_item) == 2: handler, memoize, = queue_item elif len(queue_item) == 1: handler = queue_item elif isinstance(queue_item, dict): handler = queue_item.get('handler') memoize = queue_item.get('memoize', False) timeout = queue_item.get('timeout', 0) return (handler, bool(memoize), float(timeout)) def _memoize(self, memoize, timeout, handler, *args, **kwargs): """ Caches the execution result of successful executions hash = self.hash(handler) memoize = { hash : ((args, kwargs, result), (args, kwargs, result), ...), hash : ((args, kwargs, result), ...), ... } """ if not isinstance(handler, Event) and self.sender is not None: args = list(args)[:] args.insert(0, self.sender) if not memoize: if timeout <= 0: #no time restriction result = [True, handler(*args, **kwargs), handler] return result result = self._timeout(timeout, handler, *args, **kwargs) if isinstance(result, tuple) and len(result) == 3: if isinstance(result[1], Exception): #error occurred return [False, self._error(result), handler] return [True, result, handler] else: hash_ = self.hash(handler) if hash_ in self.memoize: for args_, kwargs_, result in self.memoize[hash_]: if args_ == args and kwargs_ == kwargs: return [True, result, handler] if timeout <= 0: #no time restriction result = handler(*args, **kwargs) else: result = self._timeout(timeout, handler, *args, **kwargs) if isinstance(result, tuple) and len(result) == 3: if isinstance(result[1], Exception): #error occurred return [False, self._error(result), handler] lock = threading.RLock() lock.acquire() try: if hash_ not in self.memoize: self.memoize[hash_] = [] self.memoize[hash_].append((args, kwargs, result)) return [True, result, handler] finally: lock.release() def _timeout(self, timeout, handler, *args, **kwargs): """ Controls the time allocated for the execution of a method """ t = spawn_thread(target = handler, args = args, kwargs = kwargs) t.daemon = True t.start() t.join(timeout) if not t.is_alive(): if t.exc_info: return t.exc_info return t.result else: try: msg = '[%s] Execution was forcefully terminated' raise RuntimeError(msg % t.name) except: return sys.exc_info() def _threads(self): """ Calculates maximum number of threads that will be started """ if self.threads < len(self.handlers): return self.threads return len(self.handlers) def _error(self, exc_info): """ Retrieves the error info """ if self.exc_info: if self.traceback: return exc_info return exc_info[:2] return exc_info[1] __iadd__ = handle __isub__ = unhandle __call__ = fire __len__ = count class spawn_thread(threading.Thread): """ Spawns a new thread and returns the execution result """ def __init__(self, target, args = (), kwargs = {}, default = None): threading.Thread.__init__(self) self._target = target self._args = args self._kwargs = kwargs self.result = default self.exc_info = None def run(self): try: self.result = self._target(*self._args, **self._kwargs) except: self.exc_info = sys.exc_info() finally: del self._target, self._args, self._kwargs
13,262
Python
.py
314
30.671975
95
0.553839
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
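The Event class above dispatches registered handlers on worker threads and collects (success, result, handler) tuples keyed by '<priority>.<md5-of-handler>'. One caveat visible in the source: handle() stores handlers under a priority-prefixed key, while unhandle() looks up the bare hash, so the -= operator raises ValueError for handlers registered through += . A minimal usage sketch (Python 2, matching the module's Queue import; assumes libs/ is on sys.path):

from axl.axel import Event

def on_event(*args, **kwargs):
    return (args, kwargs)

event = Event(threads = 2)
event += (on_event, True, 1.5)   # tuple form: (handler, memoize, timeout)
results = event(10, 20, y = 30)  # __call__ is fire(); runs handlers on worker threads
# results maps '<priority>.<md5>' keys to (True, result, handler) tuples,
# or (False, error_info, handler) when a handler raised.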
7,594
__init__.py
CouchPotato_CouchPotatoServer/libs/axl/__init__.py
# __init__.py
#
# Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
#
# This module is part of Axel and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
213
Python
.py
6
34.166667
70
0.757282
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,595
socks.py
CouchPotato_CouchPotatoServer/libs/httplib2/socks.py
"""SocksiPy - Python SOCKS module. Version 1.00 Copyright 2006 Dan-Haim. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of Dan Haim nor the names of his contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE. This module provides a standard socket-like interface for Python for tunneling connections through SOCKS proxies. """ """ Minor modifications made by Christopher Gilbert (http://motomastyle.com/) for use in PyLoris (http://pyloris.sourceforge.net/) Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/) mainly to merge bug fixes found in Sourceforge """ import base64 import socket import struct import sys if getattr(socket, 'socket', None) is None: raise ImportError('socket.socket missing, proxy support unusable') PROXY_TYPE_SOCKS4 = 1 PROXY_TYPE_SOCKS5 = 2 PROXY_TYPE_HTTP = 3 PROXY_TYPE_HTTP_NO_TUNNEL = 4 _defaultproxy = None _orgsocket = socket.socket class ProxyError(Exception): pass class GeneralProxyError(ProxyError): pass class Socks5AuthError(ProxyError): pass class Socks5Error(ProxyError): pass class Socks4Error(ProxyError): pass class HTTPError(ProxyError): pass _generalerrors = ("success", "invalid data", "not connected", "not available", "bad proxy type", "bad input") _socks5errors = ("succeeded", "general SOCKS server failure", "connection not allowed by ruleset", "Network unreachable", "Host unreachable", "Connection refused", "TTL expired", "Command not supported", "Address type not supported", "Unknown error") _socks5autherrors = ("succeeded", "authentication is required", "all offered authentication methods were rejected", "unknown username or invalid password", "unknown error") _socks4errors = ("request granted", "request rejected or failed", "request rejected because SOCKS server cannot connect to identd on the client", "request rejected because the client program and identd report different user-ids", "unknown error") def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) Sets a default proxy which all further socksocket objects will use, unless explicitly changed. 
""" global _defaultproxy _defaultproxy = (proxytype, addr, port, rdns, username, password) def wrapmodule(module): """wrapmodule(module) Attempts to replace a module's socket library with a SOCKS socket. Must set a default proxy using setdefaultproxy(...) first. This will only work on modules that import socket directly into the namespace; most of the Python Standard Library falls into this category. """ if _defaultproxy != None: module.socket.socket = socksocket else: raise GeneralProxyError((4, "no proxy specified")) class socksocket(socket.socket): """socksocket([family[, type[, proto]]]) -> socket object Open a SOCKS enabled socket. The parameters are the same as those of the standard socket init. In order for SOCKS to work, you must specify family=AF_INET, type=SOCK_STREAM and proto=0. """ def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None): _orgsocket.__init__(self, family, type, proto, _sock) if _defaultproxy != None: self.__proxy = _defaultproxy else: self.__proxy = (None, None, None, None, None, None) self.__proxysockname = None self.__proxypeername = None self.__httptunnel = True def __recvall(self, count): """__recvall(count) -> data Receive EXACTLY the number of bytes requested from the socket. Blocks until the required number of bytes have been received. """ data = self.recv(count) while len(data) < count: d = self.recv(count-len(data)) if not d: raise GeneralProxyError((0, "connection closed unexpectedly")) data = data + d return data def sendall(self, content, *args): """ override socket.socket.sendall method to rewrite the header for non-tunneling proxies if needed """ if not self.__httptunnel: content = self.__rewriteproxy(content) return super(socksocket, self).sendall(content, *args) def __rewriteproxy(self, header): """ rewrite HTTP request headers to support non-tunneling proxies (i.e. those which do not support the CONNECT method). This only works for HTTP (not HTTPS) since HTTPS requires tunneling. """ host, endpt = None, None hdrs = header.split("\r\n") for hdr in hdrs: if hdr.lower().startswith("host:"): host = hdr elif hdr.lower().startswith("get") or hdr.lower().startswith("post"): endpt = hdr if host and endpt: hdrs.remove(host) hdrs.remove(endpt) host = host.split(" ")[1] endpt = endpt.split(" ") if (self.__proxy[4] != None and self.__proxy[5] != None): hdrs.insert(0, self.__getauthheader()) hdrs.insert(0, "Host: %s" % host) hdrs.insert(0, "%s http://%s%s %s" % (endpt[0], host, endpt[1], endpt[2])) return "\r\n".join(hdrs) def __getauthheader(self): auth = self.__proxy[4] + ":" + self.__proxy[5] return "Proxy-Authorization: Basic " + base64.b64encode(auth) def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None): """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]]) Sets the proxy to be used. proxytype - The type of the proxy to be used. Three types are supported: PROXY_TYPE_SOCKS4 (including socks4a), PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP addr - The address of the server (IP or DNS). port - The port of the server. Defaults to 1080 for SOCKS servers and 8080 for HTTP proxy servers. rdns - Should DNS queries be preformed on the remote side (rather than the local side). The default is True. Note: This has no effect with SOCKS4 servers. username - Username to authenticate with to the server. The default is no authentication. password - Password to authenticate with to the server. Only relevant when username is also provided. 
""" self.__proxy = (proxytype, addr, port, rdns, username, password) def __negotiatesocks5(self, destaddr, destport): """__negotiatesocks5(self,destaddr,destport) Negotiates a connection through a SOCKS5 server. """ # First we'll send the authentication packages we support. if (self.__proxy[4]!=None) and (self.__proxy[5]!=None): # The username/password details were supplied to the # setproxy method so we support the USERNAME/PASSWORD # authentication (in addition to the standard none). self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02)) else: # No username/password were entered, therefore we # only support connections with no authentication. self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00)) # We'll receive the server's response to determine which # method was selected chosenauth = self.__recvall(2) if chosenauth[0:1] != chr(0x05).encode(): self.close() raise GeneralProxyError((1, _generalerrors[1])) # Check the chosen authentication method if chosenauth[1:2] == chr(0x00).encode(): # No authentication is required pass elif chosenauth[1:2] == chr(0x02).encode(): # Okay, we need to perform a basic username/password # authentication. self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5]) authstat = self.__recvall(2) if authstat[0:1] != chr(0x01).encode(): # Bad response self.close() raise GeneralProxyError((1, _generalerrors[1])) if authstat[1:2] != chr(0x00).encode(): # Authentication failed self.close() raise Socks5AuthError((3, _socks5autherrors[3])) # Authentication succeeded else: # Reaching here is always bad self.close() if chosenauth[1] == chr(0xFF).encode(): raise Socks5AuthError((2, _socks5autherrors[2])) else: raise GeneralProxyError((1, _generalerrors[1])) # Now we can request the actual connection req = struct.pack('BBB', 0x05, 0x01, 0x00) # If the given destination address is an IP address, we'll # use the IPv4 address request even if remote resolving was specified. try: ipaddr = socket.inet_aton(destaddr) req = req + chr(0x01).encode() + ipaddr except socket.error: # Well it's not an IP number, so it's probably a DNS name. if self.__proxy[3]: # Resolve remotely ipaddr = None req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr else: # Resolve locally ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) req = req + chr(0x01).encode() + ipaddr req = req + struct.pack(">H", destport) self.sendall(req) # Get the response resp = self.__recvall(4) if resp[0:1] != chr(0x05).encode(): self.close() raise GeneralProxyError((1, _generalerrors[1])) elif resp[1:2] != chr(0x00).encode(): # Connection failed self.close() if ord(resp[1:2])<=8: raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])])) else: raise Socks5Error((9, _socks5errors[9])) # Get the bound address/port elif resp[3:4] == chr(0x01).encode(): boundaddr = self.__recvall(4) elif resp[3:4] == chr(0x03).encode(): resp = resp + self.recv(1) boundaddr = self.__recvall(ord(resp[4:5])) else: self.close() raise GeneralProxyError((1,_generalerrors[1])) boundport = struct.unpack(">H", self.__recvall(2))[0] self.__proxysockname = (boundaddr, boundport) if ipaddr != None: self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) else: self.__proxypeername = (destaddr, destport) def getproxysockname(self): """getsockname() -> address info Returns the bound IP address and port number at the proxy. 
""" return self.__proxysockname def getproxypeername(self): """getproxypeername() -> address info Returns the IP and port number of the proxy. """ return _orgsocket.getpeername(self) def getpeername(self): """getpeername() -> address info Returns the IP address and port number of the destination machine (note: getproxypeername returns the proxy) """ return self.__proxypeername def __negotiatesocks4(self,destaddr,destport): """__negotiatesocks4(self,destaddr,destport) Negotiates a connection through a SOCKS4 server. """ # Check if the destination address provided is an IP address rmtrslv = False try: ipaddr = socket.inet_aton(destaddr) except socket.error: # It's a DNS name. Check where it should be resolved. if self.__proxy[3]: ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01) rmtrslv = True else: ipaddr = socket.inet_aton(socket.gethostbyname(destaddr)) # Construct the request packet req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr # The username parameter is considered userid for SOCKS4 if self.__proxy[4] != None: req = req + self.__proxy[4] req = req + chr(0x00).encode() # DNS name if remote resolving is required # NOTE: This is actually an extension to the SOCKS4 protocol # called SOCKS4A and may not be supported in all cases. if rmtrslv: req = req + destaddr + chr(0x00).encode() self.sendall(req) # Get the response from the server resp = self.__recvall(8) if resp[0:1] != chr(0x00).encode(): # Bad data self.close() raise GeneralProxyError((1,_generalerrors[1])) if resp[1:2] != chr(0x5A).encode(): # Server returned an error self.close() if ord(resp[1:2]) in (91, 92, 93): self.close() raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90])) else: raise Socks4Error((94, _socks4errors[4])) # Get the bound address/port self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0]) if rmtrslv != None: self.__proxypeername = (socket.inet_ntoa(ipaddr), destport) else: self.__proxypeername = (destaddr, destport) def __negotiatehttp(self, destaddr, destport): """__negotiatehttp(self,destaddr,destport) Negotiates a connection through an HTTP server. """ # If we need to resolve locally, we do this now if not self.__proxy[3]: addr = socket.gethostbyname(destaddr) else: addr = destaddr headers = ["CONNECT ", addr, ":", str(destport), " HTTP/1.1\r\n"] headers += ["Host: ", destaddr, "\r\n"] if (self.__proxy[4] != None and self.__proxy[5] != None): headers += [self.__getauthheader(), "\r\n"] headers.append("\r\n") self.sendall("".join(headers).encode()) # We read the response until we get the string "\r\n\r\n" resp = self.recv(1) while resp.find("\r\n\r\n".encode()) == -1: resp = resp + self.recv(1) # We just need the first line to check if the connection # was successful statusline = resp.splitlines()[0].split(" ".encode(), 2) if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()): self.close() raise GeneralProxyError((1, _generalerrors[1])) try: statuscode = int(statusline[1]) except ValueError: self.close() raise GeneralProxyError((1, _generalerrors[1])) if statuscode != 200: self.close() raise HTTPError((statuscode, statusline[2])) self.__proxysockname = ("0.0.0.0", 0) self.__proxypeername = (addr, destport) def connect(self, destpair): """connect(self, despair) Connects to the specified destination through a proxy. destpar - A tuple of the IP/DNS address and the port number. (identical to socket's connect). To select the proxy server use setproxy(). 
""" # Do a minimal input check first if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (not isinstance(destpair[0], basestring)) or (type(destpair[1]) != int): raise GeneralProxyError((5, _generalerrors[5])) if self.__proxy[0] == PROXY_TYPE_SOCKS5: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 1080 _orgsocket.connect(self, (self.__proxy[1], portnum)) self.__negotiatesocks5(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_SOCKS4: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 1080 _orgsocket.connect(self,(self.__proxy[1], portnum)) self.__negotiatesocks4(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_HTTP: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 8080 _orgsocket.connect(self,(self.__proxy[1], portnum)) self.__negotiatehttp(destpair[0], destpair[1]) elif self.__proxy[0] == PROXY_TYPE_HTTP_NO_TUNNEL: if self.__proxy[2] != None: portnum = self.__proxy[2] else: portnum = 8080 _orgsocket.connect(self,(self.__proxy[1],portnum)) if destpair[1] == 443: self.__negotiatehttp(destpair[0],destpair[1]) else: self.__httptunnel = False elif self.__proxy[0] == None: _orgsocket.connect(self, (destpair[0], destpair[1])) else: raise GeneralProxyError((4, _generalerrors[4]))
18,459
Python
.py
403
36.471464
146
0.616004
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
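The SocksiPy module above offers two entry points: socksocket.setproxy() for per-socket configuration, and setdefaultproxy() plus wrapmodule() for transparently patching any module that does 'import socket' at top level. A hedged usage sketch (Python 2, to match the bundled code; 'localhost', 1080 is an assumed SOCKS5 endpoint):

from httplib2 import socks

# Per-socket configuration: behaves like a normal socket once connected.
s = socks.socksocket()
s.setproxy(socks.PROXY_TYPE_SOCKS5, 'localhost', 1080)
s.connect(('example.com', 80))

# Or set a default proxy and patch a whole module; wrapmodule() requires
# that a default proxy was set first, and swaps in socksocket for socket.
import urllib2
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, 'localhost', 1080)
socks.wrapmodule(urllib2)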
7,596
__init__.py
CouchPotato_CouchPotatoServer/libs/httplib2/__init__.py
from __future__ import generators """ httplib2 A caching http interface that supports ETags and gzip to conserve bandwidth. Requires Python 2.3 or later Changelog: 2007-08-18, Rick: Modified so it's able to use a socks proxy if needed. """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = ["Thomas Broyer (t.broyer@ltgt.net)", "James Antill", "Xavier Verges Farrero", "Jonathan Feinberg", "Blair Zajac", "Sam Ruby", "Louis Nyffenegger"] __license__ = "MIT" __version__ = "0.9" import re import sys import email import email.Utils import email.Message import email.FeedParser import StringIO import gzip import zlib import httplib import urlparse import urllib import base64 import os import copy import calendar import time import random import errno try: from hashlib import sha1 as _sha, md5 as _md5 except ImportError: # prior to Python 2.5, these were separate modules import sha import md5 _sha = sha.new _md5 = md5.new import hmac from gettext import gettext as _ import socket try: from httplib2 import socks except ImportError: try: import socks except (ImportError, AttributeError): socks = None # Build the appropriate socket wrapper for ssl try: import ssl # python 2.6 ssl_SSLError = ssl.SSLError def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs): if disable_validation: cert_reqs = ssl.CERT_NONE else: cert_reqs = ssl.CERT_REQUIRED # We should be specifying SSL version 3 or TLS v1, but the ssl module # doesn't expose the necessary knobs. So we need to go with the default # of SSLv23. return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file, cert_reqs=cert_reqs, ca_certs=ca_certs) except (AttributeError, ImportError): ssl_SSLError = None def _ssl_wrap_socket(sock, key_file, cert_file, disable_validation, ca_certs): if not disable_validation: raise CertificateValidationUnsupported( "SSL certificate validation is not supported without " "the ssl module installed. To avoid this error, install " "the ssl module, or explicity disable validation.") ssl_sock = socket.ssl(sock, key_file, cert_file) return httplib.FakeSocket(sock, ssl_sock) if sys.version_info >= (2,3): from iri2uri import iri2uri else: def iri2uri(uri): return uri def has_timeout(timeout): # python 2.6 if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'): return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT) return (timeout is not None) __all__ = [ 'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent', 'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError', 'debuglevel', 'ProxiesUnavailableError'] # The httplib debug level, set to a non-zero value to get debug output debuglevel = 0 # A request will be tried 'RETRIES' times if it fails at the socket/connection level. RETRIES = 2 # Python 2.3 support if sys.version_info < (2,4): def sorted(seq): seq.sort() return seq # Python 2.3 support def HTTPResponse__getheaders(self): """Return list of (header, value) tuples.""" if self.msg is None: raise httplib.ResponseNotReady() return self.msg.items() if not hasattr(httplib.HTTPResponse, 'getheaders'): httplib.HTTPResponse.getheaders = HTTPResponse__getheaders # All exceptions raised here derive from HttpLib2Error class HttpLib2Error(Exception): pass # Some exceptions can be caught and optionally # be turned back into responses. 
class HttpLib2ErrorWithResponse(HttpLib2Error): def __init__(self, desc, response, content): self.response = response self.content = content HttpLib2Error.__init__(self, desc) class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass class RedirectLimit(HttpLib2ErrorWithResponse): pass class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass class MalformedHeader(HttpLib2Error): pass class RelativeURIError(HttpLib2Error): pass class ServerNotFoundError(HttpLib2Error): pass class ProxiesUnavailableError(HttpLib2Error): pass class CertificateValidationUnsupported(HttpLib2Error): pass class SSLHandshakeError(HttpLib2Error): pass class NotSupportedOnThisPlatform(HttpLib2Error): pass class CertificateHostnameMismatch(SSLHandshakeError): def __init__(self, desc, host, cert): HttpLib2Error.__init__(self, desc) self.host = host self.cert = cert # Open Items: # ----------- # Proxy support # Are we removing the cached content too soon on PUT (only delete on 200 Maybe?) # Pluggable cache storage (supports storing the cache in # flat files by default. We need a plug-in architecture # that can support Berkeley DB and Squid) # == Known Issues == # Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator. # Does not handle Cache-Control: max-stale # Does not use Age: headers when calculating cache freshness. # The number of redirections to follow before giving up. # Note that only GET redirects are automatically followed. # Will also honor 301 requests by saving that info and never # requesting that URI again. DEFAULT_MAX_REDIRECTS = 5 try: # Users can optionally provide a module that tells us where the CA_CERTS # are located. import ca_certs_locater CA_CERTS = ca_certs_locater.get() except ImportError: # Default CA certificates file bundled with httplib2. CA_CERTS = os.path.join( os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt") # Which headers are hop-by-hop headers by default HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade'] def _get_end2end_headers(response): hopbyhop = list(HOP_BY_HOP) hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')]) return [header for header in response.keys() if header not in hopbyhop] URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") def parse_uri(uri): """Parses a URI using the regex given in Appendix B of RFC 3986. (scheme, authority, path, query, fragment) = parse_uri(uri) """ groups = URI.match(uri).groups() return (groups[1], groups[3], groups[4], groups[6], groups[8]) def urlnorm(uri): (scheme, authority, path, query, fragment) = parse_uri(uri) if not scheme or not authority: raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri) authority = authority.lower() scheme = scheme.lower() if not path: path = "/" # Could do syntax based normalization of the URI before # computing the digest. See Section 6.2.2 of Std 66. 
request_uri = query and "?".join([path, query]) or path scheme = scheme.lower() defrag_uri = scheme + "://" + authority + request_uri return scheme, authority, request_uri, defrag_uri # Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/) re_url_scheme = re.compile(r'^\w+://') re_slash = re.compile(r'[?/:|]+') def safename(filename): """Return a filename suitable for the cache. Strips dangerous and common characters to create a filename we can use to store the cache in. """ try: if re_url_scheme.match(filename): if isinstance(filename,str): filename = filename.decode('utf-8') filename = filename.encode('idna') else: filename = filename.encode('idna') except UnicodeError: pass if isinstance(filename,unicode): filename=filename.encode('utf-8') filemd5 = _md5(filename).hexdigest() filename = re_url_scheme.sub("", filename) filename = re_slash.sub(",", filename) # limit length of filename if len(filename)>200: filename=filename[:200] return ",".join((filename, filemd5)) NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+') def _normalize_headers(headers): return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()]) def _parse_cache_control(headers): retval = {} if headers.has_key('cache-control'): parts = headers['cache-control'].split(',') parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")] parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")] retval = dict(parts_with_args + parts_wo_args) return retval # Whether to use a strict mode to parse WWW-Authenticate headers # Might lead to bad results in case of ill-formed header value, # so disabled by default, falling back to relaxed parsing. # Set to true to turn on, usefull for testing servers. USE_WWW_AUTH_STRICT_PARSING = 0 # In regex below: # [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP # "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space # Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both: # \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"? 
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$") WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$") UNQUOTE_PAIRS = re.compile(r'\\(.)') def _parse_www_authenticate(headers, headername='www-authenticate'): """Returns a dictionary of dictionaries, one dict per auth_scheme.""" retval = {} if headers.has_key(headername): try: authenticate = headers[headername].strip() www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED while authenticate: # Break off the scheme at the beginning of the line if headername == 'authentication-info': (auth_scheme, the_rest) = ('digest', authenticate) else: (auth_scheme, the_rest) = authenticate.split(" ", 1) # Now loop over all the key value pairs that come after the scheme, # being careful not to roll into the next scheme match = www_auth.search(the_rest) auth_params = {} while match: if match and len(match.groups()) == 3: (key, value, the_rest) = match.groups() auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')]) match = www_auth.search(the_rest) retval[auth_scheme.lower()] = auth_params authenticate = the_rest.strip() except ValueError: raise MalformedHeader("WWW-Authenticate") return retval def _entry_disposition(response_headers, request_headers): """Determine freshness from the Date, Expires and Cache-Control headers. We don't handle the following: 1. Cache-Control: max-stale 2. Age: headers are not used in the calculations. Not that this algorithm is simpler than you might think because we are operating as a private (non-shared) cache. This lets us ignore 's-maxage'. We can also ignore 'proxy-invalidate' since we aren't a proxy. We will never return a stale document as fresh as a design decision, and thus the non-implementation of 'max-stale'. This also lets us safely ignore 'must-revalidate' since we operate as if every server has sent 'must-revalidate'. Since we are private we get to ignore both 'public' and 'private' parameters. We also ignore 'no-transform' since we don't do any transformations. The 'no-store' parameter is handled at a higher level. 
So the only Cache-Control parameters we look at are: no-cache only-if-cached max-age min-fresh """ retval = "STALE" cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1: retval = "TRANSPARENT" if 'cache-control' not in request_headers: request_headers['cache-control'] = 'no-cache' elif cc.has_key('no-cache'): retval = "TRANSPARENT" elif cc_response.has_key('no-cache'): retval = "STALE" elif cc.has_key('only-if-cached'): retval = "FRESH" elif response_headers.has_key('date'): date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date'])) now = time.time() current_age = max(0, now - date) if cc_response.has_key('max-age'): try: freshness_lifetime = int(cc_response['max-age']) except ValueError: freshness_lifetime = 0 elif response_headers.has_key('expires'): expires = email.Utils.parsedate_tz(response_headers['expires']) if None == expires: freshness_lifetime = 0 else: freshness_lifetime = max(0, calendar.timegm(expires) - date) else: freshness_lifetime = 0 if cc.has_key('max-age'): try: freshness_lifetime = int(cc['max-age']) except ValueError: freshness_lifetime = 0 if cc.has_key('min-fresh'): try: min_fresh = int(cc['min-fresh']) except ValueError: min_fresh = 0 current_age += min_fresh if freshness_lifetime > current_age: retval = "FRESH" return retval def _decompressContent(response, new_content): content = new_content try: encoding = response.get('content-encoding', None) if encoding in ['gzip', 'deflate']: if encoding == 'gzip': content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read() if encoding == 'deflate': content = zlib.decompress(content) response['content-length'] = str(len(content)) # Record the historical presence of the encoding in a way the won't interfere. response['-content-encoding'] = response['content-encoding'] del response['content-encoding'] except IOError: content = "" raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content) return content def _updateCache(request_headers, response_headers, content, cache, cachekey): if cachekey: cc = _parse_cache_control(request_headers) cc_response = _parse_cache_control(response_headers) if cc.has_key('no-store') or cc_response.has_key('no-store'): cache.delete(cachekey) else: info = email.Message.Message() for key, value in response_headers.iteritems(): if key not in ['status','content-encoding','transfer-encoding']: info[key] = value # Add annotations to the cache to indicate what headers # are variant for this request. 
vary = response_headers.get('vary', None) if vary: vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header try: info[key] = request_headers[header] except KeyError: pass status = response_headers.status if status == 304: status = 200 status_header = 'status: %d\r\n' % status header_str = info.as_string() header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str) text = "".join([status_header, header_str, content]) cache.set(cachekey, text) def _cnonce(): dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest() return dig[:16] def _wsse_username_token(cnonce, iso_now, password): return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip() # For credentials we need two things, first # a pool of credential to try (not necesarily tied to BAsic, Digest, etc.) # Then we also need a list of URIs that have already demanded authentication # That list is tricky since sub-URIs can take the same auth, or the # auth scheme may change as you descend the tree. # So we also need each Auth instance to be able to tell us # how close to the 'top' it is. class Authentication(object): def __init__(self, credentials, host, request_uri, headers, response, content, http): (scheme, authority, path, query, fragment) = parse_uri(request_uri) self.path = path self.host = host self.credentials = credentials self.http = http def depth(self, request_uri): (scheme, authority, path, query, fragment) = parse_uri(request_uri) return request_uri[len(self.path):].count("/") def inscope(self, host, request_uri): # XXX Should we normalize the request_uri? (scheme, authority, path, query, fragment) = parse_uri(request_uri) return (host == self.host) and path.startswith(self.path) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header. Over-ride this in sub-classes.""" pass def response(self, response, content): """Gives us a chance to update with new nonces or such returned from the last authorized response. Over-rise this in sub-classes if necessary. Return TRUE is the request is to be retried, for example Digest may return stale=true. """ return False class BasicAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip() class DigestAuthentication(Authentication): """Only do qop='auth' and MD5, since that is all Apache currently implements""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['digest'] qop = self.challenge.get('qop', 'auth') self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None if self.challenge['qop'] is None: raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." 
% qop)) self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper() if self.challenge['algorithm'] != 'MD5': raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]]) self.challenge['nc'] = 1 def request(self, method, request_uri, headers, content, cnonce = None): """Modify the request headers""" H = lambda x: _md5(x).hexdigest() KD = lambda s, d: H("%s:%s" % (s, d)) A2 = "".join([method, ":", request_uri]) self.challenge['cnonce'] = cnonce or _cnonce() request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % ( self.challenge['nonce'], '%08x' % self.challenge['nc'], self.challenge['cnonce'], self.challenge['qop'], H(A2))) headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['nonce'], request_uri, self.challenge['algorithm'], request_digest, self.challenge['qop'], self.challenge['nc'], self.challenge['cnonce']) if self.challenge.get('opaque'): headers['authorization'] += ', opaque="%s"' % self.challenge['opaque'] self.challenge['nc'] += 1 def response(self, response, content): if not response.has_key('authentication-info'): challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {}) if 'true' == challenge.get('stale'): self.challenge['nonce'] = challenge['nonce'] self.challenge['nc'] = 1 return True else: updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {}) if updated_challenge.has_key('nextnonce'): self.challenge['nonce'] = updated_challenge['nextnonce'] self.challenge['nc'] = 1 return False class HmacDigestAuthentication(Authentication): """Adapted from Robert Sayre's code and DigestAuthentication above.""" __author__ = "Thomas Broyer (t.broyer@ltgt.net)" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') self.challenge = challenge['hmacdigest'] # TODO: self.challenge['domain'] self.challenge['reason'] = self.challenge.get('reason', 'unauthorized') if self.challenge['reason'] not in ['unauthorized', 'integrity']: self.challenge['reason'] = 'unauthorized' self.challenge['salt'] = self.challenge.get('salt', '') if not self.challenge.get('snonce'): raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty.")) self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1') if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm'])) self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1') if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']: raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." 
% self.challenge['pw-algorithm'])) if self.challenge['algorithm'] == 'HMAC-MD5': self.hashmod = _md5 else: self.hashmod = _sha if self.challenge['pw-algorithm'] == 'MD5': self.pwhashmod = _md5 else: self.pwhashmod = _sha self.key = "".join([self.credentials[0], ":", self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(), ":", self.challenge['realm']]) self.key = self.pwhashmod.new(self.key).hexdigest().lower() def request(self, method, request_uri, headers, content): """Modify the request headers""" keys = _get_end2end_headers(headers) keylist = "".join(["%s " % k for k in keys]) headers_val = "".join([headers[k] for k in keys]) created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime()) cnonce = _cnonce() request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val) request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower() headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % ( self.credentials[0], self.challenge['realm'], self.challenge['snonce'], cnonce, request_uri, created, request_digest, keylist) def response(self, response, content): challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {}) if challenge.get('reason') in ['integrity', 'stale']: return True return False class WsseAuthentication(Authentication): """This is thinly tested and should not be relied upon. At this time there isn't any third party server to test against. Blogger and TypePad implemented this algorithm at one point but Blogger has since switched to Basic over HTTPS and TypePad has implemented it wrong, by never issuing a 401 challenge but instead requiring your client to telepathically know that their endpoint is expecting WSSE profile="UsernameToken".""" def __init__(self, credentials, host, request_uri, headers, response, content, http): Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'WSSE profile="UsernameToken"' iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) cnonce = _cnonce() password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1]) headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % ( self.credentials[0], password_digest, cnonce, iso_now) class GoogleLoginAuthentication(Authentication): def __init__(self, credentials, host, request_uri, headers, response, content, http): from urllib import urlencode Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http) challenge = _parse_www_authenticate(response, 'www-authenticate') service = challenge['googlelogin'].get('service', 'xapi') # Bloggger actually returns the service in the challenge # For the rest we guess based on the URI if service == 'xapi' and request_uri.find("calendar") > 0: service = "cl" # No point in guessing Base or Spreadsheet #elif request_uri.find("spreadsheets") > 0: # service = "wise" auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent']) resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'}) lines = 
content.split('\n') d = dict([tuple(line.split("=", 1)) for line in lines if line]) if resp.status == 403: self.Auth = "" else: self.Auth = d['Auth'] def request(self, method, request_uri, headers, content): """Modify the request headers to add the appropriate Authorization header.""" headers['authorization'] = 'GoogleLogin Auth=' + self.Auth AUTH_SCHEME_CLASSES = { "basic": BasicAuthentication, "wsse": WsseAuthentication, "digest": DigestAuthentication, "hmacdigest": HmacDigestAuthentication, "googlelogin": GoogleLoginAuthentication } AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"] class FileCache(object): """Uses a local directory as a store for cached files. Not really safe to use if multiple threads or processes are going to be running on the same cache. """ def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior self.cache = cache self.safe = safe if not os.path.exists(cache): os.makedirs(self.cache) def get(self, key): retval = None cacheFullPath = os.path.join(self.cache, self.safe(key)) try: f = file(cacheFullPath, "rb") retval = f.read() f.close() except IOError: pass return retval def set(self, key, value): cacheFullPath = os.path.join(self.cache, self.safe(key)) f = file(cacheFullPath, "wb") f.write(value) f.close() def delete(self, key): cacheFullPath = os.path.join(self.cache, self.safe(key)) if os.path.exists(cacheFullPath): os.remove(cacheFullPath) class Credentials(object): def __init__(self): self.credentials = [] def add(self, name, password, domain=""): self.credentials.append((domain.lower(), name, password)) def clear(self): self.credentials = [] def iter(self, domain): for (cdomain, name, password) in self.credentials: if cdomain == "" or domain == cdomain: yield (name, password) class KeyCerts(Credentials): """Identical to Credentials except that name/password are mapped to key/cert.""" pass class AllHosts(object): pass class ProxyInfo(object): """Collect information required to use a proxy.""" bypass_hosts = () def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None): """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX constants. For example: p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000) """ self.proxy_type = proxy_type self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_rdns = proxy_rdns self.proxy_user = proxy_user self.proxy_pass = proxy_pass def astuple(self): return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass) def isgood(self): return (self.proxy_host != None) and (self.proxy_port != None) def applies_to(self, hostname): return not self.bypass_host(hostname) def bypass_host(self, hostname): """Has this host been excluded from the proxy config""" if self.bypass_hosts is AllHosts: return True bypass = False for domain in self.bypass_hosts: if hostname.endswith(domain): bypass = True return bypass def proxy_info_from_environment(method='http'): """ Read proxy info from the environment variables. 
""" if method not in ['http', 'https']: return env_var = method + '_proxy' url = os.environ.get(env_var, os.environ.get(env_var.upper())) if not url: return pi = proxy_info_from_url(url, method) no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', '')) bypass_hosts = [] if no_proxy: bypass_hosts = no_proxy.split(',') # special case, no_proxy=* means all hosts bypassed if no_proxy == '*': bypass_hosts = AllHosts pi.bypass_hosts = bypass_hosts return pi def proxy_info_from_url(url, method='http'): """ Construct a ProxyInfo from a URL (such as http_proxy env var) """ url = urlparse.urlparse(url) username = None password = None port = None if '@' in url[1]: ident, host_port = url[1].split('@', 1) if ':' in ident: username, password = ident.split(':', 1) else: password = ident else: host_port = url[1] if ':' in host_port: host, port = host_port.split(':', 1) else: host = host_port if port: port = int(port) else: port = dict(https=443, http=80)[method] proxy_type = 3 # socks.PROXY_TYPE_HTTP return ProxyInfo( proxy_type = proxy_type, proxy_host = host, proxy_port = port, proxy_user = username or None, proxy_pass = password or None, ) class HTTPConnectionWithTimeout(httplib.HTTPConnection): """ HTTPConnection subclass that supports timeouts All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None): httplib.HTTPConnection.__init__(self, host, port, strict) self.timeout = timeout self.proxy_info = proxy_info def connect(self): """Connect to the host and port specified in __init__.""" # Mostly verbatim from httplib.py. if self.proxy_info and socks is None: raise ProxiesUnavailableError( 'Proxy support missing but proxy use was requested!') msg = "getaddrinfo returns an empty list" if self.proxy_info and self.proxy_info.isgood(): use_proxy = True proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() else: use_proxy = False if use_proxy and proxy_rdns: host = proxy_host port = proxy_port else: host = self.host port = self.port for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: if use_proxy: self.sock = socks.socksocket(af, socktype, proto) self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) else: self.sock = socket.socket(af, socktype, proto) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # Different from httplib: support timeouts. if has_timeout(self.timeout): self.sock.settimeout(self.timeout) # End of difference from httplib. if self.debuglevel > 0: print "connect: (%s, %s) ************" % (self.host, self.port) if use_proxy: print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) self.sock.connect((self.host, self.port) + sa[2:]) except socket.error, msg: if self.debuglevel > 0: print "connect fail: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg class HTTPSConnectionWithTimeout(httplib.HTTPSConnection): """ This class allows communication via SSL. All timeouts are in seconds. 
If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout """ def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict) self.timeout = timeout self.proxy_info = proxy_info if ca_certs is None: ca_certs = CA_CERTS self.ca_certs = ca_certs self.disable_ssl_certificate_validation = \ disable_ssl_certificate_validation # The following two methods were adapted from https_wrapper.py, released # with the Google Appengine SDK at # http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py # under the following license: # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def _GetValidHostsForCert(self, cert): """Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs. """ if 'subjectAltName' in cert: return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] else: return [x[0][1] for x in cert['subject'] if x[0][0].lower() == 'commonname'] def _ValidateCertificateHostname(self, cert, hostname): """Validates that a given hostname is valid for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. hostname: The hostname to test. Returns: bool: Whether or not the hostname is valid for this certificate. """ hosts = self._GetValidHostsForCert(cert) for host in hosts: host_re = host.replace('.', '\.').replace('*', '[^.]*') if re.search('^%s$' % (host_re,), hostname, re.I): return True return False def connect(self): "Connect to a host on a given (SSL) port." 
msg = "getaddrinfo returns an empty list" if self.proxy_info and self.proxy_info.isgood(): use_proxy = True proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple() else: use_proxy = False if use_proxy and proxy_rdns: host = proxy_host port = proxy_port else: host = self.host port = self.port address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM) for family, socktype, proto, canonname, sockaddr in address_info: try: if use_proxy: sock = socks.socksocket(family, socktype, proto) sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass) else: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if has_timeout(self.timeout): sock.settimeout(self.timeout) sock.connect((self.host, self.port)) self.sock =_ssl_wrap_socket( sock, self.key_file, self.cert_file, self.disable_ssl_certificate_validation, self.ca_certs) if self.debuglevel > 0: print "connect: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if not self.disable_ssl_certificate_validation: cert = self.sock.getpeercert() hostname = self.host.split(':', 0)[0] if not self._ValidateCertificateHostname(cert, hostname): raise CertificateHostnameMismatch( 'Server presented certificate that does not match ' 'host %s: %s' % (hostname, cert), hostname, cert) except ssl_SSLError, e: if sock: sock.close() if self.sock: self.sock.close() self.sock = None # Unfortunately the ssl module doesn't seem to provide any way # to get at more detailed error information, in particular # whether the error is due to certificate validation or # something else (such as SSL protocol mismatch). if e.errno == ssl.SSL_ERROR_SSL: raise SSLHandshakeError(e) else: raise except (socket.timeout, socket.gaierror): raise except socket.error, msg: if self.debuglevel > 0: print "connect fail: (%s, %s)" % (self.host, self.port) if use_proxy: print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)) if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg SCHEME_TO_CONNECTION = { 'http': HTTPConnectionWithTimeout, 'https': HTTPSConnectionWithTimeout } # Use a different connection object for Google App Engine try: try: from google.appengine.api import apiproxy_stub_map if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: raise ImportError # Bail out; we're not actually running on App Engine. from google.appengine.api.urlfetch import fetch from google.appengine.api.urlfetch import InvalidURLError except (ImportError, AttributeError): from google3.apphosting.api import apiproxy_stub_map if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None: raise ImportError # Bail out; we're not actually running on App Engine. from google3.apphosting.api.urlfetch import fetch from google3.apphosting.api.urlfetch import InvalidURLError def _new_fixed_fetch(validate_certificate): def fixed_fetch(url, payload=None, method="GET", headers={}, allow_truncated=False, follow_redirects=True, deadline=None): if deadline is None: deadline = socket.getdefaulttimeout() or 5 return fetch(url, payload=payload, method=method, headers=headers, allow_truncated=allow_truncated, follow_redirects=follow_redirects, deadline=deadline, validate_certificate=validate_certificate) return fixed_fetch class AppEngineHttpConnection(httplib.HTTPConnection): """Use httplib on App Engine, but compensate for its weirdness. 
The parameters key_file, cert_file, proxy_info, ca_certs, and disable_ssl_certificate_validation are all dropped on the ground. """ def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPConnection.__init__(self, host, port=port, strict=strict, timeout=timeout) class AppEngineHttpsConnection(httplib.HTTPSConnection): """Same as AppEngineHttpConnection, but for HTTPS URIs.""" def __init__(self, host, port=None, key_file=None, cert_file=None, strict=None, timeout=None, proxy_info=None, ca_certs=None, disable_ssl_certificate_validation=False): httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file, cert_file=cert_file, strict=strict, timeout=timeout) self._fetch = _new_fixed_fetch( not disable_ssl_certificate_validation) # Update the connection classes to use the Google App Engine specific ones. SCHEME_TO_CONNECTION = { 'http': AppEngineHttpConnection, 'https': AppEngineHttpsConnection } except (ImportError, AttributeError): pass class Http(object): """An HTTP client that handles: - all methods - caching - ETags - compression, - HTTPS - Basic - Digest - WSSE and more. """ def __init__(self, cache=None, timeout=None, proxy_info=proxy_info_from_environment, ca_certs=None, disable_ssl_certificate_validation=False): """If 'cache' is a string then it is used as a directory name for a disk cache. Otherwise it must be an object that supports the same interface as FileCache. All timeouts are in seconds. If None is passed for timeout then Python's default timeout for sockets will be used. See for example the docs of socket.setdefaulttimeout(): http://docs.python.org/library/socket.html#socket.setdefaulttimeout `proxy_info` may be: - a callable that takes the http scheme ('http' or 'https') and returns a ProxyInfo instance per request. By default, uses proxy_info_from_environment. - a ProxyInfo instance (static proxy config). - None (proxy disabled). ca_certs is the path of a file containing root CA certificates for SSL server certificate validation. By default, a CA cert file bundled with httplib2 is used. If disable_ssl_certificate_validation is true, SSL cert validation will not be performed. """ self.proxy_info = proxy_info self.ca_certs = ca_certs self.disable_ssl_certificate_validation = \ disable_ssl_certificate_validation # Map domain name to an httplib connection self.connections = {} # The location of the cache, for now a directory # where cached responses are held. if cache and isinstance(cache, basestring): self.cache = FileCache(cache) else: self.cache = cache # Name/password self.credentials = Credentials() # Key/cert self.certificates = KeyCerts() # authorization objects self.authorizations = [] # If set to False then no redirects are followed, even safe ones. self.follow_redirects = True # Which HTTP methods do we apply optimistic concurrency to, i.e. # which methods get an "if-match:" etag header added to them. self.optimistic_concurrency_methods = ["PUT", "PATCH"] # If 'follow_redirects' is True, and this is set to True then # all redirects are followed, including unsafe ones. self.follow_all_redirects = False self.ignore_etag = False self.force_exception_to_status_code = False self.timeout = timeout # Keep Authorization: headers on a redirect. 
self.forward_authorization_headers = False def __getstate__(self): state_dict = copy.copy(self.__dict__) # In case request is augmented by some foreign object such as # credentials which handle auth if 'request' in state_dict: del state_dict['request'] if 'connections' in state_dict: del state_dict['connections'] return state_dict def __setstate__(self, state): self.__dict__.update(state) self.connections = {} def _auth_from_challenge(self, host, request_uri, headers, response, content): """A generator that creates Authorization objects that can be applied to requests. """ challenges = _parse_www_authenticate(response, 'www-authenticate') for cred in self.credentials.iter(host): for scheme in AUTH_SCHEME_ORDER: if challenges.has_key(scheme): yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self) def add_credentials(self, name, password, domain=""): """Add a name and password that will be used any time a request requires authentication.""" self.credentials.add(name, password, domain) def add_certificate(self, key, cert, domain): """Add a key and cert that will be used any time a request requires authentication.""" self.certificates.add(key, cert, domain) def clear_credentials(self): """Remove all the names and passwords that are used for authentication""" self.credentials.clear() self.authorizations = [] def _conn_request(self, conn, request_uri, method, body, headers): i = 0 seen_bad_status_line = False while i < RETRIES: i += 1 try: if hasattr(conn, 'sock') and conn.sock is None: conn.connect() conn.request(method, request_uri, body, headers) except socket.timeout: raise except socket.gaierror: conn.close() raise ServerNotFoundError("Unable to find the server at %s" % conn.host) except ssl_SSLError: conn.close() raise except socket.error, e: err = 0 if hasattr(e, 'args'): err = getattr(e, 'args')[0] else: err = e.errno if err == errno.ECONNREFUSED: # Connection refused raise except httplib.HTTPException: # Just because the server closed the connection doesn't apparently mean # that the server didn't send a response. if hasattr(conn, 'sock') and conn.sock is None: if i < RETRIES-1: conn.close() conn.connect() continue else: conn.close() raise if i < RETRIES-1: conn.close() conn.connect() continue try: response = conn.getresponse() except httplib.BadStatusLine: # If we get a BadStatusLine on the first try then that means # the connection just went stale, so retry regardless of the # number of RETRIES set. 
if not seen_bad_status_line and i == 1: i = 0 seen_bad_status_line = True conn.close() conn.connect() continue else: conn.close() raise except (socket.error, httplib.HTTPException): if i < RETRIES-1: conn.close() conn.connect() continue else: conn.close() raise else: content = "" if method == "HEAD": conn.close() else: content = response.read() response = Response(response) if method != "HEAD": content = _decompressContent(response, content) break return (response, content) def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey): """Do the actual request using the connection object and also follow one level of redirects if necessary""" auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)] auth = auths and sorted(auths)[0][1] or None if auth: auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers) if auth: if auth.response(response, body): auth.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers ) response._stale_digest = 1 if response.status == 401: for authorization in self._auth_from_challenge(host, request_uri, headers, response, content): authorization.request(method, request_uri, headers, body) (response, content) = self._conn_request(conn, request_uri, method, body, headers, ) if response.status != 401: self.authorizations.append(authorization) authorization.response(response, body) break if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303): if self.follow_redirects and response.status in [300, 301, 302, 303, 307]: # Pick out the location header and basically start from the beginning # remembering first to strip the ETag header and decrement our 'depth' if redirections: if not response.has_key('location') and response.status != 300: raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content) # Fix-up relative redirects (which violate an RFC 2616 MUST) if response.has_key('location'): location = response['location'] (scheme, authority, path, query, fragment) = parse_uri(location) if authority == None: response['location'] = urlparse.urljoin(absolute_uri, location) if response.status == 301 and method in ["GET", "HEAD"]: response['-x-permanent-redirect-url'] = response['location'] if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) if headers.has_key('if-none-match'): del headers['if-none-match'] if headers.has_key('if-modified-since'): del headers['if-modified-since'] if 'authorization' in headers and not self.forward_authorization_headers: del headers['authorization'] if response.has_key('location'): location = response['location'] old_response = copy.deepcopy(response) if not old_response.has_key('content-location'): old_response['content-location'] = absolute_uri redirect_method = method if response.status in [302, 303]: redirect_method = "GET" body = None (response, content) = self.request( location, method=redirect_method, body=body, headers=headers, redirections=redirections - 1) response.previous = old_response else: raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content) elif response.status in [200, 203] and method in ["GET", "HEAD"]: # Don't cache 206's since we aren't going to handle byte range 
requests if not response.has_key('content-location'): response['content-location'] = absolute_uri _updateCache(headers, response, content, self.cache, cachekey) return (response, content) def _normalize_headers(self, headers): return _normalize_headers(headers) # Need to catch and rebrand some exceptions # Then need to optionally turn all exceptions into status codes # including all socket.* and httplib.* exceptions. def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None): """ Performs a single HTTP request. The 'uri' is the URI of the HTTP resource and can begin with either 'http' or 'https'. The value of 'uri' must be an absolute URI. The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc. There is no restriction on the methods allowed. The 'body' is the entity body to be sent with the request. It is a string object. Any extra headers that are to be sent with the request should be provided in the 'headers' dictionary. The maximum number of redirect to follow before raising an exception is 'redirections. The default is 5. The return value is a tuple of (response, content), the first being and instance of the 'Response' class, the second being a string that contains the response entity body. """ try: if headers is None: headers = {} else: headers = self._normalize_headers(headers) if not headers.has_key('user-agent'): headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__ uri = iri2uri(uri) (scheme, authority, request_uri, defrag_uri) = urlnorm(uri) domain_port = authority.split(":")[0:2] if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http': scheme = 'https' authority = domain_port[0] proxy_info = self._get_proxy_info(scheme, authority) conn_key = scheme+":"+authority if conn_key in self.connections: conn = self.connections[conn_key] else: if not connection_type: connection_type = SCHEME_TO_CONNECTION[scheme] certs = list(self.certificates.iter(authority)) if scheme == 'https': if certs: conn = self.connections[conn_key] = connection_type( authority, key_file=certs[0][0], cert_file=certs[0][1], timeout=self.timeout, proxy_info=proxy_info, ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=proxy_info, ca_certs=self.ca_certs, disable_ssl_certificate_validation= self.disable_ssl_certificate_validation) else: conn = self.connections[conn_key] = connection_type( authority, timeout=self.timeout, proxy_info=proxy_info) conn.set_debuglevel(debuglevel) if 'range' not in headers and 'accept-encoding' not in headers: headers['accept-encoding'] = 'gzip, deflate' info = email.Message.Message() cached_value = None if self.cache: cachekey = defrag_uri cached_value = self.cache.get(cachekey) if cached_value: # info = email.message_from_string(cached_value) # # Need to replace the line above with the kludge below # to fix the non-existent bug not fixed in this # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html try: info, content = cached_value.split('\r\n\r\n', 1) feedparser = email.FeedParser.FeedParser() feedparser.feed(info) info = feedparser.close() feedparser._parse = None except (IndexError, ValueError): self.cache.delete(cachekey) cachekey = None cached_value = None else: cachekey = None if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not 
self.ignore_etag and 'if-match' not in headers: # http://www.w3.org/1999/04/Editing/ headers['if-match'] = info['etag'] if method not in ["GET", "HEAD"] and self.cache and cachekey: # RFC 2616 Section 13.10 self.cache.delete(cachekey) # Check the vary header in the cache to see if this request # matches what varies in the cache. if method in ['GET', 'HEAD'] and 'vary' in info: vary = info['vary'] vary_headers = vary.lower().replace(' ', '').split(',') for header in vary_headers: key = '-varied-%s' % header value = info[key] if headers.get(header, None) != value: cached_value = None break if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers: if info.has_key('-x-permanent-redirect-url'): # Should cached permanent redirects be counted in our redirection count? For now, yes. if redirections <= 0: raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "") (response, new_content) = self.request( info['-x-permanent-redirect-url'], method='GET', headers=headers, redirections=redirections - 1) response.previous = Response(info) response.previous.fromcache = True else: # Determine our course of action: # Is the cached entry fresh or stale? # Has the client requested a non-cached response? # # There seems to be three possible answers: # 1. [FRESH] Return the cache entry w/o doing a GET # 2. [STALE] Do the GET (but add in cache validators if available) # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request entry_disposition = _entry_disposition(info, headers) if entry_disposition == "FRESH": if not cached_value: info['status'] = '504' content = "" response = Response(info) if cached_value: response.fromcache = True return (response, content) if entry_disposition == "STALE": if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers: headers['if-none-match'] = info['etag'] if info.has_key('last-modified') and not 'last-modified' in headers: headers['if-modified-since'] = info['last-modified'] elif entry_disposition == "TRANSPARENT": pass (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) if response.status == 304 and method == "GET": # Rewrite the cache entry with the new end-to-end headers # Take all headers that are in response # and overwrite their values in info. # unless they are hop-by-hop, or are listed in the connection header. 
for key in _get_end2end_headers(response): info[key] = response[key] merged_response = Response(info) if hasattr(response, "_stale_digest"): merged_response._stale_digest = response._stale_digest _updateCache(headers, merged_response, content, self.cache, cachekey) response = merged_response response.status = 200 response.fromcache = True elif response.status == 200: content = new_content else: self.cache.delete(cachekey) content = new_content else: cc = _parse_cache_control(headers) if cc.has_key('only-if-cached'): info['status'] = '504' response = Response(info) content = "" else: (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey) except Exception, e: if self.force_exception_to_status_code: if isinstance(e, HttpLib2ErrorWithResponse): response = e.response content = e.content response.status = 500 response.reason = str(e) elif isinstance(e, socket.timeout): content = "Request Timeout" response = Response({ "content-type": "text/plain", "status": "408", "content-length": len(content) }) response.reason = "Request Timeout" else: content = str(e) response = Response({ "content-type": "text/plain", "status": "400", "content-length": len(content) }) response.reason = "Bad Request" else: raise return (response, content) def _get_proxy_info(self, scheme, authority): """Return a ProxyInfo instance (or None) based on the scheme and authority. """ hostname, port = urllib.splitport(authority) proxy_info = self.proxy_info if callable(proxy_info): proxy_info = proxy_info(scheme) if (hasattr(proxy_info, 'applies_to') and not proxy_info.applies_to(hostname)): proxy_info = None return proxy_info class Response(dict): """An object more like email.Message than httplib.HTTPResponse.""" """Is this response from our local cache""" fromcache = False """HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """ version = 11 "Status code returned by server. " status = 200 """Reason phrase returned by server.""" reason = "Ok" previous = None def __init__(self, info): # info is either an email.Message or # an httplib.HTTPResponse object. if isinstance(info, httplib.HTTPResponse): for key, value in info.getheaders(): self[key.lower()] = value self.status = info.status self['status'] = str(self.status) self.reason = info.reason self.version = info.version elif isinstance(info, email.Message.Message): for key, value in info.items(): self[key.lower()] = value self.status = int(self['status']) else: for key, value in info.iteritems(): self[key.lower()] = value self.status = int(self.get('status', self.status)) self.reason = self.get('reason', self.reason) def __getattr__(self, name): if name == 'dict': return self else: raise AttributeError, name
70,465
Python
.py
1,456
36.774725
235
0.590071
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
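The httplib2 __init__.py dumped above centres on the Http class: request() drives the cache (a FileCache when a directory name is given), applies ETag revalidation, follows redirects, and answers auth challenges registered through add_credentials(). The following minimal usage sketch is not part of the original file; it assumes the package is importable as httplib2 and uses a hypothetical cache directory and URL (Python 2 syntax, to match the dump).

# Minimal usage sketch for the Http class above (illustrative, Python 2).
import httplib2

h = httplib2.Http('.cache')            # string cache -> on-disk FileCache
h.add_credentials('user', 'secret')    # tried when a 401 challenge arrives

# request() returns (Response, content): a dict-like header object plus the
# (possibly decompressed) entity body as a byte string.
response, content = h.request('http://example.org/', 'GET')

# Repeating the request may be answered from the cache; fromcache reports it.
response, content = h.request('http://example.org/', 'GET')
print response.fromcache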
7,597
iri2uri.py
CouchPotato_CouchPotatoServer/libs/httplib2/iri2uri.py
""" iri2uri Converts an IRI to a URI. """ __author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = [] __version__ = "1.0.0" __license__ = "MIT" __history__ = """ """ import urlparse # Convert an IRI to a URI following the rules in RFC 3987 # # The characters we need to enocde and escape are defined in the spec: # # iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD # ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF # / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD # / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD # / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD # / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD # / %xD0000-DFFFD / %xE1000-EFFFD escape_range = [ (0xA0, 0xD7FF), (0xE000, 0xF8FF), (0xF900, 0xFDCF), (0xFDF0, 0xFFEF), (0x10000, 0x1FFFD), (0x20000, 0x2FFFD), (0x30000, 0x3FFFD), (0x40000, 0x4FFFD), (0x50000, 0x5FFFD), (0x60000, 0x6FFFD), (0x70000, 0x7FFFD), (0x80000, 0x8FFFD), (0x90000, 0x9FFFD), (0xA0000, 0xAFFFD), (0xB0000, 0xBFFFD), (0xC0000, 0xCFFFD), (0xD0000, 0xDFFFD), (0xE1000, 0xEFFFD), (0xF0000, 0xFFFFD), (0x100000, 0x10FFFD), ] def encode(c): retval = c i = ord(c) for low, high in escape_range: if i < low: break if i >= low and i <= high: retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) break return retval def iri2uri(uri): """Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.""" if isinstance(uri ,unicode): (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) authority = authority.encode('idna') # For each character in 'ucschar' or 'iprivate' # 1. encode as utf-8 # 2. then %-encode each octet of that utf-8 uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) uri = "".join([encode(c) for c in uri]) return uri if __name__ == "__main__": import unittest class Test(unittest.TestCase): def test_uris(self): """Test that URIs are invariant under the transformation.""" invariant = [ u"ftp://ftp.is.co.za/rfc/rfc1808.txt", u"http://www.ietf.org/rfc/rfc2396.txt", u"ldap://[2001:db8::7]/c=GB?objectClass?one", u"mailto:John.Doe@example.com", u"news:comp.infosystems.www.servers.unix", u"tel:+1-816-555-1212", u"telnet://192.0.2.16:80/", u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] for uri in invariant: self.assertEqual(uri, iri2uri(uri)) def test_iri(self): """ Test that the right type of escaping is done for each part of the URI.""" self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) unittest.main()
3,828
Python
.py
94
33.93617
143
0.597902
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
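iri2uri.py above IDNA-encodes the authority and percent-escapes any ucschar/iprivate characters as UTF-8 octets, turning a unicode IRI into a plain ASCII URI. A short illustrative sketch follows; it assumes the module is importable as httplib2.iri2uri, and the expected output is taken from the module's own tests.

# Illustrative only (Python 2); inputs must be unicode, per the docstring.
from httplib2.iri2uri import iri2uri

print iri2uri(u"http://\N{COMET}.com/\N{COMET}")
# -> http://xn--o3h.com/%E2%98%84  (IDNA-encoded host, %-escaped path)

# ASCII URIs pass through unchanged, and applying the function twice is safe.
print iri2uri(u"http://www.ietf.org/rfc/rfc2396.txt")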
7,598
core.py
CouchPotato_CouchPotatoServer/libs/certifi/core.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem. """ import os def where(): f = os.path.split(__file__)[0] return os.path.join(f, 'cacert.pem') if __name__ == '__main__': print(where())
288
Python
.py
13
19.769231
60
0.609665
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
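certifi's where() simply returns the path of the cacert.pem shipped next to core.py, intended to be handed to whatever client performs SSL certificate validation. A small sketch, assuming the package re-exports where() (otherwise import it from certifi.core) and wiring it into the httplib2 ca_certs parameter documented earlier:

# Sketch: point an SSL-validating client at certifi's CA bundle (Python 2).
import certifi
import httplib2

ca_bundle = certifi.where()                # absolute path to cacert.pem
h = httplib2.Http(ca_certs=ca_bundle)      # certificate validation stays on
response, content = h.request('https://example.org/')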
7,599
eucjpprober.py
CouchPotato_CouchPotatoServer/libs/chardet/eucjpprober.py
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . import constants from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCJPDistributionAnalysis from .jpcntx import EUCJPContextAnalysis from .mbcssm import EUCJPSMModel class EUCJPProber(MultiByteCharSetProber): def __init__(self): MultiByteCharSetProber.__init__(self) self._mCodingSM = CodingStateMachine(EUCJPSMModel) self._mDistributionAnalyzer = EUCJPDistributionAnalysis() self._mContextAnalyzer = EUCJPContextAnalysis() self.reset() def reset(self): MultiByteCharSetProber.reset(self) self._mContextAnalyzer.reset() def get_charset_name(self): return "EUC-JP" def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte codingState = self._mCodingSM.next_state(aBuf[i]) if codingState == constants.eError: if constants._debug: sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') self._mState = constants.eNotMe break elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mContextAnalyzer.feed(self._mLastChar, charLen) self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mLastChar[0] = aBuf[aLen - 1] if self.get_state() == constants.eDetecting: if (self._mContextAnalyzer.got_enough_data() and (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() def get_confidence(self): contxtCf = self._mContextAnalyzer.get_confidence() distribCf = self._mDistributionAnalyzer.get_confidence() return max(contxtCf, distribCf)
3,678
Python
.py
80
37.025
78
0.633779
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
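EUCJPProber runs every input byte through a coding state machine and feeds the context and distribution analyzers until the confidence passes SHORTCUT_THRESHOLD; applications normally reach it through chardet's top-level detect() helper rather than instantiating the prober directly. A hedged sketch of that entry point (Python 2; the sample text is illustrative and repeated so the analyzers see enough data):

# Sketch: detect() runs EUCJPProber alongside the other multi-byte probers.
import chardet

# Repeat a short Japanese phrase so the distribution analyzer has enough bytes.
sample = (u'\u3053\u3093\u306b\u3061\u306f\u4e16\u754c' * 50).encode('euc-jp')
result = chardet.detect(sample)
print result  # e.g. {'encoding': 'EUC-JP', 'confidence': 0.99}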