id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
20,900
test_sessionauthenticate.py
evilhero_mylar/lib/cherrypy/test/test_sessionauthenticate.py
import cherrypy from cherrypy.test import helper class SessionAuthenticateTest(helper.CPWebCase): def setup_server(): def check(username, password): # Dummy check_username_and_password function if username != 'test' or password != 'password': return 'Wrong login/password' def augment_params(): # A simple tool to add some things to request.params # This is to check to make sure that session_auth can handle request # params (ticket #780) cherrypy.request.params["test"] = "test" cherrypy.tools.augment_params = cherrypy.Tool('before_handler', augment_params, None, priority=30) class Test: _cp_config = {'tools.sessions.on': True, 'tools.session_auth.on': True, 'tools.session_auth.check_username_and_password': check, 'tools.augment_params.on': True, } def index(self, **kwargs): return "Hi %s, you are logged in" % cherrypy.request.login index.exposed = True cherrypy.tree.mount(Test()) setup_server = staticmethod(setup_server) def testSessionAuthenticate(self): # request a page and check for login form self.getPage('/') self.assertInBody('<form method="post" action="do_login">') # setup credentials login_body = 'username=test&password=password&from_page=/' # attempt a login self.getPage('/do_login', method='POST', body=login_body) self.assertStatus((302, 303)) # get the page now that we are logged in self.getPage('/', self.cookies) self.assertBody('Hi test, you are logged in') # do a logout self.getPage('/do_logout', self.cookies, method='POST') self.assertStatus((302, 303)) # verify we are logged out self.getPage('/', self.cookies) self.assertInBody('<form method="post" action="do_login">')
2,170
Python
.py
44
35.613636
82
0.596726
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,901
modfcgid.py
evilhero_mylar/lib/cherrypy/test/modfcgid.py
"""Wrapper for mod_fcgid, for use as a CherryPy HTTP server when testing. To autostart fcgid, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl", "apache2ctl", or "httpd"--create a symlink to them if needed. You'll also need the WSGIServer from flup.servers. See http://projects.amor.org/misc/wiki/ModPythonGateway KNOWN BUGS ========== 1. Apache processes Range headers automatically; CherryPy's truncated output is then truncated again by Apache. See test_core.testRanges. This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. 4. Apache replaces status "reason phrases" automatically. For example, CherryPy may set "304 Not modified" but Apache will write out "304 Not Modified" (capital "M"). 5. Apache does not allow custom error codes as per the spec. 6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the Request-URI too early. 7. mod_python will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. Apache will output a "Content-Length: 0" response header even if there's no response entity body. This isn't really a bug; it just differs from the CherryPy default. 
""" import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import sys import time import cherrypy from cherrypy._cpcompat import ntob from cherrypy.process import plugins, servers from cherrypy.test import helper def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output APACHE_PATH = "httpd" CONF_PATH = "fcgi.conf" conf_fcgid = """ # Apache2 server conf file for testing CherryPy with mod_fcgid. DocumentRoot "%(root)s" ServerName 127.0.0.1 Listen %(port)s LoadModule fastcgi_module modules/mod_fastcgi.dll LoadModule rewrite_module modules/mod_rewrite.so Options ExecCGI SetHandler fastcgi-script RewriteEngine On RewriteRule ^(.*)$ /fastcgi.pyc [L] FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000 """ class ModFCGISupervisor(helper.LocalSupervisor): using_apache = True using_wsgi = True template = conf_fcgid def __str__(self): return "FCGI Server on %s:%s" % (self.host, self.port) def start(self, modulename): cherrypy.server.httpserver = servers.FlupFCGIServer( application=cherrypy.tree, bindAddress=('127.0.0.1', 4000)) cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000) # For FCGI, we both start apache... self.start_apache() # ...and our local server helper.LocalServer.start(self, modulename) def start_apache(self): fcgiconf = CONF_PATH if not os.path.isabs(fcgiconf): fcgiconf = os.path.join(curdir, fcgiconf) # Write the Apache conf file. 
f = open(fcgiconf, 'wb') try: server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1] output = self.template % {'port': self.port, 'root': curdir, 'server': server} output = ntob(output.replace('\r\n', '\n')) f.write(output) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % fcgiconf) if result: print(result) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") helper.LocalServer.stop(self) def sync_apps(self): cherrypy.server.httpserver.fcgiserver.application = self.get_app()
4,308
Python
.py
101
36.623762
77
0.691528
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,902
_test_states_demo.py
evilhero_mylar/lib/cherrypy/test/_test_states_demo.py
import os import sys import time starttime = time.time() import cherrypy class Root: def index(self): return "Hello World" index.exposed = True def mtimes(self): return repr(cherrypy.engine.publish("Autoreloader", "mtimes")) mtimes.exposed = True def pid(self): return str(os.getpid()) pid.exposed = True def start(self): return repr(starttime) start.exposed = True def exit(self): # This handler might be called before the engine is STARTED if an # HTTP worker thread handles it before the HTTP server returns # control to engine.start. We avoid that race condition here # by waiting for the Bus to be STARTED. cherrypy.engine.wait(state=cherrypy.engine.states.STARTED) cherrypy.engine.exit() exit.exposed = True def unsub_sig(): cherrypy.log("unsubsig: %s" % cherrypy.config.get('unsubsig', False)) if cherrypy.config.get('unsubsig', False): cherrypy.log("Unsubscribing the default cherrypy signal handler") cherrypy.engine.signal_handler.unsubscribe() try: from signal import signal, SIGTERM except ImportError: pass else: def old_term_handler(signum=None, frame=None): cherrypy.log("I am an old SIGTERM handler.") sys.exit(0) cherrypy.log("Subscribing the new one.") signal(SIGTERM, old_term_handler) cherrypy.engine.subscribe('start', unsub_sig, priority=100) def starterror(): if cherrypy.config.get('starterror', False): zerodiv = 1 / 0 cherrypy.engine.subscribe('start', starterror, priority=6) def log_test_case_name(): if cherrypy.config.get('test_case_name', False): cherrypy.log("STARTED FROM: %s" % cherrypy.config.get('test_case_name')) cherrypy.engine.subscribe('start', log_test_case_name, priority=6) cherrypy.tree.mount(Root(), '/', {'/': {}})
1,945
Python
.py
51
31.588235
80
0.678706
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,903
logtest.py
evilhero_mylar/lib/cherrypy/test/logtest.py
"""logtest, a unittest.TestCase helper for testing log output.""" import sys import time import cherrypy try: # On Windows, msvcrt.getch reads a single char without output. import msvcrt def getchar(): return msvcrt.getch() except ImportError: # Unix getchr import tty, termios def getchar(): fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class LogCase(object): """unittest.TestCase mixin for testing log messages. logfile: a filename for the desired log. Yes, I know modes are evil, but it makes the test functions so much cleaner to set this once. lastmarker: the last marker in the log. This can be used to search for messages since the last marker. markerPrefix: a string with which to prefix log markers. This should be unique enough from normal log output to use for marker identification. """ logfile = None lastmarker = None markerPrefix = "test suite marker: " def _handleLogError(self, msg, data, marker, pattern): print("") print(" ERROR: %s" % msg) if not self.interactive: raise self.failureException(msg) p = " Show: [L]og [M]arker [P]attern; [I]gnore, [R]aise, or sys.e[X]it >> " print p, # ARGH sys.stdout.flush() while True: i = getchar().upper() if i not in "MPLIRX": continue print(i.upper()) # Also prints new line if i == "L": for x, line in enumerate(data): if (x + 1) % self.console_height == 0: # The \r and comma should make the next line overwrite print "<-- More -->\r", m = getchar().lower() # Erase our "More" prompt print " \r", if m == "q": break print(line.rstrip()) elif i == "M": print(repr(marker or self.lastmarker)) elif i == "P": print(repr(pattern)) elif i == "I": # return without raising the normal exception return elif i == "R": raise self.failureException(msg) elif i == "X": self.exit() print p, def exit(self): sys.exit() def emptyLog(self): """Overwrite self.logfile with 0 bytes.""" open(self.logfile, 
'wb').write("") def markLog(self, key=None): """Insert a marker line into the log and set self.lastmarker.""" if key is None: key = str(time.time()) self.lastmarker = key open(self.logfile, 'ab+').write("%s%s\n" % (self.markerPrefix, key)) def _read_marked_region(self, marker=None): """Return lines from self.logfile in the marked region. If marker is None, self.lastmarker is used. If the log hasn't been marked (using self.markLog), the entire log will be returned. """ ## # Give the logger time to finish writing? ## time.sleep(0.5) logfile = self.logfile marker = marker or self.lastmarker if marker is None: return open(logfile, 'rb').readlines() data = [] in_region = False for line in open(logfile, 'rb'): if in_region: if (line.startswith(self.markerPrefix) and not marker in line): break else: data.append(line) elif marker in line: in_region = True return data def assertInLog(self, line, marker=None): """Fail if the given (partial) line is not in the log. The log will be searched from the given marker to the next marker. If marker is None, self.lastmarker is used. If the log hasn't been marked (using self.markLog), the entire log will be searched. """ data = self._read_marked_region(marker) for logline in data: if line in logline: return msg = "%r not found in log" % line self._handleLogError(msg, data, marker, line) def assertNotInLog(self, line, marker=None): """Fail if the given (partial) line is in the log. The log will be searched from the given marker to the next marker. If marker is None, self.lastmarker is used. If the log hasn't been marked (using self.markLog), the entire log will be searched. """ data = self._read_marked_region(marker) for logline in data: if line in logline: msg = "%r found in log" % line self._handleLogError(msg, data, marker, line) def assertLog(self, sliceargs, lines, marker=None): """Fail if log.readlines()[sliceargs] is not contained in 'lines'. The log will be searched from the given marker to the next marker. 
If marker is None, self.lastmarker is used. If the log hasn't been marked (using self.markLog), the entire log will be searched. """ data = self._read_marked_region(marker) if isinstance(sliceargs, int): # Single arg. Use __getitem__ and allow lines to be str or list. if isinstance(lines, (tuple, list)): lines = lines[0] if lines not in data[sliceargs]: msg = "%r not found on log line %r" % (lines, sliceargs) self._handleLogError(msg, [data[sliceargs]], marker, lines) else: # Multiple args. Use __getslice__ and require lines to be list. if isinstance(lines, tuple): lines = list(lines) elif isinstance(lines, basestring): raise TypeError("The 'lines' arg must be a list when " "'sliceargs' is a tuple.") start, stop = sliceargs for line, logline in zip(lines, data[start:stop]): if line not in logline: msg = "%r not found in log" % line self._handleLogError(msg, data[start:stop], marker, line)
6,611
Python
.py
152
31.302632
86
0.561131
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,904
test_refleaks.py
evilhero_mylar/lib/cherrypy/test/test_refleaks.py
"""Tests for refleaks.""" import gc from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob import threading import cherrypy from cherrypy import _cprequest data = object() def get_instances(cls): return [x for x in gc.get_objects() if isinstance(x, cls)] from cherrypy.test import helper class ReferenceTests(helper.CPWebCase): def setup_server(): class Root: def index(self, *args, **kwargs): cherrypy.request.thing = data return "Hello world!" index.exposed = True def gc_stats(self): output = ["Statistics:"] # Uncollectable garbage # gc_collect isn't perfectly synchronous, because it may # break reference cycles that then take time to fully # finalize. Call it twice and hope for the best. gc.collect() unreachable = gc.collect() if unreachable: output.append("\n%s unreachable objects:" % unreachable) trash = {} for x in gc.garbage: trash[type(x)] = trash.get(type(x), 0) + 1 trash = [(v, k) for k, v in trash.items()] trash.sort() for pair in trash: output.append(" " + repr(pair)) # Request references reqs = get_instances(_cprequest.Request) lenreqs = len(reqs) if lenreqs < 2: output.append("\nMissing Request reference. Should be 1 in " "this request thread and 1 in the main thread.") elif lenreqs > 2: output.append("\nToo many Request references (%r)." % lenreqs) for req in reqs: output.append("Referrers for %s:" % repr(req)) for ref in gc.get_referrers(req): if ref is not reqs: output.append(" %s" % repr(ref)) # Response references resps = get_instances(_cprequest.Response) lenresps = len(resps) if lenresps < 2: output.append("\nMissing Response reference. Should be 1 in " "this request thread and 1 in the main thread.") elif lenresps > 2: output.append("\nToo many Response references (%r)." 
% lenresps) for resp in resps: output.append("Referrers for %s:" % repr(resp)) for ref in gc.get_referrers(resp): if ref is not resps: output.append(" %s" % repr(ref)) return "\n".join(output) gc_stats.exposed = True cherrypy.tree.mount(Root()) setup_server = staticmethod(setup_server) def test_threadlocal_garbage(self): success = [] def getpage(): host = '%s:%s' % (self.interface(), self.PORT) if self.scheme == 'https': c = HTTPSConnection(host) else: c = HTTPConnection(host) try: c.putrequest('GET', '/') c.endheaders() response = c.getresponse() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello world!")) finally: c.close() success.append(True) ITERATIONS = 25 ts = [] for _ in range(ITERATIONS): t = threading.Thread(target=getpage) ts.append(t) t.start() for t in ts: t.join() self.assertEqual(len(success), ITERATIONS) self.getPage("/gc_stats") self.assertBody("Statistics:")
4,178
Python
.py
93
27.774194
84
0.496289
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,905
test_tutorials.py
evilhero_mylar/lib/cherrypy/test/test_tutorials.py
import sys import cherrypy from cherrypy.test import helper class TutorialTest(helper.CPWebCase): def setup_server(cls): conf = cherrypy.config.copy() def load_tut_module(name): """Import or reload tutorial module as needed.""" cherrypy.config.reset() cherrypy.config.update(conf) target = "cherrypy.tutorial." + name if target in sys.modules: module = reload(sys.modules[target]) else: module = __import__(target, globals(), locals(), ['']) # The above import will probably mount a new app at "". app = cherrypy.tree.apps[""] app.root.load_tut_module = load_tut_module app.root.sessions = sessions app.root.traceback_setting = traceback_setting cls.supervisor.sync_apps() load_tut_module.exposed = True def sessions(): cherrypy.config.update({"tools.sessions.on": True}) sessions.exposed = True def traceback_setting(): return repr(cherrypy.request.show_tracebacks) traceback_setting.exposed = True class Dummy: pass root = Dummy() root.load_tut_module = load_tut_module cherrypy.tree.mount(root) setup_server = classmethod(setup_server) def test01HelloWorld(self): self.getPage("/load_tut_module/tut01_helloworld") self.getPage("/") self.assertBody('Hello world!') def test02ExposeMethods(self): self.getPage("/load_tut_module/tut02_expose_methods") self.getPage("/showMessage") self.assertBody('Hello world!') def test03GetAndPost(self): self.getPage("/load_tut_module/tut03_get_and_post") # Try different GET queries self.getPage("/greetUser?name=Bob") self.assertBody("Hey Bob, what's up?") self.getPage("/greetUser") self.assertBody('Please enter your name <a href="./">here</a>.') self.getPage("/greetUser?name=") self.assertBody('No, really, enter your name <a href="./">here</a>.') # Try the same with POST self.getPage("/greetUser", method="POST", body="name=Bob") self.assertBody("Hey Bob, what's up?") self.getPage("/greetUser", method="POST", body="name=") self.assertBody('No, really, enter your name <a href="./">here</a>.') def test04ComplexSite(self): 
self.getPage("/load_tut_module/tut04_complex_site") msg = ''' <p>Here are some extra useful links:</p> <ul> <li><a href="http://del.icio.us">del.icio.us</a></li> <li><a href="http://www.mornography.de">Hendrik's weblog</a></li> </ul> <p>[<a href="../">Return to links page</a>]</p>''' self.getPage("/links/extra/") self.assertBody(msg) def test05DerivedObjects(self): self.getPage("/load_tut_module/tut05_derived_objects") msg = ''' <html> <head> <title>Another Page</title> <head> <body> <h2>Another Page</h2> <p> And this is the amazing second page! </p> </body> </html> ''' self.getPage("/another/") self.assertBody(msg) def test06DefaultMethod(self): self.getPage("/load_tut_module/tut06_default_method") self.getPage('/hendrik') self.assertBody('Hendrik Mans, CherryPy co-developer & crazy German ' '(<a href="./">back</a>)') def test07Sessions(self): self.getPage("/load_tut_module/tut07_sessions") self.getPage("/sessions") self.getPage('/') self.assertBody("\n During your current session, you've viewed this" "\n page 1 times! Your life is a patio of fun!" "\n ") self.getPage('/', self.cookies) self.assertBody("\n During your current session, you've viewed this" "\n page 2 times! Your life is a patio of fun!" 
"\n ") def test08GeneratorsAndYield(self): self.getPage("/load_tut_module/tut08_generators_and_yield") self.getPage('/') self.assertBody('<html><body><h2>Generators rule!</h2>' '<h3>List of users:</h3>' 'Remi<br/>Carlos<br/>Hendrik<br/>Lorenzo Lamas<br/>' '</body></html>') def test09Files(self): self.getPage("/load_tut_module/tut09_files") # Test upload filesize = 5 h = [("Content-type", "multipart/form-data; boundary=x"), ("Content-Length", str(105 + filesize))] b = '--x\n' + \ 'Content-Disposition: form-data; name="myFile"; filename="hello.txt"\r\n' + \ 'Content-Type: text/plain\r\n' + \ '\r\n' + \ 'a' * filesize + '\n' + \ '--x--\n' self.getPage('/upload', h, "POST", b) self.assertBody('''<html> <body> myFile length: %d<br /> myFile filename: hello.txt<br /> myFile mime-type: text/plain </body> </html>''' % filesize) # Test download self.getPage('/download') self.assertStatus("200 OK") self.assertHeader("Content-Type", "application/x-download") self.assertHeader("Content-Disposition", # Make sure the filename is quoted. 
'attachment; filename="pdf_file.pdf"') self.assertEqual(len(self.body), 85698) def test10HTTPErrors(self): self.getPage("/load_tut_module/tut10_http_errors") self.getPage("/") self.assertInBody("""<a href="toggleTracebacks">""") self.assertInBody("""<a href="/doesNotExist">""") self.assertInBody("""<a href="/error?code=403">""") self.assertInBody("""<a href="/error?code=500">""") self.assertInBody("""<a href="/messageArg">""") self.getPage("/traceback_setting") setting = self.body self.getPage("/toggleTracebacks") self.assertStatus((302, 303)) self.getPage("/traceback_setting") self.assertBody(str(not eval(setting))) self.getPage("/error?code=500") self.assertStatus(500) self.assertInBody("The server encountered an unexpected condition " "which prevented it from fulfilling the request.") self.getPage("/error?code=403") self.assertStatus(403) self.assertInBody("<h2>You can't do that!</h2>") self.getPage("/messageArg") self.assertStatus(500) self.assertInBody("If you construct an HTTPError with a 'message'")
7,190
Python
.py
159
32.396226
89
0.560446
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,906
test_mime.py
evilhero_mylar/lib/cherrypy/test/test_mime.py
"""Tests for various MIME issues, including the safe_multipart Tool.""" import cherrypy from cherrypy._cpcompat import ntob, ntou, sorted def setup_server(): class Root: def multipart(self, parts): return repr(parts) multipart.exposed = True def multipart_form_data(self, **kwargs): return repr(list(sorted(kwargs.items()))) multipart_form_data.exposed = True def flashupload(self, Filedata, Upload, Filename): return ("Upload: %r, Filename: %r, Filedata: %r" % (Upload, Filename, Filedata.file.read())) flashupload.exposed = True cherrypy.config.update({'server.max_request_body_size': 0}) cherrypy.tree.mount(Root()) # Client-side code # from cherrypy.test import helper class MultipartTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_multipart(self): text_part = ntou("This is the text version") html_part = ntou("""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> <html> <head> <meta content="text/html;charset=ISO-8859-1" http-equiv="Content-Type"> </head> <body bgcolor="#ffffff" text="#000000"> This is the <strong>HTML</strong> version </body> </html> """) body = '\r\n'.join([ "--123456789", "Content-Type: text/plain; charset='ISO-8859-1'", "Content-Transfer-Encoding: 7bit", "", text_part, "--123456789", "Content-Type: text/html; charset='ISO-8859-1'", "", html_part, "--123456789--"]) headers = [ ('Content-Type', 'multipart/mixed; boundary=123456789'), ('Content-Length', str(len(body))), ] self.getPage('/multipart', headers, "POST", body) self.assertBody(repr([text_part, html_part])) def test_multipart_form_data(self): body='\r\n'.join(['--X', 'Content-Disposition: form-data; name="foo"', '', 'bar', '--X', # Test a param with more than one value. 
# See http://www.cherrypy.org/ticket/1028 'Content-Disposition: form-data; name="baz"', '', '111', '--X', 'Content-Disposition: form-data; name="baz"', '', '333', '--X--']) self.getPage('/multipart_form_data', method='POST', headers=[("Content-Type", "multipart/form-data;boundary=X"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(repr([('baz', [u'111', u'333']), ('foo', u'bar')])) class SafeMultipartHandlingTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_Flash_Upload(self): headers = [ ('Accept', 'text/*'), ('Content-Type', 'multipart/form-data; ' 'boundary=----------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6'), ('User-Agent', 'Shockwave Flash'), ('Host', 'www.example.com:8080'), ('Content-Length', '499'), ('Connection', 'Keep-Alive'), ('Cache-Control', 'no-cache'), ] filedata = ntob('<?xml version="1.0" encoding="UTF-8"?>\r\n' '<projectDescription>\r\n' '</projectDescription>\r\n') body = (ntob( '------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n' 'Content-Disposition: form-data; name="Filename"\r\n' '\r\n' '.project\r\n' '------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n' 'Content-Disposition: form-data; ' 'name="Filedata"; filename=".project"\r\n' 'Content-Type: application/octet-stream\r\n' '\r\n') + filedata + ntob('\r\n' '------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6\r\n' 'Content-Disposition: form-data; name="Upload"\r\n' '\r\n' 'Submit Query\r\n' # Flash apps omit the trailing \r\n on the last line: '------------KM7Ij5cH2KM7Ef1gL6ae0ae0cH2gL6--' )) self.getPage('/flashupload', headers, "POST", body) self.assertBody("Upload: u'Submit Query', Filename: u'.project', " "Filedata: %r" % filedata)
4,752
Python
.py
110
30.063636
91
0.503927
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,907
test_session.py
evilhero_mylar/lib/cherrypy/test/test_session.py
import os localDir = os.path.dirname(__file__) import sys import threading import time import cherrypy from cherrypy._cpcompat import copykeys, HTTPConnection, HTTPSConnection from cherrypy.lib import sessions from cherrypy.lib.httputil import response_codes def http_methods_allowed(methods=['GET', 'HEAD']): method = cherrypy.request.method.upper() if method not in methods: cherrypy.response.headers['Allow'] = ", ".join(methods) raise cherrypy.HTTPError(405) cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed) def setup_server(): class Root: _cp_config = {'tools.sessions.on': True, 'tools.sessions.storage_type' : 'ram', 'tools.sessions.storage_path' : localDir, 'tools.sessions.timeout': (1.0 / 60), 'tools.sessions.clean_freq': (1.0 / 60), } def clear(self): cherrypy.session.cache.clear() clear.exposed = True def data(self): cherrypy.session['aha'] = 'foo' return repr(cherrypy.session._data) data.exposed = True def testGen(self): counter = cherrypy.session.get('counter', 0) + 1 cherrypy.session['counter'] = counter yield str(counter) testGen.exposed = True def testStr(self): counter = cherrypy.session.get('counter', 0) + 1 cherrypy.session['counter'] = counter return str(counter) testStr.exposed = True def setsessiontype(self, newtype): self.__class__._cp_config.update({'tools.sessions.storage_type': newtype}) if hasattr(cherrypy, "session"): del cherrypy.session cls = getattr(sessions, newtype.title() + 'Session') if cls.clean_thread: cls.clean_thread.stop() cls.clean_thread.unsubscribe() del cls.clean_thread setsessiontype.exposed = True setsessiontype._cp_config = {'tools.sessions.on': False} def index(self): sess = cherrypy.session c = sess.get('counter', 0) + 1 time.sleep(0.01) sess['counter'] = c return str(c) index.exposed = True def keyin(self, key): return str(key in cherrypy.session) keyin.exposed = True def delete(self): cherrypy.session.delete() sessions.expire() return "done" delete.exposed = True def delkey(self, key): del 
cherrypy.session[key] return "OK" delkey.exposed = True def blah(self): return self._cp_config['tools.sessions.storage_type'] blah.exposed = True def iredir(self): raise cherrypy.InternalRedirect('/blah') iredir.exposed = True def restricted(self): return cherrypy.request.method restricted.exposed = True restricted._cp_config = {'tools.allow.on': True, 'tools.allow.methods': ['GET']} def regen(self): cherrypy.tools.sessions.regenerate() return "logged in" regen.exposed = True def length(self): return str(len(cherrypy.session)) length.exposed = True def session_cookie(self): # Must load() to start the clean thread. cherrypy.session.load() return cherrypy.session.id session_cookie.exposed = True session_cookie._cp_config = { 'tools.sessions.path': '/session_cookie', 'tools.sessions.name': 'temp', 'tools.sessions.persistent': False} cherrypy.tree.mount(Root()) from cherrypy.test import helper class SessionTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def tearDown(self): # Clean up sessions. for fname in os.listdir(localDir): if fname.startswith(sessions.FileSession.SESSION_PREFIX): os.unlink(os.path.join(localDir, fname)) def test_0_Session(self): self.getPage('/setsessiontype/ram') self.getPage('/clear') # Test that a normal request gets the same id in the cookies. # Note: this wouldn't work if /data didn't load the session. 
self.getPage('/data') self.assertBody("{'aha': 'foo'}") c = self.cookies[0] self.getPage('/data', self.cookies) self.assertEqual(self.cookies[0], c) self.getPage('/testStr') self.assertBody('1') cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")]) # Assert there is an 'expires' param self.assertEqual(set(cookie_parts.keys()), set(['session_id', 'expires', 'Path'])) self.getPage('/testGen', self.cookies) self.assertBody('2') self.getPage('/testStr', self.cookies) self.assertBody('3') self.getPage('/data', self.cookies) self.assertBody("{'aha': 'foo', 'counter': 3}") self.getPage('/length', self.cookies) self.assertBody('2') self.getPage('/delkey?key=counter', self.cookies) self.assertStatus(200) self.getPage('/setsessiontype/file') self.getPage('/testStr') self.assertBody('1') self.getPage('/testGen', self.cookies) self.assertBody('2') self.getPage('/testStr', self.cookies) self.assertBody('3') self.getPage('/delkey?key=counter', self.cookies) self.assertStatus(200) # Wait for the session.timeout (1 second) time.sleep(2) self.getPage('/') self.assertBody('1') self.getPage('/length', self.cookies) self.assertBody('1') # Test session __contains__ self.getPage('/keyin?key=counter', self.cookies) self.assertBody("True") cookieset1 = self.cookies # Make a new session and test __len__ again self.getPage('/') self.getPage('/length', self.cookies) self.assertBody('2') # Test session delete self.getPage('/delete', self.cookies) self.assertBody("done") self.getPage('/delete', cookieset1) self.assertBody("done") f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')] self.assertEqual(f(), []) # Wait for the cleanup thread to delete remaining session files self.getPage('/') f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')] self.assertNotEqual(f(), []) time.sleep(2) self.assertEqual(f(), []) def test_1_Ram_Concurrency(self): self.getPage('/setsessiontype/ram') self._test_Concurrency() def 
test_2_File_Concurrency(self): self.getPage('/setsessiontype/file') self._test_Concurrency() def _test_Concurrency(self): client_thread_count = 5 request_count = 30 # Get initial cookie self.getPage("/") self.assertBody("1") cookies = self.cookies data_dict = {} errors = [] def request(index): if self.scheme == 'https': c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) for i in range(request_count): c.putrequest('GET', '/') for k, v in cookies: c.putheader(k, v) c.endheaders() response = c.getresponse() body = response.read() if response.status != 200 or not body.isdigit(): errors.append((response.status, body)) else: data_dict[index] = max(data_dict[index], int(body)) # Uncomment the following line to prove threads overlap. ## sys.stdout.write("%d " % index) # Start <request_count> requests from each of # <client_thread_count> concurrent clients ts = [] for c in range(client_thread_count): data_dict[c] = 0 t = threading.Thread(target=request, args=(c,)) ts.append(t) t.start() for t in ts: t.join() hitcount = max(data_dict.values()) expected = 1 + (client_thread_count * request_count) for e in errors: print(e) self.assertEqual(hitcount, expected) def test_3_Redirect(self): # Start a new session self.getPage('/testStr') self.getPage('/iredir', self.cookies) self.assertBody("file") def test_4_File_deletion(self): # Start a new session self.getPage('/testStr') # Delete the session file manually and retry. id = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1] path = os.path.join(localDir, "session-" + id) os.unlink(path) self.getPage('/testStr', self.cookies) def test_5_Error_paths(self): self.getPage('/unknown/page') self.assertErrorPage(404, "The path '/unknown/page' was not found.") # Note: this path is *not* the same as above. 
The above # takes a normal route through the session code; this one # skips the session code's before_handler and only calls # before_finalize (save) and on_end (close). So the session # code has to survive calling save/close without init. self.getPage('/restricted', self.cookies, method='POST') self.assertErrorPage(405, response_codes[405]) def test_6_regenerate(self): self.getPage('/testStr') # grab the cookie ID id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1] self.getPage('/regen') self.assertBody('logged in') id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1] self.assertNotEqual(id1, id2) self.getPage('/testStr') # grab the cookie ID id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1] self.getPage('/testStr', headers=[('Cookie', 'session_id=maliciousid; ' 'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')]) id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1] self.assertNotEqual(id1, id2) self.assertNotEqual(id2, 'maliciousid') def test_7_session_cookies(self): self.getPage('/setsessiontype/ram') self.getPage('/clear') self.getPage('/session_cookie') # grab the cookie ID cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")]) # Assert there is no 'expires' param self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path'])) id1 = cookie_parts['temp'] self.assertEqual(copykeys(sessions.RamSession.cache), [id1]) # Send another request in the same "browser session". 
self.getPage('/session_cookie', self.cookies) cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")]) # Assert there is no 'expires' param self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path'])) self.assertBody(id1) self.assertEqual(copykeys(sessions.RamSession.cache), [id1]) # Simulate a browser close by just not sending the cookies self.getPage('/session_cookie') # grab the cookie ID cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")]) # Assert there is no 'expires' param self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path'])) # Assert a new id has been generated... id2 = cookie_parts['temp'] self.assertNotEqual(id1, id2) self.assertEqual(set(sessions.RamSession.cache.keys()), set([id1, id2])) # Wait for the session.timeout on both sessions time.sleep(2.5) cache = copykeys(sessions.RamSession.cache) if cache: if cache == [id2]: self.fail("The second session did not time out.") else: self.fail("Unknown session id in cache: %r", cache) import socket try: import memcache host, port = '127.0.0.1', 11211 for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() raise break except (ImportError, socket.error): class MemcachedSessionTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def test(self): return self.skip("memcached not reachable ") else: class MemcachedSessionTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_0_Session(self): self.getPage('/setsessiontype/memcached') self.getPage('/testStr') self.assertBody('1') self.getPage('/testGen', self.cookies) self.assertBody('2') self.getPage('/testStr', self.cookies) self.assertBody('3') self.getPage('/length', 
self.cookies) self.assertErrorPage(500) self.assertInBody("NotImplementedError") self.getPage('/delkey?key=counter', self.cookies) self.assertStatus(200) # Wait for the session.timeout (1 second) time.sleep(1.25) self.getPage('/') self.assertBody('1') # Test session __contains__ self.getPage('/keyin?key=counter', self.cookies) self.assertBody("True") # Test session delete self.getPage('/delete', self.cookies) self.assertBody("done") def test_1_Concurrency(self): client_thread_count = 5 request_count = 30 # Get initial cookie self.getPage("/") self.assertBody("1") cookies = self.cookies data_dict = {} def request(index): for i in range(request_count): self.getPage("/", cookies) # Uncomment the following line to prove threads overlap. ## sys.stdout.write("%d " % index) if not self.body.isdigit(): self.fail(self.body) data_dict[index] = v = int(self.body) # Start <request_count> concurrent requests from # each of <client_thread_count> clients ts = [] for c in range(client_thread_count): data_dict[c] = 0 t = threading.Thread(target=request, args=(c,)) ts.append(t) t.start() for t in ts: t.join() hitcount = max(data_dict.values()) expected = 1 + (client_thread_count * request_count) self.assertEqual(hitcount, expected) def test_3_Redirect(self): # Start a new session self.getPage('/testStr') self.getPage('/iredir', self.cookies) self.assertBody("memcached") def test_5_Error_paths(self): self.getPage('/unknown/page') self.assertErrorPage(404, "The path '/unknown/page' was not found.") # Note: this path is *not* the same as above. The above # takes a normal route through the session code; this one # skips the session code's before_handler and only calls # before_finalize (save) and on_end (close). So the session # code has to survive calling save/close without init. self.getPage('/restricted', self.cookies, method='POST') self.assertErrorPage(405, response_codes[405])
16,945
Python
.py
388
31.378866
90
0.571545
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,908
benchmark.py
evilhero_mylar/lib/cherrypy/test/benchmark.py
"""CherryPy Benchmark Tool Usage: benchmark.py --null --notests --help --cpmodpy --modpython --ab=path --apache=path --null: use a null Request object (to bench the HTTP server only) --notests: start the server but do not run the tests; this allows you to check the tested pages with a browser --help: show this help message --cpmodpy: run tests via apache on 8080 (with the builtin _cpmodpy) --modpython: run tests via apache on 8080 (with modpython_gateway) --ab=path: Use the ab script/executable at 'path' (see below) --apache=path: Use the apache script/exe at 'path' (see below) To run the benchmarks, the Apache Benchmark tool "ab" must either be on your system path, or specified via the --ab=path option. To run the modpython tests, the "apache" executable or script must be on your system path, or provided via the --apache=path option. On some platforms, "apache" may be called "apachectl" or "apache2ctl"--create a symlink to them if needed. """ import getopt import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import sys import time import traceback import cherrypy from cherrypy._cpcompat import ntob from cherrypy import _cperror, _cpmodpy from cherrypy.lib import httputil AB_PATH = "" APACHE_PATH = "apache" SCRIPT_NAME = "/cpbench/users/rdelon/apps/blog" __all__ = ['ABSession', 'Root', 'print_report', 'run_standard_benchmarks', 'safe_threads', 'size_report', 'startup', 'thread_report', ] size_cache = {} class Root: def index(self): return """<html> <head> <title>CherryPy Benchmark</title> </head> <body> <ul> <li><a href="hello">Hello, world! 
(14 byte dynamic)</a></li> <li><a href="static/index.html">Static file (14 bytes static)</a></li> <li><form action="sizer">Response of length: <input type='text' name='size' value='10' /></form> </li> </ul> </body> </html>""" index.exposed = True def hello(self): return "Hello, world\r\n" hello.exposed = True def sizer(self, size): resp = size_cache.get(size, None) if resp is None: size_cache[size] = resp = "X" * int(size) return resp sizer.exposed = True cherrypy.config.update({ 'log.error.file': '', 'environment': 'production', 'server.socket_host': '127.0.0.1', 'server.socket_port': 8080, 'server.max_request_header_size': 0, 'server.max_request_body_size': 0, 'engine.deadlock_poll_freq': 0, }) # Cheat mode on ;) del cherrypy.config['tools.log_tracebacks.on'] del cherrypy.config['tools.log_headers.on'] del cherrypy.config['tools.trailing_slash.on'] appconf = { '/static': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'static', 'tools.staticdir.root': curdir, }, } app = cherrypy.tree.mount(Root(), SCRIPT_NAME, appconf) class NullRequest: """A null HTTP request class, returning 200 and an empty body.""" def __init__(self, local, remote, scheme="http"): pass def close(self): pass def run(self, method, path, query_string, protocol, headers, rfile): cherrypy.response.status = "200 OK" cherrypy.response.header_list = [("Content-Type", 'text/html'), ("Server", "Null CherryPy"), ("Date", httputil.HTTPDate()), ("Content-Length", "0"), ] cherrypy.response.body = [""] return cherrypy.response class NullResponse: pass class ABSession: """A session of 'ab', the Apache HTTP server benchmarking tool. 
Example output from ab: This is ApacheBench, Version 2.0.40-dev <$Revision: 1.121.2.1 $> apache-2.0 Copyright (c) 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ Copyright (c) 1998-2002 The Apache Software Foundation, http://www.apache.org/ Benchmarking 127.0.0.1 (be patient) Completed 100 requests Completed 200 requests Completed 300 requests Completed 400 requests Completed 500 requests Completed 600 requests Completed 700 requests Completed 800 requests Completed 900 requests Server Software: CherryPy/3.1beta Server Hostname: 127.0.0.1 Server Port: 8080 Document Path: /static/index.html Document Length: 14 bytes Concurrency Level: 10 Time taken for tests: 9.643867 seconds Complete requests: 1000 Failed requests: 0 Write errors: 0 Total transferred: 189000 bytes HTML transferred: 14000 bytes Requests per second: 103.69 [#/sec] (mean) Time per request: 96.439 [ms] (mean) Time per request: 9.644 [ms] (mean, across all concurrent requests) Transfer rate: 19.08 [Kbytes/sec] received Connection Times (ms) min mean[+/-sd] median max Connect: 0 0 2.9 0 10 Processing: 20 94 7.3 90 130 Waiting: 0 43 28.1 40 100 Total: 20 95 7.3 100 130 Percentage of the requests served within a certain time (ms) 50% 100 66% 100 75% 100 80% 100 90% 100 95% 100 98% 100 99% 110 100% 130 (longest request) Finished 1000 requests """ parse_patterns = [('complete_requests', 'Completed', ntob(r'^Complete requests:\s*(\d+)')), ('failed_requests', 'Failed', ntob(r'^Failed requests:\s*(\d+)')), ('requests_per_second', 'req/sec', ntob(r'^Requests per second:\s*([0-9.]+)')), ('time_per_request_concurrent', 'msec/req', ntob(r'^Time per request:\s*([0-9.]+).*concurrent requests\)$')), ('transfer_rate', 'KB/sec', ntob(r'^Transfer rate:\s*([0-9.]+)')), ] def __init__(self, path=SCRIPT_NAME + "/hello", requests=1000, concurrency=10): self.path = path self.requests = requests self.concurrency = concurrency def args(self): port = cherrypy.server.socket_port assert self.concurrency > 0 assert 
self.requests > 0 # Don't use "localhost". # Cf http://mail.python.org/pipermail/python-win32/2008-March/007050.html return ("-k -n %s -c %s http://127.0.0.1:%s%s" % (self.requests, self.concurrency, port, self.path)) def run(self): # Parse output of ab, setting attributes on self try: self.output = _cpmodpy.read_process(AB_PATH or "ab", self.args()) except: print(_cperror.format_exc()) raise for attr, name, pattern in self.parse_patterns: val = re.search(pattern, self.output, re.MULTILINE) if val: val = val.group(1) setattr(self, attr, val) else: setattr(self, attr, None) safe_threads = (25, 50, 100, 200, 400) if sys.platform in ("win32",): # For some reason, ab crashes with > 50 threads on my Win2k laptop. safe_threads = (10, 20, 30, 40, 50) def thread_report(path=SCRIPT_NAME + "/hello", concurrency=safe_threads): sess = ABSession(path) attrs, names, patterns = list(zip(*sess.parse_patterns)) avg = dict.fromkeys(attrs, 0.0) yield ('threads',) + names for c in concurrency: sess.concurrency = c sess.run() row = [c] for attr in attrs: val = getattr(sess, attr) if val is None: print(sess.output) row = None break val = float(val) avg[attr] += float(val) row.append(val) if row: yield row # Add a row of averages. 
yield ["Average"] + [str(avg[attr] / len(concurrency)) for attr in attrs] def size_report(sizes=(10, 100, 1000, 10000, 100000, 100000000), concurrency=50): sess = ABSession(concurrency=concurrency) attrs, names, patterns = list(zip(*sess.parse_patterns)) yield ('bytes',) + names for sz in sizes: sess.path = "%s/sizer?size=%s" % (SCRIPT_NAME, sz) sess.run() yield [sz] + [getattr(sess, attr) for attr in attrs] def print_report(rows): for row in rows: print("") for i, val in enumerate(row): sys.stdout.write(str(val).rjust(10) + " | ") print("") def run_standard_benchmarks(): print("") print("Client Thread Report (1000 requests, 14 byte response body, " "%s server threads):" % cherrypy.server.thread_pool) print_report(thread_report()) print("") print("Client Thread Report (1000 requests, 14 bytes via staticdir, " "%s server threads):" % cherrypy.server.thread_pool) print_report(thread_report("%s/static/index.html" % SCRIPT_NAME)) print("") print("Size Report (1000 requests, 50 client threads, " "%s server threads):" % cherrypy.server.thread_pool) print_report(size_report()) # modpython and other WSGI # def startup_modpython(req=None): """Start the CherryPy app server in 'serverless' mode (for modpython/WSGI).""" if cherrypy.engine.state == cherrypy._cpengine.STOPPED: if req: if "nullreq" in req.get_options(): cherrypy.engine.request_class = NullRequest cherrypy.engine.response_class = NullResponse ab_opt = req.get_options().get("ab", "") if ab_opt: global AB_PATH AB_PATH = ab_opt cherrypy.engine.start() if cherrypy.engine.state == cherrypy._cpengine.STARTING: cherrypy.engine.wait() return 0 # apache.OK def run_modpython(use_wsgi=False): print("Starting mod_python...") pyopts = [] # Pass the null and ab=path options through Apache if "--null" in opts: pyopts.append(("nullreq", "")) if "--ab" in opts: pyopts.append(("ab", opts["--ab"])) s = _cpmodpy.ModPythonServer if use_wsgi: pyopts.append(("wsgi.application", "cherrypy::tree")) pyopts.append(("wsgi.startup", 
"cherrypy.test.benchmark::startup_modpython")) handler = "modpython_gateway::handler" s = s(port=8080, opts=pyopts, apache_path=APACHE_PATH, handler=handler) else: pyopts.append(("cherrypy.setup", "cherrypy.test.benchmark::startup_modpython")) s = s(port=8080, opts=pyopts, apache_path=APACHE_PATH) try: s.start() run() finally: s.stop() if __name__ == '__main__': longopts = ['cpmodpy', 'modpython', 'null', 'notests', 'help', 'ab=', 'apache='] try: switches, args = getopt.getopt(sys.argv[1:], "", longopts) opts = dict(switches) except getopt.GetoptError: print(__doc__) sys.exit(2) if "--help" in opts: print(__doc__) sys.exit(0) if "--ab" in opts: AB_PATH = opts['--ab'] if "--notests" in opts: # Return without stopping the server, so that the pages # can be tested from a standard web browser. def run(): port = cherrypy.server.socket_port print("You may now open http://127.0.0.1:%s%s/" % (port, SCRIPT_NAME)) if "--null" in opts: print("Using null Request object") else: def run(): end = time.time() - start print("Started in %s seconds" % end) if "--null" in opts: print("\nUsing null Request object") try: try: run_standard_benchmarks() except: print(_cperror.format_exc()) raise finally: cherrypy.engine.exit() print("Starting CherryPy app server...") class NullWriter(object): """Suppresses the printing of socket errors.""" def write(self, data): pass sys.stderr = NullWriter() start = time.time() if "--cpmodpy" in opts: run_modpython() elif "--modpython" in opts: run_modpython(use_wsgi=True) else: if "--null" in opts: cherrypy.server.request_class = NullRequest cherrypy.server.response_class = NullResponse cherrypy.engine.start_with_callback(run) cherrypy.engine.block()
12,824
Python
.py
334
30.167665
90
0.590631
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,909
test_wsgiapps.py
evilhero_mylar/lib/cherrypy/test/test_wsgiapps.py
from cherrypy.test import helper class WSGIGraftTests(helper.CPWebCase): def setup_server(): import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import cherrypy def test_app(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) output = ['Hello, world!\n', 'This is a wsgi app running within CherryPy!\n\n'] keys = list(environ.keys()) keys.sort() for k in keys: output.append('%s: %s\n' % (k,environ[k])) return output def test_empty_string_app(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) return ['Hello', '', ' ', '', 'world'] class WSGIResponse(object): def __init__(self, appresults): self.appresults = appresults self.iter = iter(appresults) def __iter__(self): return self def next(self): return self.iter.next() def close(self): if hasattr(self.appresults, "close"): self.appresults.close() class ReversingMiddleware(object): def __init__(self, app): self.app = app def __call__(self, environ, start_response): results = app(environ, start_response) class Reverser(WSGIResponse): def next(this): line = list(this.iter.next()) line.reverse() return "".join(line) return Reverser(results) class Root: def index(self): return "I'm a regular CherryPy page handler!" index.exposed = True cherrypy.tree.mount(Root()) cherrypy.tree.graft(test_app, '/hosted/app1') cherrypy.tree.graft(test_empty_string_app, '/hosted/app3') # Set script_name explicitly to None to signal CP that it should # be pulled from the WSGI environ each time. app = cherrypy.Application(Root(), script_name=None) cherrypy.tree.graft(ReversingMiddleware(app), '/hosted/app2') setup_server = staticmethod(setup_server) wsgi_output = '''Hello, world! 
This is a wsgi app running within CherryPy!''' def test_01_standard_app(self): self.getPage("/") self.assertBody("I'm a regular CherryPy page handler!") def test_04_pure_wsgi(self): import cherrypy if not cherrypy.server.using_wsgi: return self.skip("skipped (not using WSGI)... ") self.getPage("/hosted/app1") self.assertHeader("Content-Type", "text/plain") self.assertInBody(self.wsgi_output) def test_05_wrapped_cp_app(self): import cherrypy if not cherrypy.server.using_wsgi: return self.skip("skipped (not using WSGI)... ") self.getPage("/hosted/app2/") body = list("I'm a regular CherryPy page handler!") body.reverse() body = "".join(body) self.assertInBody(body) def test_06_empty_string_app(self): import cherrypy if not cherrypy.server.using_wsgi: return self.skip("skipped (not using WSGI)... ") self.getPage("/hosted/app3") self.assertHeader("Content-Type", "text/plain") self.assertInBody('Hello world')
3,819
Python
.py
84
31.547619
72
0.568722
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,910
webtest.py
evilhero_mylar/lib/cherrypy/test/webtest.py
"""Extensions to unittest for web frameworks. Use the WebCase.getPage method to request a page from your HTTP server. Framework Integration ===================== If you have control over your server process, you can handle errors in the server-side of the HTTP conversation a bit better. You must run both the client (your WebCase tests) and the server in the same process (but in separate threads, obviously). When an error occurs in the framework, call server_error. It will print the traceback to stdout, and keep any assertions you have from running (the assumption is that, if the server errors, the page output will not be of further significance to your tests). """ import os import pprint import re import socket import sys import time import traceback import types from unittest import * from unittest import _TextTestResult from cherrypy._cpcompat import basestring, HTTPConnection, HTTPSConnection, unicodestr def interface(host): """Return an IP address for a client connection given the server host. If the server is listening on '0.0.0.0' (INADDR_ANY) or '::' (IN6ADDR_ANY), this will return the proper localhost.""" if host == '0.0.0.0': # INADDR_ANY, which should respond on localhost. return "127.0.0.1" if host == '::': # IN6ADDR_ANY, which should respond on localhost. return "::1" return host class TerseTestResult(_TextTestResult): def printErrors(self): # Overridden to avoid unnecessary empty line if self.errors or self.failures: if self.dots or self.showAll: self.stream.writeln() self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) class TerseTestRunner(TextTestRunner): """A test runner class that displays results in textual form.""" def _makeResult(self): return TerseTestResult(self.stream, self.descriptions, self.verbosity) def run(self, test): "Run the given test case or test suite." 
# Overridden to remove unnecessary empty lines and separators result = self._makeResult() test(result) result.printErrors() if not result.wasSuccessful(): self.stream.write("FAILED (") failed, errored = list(map(len, (result.failures, result.errors))) if failed: self.stream.write("failures=%d" % failed) if errored: if failed: self.stream.write(", ") self.stream.write("errors=%d" % errored) self.stream.writeln(")") return result class ReloadingTestLoader(TestLoader): def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. The name may resolve either to a module, a test case class, a test method within a test case class, or a callable object which returns a TestCase or TestSuite instance. The method optionally resolves the names relative to a given module. """ parts = name.split('.') unused_parts = [] if module is None: if not parts: raise ValueError("incomplete test name: %s" % name) else: parts_copy = parts[:] while parts_copy: target = ".".join(parts_copy) if target in sys.modules: module = reload(sys.modules[target]) parts = unused_parts break else: try: module = __import__(target) parts = unused_parts break except ImportError: unused_parts.insert(0,parts_copy[-1]) del parts_copy[-1] if not parts_copy: raise parts = parts[1:] obj = module for part in parts: obj = getattr(obj, part) if type(obj) == types.ModuleType: return self.loadTestsFromModule(obj) elif (isinstance(obj, (type, types.ClassType)) and issubclass(obj, TestCase)): return self.loadTestsFromTestCase(obj) elif type(obj) == types.UnboundMethodType: return obj.im_class(obj.__name__) elif hasattr(obj, '__call__'): test = obj() if not isinstance(test, TestCase) and \ not isinstance(test, TestSuite): raise ValueError("calling %s returned %s, " "not a test" % (obj,test)) return test else: raise ValueError("do not know how to make test from: %s" % obj) try: # Jython support if sys.platform[:4] == 'java': def getchar(): # Hopefully this is enough return 
sys.stdin.read(1) else: # On Windows, msvcrt.getch reads a single char without output. import msvcrt def getchar(): return msvcrt.getch() except ImportError: # Unix getchr import tty, termios def getchar(): fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch class WebCase(TestCase): HOST = "127.0.0.1" PORT = 8000 HTTP_CONN = HTTPConnection PROTOCOL = "HTTP/1.1" scheme = "http" url = None status = None headers = None body = None encoding = 'utf-8' time = None def get_conn(self, auto_open=False): """Return a connection to our HTTP server.""" if self.scheme == "https": cls = HTTPSConnection else: cls = HTTPConnection conn = cls(self.interface(), self.PORT) # Automatically re-connect? conn.auto_open = auto_open conn.connect() return conn def set_persistent(self, on=True, auto_open=False): """Make our HTTP_CONN persistent (or not). If the 'on' argument is True (the default), then self.HTTP_CONN will be set to an instance of HTTPConnection (or HTTPS if self.scheme is "https"). This will then persist across requests. We only allow for a single open connection, so if you call this and we currently have an open connection, it will be closed. """ try: self.HTTP_CONN.close() except (TypeError, AttributeError): pass if on: self.HTTP_CONN = self.get_conn(auto_open=auto_open) else: if self.scheme == "https": self.HTTP_CONN = HTTPSConnection else: self.HTTP_CONN = HTTPConnection def _get_persistent(self): return hasattr(self.HTTP_CONN, "__class__") def _set_persistent(self, on): self.set_persistent(on) persistent = property(_get_persistent, _set_persistent) def interface(self): """Return an IP address for a client connection. 
If the server is listening on '0.0.0.0' (INADDR_ANY) or '::' (IN6ADDR_ANY), this will return the proper localhost.""" return interface(self.HOST) def getPage(self, url, headers=None, method="GET", body=None, protocol=None): """Open the url with debugging support. Return status, headers, body.""" ServerError.on = False if isinstance(url, unicodestr): url = url.encode('utf-8') if isinstance(body, unicodestr): body = body.encode('utf-8') self.url = url self.time = None start = time.time() result = openURL(url, headers, method, body, self.HOST, self.PORT, self.HTTP_CONN, protocol or self.PROTOCOL) self.time = time.time() - start self.status, self.headers, self.body = result # Build a list of request cookies from the previous response cookies. self.cookies = [('Cookie', v) for k, v in self.headers if k.lower() == 'set-cookie'] if ServerError.on: raise ServerError() return result interactive = True console_height = 30 def _handlewebError(self, msg): print("") print(" ERROR: %s" % msg) if not self.interactive: raise self.failureException(msg) p = " Show: [B]ody [H]eaders [S]tatus [U]RL; [I]gnore, [R]aise, or sys.e[X]it >> " sys.stdout.write(p) sys.stdout.flush() while True: i = getchar().upper() if i not in "BHSUIRX": continue print(i.upper()) # Also prints new line if i == "B": for x, line in enumerate(self.body.splitlines()): if (x + 1) % self.console_height == 0: # The \r and comma should make the next line overwrite sys.stdout.write("<-- More -->\r") m = getchar().lower() # Erase our "More" prompt sys.stdout.write(" \r") if m == "q": break print(line) elif i == "H": pprint.pprint(self.headers) elif i == "S": print(self.status) elif i == "U": print(self.url) elif i == "I": # return without raising the normal exception return elif i == "R": raise self.failureException(msg) elif i == "X": self.exit() sys.stdout.write(p) sys.stdout.flush() def exit(self): sys.exit() def assertStatus(self, status, msg=None): """Fail if self.status != status.""" if isinstance(status, 
basestring): if not self.status == status: if msg is None: msg = 'Status (%r) != %r' % (self.status, status) self._handlewebError(msg) elif isinstance(status, int): code = int(self.status[:3]) if code != status: if msg is None: msg = 'Status (%r) != %r' % (self.status, status) self._handlewebError(msg) else: # status is a tuple or list. match = False for s in status: if isinstance(s, basestring): if self.status == s: match = True break elif int(self.status[:3]) == s: match = True break if not match: if msg is None: msg = 'Status (%r) not in %r' % (self.status, status) self._handlewebError(msg) def assertHeader(self, key, value=None, msg=None): """Fail if (key, [value]) not in self.headers.""" lowkey = key.lower() for k, v in self.headers: if k.lower() == lowkey: if value is None or str(value) == v: return v if msg is None: if value is None: msg = '%r not in headers' % key else: msg = '%r:%r not in headers' % (key, value) self._handlewebError(msg) def assertHeaderItemValue(self, key, value, msg=None): """Fail if the header does not contain the specified value""" actual_value = self.assertHeader(key, msg=msg) header_values = map(str.strip, actual_value.split(',')) if value in header_values: return value if msg is None: msg = "%r not in %r" % (value, header_values) self._handlewebError(msg) def assertNoHeader(self, key, msg=None): """Fail if key in self.headers.""" lowkey = key.lower() matches = [k for k, v in self.headers if k.lower() == lowkey] if matches: if msg is None: msg = '%r in headers' % key self._handlewebError(msg) def assertBody(self, value, msg=None): """Fail if value != self.body.""" if value != self.body: if msg is None: msg = 'expected body:\n%r\n\nactual body:\n%r' % (value, self.body) self._handlewebError(msg) def assertInBody(self, value, msg=None): """Fail if value not in self.body.""" if value not in self.body: if msg is None: msg = '%r not in body: %s' % (value, self.body) self._handlewebError(msg) def assertNotInBody(self, value, msg=None): 
"""Fail if value in self.body.""" if value in self.body: if msg is None: msg = '%r found in body' % value self._handlewebError(msg) def assertMatchesBody(self, pattern, msg=None, flags=0): """Fail if value (a regex pattern) is not in self.body.""" if re.search(pattern, self.body, flags) is None: if msg is None: msg = 'No match for %r in body' % pattern self._handlewebError(msg) methods_with_bodies = ("POST", "PUT") def cleanHeaders(headers, method, body, host, port): """Return request headers, with required headers added (if missing).""" if headers is None: headers = [] # Add the required Host request header if not present. # [This specifies the host:port of the server, not the client.] found = False for k, v in headers: if k.lower() == 'host': found = True break if not found: if port == 80: headers.append(("Host", host)) else: headers.append(("Host", "%s:%s" % (host, port))) if method in methods_with_bodies: # Stick in default type and length headers if not present found = False for k, v in headers: if k.lower() == 'content-type': found = True break if not found: headers.append(("Content-Type", "application/x-www-form-urlencoded")) headers.append(("Content-Length", str(len(body or "")))) return headers def shb(response): """Return status, headers, body the way we like from a response.""" h = [] key, value = None, None for line in response.msg.headers: if line: if line[0] in " \t": value += line.strip() else: if key and value: h.append((key, value)) key, value = line.split(":", 1) key = key.strip() value = value.strip() if key and value: h.append((key, value)) return "%s %s" % (response.status, response.reason), h, response.read() def openURL(url, headers=None, method="GET", body=None, host="127.0.0.1", port=8000, http_conn=HTTPConnection, protocol="HTTP/1.1"): """Open the given HTTP resource and return status, headers, and body.""" headers = cleanHeaders(headers, method, body, host, port) # Trying 10 times is simply in case of socket errors. 
# Normal case--it should run once. for trial in range(10): try: # Allow http_conn to be a class or an instance if hasattr(http_conn, "host"): conn = http_conn else: conn = http_conn(interface(host), port) conn._http_vsn_str = protocol conn._http_vsn = int("".join([x for x in protocol if x.isdigit()])) # skip_accept_encoding argument added in python version 2.4 if sys.version_info < (2, 4): def putheader(self, header, value): if header == 'Accept-Encoding' and value == 'identity': return self.__class__.putheader(self, header, value) import new conn.putheader = new.instancemethod(putheader, conn, conn.__class__) conn.putrequest(method.upper(), url, skip_host=True) else: conn.putrequest(method.upper(), url, skip_host=True, skip_accept_encoding=True) for key, value in headers: conn.putheader(key, value) conn.endheaders() if body is not None: conn.send(body) # Handle response response = conn.getresponse() s, h, b = shb(response) if not hasattr(http_conn, "host"): # We made our own conn instance. Close it. conn.close() return s, h, b except socket.error: time.sleep(0.5) raise # Add any exceptions which your web framework handles # normally (that you don't want server_error to trap). ignored_exceptions = [] # You'll want set this to True when you can't guarantee # that each response will immediately follow each request; # for example, when handling requests via multiple threads. ignore_all = False class ServerError(Exception): on = False def server_error(exc=None): """Server debug hook. Return True if exception handled, False if ignored. You probably want to wrap this, so you can still handle an error using your framework when it's ignored. """ if exc is None: exc = sys.exc_info() if ignore_all or exc[0] in ignored_exceptions: return False else: ServerError.on = True print("") print("".join(traceback.format_exception(*exc))) return True
17,977
Python
.py
445
29.283146
93
0.558905
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,911
test_objectmapping.py
evilhero_mylar/lib/cherrypy/test/test_objectmapping.py
import cherrypy from cherrypy._cptree import Application from cherrypy.test import helper script_names = ["", "/foo", "/users/fred/blog", "/corp/blog"] class ObjectMappingTest(helper.CPWebCase): def setup_server(): class Root: def index(self, name="world"): return name index.exposed = True def foobar(self): return "bar" foobar.exposed = True def default(self, *params, **kwargs): return "default:" + repr(params) default.exposed = True def other(self): return "other" other.exposed = True def extra(self, *p): return repr(p) extra.exposed = True def redirect(self): raise cherrypy.HTTPRedirect('dir1/', 302) redirect.exposed = True def notExposed(self): return "not exposed" def confvalue(self): return cherrypy.request.config.get("user") confvalue.exposed = True def redirect_via_url(self, path): raise cherrypy.HTTPRedirect(cherrypy.url(path)) redirect_via_url.exposed = True def translate_html(self): return "OK" translate_html.exposed = True def mapped_func(self, ID=None): return "ID is %s" % ID mapped_func.exposed = True setattr(Root, "Von B\xfclow", mapped_func) class Exposing: def base(self): return "expose works!" cherrypy.expose(base) cherrypy.expose(base, "1") cherrypy.expose(base, "2") class ExposingNewStyle(object): def base(self): return "expose works!" 
cherrypy.expose(base) cherrypy.expose(base, "1") cherrypy.expose(base, "2") class Dir1: def index(self): return "index for dir1" index.exposed = True def myMethod(self): return "myMethod from dir1, path_info is:" + repr(cherrypy.request.path_info) myMethod.exposed = True myMethod._cp_config = {'tools.trailing_slash.extra': True} def default(self, *params): return "default for dir1, param is:" + repr(params) default.exposed = True class Dir2: def index(self): return "index for dir2, path is:" + cherrypy.request.path_info index.exposed = True def script_name(self): return cherrypy.tree.script_name() script_name.exposed = True def cherrypy_url(self): return cherrypy.url("/extra") cherrypy_url.exposed = True def posparam(self, *vpath): return "/".join(vpath) posparam.exposed = True class Dir3: def default(self): return "default for dir3, not exposed" class Dir4: def index(self): return "index for dir4, not exposed" class DefNoIndex: def default(self, *args): raise cherrypy.HTTPRedirect("contact") default.exposed = True # MethodDispatcher code class ByMethod: exposed = True def __init__(self, *things): self.things = list(things) def GET(self): return repr(self.things) def POST(self, thing): self.things.append(thing) class Collection: default = ByMethod('a', 'bit') Root.exposing = Exposing() Root.exposingnew = ExposingNewStyle() Root.dir1 = Dir1() Root.dir1.dir2 = Dir2() Root.dir1.dir2.dir3 = Dir3() Root.dir1.dir2.dir3.dir4 = Dir4() Root.defnoindex = DefNoIndex() Root.bymethod = ByMethod('another') Root.collection = Collection() d = cherrypy.dispatch.MethodDispatcher() for url in script_names: conf = {'/': {'user': (url or "/").split("/")[-2]}, '/bymethod': {'request.dispatch': d}, '/collection': {'request.dispatch': d}, } cherrypy.tree.mount(Root(), url, conf) class Isolated: def index(self): return "made it!" 
index.exposed = True cherrypy.tree.mount(Isolated(), "/isolated") class AnotherApp: exposed = True def GET(self): return "milk" cherrypy.tree.mount(AnotherApp(), "/app", {'/': {'request.dispatch': d}}) setup_server = staticmethod(setup_server) def testObjectMapping(self): for url in script_names: prefix = self.script_name = url self.getPage('/') self.assertBody('world') self.getPage("/dir1/myMethod") self.assertBody("myMethod from dir1, path_info is:'/dir1/myMethod'") self.getPage("/this/method/does/not/exist") self.assertBody("default:('this', 'method', 'does', 'not', 'exist')") self.getPage("/extra/too/much") self.assertBody("('too', 'much')") self.getPage("/other") self.assertBody('other') self.getPage("/notExposed") self.assertBody("default:('notExposed',)") self.getPage("/dir1/dir2/") self.assertBody('index for dir2, path is:/dir1/dir2/') # Test omitted trailing slash (should be redirected by default). self.getPage("/dir1/dir2") self.assertStatus(301) self.assertHeader('Location', '%s/dir1/dir2/' % self.base()) # Test extra trailing slash (should be redirected if configured). self.getPage("/dir1/myMethod/") self.assertStatus(301) self.assertHeader('Location', '%s/dir1/myMethod' % self.base()) # Test that default method must be exposed in order to match. self.getPage("/dir1/dir2/dir3/dir4/index") self.assertBody("default for dir1, param is:('dir2', 'dir3', 'dir4', 'index')") # Test *vpath when default() is defined but not index() # This also tests HTTPRedirect with default. 
self.getPage("/defnoindex") self.assertStatus((302, 303)) self.assertHeader('Location', '%s/contact' % self.base()) self.getPage("/defnoindex/") self.assertStatus((302, 303)) self.assertHeader('Location', '%s/defnoindex/contact' % self.base()) self.getPage("/defnoindex/page") self.assertStatus((302, 303)) self.assertHeader('Location', '%s/defnoindex/contact' % self.base()) self.getPage("/redirect") self.assertStatus('302 Found') self.assertHeader('Location', '%s/dir1/' % self.base()) if not getattr(cherrypy.server, "using_apache", False): # Test that we can use URL's which aren't all valid Python identifiers # This should also test the %XX-unquoting of URL's. self.getPage("/Von%20B%fclow?ID=14") self.assertBody("ID is 14") # Test that %2F in the path doesn't get unquoted too early; # that is, it should not be used to separate path components. # See ticket #393. self.getPage("/page%2Fname") self.assertBody("default:('page/name',)") self.getPage("/dir1/dir2/script_name") self.assertBody(url) self.getPage("/dir1/dir2/cherrypy_url") self.assertBody("%s/extra" % self.base()) # Test that configs don't overwrite each other from diferent apps self.getPage("/confvalue") self.assertBody((url or "/").split("/")[-2]) self.script_name = "" # Test absoluteURI's in the Request-Line self.getPage('http://%s:%s/' % (self.interface(), self.PORT)) self.assertBody('world') self.getPage('http://%s:%s/abs/?service=http://192.168.0.1/x/y/z' % (self.interface(), self.PORT)) self.assertBody("default:('abs',)") self.getPage('/rel/?service=http://192.168.120.121:8000/x/y/z') self.assertBody("default:('rel',)") # Test that the "isolated" app doesn't leak url's into the root app. # If it did leak, Root.default() would answer with # "default:('isolated', 'doesnt', 'exist')". 
self.getPage("/isolated/") self.assertStatus("200 OK") self.assertBody("made it!") self.getPage("/isolated/doesnt/exist") self.assertStatus("404 Not Found") # Make sure /foobar maps to Root.foobar and not to the app # mounted at /foo. See http://www.cherrypy.org/ticket/573 self.getPage("/foobar") self.assertBody("bar") def test_translate(self): self.getPage("/translate_html") self.assertStatus("200 OK") self.assertBody("OK") self.getPage("/translate.html") self.assertStatus("200 OK") self.assertBody("OK") self.getPage("/translate-html") self.assertStatus("200 OK") self.assertBody("OK") def test_redir_using_url(self): for url in script_names: prefix = self.script_name = url # Test the absolute path to the parent (leading slash) self.getPage('/redirect_via_url?path=./') self.assertStatus(('302 Found', '303 See Other')) self.assertHeader('Location', '%s/' % self.base()) # Test the relative path to the parent (no leading slash) self.getPage('/redirect_via_url?path=./') self.assertStatus(('302 Found', '303 See Other')) self.assertHeader('Location', '%s/' % self.base()) # Test the absolute path to the parent (leading slash) self.getPage('/redirect_via_url/?path=./') self.assertStatus(('302 Found', '303 See Other')) self.assertHeader('Location', '%s/' % self.base()) # Test the relative path to the parent (no leading slash) self.getPage('/redirect_via_url/?path=./') self.assertStatus(('302 Found', '303 See Other')) self.assertHeader('Location', '%s/' % self.base()) def testPositionalParams(self): self.getPage("/dir1/dir2/posparam/18/24/hut/hike") self.assertBody("18/24/hut/hike") # intermediate index methods should not receive posparams; # only the "final" index method should do so. self.getPage("/dir1/dir2/5/3/sir") self.assertBody("default for dir1, param is:('dir2', '5', '3', 'sir')") # test that extra positional args raises an 404 Not Found # See http://www.cherrypy.org/ticket/733. 
self.getPage("/dir1/dir2/script_name/extra/stuff") self.assertStatus(404) def testExpose(self): # Test the cherrypy.expose function/decorator self.getPage("/exposing/base") self.assertBody("expose works!") self.getPage("/exposing/1") self.assertBody("expose works!") self.getPage("/exposing/2") self.assertBody("expose works!") self.getPage("/exposingnew/base") self.assertBody("expose works!") self.getPage("/exposingnew/1") self.assertBody("expose works!") self.getPage("/exposingnew/2") self.assertBody("expose works!") def testMethodDispatch(self): self.getPage("/bymethod") self.assertBody("['another']") self.assertHeader('Allow', 'GET, HEAD, POST') self.getPage("/bymethod", method="HEAD") self.assertBody("") self.assertHeader('Allow', 'GET, HEAD, POST') self.getPage("/bymethod", method="POST", body="thing=one") self.assertBody("") self.assertHeader('Allow', 'GET, HEAD, POST') self.getPage("/bymethod") self.assertBody("['another', u'one']") self.assertHeader('Allow', 'GET, HEAD, POST') self.getPage("/bymethod", method="PUT") self.assertErrorPage(405) self.assertHeader('Allow', 'GET, HEAD, POST') # Test default with posparams self.getPage("/collection/silly", method="POST") self.getPage("/collection", method="GET") self.assertBody("['a', 'bit', 'silly']") # Test custom dispatcher set on app root (see #737). self.getPage("/app") self.assertBody("milk") def testTreeMounting(self): class Root(object): def hello(self): return "Hello world!" hello.exposed = True # When mounting an application instance, # we can't specify a different script name in the call to mount. a = Application(Root(), '/somewhere') self.assertRaises(ValueError, cherrypy.tree.mount, a, '/somewhereelse') # When mounting an application instance... a = Application(Root(), '/somewhere') # ...we MUST allow in identical script name in the call to mount... cherrypy.tree.mount(a, '/somewhere') self.getPage('/somewhere/hello') self.assertStatus(200) # ...and MUST allow a missing script_name. 
del cherrypy.tree.apps['/somewhere'] cherrypy.tree.mount(a) self.getPage('/somewhere/hello') self.assertStatus(200) # In addition, we MUST be able to create an Application using # script_name == None for access to the wsgi_environ. a = Application(Root(), script_name=None) # However, this does not apply to tree.mount self.assertRaises(TypeError, cherrypy.tree.mount, a, None)
14,816
Python
.py
306
33.839869
93
0.572018
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,912
test_json.py
evilhero_mylar/lib/cherrypy/test/test_json.py
import cherrypy from cherrypy.test import helper from cherrypy._cpcompat import json class JsonTest(helper.CPWebCase): def setup_server(): class Root(object): def plain(self): return 'hello' plain.exposed = True def json_string(self): return 'hello' json_string.exposed = True json_string._cp_config = {'tools.json_out.on': True} def json_list(self): return ['a', 'b', 42] json_list.exposed = True json_list._cp_config = {'tools.json_out.on': True} def json_dict(self): return {'answer': 42} json_dict.exposed = True json_dict._cp_config = {'tools.json_out.on': True} def json_post(self): if cherrypy.request.json == [13, 'c']: return 'ok' else: return 'nok' json_post.exposed = True json_post._cp_config = {'tools.json_in.on': True} root = Root() cherrypy.tree.mount(root) setup_server = staticmethod(setup_server) def test_json_output(self): if json is None: self.skip("json not found ") return self.getPage("/plain") self.assertBody("hello") self.getPage("/json_string") self.assertBody('"hello"') self.getPage("/json_list") self.assertBody('["a", "b", 42]') self.getPage("/json_dict") self.assertBody('{"answer": 42}') def test_json_input(self): if json is None: self.skip("json not found ") return body = '[13, "c"]' headers = [('Content-Type', 'application/json'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertBody('ok') body = '[13, "c"]' headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertStatus(415, 'Expected an application/json content type') body = '[13, -]' headers = [('Content-Type', 'application/json'), ('Content-Length', str(len(body)))] self.getPage("/json_post", method="POST", headers=headers, body=body) self.assertStatus(400, 'Invalid JSON document')
2,541
Python
.py
62
28.919355
77
0.538462
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,913
__init__.py
evilhero_mylar/lib/cherrypy/test/__init__.py
"""Regression test suite for CherryPy. Run 'nosetests -s test/' to exercise all tests. The '-s' flag instructs nose to output stdout messages, wihch is crucial to the 'interactive' mode of webtest.py. If you run these tests without the '-s' flag, don't be surprised if the test seems to hang: it's waiting for your interactive input. """ import sys def newexit(): raise SystemExit('Exit called') def setup(): # We want to monkey patch sys.exit so that we can get some # information about where exit is being called. newexit._old = sys.exit sys.exit = newexit def teardown(): try: sys.exit = sys.exit._old except AttributeError: sys.exit = sys._exit
698
Python
.py
20
31.45
77
0.719168
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,914
test_auth_digest.py
evilhero_mylar/lib/cherrypy/test/test_auth_digest.py
# This file is part of CherryPy <http://www.cherrypy.org/> # -*- coding: utf-8 -*- # vim:ts=4:sw=4:expandtab:fileencoding=utf-8 import cherrypy from cherrypy.lib import auth_digest from cherrypy.test import helper class DigestAuthTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "This is public." index.exposed = True class DigestProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True def fetch_users(): return {'test': 'test'} get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(fetch_users()) conf = {'/digest': {'tools.auth_digest.on': True, 'tools.auth_digest.realm': 'localhost', 'tools.auth_digest.get_ha1': get_ha1, 'tools.auth_digest.key': 'a565c27146791cfb', 'tools.auth_digest.debug': 'True'}} root = Root() root.digest = DigestProtected() cherrypy.tree.mount(root, config=conf) setup_server = staticmethod(setup_server) def testPublic(self): self.getPage("/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('This is public.') def testDigest(self): self.getPage("/digest/") self.assertStatus(401) value = None for k, v in self.headers: if k.lower() == "www-authenticate": if v.startswith("Digest"): value = v break if value is None: self._handlewebError("Digest authentification scheme was not found") value = value[7:] items = value.split(', ') tokens = {} for item in items: key, value = item.split('=') tokens[key.lower()] = value missing_msg = "%s is missing" bad_value_msg = "'%s' was expecting '%s' but found '%s'" nonce = None if 'realm' not in tokens: self._handlewebError(missing_msg % 'realm') elif tokens['realm'] != '"localhost"': self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm'])) if 'nonce' not in tokens: self._handlewebError(missing_msg % 'nonce') else: nonce = tokens['nonce'].strip('"') if 'algorithm' not in tokens: self._handlewebError(missing_msg % 'algorithm') elif tokens['algorithm'] 
!= '"MD5"': self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm'])) if 'qop' not in tokens: self._handlewebError(missing_msg % 'qop') elif tokens['qop'] != '"auth"': self._handlewebError(bad_value_msg % ('qop', '"auth"', tokens['qop'])) get_ha1 = auth_digest.get_ha1_dict_plain({'test' : 'test'}) # Test user agent response with a wrong value for 'realm' base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001') auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET') # calculate the response digest ha1 = get_ha1(auth.realm, 'test') response = auth.request_digest(ha1) # send response with correct response digest, but wrong realm auth_header = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth_header)]) self.assertStatus(401) # Test that must pass base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth_header = base_auth % (nonce, '11111111111111111111111111111111', '00000001') auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET') # calculate the response digest ha1 = get_ha1('localhost', 'test') response = auth.request_digest(ha1) # send response with correct response digest auth_header = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth_header)]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.")
4,553
Python
.py
93
38.344086
167
0.591791
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,915
test_proxy.py
evilhero_mylar/lib/cherrypy/test/test_proxy.py
import cherrypy from cherrypy.test import helper script_names = ["", "/path/to/myapp"] class ProxyTest(helper.CPWebCase): def setup_server(): # Set up site cherrypy.config.update({ 'tools.proxy.on': True, 'tools.proxy.base': 'www.mydomain.test', }) # Set up application class Root: def __init__(self, sn): # Calculate a URL outside of any requests. self.thisnewpage = cherrypy.url("/this/new/page", script_name=sn) def pageurl(self): return self.thisnewpage pageurl.exposed = True def index(self): raise cherrypy.HTTPRedirect('dummy') index.exposed = True def remoteip(self): return cherrypy.request.remote.ip remoteip.exposed = True def xhost(self): raise cherrypy.HTTPRedirect('blah') xhost.exposed = True xhost._cp_config = {'tools.proxy.local': 'X-Host', 'tools.trailing_slash.extra': True, } def base(self): return cherrypy.request.base base.exposed = True def ssl(self): return cherrypy.request.base ssl.exposed = True ssl._cp_config = {'tools.proxy.scheme': 'X-Forwarded-Ssl'} def newurl(self): return ("Browse to <a href='%s'>this page</a>." 
% cherrypy.url("/this/new/page")) newurl.exposed = True for sn in script_names: cherrypy.tree.mount(Root(sn), sn) setup_server = staticmethod(setup_server) def testProxy(self): self.getPage("/") self.assertHeader('Location', "%s://www.mydomain.test%s/dummy" % (self.scheme, self.prefix())) # Test X-Forwarded-Host (Apache 1.3.33+ and Apache 2) self.getPage("/", headers=[('X-Forwarded-Host', 'http://www.example.test')]) self.assertHeader('Location', "http://www.example.test/dummy") self.getPage("/", headers=[('X-Forwarded-Host', 'www.example.test')]) self.assertHeader('Location', "%s://www.example.test/dummy" % self.scheme) # Test multiple X-Forwarded-Host headers self.getPage("/", headers=[ ('X-Forwarded-Host', 'http://www.example.test, www.cherrypy.test'), ]) self.assertHeader('Location', "http://www.example.test/dummy") # Test X-Forwarded-For (Apache2) self.getPage("/remoteip", headers=[('X-Forwarded-For', '192.168.0.20')]) self.assertBody("192.168.0.20") self.getPage("/remoteip", headers=[('X-Forwarded-For', '67.15.36.43, 192.168.0.20')]) self.assertBody("192.168.0.20") # Test X-Host (lighttpd; see https://trac.lighttpd.net/trac/ticket/418) self.getPage("/xhost", headers=[('X-Host', 'www.example.test')]) self.assertHeader('Location', "%s://www.example.test/blah" % self.scheme) # Test X-Forwarded-Proto (lighttpd) self.getPage("/base", headers=[('X-Forwarded-Proto', 'https')]) self.assertBody("https://www.mydomain.test") # Test X-Forwarded-Ssl (webfaction?) 
self.getPage("/ssl", headers=[('X-Forwarded-Ssl', 'on')]) self.assertBody("https://www.mydomain.test") # Test cherrypy.url() for sn in script_names: # Test the value inside requests self.getPage(sn + "/newurl") self.assertBody("Browse to <a href='%s://www.mydomain.test" % self.scheme + sn + "/this/new/page'>this page</a>.") self.getPage(sn + "/newurl", headers=[('X-Forwarded-Host', 'http://www.example.test')]) self.assertBody("Browse to <a href='http://www.example.test" + sn + "/this/new/page'>this page</a>.") # Test the value outside requests port = "" if self.scheme == "http" and self.PORT != 80: port = ":%s" % self.PORT elif self.scheme == "https" and self.PORT != 443: port = ":%s" % self.PORT host = self.HOST if host in ('0.0.0.0', '::'): import socket host = socket.gethostname() expected = ("%s://%s%s%s/this/new/page" % (self.scheme, host, port, sn)) self.getPage(sn + "/pageurl") self.assertBody(expected) # Test trailing slash (see http://www.cherrypy.org/ticket/562). self.getPage("/xhost/", headers=[('X-Host', 'www.example.test')]) self.assertHeader('Location', "%s://www.example.test/xhost" % self.scheme)
5,161
Python
.py
103
34.533981
85
0.53125
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,916
test_caching.py
evilhero_mylar/lib/cherrypy/test/test_caching.py
import datetime import gzip from itertools import count import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import sys import threading import time import urllib import cherrypy from cherrypy._cpcompat import next, ntob, quote, xrange from cherrypy.lib import httputil gif_bytes = ntob('GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00' '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' '\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;') from cherrypy.test import helper class CacheTest(helper.CPWebCase): def setup_server(): class Root: _cp_config = {'tools.caching.on': True} def __init__(self): self.counter = 0 self.control_counter = 0 self.longlock = threading.Lock() def index(self): self.counter += 1 msg = "visit #%s" % self.counter return msg index.exposed = True def control(self): self.control_counter += 1 return "visit #%s" % self.control_counter control.exposed = True def a_gif(self): cherrypy.response.headers['Last-Modified'] = httputil.HTTPDate() return gif_bytes a_gif.exposed = True def long_process(self, seconds='1'): try: self.longlock.acquire() time.sleep(float(seconds)) finally: self.longlock.release() return 'success!' 
long_process.exposed = True def clear_cache(self, path): cherrypy._cache.store[cherrypy.request.base + path].clear() clear_cache.exposed = True class VaryHeaderCachingServer(object): _cp_config = {'tools.caching.on': True, 'tools.response_headers.on': True, 'tools.response_headers.headers': [('Vary', 'Our-Varying-Header')], } def __init__(self): self.counter = count(1) def index(self): return "visit #%s" % next(self.counter) index.exposed = True class UnCached(object): _cp_config = {'tools.expires.on': True, 'tools.expires.secs': 60, 'tools.staticdir.on': True, 'tools.staticdir.dir': 'static', 'tools.staticdir.root': curdir, } def force(self): cherrypy.response.headers['Etag'] = 'bibbitybobbityboo' self._cp_config['tools.expires.force'] = True self._cp_config['tools.expires.secs'] = 0 return "being forceful" force.exposed = True force._cp_config = {'tools.expires.secs': 0} def dynamic(self): cherrypy.response.headers['Etag'] = 'bibbitybobbityboo' cherrypy.response.headers['Cache-Control'] = 'private' return "D-d-d-dynamic!" dynamic.exposed = True def cacheable(self): cherrypy.response.headers['Etag'] = 'bibbitybobbityboo' return "Hi, I'm cacheable." cacheable.exposed = True def specific(self): cherrypy.response.headers['Etag'] = 'need_this_to_make_me_cacheable' return "I am being specific" specific.exposed = True specific._cp_config = {'tools.expires.secs': 86400} class Foo(object):pass def wrongtype(self): cherrypy.response.headers['Etag'] = 'need_this_to_make_me_cacheable' return "Woops" wrongtype.exposed = True wrongtype._cp_config = {'tools.expires.secs': Foo()} cherrypy.tree.mount(Root()) cherrypy.tree.mount(UnCached(), "/expires") cherrypy.tree.mount(VaryHeaderCachingServer(), "/varying_headers") cherrypy.config.update({'tools.gzip.on': True}) setup_server = staticmethod(setup_server) def testCaching(self): elapsed = 0.0 for trial in range(10): self.getPage("/") # The response should be the same every time, # except for the Age response header. 
self.assertBody('visit #1') if trial != 0: age = int(self.assertHeader("Age")) self.assert_(age >= elapsed) elapsed = age # POST, PUT, DELETE should not be cached. self.getPage("/", method="POST") self.assertBody('visit #2') # Because gzip is turned on, the Vary header should always Vary for content-encoding self.assertHeader('Vary', 'Accept-Encoding') # The previous request should have invalidated the cache, # so this request will recalc the response. self.getPage("/", method="GET") self.assertBody('visit #3') # ...but this request should get the cached copy. self.getPage("/", method="GET") self.assertBody('visit #3') self.getPage("/", method="DELETE") self.assertBody('visit #4') # The previous request should have invalidated the cache, # so this request will recalc the response. self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')]) self.assertHeader('Content-Encoding', 'gzip') self.assertHeader('Vary') self.assertEqual(cherrypy.lib.encoding.decompress(self.body), ntob("visit #5")) # Now check that a second request gets the gzip header and gzipped body # This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped # response body was being gzipped a second time. self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')]) self.assertHeader('Content-Encoding', 'gzip') self.assertEqual(cherrypy.lib.encoding.decompress(self.body), ntob("visit #5")) # Now check that a third request that doesn't accept gzip # skips the cache (because the 'Vary' header denies it). self.getPage("/", method="GET") self.assertNoHeader('Content-Encoding') self.assertBody('visit #6') def testVaryHeader(self): self.getPage("/varying_headers/") self.assertStatus("200 OK") self.assertHeaderItemValue('Vary', 'Our-Varying-Header') self.assertBody('visit #1') # Now check that different 'Vary'-fields don't evict each other. # This test creates 2 requests with different 'Our-Varying-Header' # and then tests if the first one still exists. 
self.getPage("/varying_headers/", headers=[('Our-Varying-Header', 'request 2')]) self.assertStatus("200 OK") self.assertBody('visit #2') self.getPage("/varying_headers/", headers=[('Our-Varying-Header', 'request 2')]) self.assertStatus("200 OK") self.assertBody('visit #2') self.getPage("/varying_headers/") self.assertStatus("200 OK") self.assertBody('visit #1') def testExpiresTool(self): # test setting an expires header self.getPage("/expires/specific") self.assertStatus("200 OK") self.assertHeader("Expires") # test exceptions for bad time values self.getPage("/expires/wrongtype") self.assertStatus(500) self.assertInBody("TypeError") # static content should not have "cache prevention" headers self.getPage("/expires/index.html") self.assertStatus("200 OK") self.assertNoHeader("Pragma") self.assertNoHeader("Cache-Control") self.assertHeader("Expires") # dynamic content that sets indicators should not have # "cache prevention" headers self.getPage("/expires/cacheable") self.assertStatus("200 OK") self.assertNoHeader("Pragma") self.assertNoHeader("Cache-Control") self.assertHeader("Expires") self.getPage('/expires/dynamic') self.assertBody("D-d-d-dynamic!") # the Cache-Control header should be untouched self.assertHeader("Cache-Control", "private") self.assertHeader("Expires") # configure the tool to ignore indicators and replace existing headers self.getPage("/expires/force") self.assertStatus("200 OK") # This also gives us a chance to test 0 expiry with no other headers self.assertHeader("Pragma", "no-cache") if cherrypy.server.protocol_version == "HTTP/1.1": self.assertHeader("Cache-Control", "no-cache, must-revalidate") self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT") # static content should now have "cache prevention" headers self.getPage("/expires/index.html") self.assertStatus("200 OK") self.assertHeader("Pragma", "no-cache") if cherrypy.server.protocol_version == "HTTP/1.1": self.assertHeader("Cache-Control", "no-cache, must-revalidate") 
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT") # the cacheable handler should now have "cache prevention" headers self.getPage("/expires/cacheable") self.assertStatus("200 OK") self.assertHeader("Pragma", "no-cache") if cherrypy.server.protocol_version == "HTTP/1.1": self.assertHeader("Cache-Control", "no-cache, must-revalidate") self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT") self.getPage('/expires/dynamic') self.assertBody("D-d-d-dynamic!") # dynamic sets Cache-Control to private but it should be # overwritten here ... self.assertHeader("Pragma", "no-cache") if cherrypy.server.protocol_version == "HTTP/1.1": self.assertHeader("Cache-Control", "no-cache, must-revalidate") self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT") def testLastModified(self): self.getPage("/a.gif") self.assertStatus(200) self.assertBody(gif_bytes) lm1 = self.assertHeader("Last-Modified") # this request should get the cached copy. self.getPage("/a.gif") self.assertStatus(200) self.assertBody(gif_bytes) self.assertHeader("Age") lm2 = self.assertHeader("Last-Modified") self.assertEqual(lm1, lm2) # this request should match the cached copy, but raise 304. self.getPage("/a.gif", [('If-Modified-Since', lm1)]) self.assertStatus(304) self.assertNoHeader("Last-Modified") if not getattr(cherrypy.server, "using_apache", False): self.assertHeader("Age") def test_antistampede(self): SECONDS = 4 # We MUST make an initial synchronous request in order to create the # AntiStampedeCache object, and populate its selecting_headers, # before the actual stampede. self.getPage("/long_process?seconds=%d" % SECONDS) self.assertBody('success!') self.getPage("/clear_cache?path=" + quote('/long_process?seconds=%d' % SECONDS, safe='')) self.assertStatus(200) sys.stdout.write("prepped... 
") sys.stdout.flush() start = datetime.datetime.now() def run(): self.getPage("/long_process?seconds=%d" % SECONDS) # The response should be the same every time self.assertBody('success!') ts = [threading.Thread(target=run) for i in xrange(100)] for t in ts: t.start() for t in ts: t.join() self.assertEqualDates(start, datetime.datetime.now(), # Allow a second for our thread/TCP overhead etc. seconds=SECONDS + 1.1) def test_cache_control(self): self.getPage("/control") self.assertBody('visit #1') self.getPage("/control") self.assertBody('visit #1') self.getPage("/control", headers=[('Cache-Control', 'no-cache')]) self.assertBody('visit #2') self.getPage("/control") self.assertBody('visit #2') self.getPage("/control", headers=[('Pragma', 'no-cache')]) self.assertBody('visit #3') self.getPage("/control") self.assertBody('visit #3') time.sleep(1) self.getPage("/control", headers=[('Cache-Control', 'max-age=0')]) self.assertBody('visit #4') self.getPage("/control") self.assertBody('visit #4')
13,057
Python
.py
274
35.343066
92
0.598044
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,917
modpy.py
evilhero_mylar/lib/cherrypy/test/modpy.py
"""Wrapper for mod_python, for use as a CherryPy HTTP server when testing. To autostart modpython, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl" or "apache2ctl"-- create a symlink to them if needed. If you wish to test the WSGI interface instead of our _cpmodpy interface, you also need the 'modpython_gateway' module at: http://projects.amor.org/misc/wiki/ModPythonGateway KNOWN BUGS ========== 1. Apache processes Range headers automatically; CherryPy's truncated output is then truncated again by Apache. See test_core.testRanges. This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. 4. Apache replaces status "reason phrases" automatically. For example, CherryPy may set "304 Not modified" but Apache will write out "304 Not Modified" (capital "M"). 5. Apache does not allow custom error codes as per the spec. 6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the Request-URI too early. 7. mod_python will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. Apache will output a "Content-Length: 0" response header even if there's no response entity body. This isn't really a bug; it just differs from the CherryPy default. 
""" import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import time from cherrypy.test import helper def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output APACHE_PATH = "httpd" CONF_PATH = "test_mp.conf" conf_modpython_gateway = """ # Apache2 server conf file for testing CherryPy with modpython_gateway. ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so SetHandler python-program PythonFixupHandler cherrypy.test.modpy::wsgisetup PythonOption testmod %(modulename)s PythonHandler modpython_gateway::handler PythonOption wsgi.application cherrypy::tree PythonOption socket_host %(host)s PythonDebug On """ conf_cpmodpy = """ # Apache2 server conf file for testing CherryPy with _cpmodpy. 
ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s LoadModule python_module modules/mod_python.so SetHandler python-program PythonFixupHandler cherrypy.test.modpy::cpmodpysetup PythonHandler cherrypy._cpmodpy::handler PythonOption cherrypy.setup cherrypy.test.%(modulename)s::setup_server PythonOption socket_host %(host)s PythonDebug On """ class ModPythonSupervisor(helper.Supervisor): using_apache = True using_wsgi = False template = None def __str__(self): return "ModPython Server on %s:%s" % (self.host, self.port) def start(self, modulename): mpconf = CONF_PATH if not os.path.isabs(mpconf): mpconf = os.path.join(curdir, mpconf) f = open(mpconf, 'wb') try: f.write(self.template % {'port': self.port, 'modulename': modulename, 'host': self.host}) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % mpconf) if result: print(result) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") loaded = False def wsgisetup(req): global loaded if not loaded: loaded = True options = req.get_options() import cherrypy cherrypy.config.update({ "log.error_file": os.path.join(curdir, "test.log"), "environment": "test_suite", "server.socket_host": options['socket_host'], }) modname = options['testmod'] mod = __import__(modname, globals(), locals(), ['']) mod.setup_server() cherrypy.server.unsubscribe() cherrypy.engine.start() from mod_python import apache return apache.OK def cpmodpysetup(req): global loaded if not loaded: loaded = True options = req.get_options() import cherrypy cherrypy.config.update({ "log.error_file": os.path.join(curdir, "test.log"), "environment": "test_suite", "server.socket_host": options['socket_host'], }) from mod_python import apache return apache.OK
5,091
Python
.py
130
33.176923
77
0.694696
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,918
test_logging.py
evilhero_mylar/lib/cherrypy/test/test_logging.py
"""Basic tests for the CherryPy core: request handling.""" import os localDir = os.path.dirname(__file__) import cherrypy access_log = os.path.join(localDir, "access.log") error_log = os.path.join(localDir, "error.log") # Some unicode strings. tartaros = u'\u03a4\u1f71\u03c1\u03c4\u03b1\u03c1\u03bf\u03c2' erebos = u'\u0388\u03c1\u03b5\u03b2\u03bf\u03c2.com' def setup_server(): class Root: def index(self): return "hello" index.exposed = True def uni_code(self): cherrypy.request.login = tartaros cherrypy.request.remote.name = erebos uni_code.exposed = True def slashes(self): cherrypy.request.request_line = r'GET /slashed\path HTTP/1.1' slashes.exposed = True def whitespace(self): # User-Agent = "User-Agent" ":" 1*( product | comment ) # comment = "(" *( ctext | quoted-pair | comment ) ")" # ctext = <any TEXT excluding "(" and ")"> # TEXT = <any OCTET except CTLs, but including LWS> # LWS = [CRLF] 1*( SP | HT ) cherrypy.request.headers['User-Agent'] = 'Browzuh (1.0\r\n\t\t.3)' whitespace.exposed = True def as_string(self): return "content" as_string.exposed = True def as_yield(self): yield "content" as_yield.exposed = True def error(self): raise ValueError() error.exposed = True error._cp_config = {'tools.log_tracebacks.on': True} root = Root() cherrypy.config.update({'log.error_file': error_log, 'log.access_file': access_log, }) cherrypy.tree.mount(root) from cherrypy.test import helper, logtest class AccessLogTests(helper.CPWebCase, logtest.LogCase): setup_server = staticmethod(setup_server) logfile = access_log def testNormalReturn(self): self.markLog() self.getPage("/as_string", headers=[('Referer', 'http://www.cherrypy.org/'), ('User-Agent', 'Mozilla/5.0')]) self.assertBody('content') self.assertStatus(200) intro = '%s - - [' % self.interface() self.assertLog(-1, intro) if [k for k, v in self.headers if k.lower() == 'content-length']: self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 7 ' '"http://www.cherrypy.org/" "Mozilla/5.0"' % self.prefix()) else: 
self.assertLog(-1, '] "GET %s/as_string HTTP/1.1" 200 - ' '"http://www.cherrypy.org/" "Mozilla/5.0"' % self.prefix()) def testNormalYield(self): self.markLog() self.getPage("/as_yield") self.assertBody('content') self.assertStatus(200) intro = '%s - - [' % self.interface() self.assertLog(-1, intro) if [k for k, v in self.headers if k.lower() == 'content-length']: self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 7 "" ""' % self.prefix()) else: self.assertLog(-1, '] "GET %s/as_yield HTTP/1.1" 200 - "" ""' % self.prefix()) def testEscapedOutput(self): # Test unicode in access log pieces. self.markLog() self.getPage("/uni_code") self.assertStatus(200) self.assertLog(-1, repr(tartaros.encode('utf8'))[1:-1]) # Test the erebos value. Included inline for your enlightenment. # Note the 'r' prefix--those backslashes are literals. self.assertLog(-1, r'\xce\x88\xcf\x81\xce\xb5\xce\xb2\xce\xbf\xcf\x82') # Test backslashes in output. self.markLog() self.getPage("/slashes") self.assertStatus(200) self.assertLog(-1, r'"GET /slashed\\path HTTP/1.1"') # Test whitespace in output. self.markLog() self.getPage("/whitespace") self.assertStatus(200) # Again, note the 'r' prefix. self.assertLog(-1, r'"Browzuh (1.0\r\n\t\t.3)"') class ErrorLogTests(helper.CPWebCase, logtest.LogCase): setup_server = staticmethod(setup_server) logfile = error_log def testTracebacks(self): # Test that tracebacks get written to the error log. self.markLog() ignore = helper.webtest.ignored_exceptions ignore.append(ValueError) try: self.getPage("/error") self.assertInBody("raise ValueError()") self.assertLog(0, 'HTTP Traceback (most recent call last):') self.assertLog(-3, 'raise ValueError()') finally: ignore.pop()
4,920
Python
.py
113
31.876106
79
0.566616
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,919
test_httpauth.py
evilhero_mylar/lib/cherrypy/test/test_httpauth.py
import cherrypy from cherrypy._cpcompat import md5, sha, ntob from cherrypy.lib import httpauth from cherrypy.test import helper class HTTPAuthTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "This is public." index.exposed = True class DigestProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True class BasicProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True class BasicProtected2: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True def fetch_users(): return {'test': 'test'} def sha_password_encrypter(password): return sha(ntob(password)).hexdigest() def fetch_password(username): return sha(ntob('test')).hexdigest() conf = {'/digest': {'tools.digest_auth.on': True, 'tools.digest_auth.realm': 'localhost', 'tools.digest_auth.users': fetch_users}, '/basic': {'tools.basic_auth.on': True, 'tools.basic_auth.realm': 'localhost', 'tools.basic_auth.users': {'test': md5(ntob('test')).hexdigest()}}, '/basic2': {'tools.basic_auth.on': True, 'tools.basic_auth.realm': 'localhost', 'tools.basic_auth.users': fetch_password, 'tools.basic_auth.encrypt': sha_password_encrypter}} root = Root() root.digest = DigestProtected() root.basic = BasicProtected() root.basic2 = BasicProtected2() cherrypy.tree.mount(root, config=conf) setup_server = staticmethod(setup_server) def testPublic(self): self.getPage("/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('This is public.') def testBasic(self): self.getPage("/basic/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"') self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZX60')]) self.assertStatus(401) self.getPage('/basic/', [('Authorization', 'Basic dGVzdDp0ZXN0')]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been 
authorized.") def testBasic2(self): self.getPage("/basic2/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="localhost"') self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZX60')]) self.assertStatus(401) self.getPage('/basic2/', [('Authorization', 'Basic dGVzdDp0ZXN0')]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.") def testDigest(self): self.getPage("/digest/") self.assertStatus(401) value = None for k, v in self.headers: if k.lower() == "www-authenticate": if v.startswith("Digest"): value = v break if value is None: self._handlewebError("Digest authentification scheme was not found") value = value[7:] items = value.split(', ') tokens = {} for item in items: key, value = item.split('=') tokens[key.lower()] = value missing_msg = "%s is missing" bad_value_msg = "'%s' was expecting '%s' but found '%s'" nonce = None if 'realm' not in tokens: self._handlewebError(missing_msg % 'realm') elif tokens['realm'] != '"localhost"': self._handlewebError(bad_value_msg % ('realm', '"localhost"', tokens['realm'])) if 'nonce' not in tokens: self._handlewebError(missing_msg % 'nonce') else: nonce = tokens['nonce'].strip('"') if 'algorithm' not in tokens: self._handlewebError(missing_msg % 'algorithm') elif tokens['algorithm'] != '"MD5"': self._handlewebError(bad_value_msg % ('algorithm', '"MD5"', tokens['algorithm'])) if 'qop' not in tokens: self._handlewebError(missing_msg % 'qop') elif tokens['qop'] != '"auth"': self._handlewebError(bad_value_msg % ('qop', '"auth"', tokens['qop'])) # Test a wrong 'realm' value base_auth = 'Digest username="test", realm="wrong realm", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth = base_auth % (nonce, '', '00000001') params = httpauth.parseAuthorization(auth) response = httpauth._computeDigestResponse(params, 'test') auth = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth)]) 
self.assertStatus(401) # Test that must pass base_auth = 'Digest username="test", realm="localhost", nonce="%s", uri="/digest/", algorithm=MD5, response="%s", qop=auth, nc=%s, cnonce="1522e61005789929"' auth = base_auth % (nonce, '', '00000001') params = httpauth.parseAuthorization(auth) response = httpauth._computeDigestResponse(params, 'test') auth = base_auth % (nonce, response, '00000001') self.getPage('/digest/', [('Authorization', auth)]) self.assertStatus('200 OK') self.assertBody("Hello test, you've been authorized.")
5,844
Python
.py
120
36.758333
167
0.580381
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,920
test_core.py
evilhero_mylar/lib/cherrypy/test/test_core.py
"""Basic tests for the CherryPy core: request handling.""" import os localDir = os.path.dirname(__file__) import sys import types import cherrypy from cherrypy._cpcompat import IncompleteRead, itervalues, ntob from cherrypy import _cptools, tools from cherrypy.lib import httputil, static favicon_path = os.path.join(os.getcwd(), localDir, "../favicon.ico") # Client-side code # from cherrypy.test import helper class CoreRequestHandlingTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "hello" index.exposed = True favicon_ico = tools.staticfile.handler(filename=favicon_path) def defct(self, newct): newct = "text/%s" % newct cherrypy.config.update({'tools.response_headers.on': True, 'tools.response_headers.headers': [('Content-Type', newct)]}) defct.exposed = True def baseurl(self, path_info, relative=None): return cherrypy.url(path_info, relative=bool(relative)) baseurl.exposed = True root = Root() if sys.version_info >= (2, 5): from cherrypy.test._test_decorators import ExposeExamples root.expose_dec = ExposeExamples() class TestType(type): """Metaclass which automatically exposes all functions in each subclass, and adds an instance of the subclass as an attribute of root. 
""" def __init__(cls, name, bases, dct): type.__init__(cls, name, bases, dct) for value in itervalues(dct): if isinstance(value, types.FunctionType): value.exposed = True setattr(root, name.lower(), cls()) class Test(object): __metaclass__ = TestType class URL(Test): _cp_config = {'tools.trailing_slash.on': False} def index(self, path_info, relative=None): if relative != 'server': relative = bool(relative) return cherrypy.url(path_info, relative=relative) def leaf(self, path_info, relative=None): if relative != 'server': relative = bool(relative) return cherrypy.url(path_info, relative=relative) class Status(Test): def index(self): return "normal" def blank(self): cherrypy.response.status = "" # According to RFC 2616, new status codes are OK as long as they # are between 100 and 599. # Here is an illegal code... def illegal(self): cherrypy.response.status = 781 return "oops" # ...and here is an unknown but legal code. def unknown(self): cherrypy.response.status = "431 My custom error" return "funky" # Non-numeric code def bad(self): cherrypy.response.status = "error" return "bad news" class Redirect(Test): class Error: _cp_config = {"tools.err_redirect.on": True, "tools.err_redirect.url": "/errpage", "tools.err_redirect.internal": False, } def index(self): raise NameError("redirect_test") index.exposed = True error = Error() def index(self): return "child" def custom(self, url, code): raise cherrypy.HTTPRedirect(url, code) def by_code(self, code): raise cherrypy.HTTPRedirect("somewhere%20else", code) by_code._cp_config = {'tools.trailing_slash.extra': True} def nomodify(self): raise cherrypy.HTTPRedirect("", 304) def proxy(self): raise cherrypy.HTTPRedirect("proxy", 305) def stringify(self): return str(cherrypy.HTTPRedirect("/")) def fragment(self, frag): raise cherrypy.HTTPRedirect("/some/url#%s" % frag) def login_redir(): if not getattr(cherrypy.request, "login", None): raise cherrypy.InternalRedirect("/internalredirect/login") tools.login_redir = 
_cptools.Tool('before_handler', login_redir) def redir_custom(): raise cherrypy.InternalRedirect("/internalredirect/custom_err") class InternalRedirect(Test): def index(self): raise cherrypy.InternalRedirect("/") def choke(self): return 3 / 0 choke.exposed = True choke._cp_config = {'hooks.before_error_response': redir_custom} def relative(self, a, b): raise cherrypy.InternalRedirect("cousin?t=6") def cousin(self, t): assert cherrypy.request.prev.closed return cherrypy.request.prev.query_string def petshop(self, user_id): if user_id == "parrot": # Trade it for a slug when redirecting raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug') elif user_id == "terrier": # Trade it for a fish when redirecting raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish') else: # This should pass the user_id through to getImagesByUser raise cherrypy.InternalRedirect( '/image/getImagesByUser?user_id=%s' % str(user_id)) # We support Python 2.3, but the @-deco syntax would look like this: # @tools.login_redir() def secure(self): return "Welcome!" secure = tools.login_redir()(secure) # Since calling the tool returns the same function you pass in, # you could skip binding the return value, and just write: # tools.login_redir()(secure) def login(self): return "Please log in" def custom_err(self): return "Something went horribly wrong." 
def early_ir(self, arg): return "whatever" early_ir._cp_config = {'hooks.before_request_body': redir_custom} class Image(Test): def getImagesByUser(self, user_id): return "0 images for %s" % user_id class Flatten(Test): def as_string(self): return "content" def as_list(self): return ["con", "tent"] def as_yield(self): yield ntob("content") def as_dblyield(self): yield self.as_yield() as_dblyield._cp_config = {'tools.flatten.on': True} def as_refyield(self): for chunk in self.as_yield(): yield chunk class Ranges(Test): def get_ranges(self, bytes): return repr(httputil.get_ranges('bytes=%s' % bytes, 8)) def slice_file(self): path = os.path.join(os.getcwd(), os.path.dirname(__file__)) return static.serve_file(os.path.join(path, "static/index.html")) class Cookies(Test): def single(self, name): cookie = cherrypy.request.cookie[name] # Python2's SimpleCookie.__setitem__ won't take unicode keys. cherrypy.response.cookie[str(name)] = cookie.value def multiple(self, names): for name in names: cookie = cherrypy.request.cookie[name] # Python2's SimpleCookie.__setitem__ won't take unicode keys. cherrypy.response.cookie[str(name)] = cookie.value cherrypy.tree.mount(root) setup_server = staticmethod(setup_server) def testStatus(self): self.getPage("/status/") self.assertBody('normal') self.assertStatus(200) self.getPage("/status/blank") self.assertBody('') self.assertStatus(200) self.getPage("/status/illegal") self.assertStatus(500) msg = "Illegal response status from server (781 is out of range)." self.assertErrorPage(500, msg) if not getattr(cherrypy.server, 'using_apache', False): self.getPage("/status/unknown") self.assertBody('funky') self.assertStatus(431) self.getPage("/status/bad") self.assertStatus(500) msg = "Illegal response status from server ('error' is non-numeric)." self.assertErrorPage(500, msg) def testSlashes(self): # Test that requests for index methods without a trailing slash # get redirected to the same URI path with a trailing slash. 
# Make sure GET params are preserved. self.getPage("/redirect?id=3") self.assertStatus(301) self.assertInBody("<a href='%s/redirect/?id=3'>" "%s/redirect/?id=3</a>" % (self.base(), self.base())) if self.prefix(): # Corner case: the "trailing slash" redirect could be tricky if # we're using a virtual root and the URI is "/vroot" (no slash). self.getPage("") self.assertStatus(301) self.assertInBody("<a href='%s/'>%s/</a>" % (self.base(), self.base())) # Test that requests for NON-index methods WITH a trailing slash # get redirected to the same URI path WITHOUT a trailing slash. # Make sure GET params are preserved. self.getPage("/redirect/by_code/?code=307") self.assertStatus(301) self.assertInBody("<a href='%s/redirect/by_code?code=307'>" "%s/redirect/by_code?code=307</a>" % (self.base(), self.base())) # If the trailing_slash tool is off, CP should just continue # as if the slashes were correct. But it needs some help # inside cherrypy.url to form correct output. self.getPage('/url?path_info=page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf/?path_info=page1') self.assertBody('%s/url/page1' % self.base()) def testRedirect(self): self.getPage("/redirect/") self.assertBody('child') self.assertStatus(200) self.getPage("/redirect/by_code?code=300") self.assertMatchesBody(r"<a href='(.*)somewhere%20else'>\1somewhere%20else</a>") self.assertStatus(300) self.getPage("/redirect/by_code?code=301") self.assertMatchesBody(r"<a href='(.*)somewhere%20else'>\1somewhere%20else</a>") self.assertStatus(301) self.getPage("/redirect/by_code?code=302") self.assertMatchesBody(r"<a href='(.*)somewhere%20else'>\1somewhere%20else</a>") self.assertStatus(302) self.getPage("/redirect/by_code?code=303") self.assertMatchesBody(r"<a href='(.*)somewhere%20else'>\1somewhere%20else</a>") self.assertStatus(303) self.getPage("/redirect/by_code?code=307") self.assertMatchesBody(r"<a href='(.*)somewhere%20else'>\1somewhere%20else</a>") self.assertStatus(307) 
self.getPage("/redirect/nomodify") self.assertBody('') self.assertStatus(304) self.getPage("/redirect/proxy") self.assertBody('') self.assertStatus(305) # HTTPRedirect on error self.getPage("/redirect/error/") self.assertStatus(('302 Found', '303 See Other')) self.assertInBody('/errpage') # Make sure str(HTTPRedirect()) works. self.getPage("/redirect/stringify", protocol="HTTP/1.0") self.assertStatus(200) self.assertBody("(['%s/'], 302)" % self.base()) if cherrypy.server.protocol_version == "HTTP/1.1": self.getPage("/redirect/stringify", protocol="HTTP/1.1") self.assertStatus(200) self.assertBody("(['%s/'], 303)" % self.base()) # check that #fragments are handled properly # http://skrb.org/ietf/http_errata.html#location-fragments frag = "foo" self.getPage("/redirect/fragment/%s" % frag) self.assertMatchesBody(r"<a href='(.*)\/some\/url\#%s'>\1\/some\/url\#%s</a>" % (frag, frag)) loc = self.assertHeader('Location') assert loc.endswith("#%s" % frag) self.assertStatus(('302 Found', '303 See Other')) # check injection protection # See http://www.cherrypy.org/ticket/1003 self.getPage("/redirect/custom?code=303&url=/foobar/%0d%0aSet-Cookie:%20somecookie=someval") self.assertStatus(303) loc = self.assertHeader('Location') assert 'Set-Cookie' in loc self.assertNoHeader('Set-Cookie') def test_InternalRedirect(self): # InternalRedirect self.getPage("/internalredirect/") self.assertBody('hello') self.assertStatus(200) # Test passthrough self.getPage("/internalredirect/petshop?user_id=Sir-not-appearing-in-this-film") self.assertBody('0 images for Sir-not-appearing-in-this-film') self.assertStatus(200) # Test args self.getPage("/internalredirect/petshop?user_id=parrot") self.assertBody('0 images for slug') self.assertStatus(200) # Test POST self.getPage("/internalredirect/petshop", method="POST", body="user_id=terrier") self.assertBody('0 images for fish') self.assertStatus(200) # Test ir before body read self.getPage("/internalredirect/early_ir", method="POST", 
body="arg=aha!") self.assertBody("Something went horribly wrong.") self.assertStatus(200) self.getPage("/internalredirect/secure") self.assertBody('Please log in') self.assertStatus(200) # Relative path in InternalRedirect. # Also tests request.prev. self.getPage("/internalredirect/relative?a=3&b=5") self.assertBody("a=3&b=5") self.assertStatus(200) # InternalRedirect on error self.getPage("/internalredirect/choke") self.assertStatus(200) self.assertBody("Something went horribly wrong.") def testFlatten(self): for url in ["/flatten/as_string", "/flatten/as_list", "/flatten/as_yield", "/flatten/as_dblyield", "/flatten/as_refyield"]: self.getPage(url) self.assertBody('content') def testRanges(self): self.getPage("/ranges/get_ranges?bytes=3-6") self.assertBody("[(3, 7)]") # Test multiple ranges and a suffix-byte-range-spec, for good measure. self.getPage("/ranges/get_ranges?bytes=2-4,-1") self.assertBody("[(2, 5), (7, 8)]") # Get a partial file. if cherrypy.server.protocol_version == "HTTP/1.1": self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')]) self.assertStatus(206) self.assertHeader("Content-Type", "text/html;charset=utf-8") self.assertHeader("Content-Range", "bytes 2-5/14") self.assertBody("llo,") # What happens with overlapping ranges (and out of order, too)? 
self.getPage("/ranges/slice_file", [('Range', 'bytes=4-6,2-5')]) self.assertStatus(206) ct = self.assertHeader("Content-Type") expected_type = "multipart/byteranges; boundary=" self.assert_(ct.startswith(expected_type)) boundary = ct[len(expected_type):] expected_body = ("\r\n--%s\r\n" "Content-type: text/html\r\n" "Content-range: bytes 4-6/14\r\n" "\r\n" "o, \r\n" "--%s\r\n" "Content-type: text/html\r\n" "Content-range: bytes 2-5/14\r\n" "\r\n" "llo,\r\n" "--%s--\r\n" % (boundary, boundary, boundary)) self.assertBody(expected_body) self.assertHeader("Content-Length") # Test "416 Requested Range Not Satisfiable" self.getPage("/ranges/slice_file", [('Range', 'bytes=2300-2900')]) self.assertStatus(416) # "When this status code is returned for a byte-range request, # the response SHOULD include a Content-Range entity-header # field specifying the current length of the selected resource" self.assertHeader("Content-Range", "bytes */14") elif cherrypy.server.protocol_version == "HTTP/1.0": # Test Range behavior with HTTP/1.0 request self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')]) self.assertStatus(200) self.assertBody("Hello, world\r\n") def testFavicon(self): # favicon.ico is served by staticfile. 
icofilename = os.path.join(localDir, "../favicon.ico") icofile = open(icofilename, "rb") data = icofile.read() icofile.close() self.getPage("/favicon.ico") self.assertBody(data) def testCookies(self): if sys.version_info >= (2, 5): header_value = lambda x: x else: header_value = lambda x: x+';' self.getPage("/cookies/single?name=First", [('Cookie', 'First=Dinsdale;')]) self.assertHeader('Set-Cookie', header_value('First=Dinsdale')) self.getPage("/cookies/multiple?names=First&names=Last", [('Cookie', 'First=Dinsdale; Last=Piranha;'), ]) self.assertHeader('Set-Cookie', header_value('First=Dinsdale')) self.assertHeader('Set-Cookie', header_value('Last=Piranha')) self.getPage("/cookies/single?name=Something-With:Colon", [('Cookie', 'Something-With:Colon=some-value')]) self.assertStatus(400) def testDefaultContentType(self): self.getPage('/') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.getPage('/defct/plain') self.getPage('/') self.assertHeader('Content-Type', 'text/plain;charset=utf-8') self.getPage('/defct/html') def test_cherrypy_url(self): # Input relative to current self.getPage('/url/leaf?path_info=page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/?path_info=page1') self.assertBody('%s/url/page1' % self.base()) # Other host header host = 'www.mydomain.example' self.getPage('/url/leaf?path_info=page1', headers=[('Host', host)]) self.assertBody('%s://%s/url/page1' % (self.scheme, host)) # Input is 'absolute'; that is, relative to script_name self.getPage('/url/leaf?path_info=/page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/?path_info=/page1') self.assertBody('%s/page1' % self.base()) # Single dots self.getPage('/url/leaf?path_info=./page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf?path_info=other/./page1') self.assertBody('%s/url/other/page1' % self.base()) self.getPage('/url/?path_info=/other/./page1') self.assertBody('%s/other/page1' % self.base()) # Double dots 
self.getPage('/url/leaf?path_info=../page1') self.assertBody('%s/page1' % self.base()) self.getPage('/url/leaf?path_info=other/../page1') self.assertBody('%s/url/page1' % self.base()) self.getPage('/url/leaf?path_info=/other/../page1') self.assertBody('%s/page1' % self.base()) # Output relative to current path or script_name self.getPage('/url/?path_info=page1&relative=True') self.assertBody('page1') self.getPage('/url/leaf?path_info=/page1&relative=True') self.assertBody('../page1') self.getPage('/url/leaf?path_info=page1&relative=True') self.assertBody('page1') self.getPage('/url/leaf?path_info=leaf/page1&relative=True') self.assertBody('leaf/page1') self.getPage('/url/leaf?path_info=../page1&relative=True') self.assertBody('../page1') self.getPage('/url/?path_info=other/../page1&relative=True') self.assertBody('page1') # Output relative to / self.getPage('/baseurl?path_info=ab&relative=True') self.assertBody('ab') # Output relative to / self.getPage('/baseurl?path_info=/ab&relative=True') self.assertBody('ab') # absolute-path references ("server-relative") # Input relative to current self.getPage('/url/leaf?path_info=page1&relative=server') self.assertBody('/url/page1') self.getPage('/url/?path_info=page1&relative=server') self.assertBody('/url/page1') # Input is 'absolute'; that is, relative to script_name self.getPage('/url/leaf?path_info=/page1&relative=server') self.assertBody('/page1') self.getPage('/url/?path_info=/page1&relative=server') self.assertBody('/page1') def test_expose_decorator(self): if not sys.version_info >= (2, 5): return self.skip("skipped (Python 2.5+ only) ") # Test @expose self.getPage("/expose_dec/no_call") self.assertStatus(200) self.assertBody("Mr E. R. Bradshaw") # Test @expose() self.getPage("/expose_dec/call_empty") self.assertStatus(200) self.assertBody("Mrs. B.J. Smegma") # Test @expose("alias") self.getPage("/expose_dec/call_alias") self.assertStatus(200) self.assertBody("Mr Nesbitt") # Does the original name work? 
self.getPage("/expose_dec/nesbitt") self.assertStatus(200) self.assertBody("Mr Nesbitt") # Test @expose(["alias1", "alias2"]) self.getPage("/expose_dec/alias1") self.assertStatus(200) self.assertBody("Mr Ken Andrews") self.getPage("/expose_dec/alias2") self.assertStatus(200) self.assertBody("Mr Ken Andrews") # Does the original name work? self.getPage("/expose_dec/andrews") self.assertStatus(200) self.assertBody("Mr Ken Andrews") # Test @expose(alias="alias") self.getPage("/expose_dec/alias3") self.assertStatus(200) self.assertBody("Mr. and Mrs. Watson")
24,100
Python
.py
488
35.122951
101
0.576053
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,921
test_conn.py
evilhero_mylar/lib/cherrypy/test/test_conn.py
"""Tests for TCP connection handling, including proper and timely close.""" import socket import sys import time timeout = 1 import cherrypy from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, NotConnected, BadStatusLine from cherrypy._cpcompat import ntob, urlopen, unicodestr from cherrypy.test import webtest from cherrypy import _cperror pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN' def setup_server(): def raise500(): raise cherrypy.HTTPError(500) class Root: def index(self): return pov index.exposed = True page1 = index page2 = index page3 = index def hello(self): return "Hello, world!" hello.exposed = True def timeout(self, t): return str(cherrypy.server.httpserver.timeout) timeout.exposed = True def stream(self, set_cl=False): if set_cl: cherrypy.response.headers['Content-Length'] = 10 def content(): for x in range(10): yield str(x) return content() stream.exposed = True stream._cp_config = {'response.stream': True} def error(self, code=500): raise cherrypy.HTTPError(code) error.exposed = True def upload(self): if not cherrypy.request.method == 'POST': raise AssertionError("'POST' != request.method %r" % cherrypy.request.method) return "thanks for '%s'" % cherrypy.request.body.read() upload.exposed = True def custom(self, response_code): cherrypy.response.status = response_code return "Code = %s" % response_code custom.exposed = True def err_before_read(self): return "ok" err_before_read.exposed = True err_before_read._cp_config = {'hooks.on_start_resource': raise500} def one_megabyte_of_a(self): return ["a" * 1024] * 1024 one_megabyte_of_a.exposed = True def custom_cl(self, body, cl): cherrypy.response.headers['Content-Length'] = cl if not isinstance(body, list): body = [body] newbody = [] for chunk in body: if isinstance(chunk, unicodestr): chunk = chunk.encode('ISO-8859-1') newbody.append(chunk) return newbody custom_cl.exposed = True # Turn off the encoding tool so it doens't collapse # our response body and reclaculate the Content-Length. 
custom_cl._cp_config = {'tools.encode.on': False} cherrypy.tree.mount(Root()) cherrypy.config.update({ 'server.max_request_body_size': 1001, 'server.socket_timeout': timeout, }) from cherrypy.test import helper class ConnectionCloseTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_HTTP11(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" self.persistent = True # Make the first request and assert there's no "Connection: close". self.getPage("/") self.assertStatus('200 OK') self.assertBody(pov) self.assertNoHeader("Connection") # Make another request on the same connection. self.getPage("/page1") self.assertStatus('200 OK') self.assertBody(pov) self.assertNoHeader("Connection") # Test client-side close. self.getPage("/page2", headers=[("Connection", "close")]) self.assertStatus('200 OK') self.assertBody(pov) self.assertHeader("Connection", "close") # Make another request on the same connection, which should error. self.assertRaises(NotConnected, self.getPage, "/") def test_Streaming_no_len(self): self._streaming(set_cl=False) def test_Streaming_with_len(self): self._streaming(set_cl=True) def _streaming(self, set_cl): if cherrypy.server.protocol_version == "HTTP/1.1": self.PROTOCOL = "HTTP/1.1" self.persistent = True # Make the first request and assert there's no "Connection: close". self.getPage("/") self.assertStatus('200 OK') self.assertBody(pov) self.assertNoHeader("Connection") # Make another, streamed request on the same connection. if set_cl: # When a Content-Length is provided, the content should stream # without closing the connection. 
self.getPage("/stream?set_cl=Yes") self.assertHeader("Content-Length") self.assertNoHeader("Connection", "close") self.assertNoHeader("Transfer-Encoding") self.assertStatus('200 OK') self.assertBody('0123456789') else: # When no Content-Length response header is provided, # streamed output will either close the connection, or use # chunked encoding, to determine transfer-length. self.getPage("/stream") self.assertNoHeader("Content-Length") self.assertStatus('200 OK') self.assertBody('0123456789') chunked_response = False for k, v in self.headers: if k.lower() == "transfer-encoding": if str(v) == "chunked": chunked_response = True if chunked_response: self.assertNoHeader("Connection", "close") else: self.assertHeader("Connection", "close") # Make another request on the same connection, which should error. self.assertRaises(NotConnected, self.getPage, "/") # Try HEAD. See http://www.cherrypy.org/ticket/864. self.getPage("/stream", method='HEAD') self.assertStatus('200 OK') self.assertBody('') self.assertNoHeader("Transfer-Encoding") else: self.PROTOCOL = "HTTP/1.0" self.persistent = True # Make the first request and assert Keep-Alive. self.getPage("/", headers=[("Connection", "Keep-Alive")]) self.assertStatus('200 OK') self.assertBody(pov) self.assertHeader("Connection", "Keep-Alive") # Make another, streamed request on the same connection. if set_cl: # When a Content-Length is provided, the content should # stream without closing the connection. self.getPage("/stream?set_cl=Yes", headers=[("Connection", "Keep-Alive")]) self.assertHeader("Content-Length") self.assertHeader("Connection", "Keep-Alive") self.assertNoHeader("Transfer-Encoding") self.assertStatus('200 OK') self.assertBody('0123456789') else: # When a Content-Length is not provided, # the server should close the connection. 
self.getPage("/stream", headers=[("Connection", "Keep-Alive")]) self.assertStatus('200 OK') self.assertBody('0123456789') self.assertNoHeader("Content-Length") self.assertNoHeader("Connection", "Keep-Alive") self.assertNoHeader("Transfer-Encoding") # Make another request on the same connection, which should error. self.assertRaises(NotConnected, self.getPage, "/") def test_HTTP10_KeepAlive(self): self.PROTOCOL = "HTTP/1.0" if self.scheme == "https": self.HTTP_CONN = HTTPSConnection else: self.HTTP_CONN = HTTPConnection # Test a normal HTTP/1.0 request. self.getPage("/page2") self.assertStatus('200 OK') self.assertBody(pov) # Apache, for example, may emit a Connection header even for HTTP/1.0 ## self.assertNoHeader("Connection") # Test a keep-alive HTTP/1.0 request. self.persistent = True self.getPage("/page3", headers=[("Connection", "Keep-Alive")]) self.assertStatus('200 OK') self.assertBody(pov) self.assertHeader("Connection", "Keep-Alive") # Remove the keep-alive header again. self.getPage("/page3") self.assertStatus('200 OK') self.assertBody(pov) # Apache, for example, may emit a Connection header even for HTTP/1.0 ## self.assertNoHeader("Connection") class PipelineTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_HTTP11_Timeout(self): # If we timeout without sending any data, # the server will close the conn with a 408. if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Connect but send nothing. self.persistent = True conn = self.HTTP_CONN conn.auto_open = False conn.connect() # Wait for our socket timeout time.sleep(timeout * 2) # The request should have returned 408 already. response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 408) conn.close() # Connect but send half the headers only. 
self.persistent = True conn = self.HTTP_CONN conn.auto_open = False conn.connect() conn.send(ntob('GET /hello HTTP/1.1')) conn.send(("Host: %s" % self.HOST).encode('ascii')) # Wait for our socket timeout time.sleep(timeout * 2) # The conn should have already sent 408. response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 408) conn.close() def test_HTTP11_Timeout_after_request(self): # If we timeout after at least one request has succeeded, # the server will close the conn without 408. if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Make an initial request self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/timeout?t=%s" % timeout, skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) self.body = response.read() self.assertBody(str(timeout)) # Make a second request on the same socket conn._output(ntob('GET /hello HTTP/1.1')) conn._output(ntob("Host: %s" % self.HOST, 'ascii')) conn._send_output() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) self.body = response.read() self.assertBody("Hello, world!") # Wait for our socket timeout time.sleep(timeout * 2) # Make another request on the same socket, which should error conn._output(ntob('GET /hello HTTP/1.1')) conn._output(ntob("Host: %s" % self.HOST, 'ascii')) conn._send_output() response = conn.response_class(conn.sock, method="GET") try: response.begin() except: if not isinstance(sys.exc_info()[1], (socket.error, BadStatusLine)): self.fail("Writing to timed out socket didn't fail" " as it should have: %s" % sys.exc_info()[1]) else: if response.status != 408: self.fail("Writing to timed out socket didn't fail" " as it should have: %s" % response.read()) conn.close() # Make another request on a new socket, which 
should work self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) self.body = response.read() self.assertBody(pov) # Make another request on the same socket, # but timeout on the headers conn.send(ntob('GET /hello HTTP/1.1')) # Wait for our socket timeout time.sleep(timeout * 2) response = conn.response_class(conn.sock, method="GET") try: response.begin() except: if not isinstance(sys.exc_info()[1], (socket.error, BadStatusLine)): self.fail("Writing to timed out socket didn't fail" " as it should have: %s" % sys.exc_info()[1]) else: self.fail("Writing to timed out socket didn't fail" " as it should have: %s" % response.read()) conn.close() # Retry the request on a new connection, which should work self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) self.body = response.read() self.assertBody(pov) conn.close() def test_HTTP11_pipelining(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Test pipelining. httplib doesn't support this directly. 
self.persistent = True conn = self.HTTP_CONN # Put request 1 conn.putrequest("GET", "/hello", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() for trial in range(5): # Put next request conn._output(ntob('GET /hello HTTP/1.1')) conn._output(ntob("Host: %s" % self.HOST, 'ascii')) conn._send_output() # Retrieve previous response response = conn.response_class(conn.sock, method="GET") response.begin() body = response.read(13) self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello, world!")) # Retrieve final response response = conn.response_class(conn.sock, method="GET") response.begin() body = response.read() self.assertEqual(response.status, 200) self.assertEqual(body, ntob("Hello, world!")) conn.close() def test_100_Continue(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" self.persistent = True conn = self.HTTP_CONN # Try a page without an Expect request header first. # Note that httplib's response.begin automatically ignores # 100 Continue responses, so we must manually check for it. conn.putrequest("POST", "/upload", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Content-Type", "text/plain") conn.putheader("Content-Length", "4") conn.endheaders() conn.send(ntob("d'oh")) response = conn.response_class(conn.sock, method="POST") version, status, reason = response._read_status() self.assertNotEqual(status, 100) conn.close() # Now try a page with an Expect header... 
conn.connect() conn.putrequest("POST", "/upload", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Content-Type", "text/plain") conn.putheader("Content-Length", "17") conn.putheader("Expect", "100-continue") conn.endheaders() response = conn.response_class(conn.sock, method="POST") # ...assert and then skip the 100 response version, status, reason = response._read_status() self.assertEqual(status, 100) while True: line = response.fp.readline().strip() if line: self.fail("100 Continue should not output any headers. Got %r" % line) else: break # ...send the body body = ntob("I am a small file") conn.send(body) # ...get the final response response.begin() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(200) self.assertBody("thanks for '%s'" % body) conn.close() class ConnectionTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_readall_or_close(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" if self.scheme == "https": self.HTTP_CONN = HTTPSConnection else: self.HTTP_CONN = HTTPConnection # Test a max of 0 (the default) and then reset to what it was above. 
old_max = cherrypy.server.max_request_body_size for new_max in (0, old_max): cherrypy.server.max_request_body_size = new_max self.persistent = True conn = self.HTTP_CONN # Get a POST page with an error conn.putrequest("POST", "/err_before_read", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Content-Type", "text/plain") conn.putheader("Content-Length", "1000") conn.putheader("Expect", "100-continue") conn.endheaders() response = conn.response_class(conn.sock, method="POST") # ...assert and then skip the 100 response version, status, reason = response._read_status() self.assertEqual(status, 100) while True: skip = response.fp.readline().strip() if not skip: break # ...send the body conn.send(ntob("x" * 1000)) # ...get the final response response.begin() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(500) # Now try a working page with an Expect header... conn._output(ntob('POST /upload HTTP/1.1')) conn._output(ntob("Host: %s" % self.HOST, 'ascii')) conn._output(ntob("Content-Type: text/plain")) conn._output(ntob("Content-Length: 17")) conn._output(ntob("Expect: 100-continue")) conn._send_output() response = conn.response_class(conn.sock, method="POST") # ...assert and then skip the 100 response version, status, reason = response._read_status() self.assertEqual(status, 100) while True: skip = response.fp.readline().strip() if not skip: break # ...send the body body = ntob("I am a small file") conn.send(body) # ...get the final response response.begin() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(200) self.assertBody("thanks for '%s'" % body) conn.close() def test_No_Message_Body(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Set our HTTP_CONN to an instance so it persists between requests. self.persistent = True # Make the first request and assert there's no "Connection: close". 
self.getPage("/") self.assertStatus('200 OK') self.assertBody(pov) self.assertNoHeader("Connection") # Make a 204 request on the same connection. self.getPage("/custom/204") self.assertStatus(204) self.assertNoHeader("Content-Length") self.assertBody("") self.assertNoHeader("Connection") # Make a 304 request on the same connection. self.getPage("/custom/304") self.assertStatus(304) self.assertNoHeader("Content-Length") self.assertBody("") self.assertNoHeader("Connection") def test_Chunked_Encoding(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() if (hasattr(self, 'harness') and "modpython" in self.harness.__class__.__name__.lower()): # mod_python forbids chunked encoding return self.skip() self.PROTOCOL = "HTTP/1.1" # Set our HTTP_CONN to an instance so it persists between requests. self.persistent = True conn = self.HTTP_CONN # Try a normal chunked request (with extensions) body = ntob("8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n" "Content-Type: application/json\r\n" "\r\n") conn.putrequest("POST", "/upload", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Transfer-Encoding", "chunked") conn.putheader("Trailer", "Content-Type") # Note that this is somewhat malformed: # we shouldn't be sending Content-Length. # RFC 2616 says the server should ignore it. conn.putheader("Content-Length", "3") conn.endheaders() conn.send(body) response = conn.getresponse() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus('200 OK') self.assertBody("thanks for '%s'" % ntob('xx\r\nxxxxyyyyy')) # Try a chunked request that exceeds server.max_request_body_size. # Note that the delimiters and trailer are included. 
body = ntob("3e3\r\n" + ("x" * 995) + "\r\n0\r\n\r\n") conn.putrequest("POST", "/upload", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Transfer-Encoding", "chunked") conn.putheader("Content-Type", "text/plain") # Chunked requests don't need a content-length ## conn.putheader("Content-Length", len(body)) conn.endheaders() conn.send(body) response = conn.getresponse() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(413) conn.close() def test_Content_Length_in(self): # Try a non-chunked request where Content-Length exceeds # server.max_request_body_size. Assert error before body send. self.persistent = True conn = self.HTTP_CONN conn.putrequest("POST", "/upload", skip_host=True) conn.putheader("Host", self.HOST) conn.putheader("Content-Type", "text/plain") conn.putheader("Content-Length", "9999") conn.endheaders() response = conn.getresponse() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(413) self.assertBody("The entity sent with the request exceeds " "the maximum allowed bytes.") conn.close() def test_Content_Length_out_preheaders(self): # Try a non-chunked response where Content-Length is less than # the actual bytes in the response body. self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/custom_cl?body=I+have+too+many+bytes&cl=5", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.getresponse() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(500) self.assertBody( "The requested resource returned more bytes than the " "declared Content-Length.") conn.close() def test_Content_Length_out_postheaders(self): # Try a non-chunked response where Content-Length is less than # the actual bytes in the response body. 
self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/custom_cl?body=I+too&body=+have+too+many&cl=5", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.getresponse() self.status, self.headers, self.body = webtest.shb(response) self.assertStatus(200) self.assertBody("I too") conn.close() def test_598(self): remote_data_conn = urlopen('%s://%s:%s/one_megabyte_of_a/' % (self.scheme, self.HOST, self.PORT,)) buf = remote_data_conn.read(512) time.sleep(timeout * 0.6) remaining = (1024 * 1024) - 512 while remaining: data = remote_data_conn.read(remaining) if not data: break else: buf += data remaining -= len(data) self.assertEqual(len(buf), 1024 * 1024) self.assertEqual(buf, ntob("a" * 1024 * 1024)) self.assertEqual(remaining, 0) remote_data_conn.close() class BadRequestTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_No_CRLF(self): self.persistent = True conn = self.HTTP_CONN conn.send(ntob('GET /hello HTTP/1.1\n\n')) response = conn.response_class(conn.sock, method="GET") response.begin() self.body = response.read() self.assertBody("HTTP requires CRLF terminators") conn.close() conn.connect() conn.send(ntob('GET /hello HTTP/1.1\r\n\n')) response = conn.response_class(conn.sock, method="GET") response.begin() self.body = response.read() self.assertBody("HTTP requires CRLF terminators") conn.close()
26,382
Python
.py
606
32.462046
91
0.588155
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,922
modwsgi.py
evilhero_mylar/lib/cherrypy/test/modwsgi.py
"""Wrapper for mod_wsgi, for use as a CherryPy HTTP server. To autostart modwsgi, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl" or "apache2ctl"-- create a symlink to them if needed. KNOWN BUGS ========== ##1. Apache processes Range headers automatically; CherryPy's truncated ## output is then truncated again by Apache. See test_core.testRanges. ## This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. ##4. Apache replaces status "reason phrases" automatically. For example, ## CherryPy may set "304 Not modified" but Apache will write out ## "304 Not Modified" (capital "M"). ##5. Apache does not allow custom error codes as per the spec. ##6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the ## Request-URI too early. 7. mod_wsgi will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. When responding with 204 No Content, mod_wsgi adds a Content-Length header for you. 9. When an error is raised, mod_wsgi has no facility for printing a traceback as the response content (it's sent to the Apache log instead). 10. Startup and shutdown of Apache when running mod_wsgi seems slow. """ import os curdir = os.path.abspath(os.path.dirname(__file__)) import re import sys import time import cherrypy from cherrypy.test import helper, webtest def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' 
% cmd) output = firstline + pipeout.read() finally: pipeout.close() return output if sys.platform == 'win32': APACHE_PATH = "httpd" else: APACHE_PATH = "apache" CONF_PATH = "test_mw.conf" conf_modwsgi = r""" # Apache2 server conf file for testing CherryPy with modpython_gateway. ServerName 127.0.0.1 DocumentRoot "/" Listen %(port)s AllowEncodedSlashes On LoadModule rewrite_module modules/mod_rewrite.so RewriteEngine on RewriteMap escaping int:escape LoadModule log_config_module modules/mod_log_config.so LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-agent}i\"" combined CustomLog "%(curdir)s/apache.access.log" combined ErrorLog "%(curdir)s/apache.error.log" LogLevel debug LoadModule wsgi_module modules/mod_wsgi.so LoadModule env_module modules/mod_env.so WSGIScriptAlias / "%(curdir)s/modwsgi.py" SetEnv testmod %(testmod)s """ class ModWSGISupervisor(helper.Supervisor): """Server Controller for ModWSGI and CherryPy.""" using_apache = True using_wsgi = True template=conf_modwsgi def __str__(self): return "ModWSGI Server on %s:%s" % (self.host, self.port) def start(self, modulename): mpconf = CONF_PATH if not os.path.isabs(mpconf): mpconf = os.path.join(curdir, mpconf) f = open(mpconf, 'wb') try: output = (self.template % {'port': self.port, 'testmod': modulename, 'curdir': curdir}) f.write(output) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % mpconf) if result: print(result) # Make a request so mod_wsgi starts up our app. # If we don't, concurrent initial requests will 404. cherrypy._cpserver.wait_for_occupied_port("127.0.0.1", self.port) webtest.openURL('/ihopetheresnodefault', port=self.port) time.sleep(1) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") loaded = False def application(environ, start_response): import cherrypy global loaded if not loaded: loaded = True modname = "cherrypy.test." 
+ environ['testmod'] mod = __import__(modname, globals(), locals(), ['']) mod.setup_server() cherrypy.config.update({ "log.error_file": os.path.join(curdir, "test.error.log"), "log.access_file": os.path.join(curdir, "test.access.log"), "environment": "test_suite", "engine.SIGHUP": None, "engine.SIGTERM": None, }) return cherrypy.tree(environ, start_response)
4,880
Python
.py
118
35.5
90
0.680401
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,923
modfastcgi.py
evilhero_mylar/lib/cherrypy/test/modfastcgi.py
"""Wrapper for mod_fastcgi, for use as a CherryPy HTTP server when testing. To autostart fastcgi, the "apache" executable or script must be on your system path, or you must override the global APACHE_PATH. On some platforms, "apache" may be called "apachectl", "apache2ctl", or "httpd"--create a symlink to them if needed. You'll also need the WSGIServer from flup.servers. See http://projects.amor.org/misc/wiki/ModPythonGateway KNOWN BUGS ========== 1. Apache processes Range headers automatically; CherryPy's truncated output is then truncated again by Apache. See test_core.testRanges. This was worked around in http://www.cherrypy.org/changeset/1319. 2. Apache does not allow custom HTTP methods like CONNECT as per the spec. See test_core.testHTTPMethods. 3. Max request header and body settings do not work with Apache. 4. Apache replaces status "reason phrases" automatically. For example, CherryPy may set "304 Not modified" but Apache will write out "304 Not Modified" (capital "M"). 5. Apache does not allow custom error codes as per the spec. 6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the Request-URI too early. 7. mod_python will not read request bodies which use the "chunked" transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and mod_python's requestobject.c). 8. Apache will output a "Content-Length: 0" response header even if there's no response entity body. This isn't really a bug; it just differs from the CherryPy default. 
""" import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) import re import sys import time import cherrypy from cherrypy.process import plugins, servers from cherrypy.test import helper def read_process(cmd, args=""): pipein, pipeout = os.popen4("%s %s" % (cmd, args)) try: firstline = pipeout.readline() if (re.search(r"(not recognized|No such file|not found)", firstline, re.IGNORECASE)): raise IOError('%s must be on your system path.' % cmd) output = firstline + pipeout.read() finally: pipeout.close() return output APACHE_PATH = "apache2ctl" CONF_PATH = "fastcgi.conf" conf_fastcgi = """ # Apache2 server conf file for testing CherryPy with mod_fastcgi. # fumanchu: I had to hard-code paths due to crazy Debian layouts :( ServerRoot /usr/lib/apache2 User #1000 ErrorLog %(root)s/mod_fastcgi.error.log DocumentRoot "%(root)s" ServerName 127.0.0.1 Listen %(port)s LoadModule fastcgi_module modules/mod_fastcgi.so LoadModule rewrite_module modules/mod_rewrite.so Options +ExecCGI SetHandler fastcgi-script RewriteEngine On RewriteRule ^(.*)$ /fastcgi.pyc [L] FastCgiExternalServer "%(server)s" -host 127.0.0.1:4000 """ def erase_script_name(environ, start_response): environ['SCRIPT_NAME'] = '' return cherrypy.tree(environ, start_response) class ModFCGISupervisor(helper.LocalWSGISupervisor): httpserver_class = "cherrypy.process.servers.FlupFCGIServer" using_apache = True using_wsgi = True template = conf_fastcgi def __str__(self): return "FCGI Server on %s:%s" % (self.host, self.port) def start(self, modulename): cherrypy.server.httpserver = servers.FlupFCGIServer( application=erase_script_name, bindAddress=('127.0.0.1', 4000)) cherrypy.server.httpserver.bind_addr = ('127.0.0.1', 4000) cherrypy.server.socket_port = 4000 # For FCGI, we both start apache... 
self.start_apache() # ...and our local server cherrypy.engine.start() self.sync_apps() def start_apache(self): fcgiconf = CONF_PATH if not os.path.isabs(fcgiconf): fcgiconf = os.path.join(curdir, fcgiconf) # Write the Apache conf file. f = open(fcgiconf, 'wb') try: server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1] output = self.template % {'port': self.port, 'root': curdir, 'server': server} output = output.replace('\r\n', '\n') f.write(output) finally: f.close() result = read_process(APACHE_PATH, "-k start -f %s" % fcgiconf) if result: print(result) def stop(self): """Gracefully shutdown a server that is serving forever.""" read_process(APACHE_PATH, "-k stop") helper.LocalWSGISupervisor.stop(self) def sync_apps(self): cherrypy.server.httpserver.fcgiserver.application = self.get_app(erase_script_name)
4,709
Python
.py
110
36.927273
91
0.696295
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,924
helper.py
evilhero_mylar/lib/cherrypy/test/helper.py
"""A library of helper functions for the CherryPy test suite.""" import datetime import logging log = logging.getLogger(__name__) import os thisdir = os.path.abspath(os.path.dirname(__file__)) serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem') import re import sys import time import warnings import cherrypy from cherrypy._cpcompat import basestring, copyitems, HTTPSConnection, ntob from cherrypy.lib import httputil from cherrypy.lib.reprconf import unrepr from cherrypy.test import webtest import nose _testconfig = None def get_tst_config(overconf = {}): global _testconfig if _testconfig is None: conf = { 'scheme': 'http', 'protocol': "HTTP/1.1", 'port': 8080, 'host': '127.0.0.1', 'validate': False, 'conquer': False, 'server': 'wsgi', } try: import testconfig _conf = testconfig.config.get('supervisor', None) if _conf is not None: for k, v in _conf.items(): if isinstance(v, basestring): _conf[k] = unrepr(v) conf.update(_conf) except ImportError: pass _testconfig = conf conf = _testconfig.copy() conf.update(overconf) return conf class Supervisor(object): """Base class for modeling and controlling servers during testing.""" def __init__(self, **kwargs): for k, v in kwargs.items(): if k == 'port': setattr(self, k, int(v)) setattr(self, k, v) log_to_stderr = lambda msg, level: sys.stderr.write(msg + os.linesep) class LocalSupervisor(Supervisor): """Base class for modeling/controlling servers which run in the same process. When the server side runs in a different process, start/stop can dump all state between each test module easily. When the server side runs in the same process as the client, however, we have to do a bit more work to ensure config and mounted apps are reset between tests. 
""" using_apache = False using_wsgi = False def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) cherrypy.server.httpserver = self.httpserver_class engine = cherrypy.engine if hasattr(engine, "signal_handler"): engine.signal_handler.subscribe() if hasattr(engine, "console_control_handler"): engine.console_control_handler.subscribe() #engine.subscribe('log', log_to_stderr) def start(self, modulename=None): """Load and start the HTTP server.""" if modulename: # Unhook httpserver so cherrypy.server.start() creates a new # one (with config from setup_server, if declared). cherrypy.server.httpserver = None cherrypy.engine.start() self.sync_apps() def sync_apps(self): """Tell the server about any apps which the setup functions mounted.""" pass def stop(self): td = getattr(self, 'teardown', None) if td: td() cherrypy.engine.exit() for name, server in copyitems(getattr(cherrypy, 'servers', {})): server.unsubscribe() del cherrypy.servers[name] class NativeServerSupervisor(LocalSupervisor): """Server supervisor for the builtin HTTP server.""" httpserver_class = "cherrypy._cpnative_server.CPHTTPServer" using_apache = False using_wsgi = False def __str__(self): return "Builtin HTTP Server on %s:%s" % (self.host, self.port) class LocalWSGISupervisor(LocalSupervisor): """Server supervisor for the builtin WSGI server.""" httpserver_class = "cherrypy._cpwsgi_server.CPWSGIServer" using_apache = False using_wsgi = True def __str__(self): return "Builtin WSGI Server on %s:%s" % (self.host, self.port) def sync_apps(self): """Hook a new WSGI app into the origin server.""" cherrypy.server.httpserver.wsgi_app = self.get_app() def get_app(self, app=None): """Obtain a new (decorated) WSGI app to hook into the origin server.""" if app is None: app = cherrypy.tree if self.conquer: try: import wsgiconq except ImportError: warnings.warn("Error importing wsgiconq. 
pyconquer will not run.") else: app = wsgiconq.WSGILogger(app, c_calls=True) if self.validate: try: from wsgiref import validate except ImportError: warnings.warn("Error importing wsgiref. The validator will not run.") else: #wraps the app in the validator app = validate.validator(app) return app def get_cpmodpy_supervisor(**options): from cherrypy.test import modpy sup = modpy.ModPythonSupervisor(**options) sup.template = modpy.conf_cpmodpy return sup def get_modpygw_supervisor(**options): from cherrypy.test import modpy sup = modpy.ModPythonSupervisor(**options) sup.template = modpy.conf_modpython_gateway sup.using_wsgi = True return sup def get_modwsgi_supervisor(**options): from cherrypy.test import modwsgi return modwsgi.ModWSGISupervisor(**options) def get_modfcgid_supervisor(**options): from cherrypy.test import modfcgid return modfcgid.ModFCGISupervisor(**options) def get_modfastcgi_supervisor(**options): from cherrypy.test import modfastcgi return modfastcgi.ModFCGISupervisor(**options) def get_wsgi_u_supervisor(**options): cherrypy.server.wsgi_version = ('u', 0) return LocalWSGISupervisor(**options) class CPWebCase(webtest.WebCase): script_name = "" scheme = "http" available_servers = {'wsgi': LocalWSGISupervisor, 'wsgi_u': get_wsgi_u_supervisor, 'native': NativeServerSupervisor, 'cpmodpy': get_cpmodpy_supervisor, 'modpygw': get_modpygw_supervisor, 'modwsgi': get_modwsgi_supervisor, 'modfcgid': get_modfcgid_supervisor, 'modfastcgi': get_modfastcgi_supervisor, } default_server = "wsgi" def _setup_server(cls, supervisor, conf): v = sys.version.split()[0] log.info("Python version used to run this test script: %s" % v) log.info("CherryPy version: %s" % cherrypy.__version__) if supervisor.scheme == "https": ssl = " (ssl)" else: ssl = "" log.info("HTTP server version: %s%s" % (supervisor.protocol, ssl)) log.info("PID: %s" % os.getpid()) cherrypy.server.using_apache = supervisor.using_apache cherrypy.server.using_wsgi = supervisor.using_wsgi if sys.platform[:4] 
== 'java': cherrypy.config.update({'server.nodelay': False}) if isinstance(conf, basestring): parser = cherrypy.lib.reprconf.Parser() conf = parser.dict_from_file(conf).get('global', {}) else: conf = conf or {} baseconf = conf.copy() baseconf.update({'server.socket_host': supervisor.host, 'server.socket_port': supervisor.port, 'server.protocol_version': supervisor.protocol, 'environment': "test_suite", }) if supervisor.scheme == "https": #baseconf['server.ssl_module'] = 'builtin' baseconf['server.ssl_certificate'] = serverpem baseconf['server.ssl_private_key'] = serverpem # helper must be imported lazily so the coverage tool # can run against module-level statements within cherrypy. # Also, we have to do "from cherrypy.test import helper", # exactly like each test module does, because a relative import # would stick a second instance of webtest in sys.modules, # and we wouldn't be able to globally override the port anymore. if supervisor.scheme == "https": webtest.WebCase.HTTP_CONN = HTTPSConnection return baseconf _setup_server = classmethod(_setup_server) def setup_class(cls): '' #Creates a server conf = get_tst_config() supervisor_factory = cls.available_servers.get(conf.get('server', 'wsgi')) if supervisor_factory is None: raise RuntimeError('Unknown server in config: %s' % conf['server']) supervisor = supervisor_factory(**conf) #Copied from "run_test_suite" cherrypy.config.reset() baseconf = cls._setup_server(supervisor, conf) cherrypy.config.update(baseconf) setup_client() if hasattr(cls, 'setup_server'): # Clear the cherrypy tree and clear the wsgi server so that # it can be updated with the new root cherrypy.tree = cherrypy._cptree.Tree() cherrypy.server.httpserver = None cls.setup_server() supervisor.start(cls.__module__) cls.supervisor = supervisor setup_class = classmethod(setup_class) def teardown_class(cls): '' if hasattr(cls, 'setup_server'): cls.supervisor.stop() teardown_class = classmethod(teardown_class) def prefix(self): return 
self.script_name.rstrip("/") def base(self): if ((self.scheme == "http" and self.PORT == 80) or (self.scheme == "https" and self.PORT == 443)): port = "" else: port = ":%s" % self.PORT return "%s://%s%s%s" % (self.scheme, self.HOST, port, self.script_name.rstrip("/")) def exit(self): sys.exit() def getPage(self, url, headers=None, method="GET", body=None, protocol=None): """Open the url. Return status, headers, body.""" if self.script_name: url = httputil.urljoin(self.script_name, url) return webtest.WebCase.getPage(self, url, headers, method, body, protocol) def skip(self, msg='skipped '): raise nose.SkipTest(msg) def assertErrorPage(self, status, message=None, pattern=''): """Compare the response body with a built in error page. The function will optionally look for the regexp pattern, within the exception embedded in the error page.""" # This will never contain a traceback page = cherrypy._cperror.get_error_page(status, message=message) # First, test the response body without checking the traceback. # Stick a match-all group (.*) in to grab the traceback. esc = re.escape epage = esc(page) epage = epage.replace(esc('<pre id="traceback"></pre>'), esc('<pre id="traceback">') + '(.*)' + esc('</pre>')) m = re.match(ntob(epage, self.encoding), self.body, re.DOTALL) if not m: self._handlewebError('Error page does not match; expected:\n' + page) return # Now test the pattern against the traceback if pattern is None: # Special-case None to mean that there should be *no* traceback. 
if m and m.group(1): self._handlewebError('Error page contains traceback') else: if (m is None) or ( not re.search(ntob(re.escape(pattern), self.encoding), m.group(1))): msg = 'Error page does not contain %s in traceback' self._handlewebError(msg % repr(pattern)) date_tolerance = 2 def assertEqualDates(self, dt1, dt2, seconds=None): """Assert abs(dt1 - dt2) is within Y seconds.""" if seconds is None: seconds = self.date_tolerance if dt1 > dt2: diff = dt1 - dt2 else: diff = dt2 - dt1 if not diff < datetime.timedelta(seconds=seconds): raise AssertionError('%r and %r are not within %r seconds.' % (dt1, dt2, seconds)) def setup_client(): """Set up the WebCase classes to match the server's socket settings.""" webtest.WebCase.PORT = cherrypy.server.socket_port webtest.WebCase.HOST = cherrypy.server.socket_host if cherrypy.server.ssl_certificate: CPWebCase.scheme = 'https' # --------------------------- Spawning helpers --------------------------- # class CPProcess(object): pid_file = os.path.join(thisdir, 'test.pid') config_file = os.path.join(thisdir, 'test.conf') config_template = """[global] server.socket_host: '%(host)s' server.socket_port: %(port)s checker.on: False log.screen: False log.error_file: r'%(error_log)s' log.access_file: r'%(access_log)s' %(ssl)s %(extra)s """ error_log = os.path.join(thisdir, 'test.error.log') access_log = os.path.join(thisdir, 'test.access.log') def __init__(self, wait=False, daemonize=False, ssl=False, socket_host=None, socket_port=None): self.wait = wait self.daemonize = daemonize self.ssl = ssl self.host = socket_host or cherrypy.server.socket_host self.port = socket_port or cherrypy.server.socket_port def write_conf(self, extra=""): if self.ssl: serverpem = os.path.join(thisdir, 'test.pem') ssl = """ server.ssl_certificate: r'%s' server.ssl_private_key: r'%s' """ % (serverpem, serverpem) else: ssl = "" conf = self.config_template % { 'host': self.host, 'port': self.port, 'error_log': self.error_log, 'access_log': self.access_log, 
'ssl': ssl, 'extra': extra, } f = open(self.config_file, 'wb') f.write(ntob(conf, 'utf-8')) f.close() def start(self, imports=None): """Start cherryd in a subprocess.""" cherrypy._cpserver.wait_for_free_port(self.host, self.port) args = [sys.executable, os.path.join(thisdir, '..', 'cherryd'), '-c', self.config_file, '-p', self.pid_file] if not isinstance(imports, (list, tuple)): imports = [imports] for i in imports: if i: args.append('-i') args.append(i) if self.daemonize: args.append('-d') env = os.environ.copy() # Make sure we import the cherrypy package in which this module is defined. grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..')) if env.get('PYTHONPATH', ''): env['PYTHONPATH'] = os.pathsep.join((grandparentdir, env['PYTHONPATH'])) else: env['PYTHONPATH'] = grandparentdir if self.wait: self.exit_code = os.spawnve(os.P_WAIT, sys.executable, args, env) else: os.spawnve(os.P_NOWAIT, sys.executable, args, env) cherrypy._cpserver.wait_for_occupied_port(self.host, self.port) # Give the engine a wee bit more time to finish STARTING if self.daemonize: time.sleep(2) else: time.sleep(1) def get_pid(self): return int(open(self.pid_file, 'rb').read()) def join(self): """Wait for the process to exit.""" try: try: # Mac, UNIX os.wait() except AttributeError: # Windows try: pid = self.get_pid() except IOError: # Assume the subprocess deleted the pidfile on shutdown. pass else: os.waitpid(pid, 0) except OSError: x = sys.exc_info()[1] if x.args != (10, 'No child processes'): raise
16,187
Python
.py
386
31.650259
99
0.595244
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,925
test_xmlrpc.py
evilhero_mylar/lib/cherrypy/test/test_xmlrpc.py
import sys from xmlrpclib import DateTime, Fault, ServerProxy, SafeTransport class HTTPSTransport(SafeTransport): """Subclass of SafeTransport to fix sock.recv errors (by using file).""" def request(self, host, handler, request_body, verbose=0): # issue XML-RPC request h = self.make_connection(host) if verbose: h.set_debuglevel(1) self.send_request(h, handler, request_body) self.send_host(h, host) self.send_user_agent(h) self.send_content(h, request_body) errcode, errmsg, headers = h.getreply() if errcode != 200: raise xmlrpclib.ProtocolError(host + handler, errcode, errmsg, headers) self.verbose = verbose # Here's where we differ from the superclass. It says: # try: # sock = h._conn.sock # except AttributeError: # sock = None # return self._parse_response(h.getfile(), sock) return self.parse_response(h.getfile()) import cherrypy def setup_server(): from cherrypy import _cptools class Root: def index(self): return "I'm a standard index!" index.exposed = True class XmlRpc(_cptools.XMLRPCController): def foo(self): return "Hello world!" 
foo.exposed = True def return_single_item_list(self): return [42] return_single_item_list.exposed = True def return_string(self): return "here is a string" return_string.exposed = True def return_tuple(self): return ('here', 'is', 1, 'tuple') return_tuple.exposed = True def return_dict(self): return dict(a=1, b=2, c=3) return_dict.exposed = True def return_composite(self): return dict(a=1,z=26), 'hi', ['welcome', 'friend'] return_composite.exposed = True def return_int(self): return 42 return_int.exposed = True def return_float(self): return 3.14 return_float.exposed = True def return_datetime(self): return DateTime((2003, 10, 7, 8, 1, 0, 1, 280, -1)) return_datetime.exposed = True def return_boolean(self): return True return_boolean.exposed = True def test_argument_passing(self, num): return num * 2 test_argument_passing.exposed = True def test_returning_Fault(self): return Fault(1, "custom Fault response") test_returning_Fault.exposed = True root = Root() root.xmlrpc = XmlRpc() cherrypy.tree.mount(root, config={'/': { 'request.dispatch': cherrypy.dispatch.XMLRPCDispatcher(), 'tools.xmlrpc.allow_none': 0, }}) from cherrypy.test import helper class XmlRpcTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def testXmlRpc(self): scheme = "http" try: scheme = self.harness.scheme except AttributeError: pass if scheme == "https": url = 'https://%s:%s/xmlrpc/' % (self.interface(), self.PORT) proxy = ServerProxy(url, transport=HTTPSTransport()) else: url = 'http://%s:%s/xmlrpc/' % (self.interface(), self.PORT) proxy = ServerProxy(url) # begin the tests ... 
self.getPage("/xmlrpc/foo") self.assertBody("Hello world!") self.assertEqual(proxy.return_single_item_list(), [42]) self.assertNotEqual(proxy.return_single_item_list(), 'one bazillion') self.assertEqual(proxy.return_string(), "here is a string") self.assertEqual(proxy.return_tuple(), list(('here', 'is', 1, 'tuple'))) self.assertEqual(proxy.return_dict(), {'a': 1, 'c': 3, 'b': 2}) self.assertEqual(proxy.return_composite(), [{'a': 1, 'z': 26}, 'hi', ['welcome', 'friend']]) self.assertEqual(proxy.return_int(), 42) self.assertEqual(proxy.return_float(), 3.14) self.assertEqual(proxy.return_datetime(), DateTime((2003, 10, 7, 8, 1, 0, 1, 280, -1))) self.assertEqual(proxy.return_boolean(), True) self.assertEqual(proxy.test_argument_passing(22), 22 * 2) # Test an error in the page handler (should raise an xmlrpclib.Fault) try: proxy.test_argument_passing({}) except Exception: x = sys.exc_info()[1] self.assertEqual(x.__class__, Fault) self.assertEqual(x.faultString, ("unsupported operand type(s) " "for *: 'dict' and 'int'")) else: self.fail("Expected xmlrpclib.Fault") # http://www.cherrypy.org/ticket/533 # if a method is not found, an xmlrpclib.Fault should be raised try: proxy.non_method() except Exception: x = sys.exc_info()[1] self.assertEqual(x.__class__, Fault) self.assertEqual(x.faultString, 'method "non_method" is not supported') else: self.fail("Expected xmlrpclib.Fault") # Test returning a Fault from the page handler. try: proxy.test_returning_Fault() except Exception: x = sys.exc_info()[1] self.assertEqual(x.__class__, Fault) self.assertEqual(x.faultString, ("custom Fault response")) else: self.fail("Expected xmlrpclib.Fault")
5,726
Python
.py
135
30.881481
83
0.582007
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,926
test_encoding.py
evilhero_mylar/lib/cherrypy/test/test_encoding.py
import gzip import sys import cherrypy from cherrypy._cpcompat import BytesIO, IncompleteRead, ntob, ntou europoundUnicode = ntou('\x80\xa3') sing = u"\u6bdb\u6cfd\u4e1c: Sing, Little Birdie?" sing8 = sing.encode('utf-8') sing16 = sing.encode('utf-16') from cherrypy.test import helper class EncodingTests(helper.CPWebCase): def setup_server(): class Root: def index(self, param): assert param == europoundUnicode, "%r != %r" % (param, europoundUnicode) yield europoundUnicode index.exposed = True def mao_zedong(self): return sing mao_zedong.exposed = True def utf8(self): return sing8 utf8.exposed = True utf8._cp_config = {'tools.encode.encoding': 'utf-8'} def cookies_and_headers(self): # if the headers have non-ascii characters and a cookie has # any part which is unicode (even ascii), the response # should not fail. cherrypy.response.cookie['candy'] = 'bar' cherrypy.response.cookie['candy']['domain'] = 'cherrypy.org' cherrypy.response.headers['Some-Header'] = 'My d\xc3\xb6g has fleas' return 'Any content' cookies_and_headers.exposed = True def reqparams(self, *args, **kwargs): return ntob(', ').join([": ".join((k, v)).encode('utf8') for k, v in cherrypy.request.params.items()]) reqparams.exposed = True def nontext(self, *args, **kwargs): cherrypy.response.headers['Content-Type'] = 'application/binary' return '\x00\x01\x02\x03' nontext.exposed = True nontext._cp_config = {'tools.encode.text_only': False, 'tools.encode.add_charset': True, } class GZIP: def index(self): yield "Hello, world" index.exposed = True def noshow(self): # Test for ticket #147, where yield showed no exceptions (content- # encoding was still gzip even though traceback wasn't zipped). raise IndexError() yield "Here be dragons" noshow.exposed = True # Turn encoding off so the gzip tool is the one doing the collapse. 
noshow._cp_config = {'tools.encode.on': False} def noshow_stream(self): # Test for ticket #147, where yield showed no exceptions (content- # encoding was still gzip even though traceback wasn't zipped). raise IndexError() yield "Here be dragons" noshow_stream.exposed = True noshow_stream._cp_config = {'response.stream': True} class Decode: def extra_charset(self, *args, **kwargs): return ', '.join([": ".join((k, v)) for k, v in cherrypy.request.params.items()]) extra_charset.exposed = True extra_charset._cp_config = { 'tools.decode.on': True, 'tools.decode.default_encoding': ['utf-16'], } def force_charset(self, *args, **kwargs): return ', '.join([": ".join((k, v)) for k, v in cherrypy.request.params.items()]) force_charset.exposed = True force_charset._cp_config = { 'tools.decode.on': True, 'tools.decode.encoding': 'utf-16', } root = Root() root.gzip = GZIP() root.decode = Decode() cherrypy.tree.mount(root, config={'/gzip': {'tools.gzip.on': True}}) setup_server = staticmethod(setup_server) def test_query_string_decoding(self): europoundUtf8 = europoundUnicode.encode('utf-8') self.getPage(ntob('/?param=') + europoundUtf8) self.assertBody(europoundUtf8) # Encoded utf8 query strings MUST be parsed correctly. # Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX self.getPage("/reqparams?q=%C2%A3") # The return value will be encoded as utf8. self.assertBody(ntob("q: \xc2\xa3")) # Query strings that are incorrectly encoded MUST raise 404. # Here, q is the POUND SIGN U+00A3 encoded in latin1 and then %HEX self.getPage("/reqparams?q=%A3") self.assertStatus(404) self.assertErrorPage(404, "The given query string could not be processed. Query " "strings for this resource must be encoded with 'utf8'.") def test_urlencoded_decoding(self): # Test the decoding of an application/x-www-form-urlencoded entity. 
europoundUtf8 = europoundUnicode.encode('utf-8') body=ntob("param=") + europoundUtf8 self.getPage('/', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(europoundUtf8) # Encoded utf8 entities MUST be parsed and decoded correctly. # Here, q is the POUND SIGN U+00A3 encoded in utf8 body = ntob("q=\xc2\xa3") self.getPage('/reqparams', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("q: \xc2\xa3")) # ...and in utf16, which is not in the default attempt_charsets list: body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00") self.getPage('/reqparams', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded;charset=utf-16"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("q: \xc2\xa3")) # Entities that are incorrectly encoded MUST raise 400. # Here, q is the POUND SIGN U+00A3 encoded in utf16, but # the Content-Type incorrectly labels it utf-8. body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00") self.getPage('/reqparams', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded;charset=utf-8"), ("Content-Length", str(len(body))), ], body=body), self.assertStatus(400) self.assertErrorPage(400, "The request entity could not be decoded. The following charsets " "were attempted: ['utf-8']") def test_decode_tool(self): # An extra charset should be tried first, and succeed if it matches. # Here, we add utf-16 as a charset and pass a utf-16 body. body = ntob("\xff\xfeq\x00=\xff\xfe\xa3\x00") self.getPage('/decode/extra_charset', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("q: \xc2\xa3")) # An extra charset should be tried first, and continue to other default # charsets if it doesn't match. 
# Here, we add utf-16 as a charset but still pass a utf-8 body. body = ntob("q=\xc2\xa3") self.getPage('/decode/extra_charset', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("q: \xc2\xa3")) # An extra charset should error if force is True and it doesn't match. # Here, we force utf-16 as a charset but still pass a utf-8 body. body = ntob("q=\xc2\xa3") self.getPage('/decode/force_charset', method='POST', headers=[("Content-Type", "application/x-www-form-urlencoded"), ("Content-Length", str(len(body))), ], body=body), self.assertErrorPage(400, "The request entity could not be decoded. The following charsets " "were attempted: ['utf-16']") def test_multipart_decoding(self): # Test the decoding of a multipart entity when the charset (utf16) is # explicitly given. body=ntob('\r\n'.join(['--X', 'Content-Type: text/plain;charset=utf-16', 'Content-Disposition: form-data; name="text"', '', '\xff\xfea\x00b\x00\x1c c\x00', '--X', 'Content-Type: text/plain;charset=utf-16', 'Content-Disposition: form-data; name="submit"', '', '\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00', '--X--'])) self.getPage('/reqparams', method='POST', headers=[("Content-Type", "multipart/form-data;boundary=X"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("text: ab\xe2\x80\x9cc, submit: Create")) def test_multipart_decoding_no_charset(self): # Test the decoding of a multipart entity when the charset (utf8) is # NOT explicitly given, but is in the list of charsets to attempt. 
body=ntob('\r\n'.join(['--X', 'Content-Disposition: form-data; name="text"', '', '\xe2\x80\x9c', '--X', 'Content-Disposition: form-data; name="submit"', '', 'Create', '--X--'])) self.getPage('/reqparams', method='POST', headers=[("Content-Type", "multipart/form-data;boundary=X"), ("Content-Length", str(len(body))), ], body=body), self.assertBody(ntob("text: \xe2\x80\x9c, submit: Create")) def test_multipart_decoding_no_successful_charset(self): # Test the decoding of a multipart entity when the charset (utf16) is # NOT explicitly given, and is NOT in the list of charsets to attempt. body=ntob('\r\n'.join(['--X', 'Content-Disposition: form-data; name="text"', '', '\xff\xfea\x00b\x00\x1c c\x00', '--X', 'Content-Disposition: form-data; name="submit"', '', '\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00', '--X--'])) self.getPage('/reqparams', method='POST', headers=[("Content-Type", "multipart/form-data;boundary=X"), ("Content-Length", str(len(body))), ], body=body), self.assertStatus(400) self.assertErrorPage(400, "The request entity could not be decoded. The following charsets " "were attempted: ['us-ascii', 'utf-8']") def test_nontext(self): self.getPage('/nontext') self.assertHeader('Content-Type', 'application/binary;charset=utf-8') self.assertBody('\x00\x01\x02\x03') def testEncoding(self): # Default encoding should be utf-8 self.getPage('/mao_zedong') self.assertBody(sing8) # Ask for utf-16. self.getPage('/mao_zedong', [('Accept-Charset', 'utf-16')]) self.assertHeader('Content-Type', 'text/html;charset=utf-16') self.assertBody(sing16) # Ask for multiple encodings. ISO-8859-1 should fail, and utf-16 # should be produced. self.getPage('/mao_zedong', [('Accept-Charset', 'iso-8859-1;q=1, utf-16;q=0.5')]) self.assertBody(sing16) # The "*" value should default to our default_encoding, utf-8 self.getPage('/mao_zedong', [('Accept-Charset', '*;q=1, utf-7;q=.2')]) self.assertBody(sing8) # Only allow iso-8859-1, which should fail and raise 406. 
self.getPage('/mao_zedong', [('Accept-Charset', 'iso-8859-1, *;q=0')]) self.assertStatus("406 Not Acceptable") self.assertInBody("Your client sent this Accept-Charset header: " "iso-8859-1, *;q=0. We tried these charsets: " "iso-8859-1.") # Ask for x-mac-ce, which should be unknown. See ticket #569. self.getPage('/mao_zedong', [('Accept-Charset', 'us-ascii, ISO-8859-1, x-mac-ce')]) self.assertStatus("406 Not Acceptable") self.assertInBody("Your client sent this Accept-Charset header: " "us-ascii, ISO-8859-1, x-mac-ce. We tried these " "charsets: ISO-8859-1, us-ascii, x-mac-ce.") # Test the 'encoding' arg to encode. self.getPage('/utf8') self.assertBody(sing8) self.getPage('/utf8', [('Accept-Charset', 'us-ascii, ISO-8859-1')]) self.assertStatus("406 Not Acceptable") def testGzip(self): zbuf = BytesIO() zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9) zfile.write(ntob("Hello, world")) zfile.close() self.getPage('/gzip/', headers=[("Accept-Encoding", "gzip")]) self.assertInBody(zbuf.getvalue()[:3]) self.assertHeader("Vary", "Accept-Encoding") self.assertHeader("Content-Encoding", "gzip") # Test when gzip is denied. 
self.getPage('/gzip/', headers=[("Accept-Encoding", "identity")]) self.assertHeader("Vary", "Accept-Encoding") self.assertNoHeader("Content-Encoding") self.assertBody("Hello, world") self.getPage('/gzip/', headers=[("Accept-Encoding", "gzip;q=0")]) self.assertHeader("Vary", "Accept-Encoding") self.assertNoHeader("Content-Encoding") self.assertBody("Hello, world") self.getPage('/gzip/', headers=[("Accept-Encoding", "*;q=0")]) self.assertStatus(406) self.assertNoHeader("Content-Encoding") self.assertErrorPage(406, "identity, gzip") # Test for ticket #147 self.getPage('/gzip/noshow', headers=[("Accept-Encoding", "gzip")]) self.assertNoHeader('Content-Encoding') self.assertStatus(500) self.assertErrorPage(500, pattern="IndexError\n") # In this case, there's nothing we can do to deliver a # readable page, since 1) the gzip header is already set, # and 2) we may have already written some of the body. # The fix is to never stream yields when using gzip. if (cherrypy.server.protocol_version == "HTTP/1.0" or getattr(cherrypy.server, "using_apache", False)): self.getPage('/gzip/noshow_stream', headers=[("Accept-Encoding", "gzip")]) self.assertHeader('Content-Encoding', 'gzip') self.assertInBody('\x1f\x8b\x08\x00') else: # The wsgiserver will simply stop sending data, and the HTTP client # will error due to an incomplete chunk-encoded stream. self.assertRaises((ValueError, IncompleteRead), self.getPage, '/gzip/noshow_stream', headers=[("Accept-Encoding", "gzip")]) def test_UnicodeHeaders(self): self.getPage('/cookies_and_headers') self.assertBody('Any content')
16,652
Python
.py
314
36.828025
99
0.5346
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,927
test_static.py
evilhero_mylar/lib/cherrypy/test/test_static.py
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob from cherrypy._cpcompat import BytesIO import os curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) has_space_filepath = os.path.join(curdir, 'static', 'has space.html') bigfile_filepath = os.path.join(curdir, "static", "bigfile.log") BIGFILE_SIZE = 1024 * 1024 import threading import cherrypy from cherrypy.lib import static from cherrypy.test import helper class StaticTest(helper.CPWebCase): def setup_server(): if not os.path.exists(has_space_filepath): open(has_space_filepath, 'wb').write(ntob('Hello, world\r\n')) if not os.path.exists(bigfile_filepath): open(bigfile_filepath, 'wb').write(ntob("x" * BIGFILE_SIZE)) class Root: def bigfile(self): from cherrypy.lib import static self.f = static.serve_file(bigfile_filepath) return self.f bigfile.exposed = True bigfile._cp_config = {'response.stream': True} def tell(self): if self.f.input.closed: return '' return repr(self.f.input.tell()).rstrip('L') tell.exposed = True def fileobj(self): f = open(os.path.join(curdir, 'style.css'), 'rb') return static.serve_fileobj(f, content_type='text/css') fileobj.exposed = True def bytesio(self): f = BytesIO(ntob('Fee\nfie\nfo\nfum')) return static.serve_fileobj(f, content_type='text/plain') bytesio.exposed = True class Static: def index(self): return 'You want the Baron? You can have the Baron!' 
index.exposed = True def dynamic(self): return "This is a DYNAMIC page" dynamic.exposed = True root = Root() root.static = Static() rootconf = { '/static': { 'tools.staticdir.on': True, 'tools.staticdir.dir': 'static', 'tools.staticdir.root': curdir, }, '/style.css': { 'tools.staticfile.on': True, 'tools.staticfile.filename': os.path.join(curdir, 'style.css'), }, '/docroot': { 'tools.staticdir.on': True, 'tools.staticdir.root': curdir, 'tools.staticdir.dir': 'static', 'tools.staticdir.index': 'index.html', }, '/error': { 'tools.staticdir.on': True, 'request.show_tracebacks': True, }, } rootApp = cherrypy.Application(root) rootApp.merge(rootconf) test_app_conf = { '/test': { 'tools.staticdir.index': 'index.html', 'tools.staticdir.on': True, 'tools.staticdir.root': curdir, 'tools.staticdir.dir': 'static', }, } testApp = cherrypy.Application(Static()) testApp.merge(test_app_conf) vhost = cherrypy._cpwsgi.VirtualHost(rootApp, {'virt.net': testApp}) cherrypy.tree.graft(vhost) setup_server = staticmethod(setup_server) def teardown_server(): for f in (has_space_filepath, bigfile_filepath): if os.path.exists(f): try: os.unlink(f) except: pass teardown_server = staticmethod(teardown_server) def testStatic(self): self.getPage("/static/index.html") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html') self.assertBody('Hello, world\r\n') # Using a staticdir.root value in a subdir... 
self.getPage("/docroot/index.html") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html') self.assertBody('Hello, world\r\n') # Check a filename with spaces in it self.getPage("/static/has%20space.html") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html') self.assertBody('Hello, world\r\n') self.getPage("/style.css") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/css') # Note: The body should be exactly 'Dummy stylesheet\n', but # unfortunately some tools such as WinZip sometimes turn \n # into \r\n on Windows when extracting the CherryPy tarball so # we just check the content self.assertMatchesBody('^Dummy stylesheet') def test_fallthrough(self): # Test that NotFound will then try dynamic handlers (see [878]). self.getPage("/static/dynamic") self.assertBody("This is a DYNAMIC page") # Check a directory via fall-through to dynamic handler. self.getPage("/static/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('You want the Baron? You can have the Baron!') def test_index(self): # Check a directory via "staticdir.index". self.getPage("/docroot/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html') self.assertBody('Hello, world\r\n') # The same page should be returned even if redirected. self.getPage("/docroot") self.assertStatus(301) self.assertHeader('Location', '%s/docroot/' % self.base()) self.assertMatchesBody("This resource .* <a href='%s/docroot/'>" "%s/docroot/</a>." 
% (self.base(), self.base())) def test_config_errors(self): # Check that we get an error if no .file or .dir self.getPage("/error/thing.html") self.assertErrorPage(500) self.assertMatchesBody(ntob("TypeError: staticdir\(\) takes at least 2 " "(positional )?arguments \(0 given\)")) def test_security(self): # Test up-level security self.getPage("/static/../../test/style.css") self.assertStatus((400, 403)) def test_modif(self): # Test modified-since on a reasonably-large file self.getPage("/static/dirback.jpg") self.assertStatus("200 OK") lastmod = "" for k, v in self.headers: if k == 'Last-Modified': lastmod = v ims = ("If-Modified-Since", lastmod) self.getPage("/static/dirback.jpg", headers=[ims]) self.assertStatus(304) self.assertNoHeader("Content-Type") self.assertNoHeader("Content-Length") self.assertNoHeader("Content-Disposition") self.assertBody("") def test_755_vhost(self): self.getPage("/test/", [('Host', 'virt.net')]) self.assertStatus(200) self.getPage("/test", [('Host', 'virt.net')]) self.assertStatus(301) self.assertHeader('Location', self.scheme + '://virt.net/test/') def test_serve_fileobj(self): self.getPage("/fileobj") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/css;charset=utf-8') self.assertMatchesBody('^Dummy stylesheet') def test_serve_bytesio(self): self.getPage("/bytesio") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/plain;charset=utf-8') self.assertHeader('Content-Length', 14) self.assertMatchesBody('Fee\nfie\nfo\nfum') def test_file_stream(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Make an initial request self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/bigfile", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) body = ntob('') remaining = BIGFILE_SIZE while remaining > 0: data = 
response.fp.read(65536) if not data: break body += data remaining -= len(data) if self.scheme == "https": newconn = HTTPSConnection else: newconn = HTTPConnection s, h, b = helper.webtest.openURL( ntob("/tell"), headers=[], host=self.HOST, port=self.PORT, http_conn=newconn) if not b: # The file was closed on the server. tell_position = BIGFILE_SIZE else: tell_position = int(b) expected = len(body) if tell_position >= BIGFILE_SIZE: # We can't exactly control how much content the server asks for. # Fudge it by only checking the first half of the reads. if expected < (BIGFILE_SIZE / 2): self.fail( "The file should have advanced to position %r, but has " "already advanced to the end of the file. It may not be " "streamed as intended, or at the wrong chunk size (64k)" % expected) elif tell_position < expected: self.fail( "The file should have advanced to position %r, but has " "only advanced to position %r. It may not be streamed " "as intended, or at the wrong chunk size (65536)" % (expected, tell_position)) if body != ntob("x" * BIGFILE_SIZE): self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." % (BIGFILE_SIZE, body[:50], len(body))) conn.close() def test_file_stream_deadlock(self): if cherrypy.server.protocol_version != "HTTP/1.1": return self.skip() self.PROTOCOL = "HTTP/1.1" # Make an initial request but abort early. self.persistent = True conn = self.HTTP_CONN conn.putrequest("GET", "/bigfile", skip_host=True) conn.putheader("Host", self.HOST) conn.endheaders() response = conn.response_class(conn.sock, method="GET") response.begin() self.assertEqual(response.status, 200) body = response.fp.read(65536) if body != ntob("x" * len(body)): self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." % (65536, body[:50], len(body))) response.close() conn.close() # Make a second request, which should fetch the whole file. self.persistent = False self.getPage("/bigfile") if self.body != ntob("x" * BIGFILE_SIZE): self.fail("Body != 'x' * %d. Got %r instead (%d bytes)." 
% (BIGFILE_SIZE, self.body[:50], len(body)))
11,354
Python
.py
254
31.988189
82
0.56546
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,928
test_misc_tools.py
evilhero_mylar/lib/cherrypy/test/test_misc_tools.py
import os localDir = os.path.dirname(__file__) logfile = os.path.join(localDir, "test_misc_tools.log") import cherrypy from cherrypy import tools def setup_server(): class Root: def index(self): yield "Hello, world" index.exposed = True h = [("Content-Language", "en-GB"), ('Content-Type', 'text/plain')] tools.response_headers(headers=h)(index) def other(self): return "salut" other.exposed = True other._cp_config = { 'tools.response_headers.on': True, 'tools.response_headers.headers': [("Content-Language", "fr"), ('Content-Type', 'text/plain')], 'tools.log_hooks.on': True, } class Accept: _cp_config = {'tools.accept.on': True} def index(self): return '<a href="feed">Atom feed</a>' index.exposed = True # In Python 2.4+, we could use a decorator instead: # @tools.accept('application/atom+xml') def feed(self): return """<?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom"> <title>Unknown Blog</title> </feed>""" feed.exposed = True feed._cp_config = {'tools.accept.media': 'application/atom+xml'} def select(self): # We could also write this: mtype = cherrypy.lib.accept.accept(...) mtype = tools.accept.callable(['text/html', 'text/plain']) if mtype == 'text/html': return "<h2>Page Title</h2>" else: return "PAGE TITLE" select.exposed = True class Referer: def accept(self): return "Accepted!" accept.exposed = True reject = accept class AutoVary: def index(self): # Read a header directly with 'get' ae = cherrypy.request.headers.get('Accept-Encoding') # Read a header directly with '__getitem__' cl = cherrypy.request.headers['Host'] # Read a header directly with '__contains__' hasif = 'If-Modified-Since' in cherrypy.request.headers # Read a header directly with 'has_key' has = cherrypy.request.headers.has_key('Range') # Call a lib function mtype = tools.accept.callable(['text/html', 'text/plain']) return "Hello, world!" 
index.exposed = True conf = {'/referer': {'tools.referer.on': True, 'tools.referer.pattern': r'http://[^/]*example\.com', }, '/referer/reject': {'tools.referer.accept': False, 'tools.referer.accept_missing': True, }, '/autovary': {'tools.autovary.on': True}, } root = Root() root.referer = Referer() root.accept = Accept() root.autovary = AutoVary() cherrypy.tree.mount(root, config=conf) cherrypy.config.update({'log.error_file': logfile}) from cherrypy.test import helper class ResponseHeadersTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def testResponseHeadersDecorator(self): self.getPage('/') self.assertHeader("Content-Language", "en-GB") self.assertHeader('Content-Type', 'text/plain;charset=utf-8') def testResponseHeaders(self): self.getPage('/other') self.assertHeader("Content-Language", "fr") self.assertHeader('Content-Type', 'text/plain;charset=utf-8') class RefererTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def testReferer(self): self.getPage('/referer/accept') self.assertErrorPage(403, 'Forbidden Referer header.') self.getPage('/referer/accept', headers=[('Referer', 'http://www.example.com/')]) self.assertStatus(200) self.assertBody('Accepted!') # Reject self.getPage('/referer/reject') self.assertStatus(200) self.assertBody('Accepted!') self.getPage('/referer/reject', headers=[('Referer', 'http://www.example.com/')]) self.assertErrorPage(403, 'Forbidden Referer header.') class AcceptTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def test_Accept_Tool(self): # Test with no header provided self.getPage('/accept/feed') self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify exact media type self.getPage('/accept/feed', headers=[('Accept', 'application/atom+xml')]) self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify matching media range self.getPage('/accept/feed', headers=[('Accept', 'application/*')]) self.assertStatus(200) 
self.assertInBody('<title>Unknown Blog</title>') # Specify all media ranges self.getPage('/accept/feed', headers=[('Accept', '*/*')]) self.assertStatus(200) self.assertInBody('<title>Unknown Blog</title>') # Specify unacceptable media types self.getPage('/accept/feed', headers=[('Accept', 'text/html')]) self.assertErrorPage(406, "Your client sent this Accept header: text/html. " "But this resource only emits these media types: " "application/atom+xml.") # Test resource where tool is 'on' but media is None (not set). self.getPage('/accept/') self.assertStatus(200) self.assertBody('<a href="feed">Atom feed</a>') def test_accept_selection(self): # Try both our expected media types self.getPage('/accept/select', [('Accept', 'text/html')]) self.assertStatus(200) self.assertBody('<h2>Page Title</h2>') self.getPage('/accept/select', [('Accept', 'text/plain')]) self.assertStatus(200) self.assertBody('PAGE TITLE') self.getPage('/accept/select', [('Accept', 'text/plain, text/*;q=0.5')]) self.assertStatus(200) self.assertBody('PAGE TITLE') # text/* and */* should prefer text/html since it comes first # in our 'media' argument to tools.accept self.getPage('/accept/select', [('Accept', 'text/*')]) self.assertStatus(200) self.assertBody('<h2>Page Title</h2>') self.getPage('/accept/select', [('Accept', '*/*')]) self.assertStatus(200) self.assertBody('<h2>Page Title</h2>') # Try unacceptable media types self.getPage('/accept/select', [('Accept', 'application/xml')]) self.assertErrorPage(406, "Your client sent this Accept header: application/xml. " "But this resource only emits these media types: " "text/html, text/plain.") class AutoVaryTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def testAutoVary(self): self.getPage('/autovary/') self.assertHeader( "Vary", 'Accept, Accept-Charset, Accept-Encoding, Host, If-Modified-Since, Range')
7,411
Python
.py
163
34.03681
94
0.588019
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,929
test_config.py
evilhero_mylar/lib/cherrypy/test/test_config.py
"""Tests for the CherryPy configuration system.""" import os, sys localDir = os.path.join(os.getcwd(), os.path.dirname(__file__)) from cherrypy._cpcompat import ntob, StringIO import unittest import cherrypy def setup_server(): class Root: _cp_config = {'foo': 'this', 'bar': 'that'} def __init__(self): cherrypy.config.namespaces['db'] = self.db_namespace def db_namespace(self, k, v): if k == "scheme": self.db = v # @cherrypy.expose(alias=('global_', 'xyz')) def index(self, key): return cherrypy.request.config.get(key, "None") index = cherrypy.expose(index, alias=('global_', 'xyz')) def repr(self, key): return repr(cherrypy.request.config.get(key, None)) repr.exposed = True def dbscheme(self): return self.db dbscheme.exposed = True def plain(self, x): return x plain.exposed = True plain._cp_config = {'request.body.attempt_charsets': ['utf-16']} favicon_ico = cherrypy.tools.staticfile.handler( filename=os.path.join(localDir, '../favicon.ico')) class Foo: _cp_config = {'foo': 'this2', 'baz': 'that2'} def index(self, key): return cherrypy.request.config.get(key, "None") index.exposed = True nex = index def silly(self): return 'Hello world' silly.exposed = True silly._cp_config = {'response.headers.X-silly': 'sillyval'} def bar(self, key): return repr(cherrypy.request.config.get(key, None)) bar.exposed = True bar._cp_config = {'foo': 'this3', 'bax': 'this4'} class Another: def index(self, key): return str(cherrypy.request.config.get(key, "None")) index.exposed = True def raw_namespace(key, value): if key == 'input.map': handler = cherrypy.request.handler def wrapper(): params = cherrypy.request.params for name, coercer in list(value.items()): try: params[name] = coercer(params[name]) except KeyError: pass return handler() cherrypy.request.handler = wrapper elif key == 'output': handler = cherrypy.request.handler def wrapper(): # 'value' is a type (like int or str). 
return value(handler()) cherrypy.request.handler = wrapper class Raw: _cp_config = {'raw.output': repr} def incr(self, num): return num + 1 incr.exposed = True incr._cp_config = {'raw.input.map': {'num': int}} ioconf = StringIO(""" [/] neg: -1234 filename: os.path.join(sys.prefix, "hello.py") thing1: cherrypy.lib.httputil.response_codes[404] thing2: __import__('cherrypy.tutorial', globals(), locals(), ['']).thing2 complex: 3+2j ones: "11" twos: "22" stradd: %%(ones)s + %%(twos)s + "33" [/favicon.ico] tools.staticfile.filename = %r """ % os.path.join(localDir, 'static/dirback.jpg')) root = Root() root.foo = Foo() root.raw = Raw() app = cherrypy.tree.mount(root, config=ioconf) app.request_class.namespaces['raw'] = raw_namespace cherrypy.tree.mount(Another(), "/another") cherrypy.config.update({'luxuryyacht': 'throatwobblermangrove', 'db.scheme': r"sqlite///memory", }) # Client-side code # from cherrypy.test import helper class ConfigTests(helper.CPWebCase): setup_server = staticmethod(setup_server) def testConfig(self): tests = [ ('/', 'nex', 'None'), ('/', 'foo', 'this'), ('/', 'bar', 'that'), ('/xyz', 'foo', 'this'), ('/foo/', 'foo', 'this2'), ('/foo/', 'bar', 'that'), ('/foo/', 'bax', 'None'), ('/foo/bar', 'baz', "'that2'"), ('/foo/nex', 'baz', 'that2'), # If 'foo' == 'this', then the mount point '/another' leaks into '/'. 
('/another/','foo', 'None'), ] for path, key, expected in tests: self.getPage(path + "?key=" + key) self.assertBody(expected) expectedconf = { # From CP defaults 'tools.log_headers.on': False, 'tools.log_tracebacks.on': True, 'request.show_tracebacks': True, 'log.screen': False, 'environment': 'test_suite', 'engine.autoreload_on': False, # From global config 'luxuryyacht': 'throatwobblermangrove', # From Root._cp_config 'bar': 'that', # From Foo._cp_config 'baz': 'that2', # From Foo.bar._cp_config 'foo': 'this3', 'bax': 'this4', } for key, expected in expectedconf.items(): self.getPage("/foo/bar?key=" + key) self.assertBody(repr(expected)) def testUnrepr(self): self.getPage("/repr?key=neg") self.assertBody("-1234") self.getPage("/repr?key=filename") self.assertBody(repr(os.path.join(sys.prefix, "hello.py"))) self.getPage("/repr?key=thing1") self.assertBody(repr(cherrypy.lib.httputil.response_codes[404])) if not getattr(cherrypy.server, "using_apache", False): # The object ID's won't match up when using Apache, since the # server and client are running in different processes. self.getPage("/repr?key=thing2") from cherrypy.tutorial import thing2 self.assertBody(repr(thing2)) self.getPage("/repr?key=complex") self.assertBody("(3+2j)") self.getPage("/repr?key=stradd") self.assertBody(repr("112233")) def testRespNamespaces(self): self.getPage("/foo/silly") self.assertHeader('X-silly', 'sillyval') self.assertBody('Hello world') def testCustomNamespaces(self): self.getPage("/raw/incr?num=12") self.assertBody("13") self.getPage("/dbscheme") self.assertBody(r"sqlite///memory") def testHandlerToolConfigOverride(self): # Assert that config overrides tool constructor args. Above, we set # the favicon in the page handler to be '../favicon.ico', # but then overrode it in config to be './static/dirback.jpg'. 
self.getPage("/favicon.ico") self.assertBody(open(os.path.join(localDir, "static/dirback.jpg"), "rb").read()) def test_request_body_namespace(self): self.getPage("/plain", method='POST', headers=[ ('Content-Type', 'application/x-www-form-urlencoded'), ('Content-Length', '13')], body=ntob('\xff\xfex\x00=\xff\xfea\x00b\x00c\x00')) self.assertBody("abc") class VariableSubstitutionTests(unittest.TestCase): setup_server = staticmethod(setup_server) def test_config(self): from textwrap import dedent # variable substitution with [DEFAULT] conf = dedent(""" [DEFAULT] dir = "/some/dir" my.dir = %(dir)s + "/sub" [my] my.dir = %(dir)s + "/my/dir" my.dir2 = %(my.dir)s + '/dir2' """) fp = StringIO(conf) cherrypy.config.update(fp) self.assertEqual(cherrypy.config["my"]["my.dir"], "/some/dir/my/dir") self.assertEqual(cherrypy.config["my"]["my.dir2"], "/some/dir/my/dir/dir2")
7,799
Python
.py
194
30.06701
83
0.551391
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,930
sessiondemo.py
evilhero_mylar/lib/cherrypy/test/sessiondemo.py
#!/usr/bin/python """A session demonstration app.""" import calendar from datetime import datetime import sys import cherrypy from cherrypy.lib import sessions from cherrypy._cpcompat import copyitems page = """ <html> <head> <style type='text/css'> table { border-collapse: collapse; border: 1px solid #663333; } th { text-align: right; background-color: #663333; color: white; padding: 0.5em; } td { white-space: pre-wrap; font-family: monospace; padding: 0.5em; border: 1px solid #663333; } .warn { font-family: serif; color: #990000; } </style> <script type="text/javascript"> <!-- function twodigit(d) { return d < 10 ? "0" + d : d; } function formattime(t) { var month = t.getUTCMonth() + 1; var day = t.getUTCDate(); var year = t.getUTCFullYear(); var hours = t.getUTCHours(); var minutes = t.getUTCMinutes(); return (year + "/" + twodigit(month) + "/" + twodigit(day) + " " + hours + ":" + twodigit(minutes) + " UTC"); } function interval(s) { // Return the given interval (in seconds) as an English phrase var seconds = s %% 60; s = Math.floor(s / 60); var minutes = s %% 60; s = Math.floor(s / 60); var hours = s %% 24; var v = twodigit(hours) + ":" + twodigit(minutes) + ":" + twodigit(seconds); var days = Math.floor(s / 24); if (days != 0) v = days + ' days, ' + v; return v; } var fudge_seconds = 5; function init() { // Set the content of the 'btime' cell. var currentTime = new Date(); var bunixtime = Math.floor(currentTime.getTime() / 1000); var v = formattime(currentTime); v += " (Unix time: " + bunixtime + ")"; var diff = Math.abs(%(serverunixtime)s - bunixtime); if (diff > fudge_seconds) v += "<p class='warn'>Browser and Server times disagree.</p>"; document.getElementById('btime').innerHTML = v; // Warn if response cookie expires is not close to one hour in the future. // Yes, we want this to happen when wit hit the 'Expire' link, too. 
var expires = Date.parse("%(expires)s") / 1000; var onehour = (60 * 60); if (Math.abs(expires - (bunixtime + onehour)) > fudge_seconds) { diff = Math.floor(expires - bunixtime); if (expires > (bunixtime + onehour)) { var msg = "Response cookie 'expires' date is " + interval(diff) + " in the future."; } else { var msg = "Response cookie 'expires' date is " + interval(0 - diff) + " in the past."; } document.getElementById('respcookiewarn').innerHTML = msg; } } //--> </script> </head> <body onload='init()'> <h2>Session Demo</h2> <p>Reload this page. The session ID should not change from one reload to the next</p> <p><a href='../'>Index</a> | <a href='expire'>Expire</a> | <a href='regen'>Regenerate</a></p> <table> <tr><th>Session ID:</th><td>%(sessionid)s<p class='warn'>%(changemsg)s</p></td></tr> <tr><th>Request Cookie</th><td>%(reqcookie)s</td></tr> <tr><th>Response Cookie</th><td>%(respcookie)s<p id='respcookiewarn' class='warn'></p></td></tr> <tr><th>Session Data</th><td>%(sessiondata)s</td></tr> <tr><th>Server Time</th><td id='stime'>%(servertime)s (Unix time: %(serverunixtime)s)</td></tr> <tr><th>Browser Time</th><td id='btime'>&nbsp;</td></tr> <tr><th>Cherrypy Version:</th><td>%(cpversion)s</td></tr> <tr><th>Python Version:</th><td>%(pyversion)s</td></tr> </table> </body></html> """ class Root(object): def page(self): changemsg = [] if cherrypy.session.id != cherrypy.session.originalid: if cherrypy.session.originalid is None: changemsg.append('Created new session because no session id was given.') if cherrypy.session.missing: changemsg.append('Created new session due to missing (expired or malicious) session.') if cherrypy.session.regenerated: changemsg.append('Application generated a new session.') try: expires = cherrypy.response.cookie['session_id']['expires'] except KeyError: expires = '' return page % { 'sessionid': cherrypy.session.id, 'changemsg': '<br>'.join(changemsg), 'respcookie': cherrypy.response.cookie.output(), 'reqcookie': 
cherrypy.request.cookie.output(), 'sessiondata': copyitems(cherrypy.session), 'servertime': datetime.utcnow().strftime("%Y/%m/%d %H:%M") + " UTC", 'serverunixtime': calendar.timegm(datetime.utcnow().timetuple()), 'cpversion': cherrypy.__version__, 'pyversion': sys.version, 'expires': expires, } def index(self): # Must modify data or the session will not be saved. cherrypy.session['color'] = 'green' return self.page() index.exposed = True def expire(self): sessions.expire() return self.page() expire.exposed = True def regen(self): cherrypy.session.regenerate() # Must modify data or the session will not be saved. cherrypy.session['color'] = 'yellow' return self.page() regen.exposed = True if __name__ == '__main__': cherrypy.config.update({ #'environment': 'production', 'log.screen': True, 'tools.sessions.on': True, }) cherrypy.quickstart(Root())
5,420
Python
.py
133
34.323308
102
0.615444
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,931
test_dynamicobjectmapping.py
evilhero_mylar/lib/cherrypy/test/test_dynamicobjectmapping.py
import cherrypy from cherrypy._cptree import Application from cherrypy.test import helper script_names = ["", "/foo", "/users/fred/blog", "/corp/blog"] def setup_server(): class SubSubRoot: def index(self): return "SubSubRoot index" index.exposed = True def default(self, *args): return "SubSubRoot default" default.exposed = True def handler(self): return "SubSubRoot handler" handler.exposed = True def dispatch(self): return "SubSubRoot dispatch" dispatch.exposed = True subsubnodes = { '1': SubSubRoot(), '2': SubSubRoot(), } class SubRoot: def index(self): return "SubRoot index" index.exposed = True def default(self, *args): return "SubRoot %s" % (args,) default.exposed = True def handler(self): return "SubRoot handler" handler.exposed = True def _cp_dispatch(self, vpath): return subsubnodes.get(vpath[0], None) subnodes = { '1': SubRoot(), '2': SubRoot(), } class Root: def index(self): return "index" index.exposed = True def default(self, *args): return "default %s" % (args,) default.exposed = True def handler(self): return "handler" handler.exposed = True def _cp_dispatch(self, vpath): return subnodes.get(vpath[0]) #-------------------------------------------------------------------------- # DynamicNodeAndMethodDispatcher example. 
# This example exposes a fairly naive HTTP api class User(object): def __init__(self, id, name): self.id = id self.name = name def __unicode__(self): return unicode(self.name) user_lookup = { 1: User(1, 'foo'), 2: User(2, 'bar'), } def make_user(name, id=None): if not id: id = max(*user_lookup.keys()) + 1 user_lookup[id] = User(id, name) return id class UserContainerNode(object): exposed = True def POST(self, name): """ Allow the creation of a new Object """ return "POST %d" % make_user(name) def GET(self): keys = user_lookup.keys() keys.sort() return unicode(keys) def dynamic_dispatch(self, vpath): try: id = int(vpath[0]) except (ValueError, IndexError): return None return UserInstanceNode(id) class UserInstanceNode(object): exposed = True def __init__(self, id): self.id = id self.user = user_lookup.get(id, None) # For all but PUT methods there MUST be a valid user identified # by self.id if not self.user and cherrypy.request.method != 'PUT': raise cherrypy.HTTPError(404) def GET(self, *args, **kwargs): """ Return the appropriate representation of the instance. """ return unicode(self.user) def POST(self, name): """ Update the fields of the user instance. """ self.user.name = name return "POST %d" % self.user.id def PUT(self, name): """ Create a new user with the specified id, or edit it if it already exists """ if self.user: # Edit the current user self.user.name = name return "PUT %d" % self.user.id else: # Make a new user with said attributes. return "PUT %d" % make_user(name, self.id) def DELETE(self): """ Delete the user specified at the id. """ id = self.user.id del user_lookup[self.user.id] del self.user return "DELETE %d" % id class ABHandler: class CustomDispatch: def index(self, a, b): return "custom" index.exposed = True def _cp_dispatch(self, vpath): """Make sure that if we don't pop anything from vpath, processing still works. 
""" return self.CustomDispatch() def index(self, a, b=None): body = [ 'a:' + str(a) ] if b is not None: body.append(',b:' + str(b)) return ''.join(body) index.exposed = True def delete(self, a, b): return 'deleting ' + str(a) + ' and ' + str(b) delete.exposed = True class IndexOnly: def _cp_dispatch(self, vpath): """Make sure that popping ALL of vpath still shows the index handler. """ while vpath: vpath.pop() return self def index(self): return "IndexOnly index" index.exposed = True class DecoratedPopArgs: """Test _cp_dispatch with @cherrypy.popargs.""" def index(self): return "no params" index.exposed = True def hi(self): return "hi was not interpreted as 'a' param" hi.exposed = True DecoratedPopArgs = cherrypy.popargs('a', 'b', handler=ABHandler())(DecoratedPopArgs) class NonDecoratedPopArgs: """Test _cp_dispatch = cherrypy.popargs()""" _cp_dispatch = cherrypy.popargs('a') def index(self, a): return "index: " + str(a) index.exposed = True class ParameterizedHandler: """Special handler created for each request""" def __init__(self, a): self.a = a def index(self): if 'a' in cherrypy.request.params: raise Exception("Parameterized handler argument ended up in request.params") return self.a index.exposed = True class ParameterizedPopArgs: """Test cherrypy.popargs() with a function call handler""" ParameterizedPopArgs = cherrypy.popargs('a', handler=ParameterizedHandler)(ParameterizedPopArgs) Root.decorated = DecoratedPopArgs() Root.undecorated = NonDecoratedPopArgs() Root.index_only = IndexOnly() Root.parameter_test = ParameterizedPopArgs() Root.users = UserContainerNode() md = cherrypy.dispatch.MethodDispatcher('dynamic_dispatch') for url in script_names: conf = {'/': { 'user': (url or "/").split("/")[-2], }, '/users': { 'request.dispatch': md }, } cherrypy.tree.mount(Root(), url, conf) class DynamicObjectMappingTest(helper.CPWebCase): setup_server = staticmethod(setup_server) def testObjectMapping(self): for url in script_names: prefix = self.script_name = url 
self.getPage('/') self.assertBody('index') self.getPage('/handler') self.assertBody('handler') # Dynamic dispatch will succeed here for the subnodes # so the subroot gets called self.getPage('/1/') self.assertBody('SubRoot index') self.getPage('/2/') self.assertBody('SubRoot index') self.getPage('/1/handler') self.assertBody('SubRoot handler') self.getPage('/2/handler') self.assertBody('SubRoot handler') # Dynamic dispatch will fail here for the subnodes # so the default gets called self.getPage('/asdf/') self.assertBody("default ('asdf',)") self.getPage('/asdf/asdf') self.assertBody("default ('asdf', 'asdf')") self.getPage('/asdf/handler') self.assertBody("default ('asdf', 'handler')") # Dynamic dispatch will succeed here for the subsubnodes # so the subsubroot gets called self.getPage('/1/1/') self.assertBody('SubSubRoot index') self.getPage('/2/2/') self.assertBody('SubSubRoot index') self.getPage('/1/1/handler') self.assertBody('SubSubRoot handler') self.getPage('/2/2/handler') self.assertBody('SubSubRoot handler') self.getPage('/2/2/dispatch') self.assertBody('SubSubRoot dispatch') # The exposed dispatch will not be called as a dispatch # method. self.getPage('/2/2/foo/foo') self.assertBody("SubSubRoot default") # Dynamic dispatch will fail here for the subsubnodes # so the SubRoot gets called self.getPage('/1/asdf/') self.assertBody("SubRoot ('asdf',)") self.getPage('/1/asdf/asdf') self.assertBody("SubRoot ('asdf', 'asdf')") self.getPage('/1/asdf/handler') self.assertBody("SubRoot ('asdf', 'handler')") def testMethodDispatch(self): # GET acts like a container self.getPage("/users") self.assertBody("[1, 2]") self.assertHeader('Allow', 'GET, HEAD, POST') # POST to the container URI allows creation self.getPage("/users", method="POST", body="name=baz") self.assertBody("POST 3") self.assertHeader('Allow', 'GET, HEAD, POST') # POST to a specific instanct URI results in a 404 # as the resource does not exit. 
self.getPage("/users/5", method="POST", body="name=baz") self.assertStatus(404) # PUT to a specific instanct URI results in creation self.getPage("/users/5", method="PUT", body="name=boris") self.assertBody("PUT 5") self.assertHeader('Allow', 'DELETE, GET, HEAD, POST, PUT') # GET acts like a container self.getPage("/users") self.assertBody("[1, 2, 3, 5]") self.assertHeader('Allow', 'GET, HEAD, POST') test_cases = ( (1, 'foo', 'fooupdated', 'DELETE, GET, HEAD, POST, PUT'), (2, 'bar', 'barupdated', 'DELETE, GET, HEAD, POST, PUT'), (3, 'baz', 'bazupdated', 'DELETE, GET, HEAD, POST, PUT'), (5, 'boris', 'borisupdated', 'DELETE, GET, HEAD, POST, PUT'), ) for id, name, updatedname, headers in test_cases: self.getPage("/users/%d" % id) self.assertBody(name) self.assertHeader('Allow', headers) # Make sure POSTs update already existings resources self.getPage("/users/%d" % id, method='POST', body="name=%s" % updatedname) self.assertBody("POST %d" % id) self.assertHeader('Allow', headers) # Make sure PUTs Update already existing resources. self.getPage("/users/%d" % id, method='PUT', body="name=%s" % updatedname) self.assertBody("PUT %d" % id) self.assertHeader('Allow', headers) # Make sure DELETES Remove already existing resources. 
self.getPage("/users/%d" % id, method='DELETE') self.assertBody("DELETE %d" % id) self.assertHeader('Allow', headers) # GET acts like a container self.getPage("/users") self.assertBody("[]") self.assertHeader('Allow', 'GET, HEAD, POST') def testVpathDispatch(self): self.getPage("/decorated/") self.assertBody("no params") self.getPage("/decorated/hi") self.assertBody("hi was not interpreted as 'a' param") self.getPage("/decorated/yo/") self.assertBody("a:yo") self.getPage("/decorated/yo/there/") self.assertBody("a:yo,b:there") self.getPage("/decorated/yo/there/delete") self.assertBody("deleting yo and there") self.getPage("/decorated/yo/there/handled_by_dispatch/") self.assertBody("custom") self.getPage("/undecorated/blah/") self.assertBody("index: blah") self.getPage("/index_only/a/b/c/d/e/f/g/") self.assertBody("IndexOnly index") self.getPage("/parameter_test/argument2/") self.assertBody("argument2")
12,565
Python
.py
313
28.450479
100
0.553982
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,932
test_httplib.py
evilhero_mylar/lib/cherrypy/test/test_httplib.py
"""Tests for cherrypy/lib/httputil.py.""" import unittest from cherrypy.lib import httputil class UtilityTests(unittest.TestCase): def test_urljoin(self): # Test all slash+atom combinations for SCRIPT_NAME and PATH_INFO self.assertEqual(httputil.urljoin("/sn/", "/pi/"), "/sn/pi/") self.assertEqual(httputil.urljoin("/sn/", "/pi"), "/sn/pi") self.assertEqual(httputil.urljoin("/sn/", "/"), "/sn/") self.assertEqual(httputil.urljoin("/sn/", ""), "/sn/") self.assertEqual(httputil.urljoin("/sn", "/pi/"), "/sn/pi/") self.assertEqual(httputil.urljoin("/sn", "/pi"), "/sn/pi") self.assertEqual(httputil.urljoin("/sn", "/"), "/sn/") self.assertEqual(httputil.urljoin("/sn", ""), "/sn") self.assertEqual(httputil.urljoin("/", "/pi/"), "/pi/") self.assertEqual(httputil.urljoin("/", "/pi"), "/pi") self.assertEqual(httputil.urljoin("/", "/"), "/") self.assertEqual(httputil.urljoin("/", ""), "/") self.assertEqual(httputil.urljoin("", "/pi/"), "/pi/") self.assertEqual(httputil.urljoin("", "/pi"), "/pi") self.assertEqual(httputil.urljoin("", "/"), "/") self.assertEqual(httputil.urljoin("", ""), "/") if __name__ == '__main__': unittest.main()
1,291
Python
.py
24
46.416667
72
0.58903
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,933
checkerdemo.py
evilhero_mylar/lib/cherrypy/test/checkerdemo.py
"""Demonstration app for cherrypy.checker. This application is intentionally broken and badly designed. To demonstrate the output of the CherryPy Checker, simply execute this module. """ import os import cherrypy thisdir = os.path.dirname(os.path.abspath(__file__)) class Root: pass if __name__ == '__main__': conf = {'/base': {'tools.staticdir.root': thisdir, # Obsolete key. 'throw_errors': True, }, # This entry should be OK. '/base/static': {'tools.staticdir.on': True, 'tools.staticdir.dir': 'static'}, # Warn on missing folder. '/base/js': {'tools.staticdir.on': True, 'tools.staticdir.dir': 'js'}, # Warn on dir with an abs path even though we provide root. '/base/static2': {'tools.staticdir.on': True, 'tools.staticdir.dir': '/static'}, # Warn on dir with a relative path with no root. '/static3': {'tools.staticdir.on': True, 'tools.staticdir.dir': 'static'}, # Warn on unknown namespace '/unknown': {'toobles.gzip.on': True}, # Warn special on cherrypy.<known ns>.* '/cpknown': {'cherrypy.tools.encode.on': True}, # Warn on mismatched types '/conftype': {'request.show_tracebacks': 14}, # Warn on unknown tool. '/web': {'tools.unknown.on': True}, # Warn on server.* in app config. '/app1': {'server.socket_host': '0.0.0.0'}, # Warn on 'localhost' 'global': {'server.socket_host': 'localhost'}, # Warn on '[name]' '[/extra_brackets]': {}, } cherrypy.quickstart(Root(), config=conf)
1,844
Python
.py
43
31.372093
71
0.532554
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,934
test_etags.py
evilhero_mylar/lib/cherrypy/test/test_etags.py
import cherrypy from cherrypy.test import helper class ETagTest(helper.CPWebCase): def setup_server(): class Root: def resource(self): return "Oh wah ta goo Siam." resource.exposed = True def fail(self, code): code = int(code) if 300 <= code <= 399: raise cherrypy.HTTPRedirect([], code) else: raise cherrypy.HTTPError(code) fail.exposed = True def unicoded(self): return u'I am a \u1ee4nicode string.' unicoded.exposed = True unicoded._cp_config = {'tools.encode.on': True} conf = {'/': {'tools.etags.on': True, 'tools.etags.autotags': True, }} cherrypy.tree.mount(Root(), config=conf) setup_server = staticmethod(setup_server) def test_etags(self): self.getPage("/resource") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('Oh wah ta goo Siam.') etag = self.assertHeader('ETag') # Test If-Match (both valid and invalid) self.getPage("/resource", headers=[('If-Match', etag)]) self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "*")]) self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "*")], method="POST") self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "a bogus tag")]) self.assertStatus("412 Precondition Failed") # Test If-None-Match (both valid and invalid) self.getPage("/resource", headers=[('If-None-Match', etag)]) self.assertStatus(304) self.getPage("/resource", method='POST', headers=[('If-None-Match', etag)]) self.assertStatus("412 Precondition Failed") self.getPage("/resource", headers=[('If-None-Match', "*")]) self.assertStatus(304) self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")]) self.assertStatus("200 OK") def test_errors(self): self.getPage("/resource") self.assertStatus(200) etag = self.assertHeader('ETag') # Test raising errors in page handler self.getPage("/fail/412", headers=[('If-Match', etag)]) self.assertStatus(412) self.getPage("/fail/304", headers=[('If-Match', etag)]) self.assertStatus(304) 
self.getPage("/fail/412", headers=[('If-None-Match', "*")]) self.assertStatus(412) self.getPage("/fail/304", headers=[('If-None-Match', "*")]) self.assertStatus(304) def test_unicode_body(self): self.getPage("/unicoded") self.assertStatus(200) etag1 = self.assertHeader('ETag') self.getPage("/unicoded", headers=[('If-Match', etag1)]) self.assertStatus(200) self.assertHeader('ETag', etag1)
3,071
Python
.py
68
33.970588
83
0.580546
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,935
test_http.py
evilhero_mylar/lib/cherrypy/test/test_http.py
"""Tests for managing HTTP issues (malformed requests, etc).""" import mimetypes import cherrypy from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob def encode_multipart_formdata(files): """Return (content_type, body) ready for httplib.HTTP instance. files: a sequence of (name, filename, value) tuples for multipart uploads. """ BOUNDARY = '________ThIs_Is_tHe_bouNdaRY_$' L = [] for key, filename, value in files: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) ct = mimetypes.guess_type(filename)[0] or 'application/octet-stream' L.append('Content-Type: %s' % ct) L.append('') L.append(value) L.append('--' + BOUNDARY + '--') L.append('') body = '\r\n'.join(L) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body from cherrypy.test import helper class HTTPTests(helper.CPWebCase): def setup_server(): class Root: def index(self, *args, **kwargs): return "Hello world!" index.exposed = True def no_body(self, *args, **kwargs): return "Hello world!" no_body.exposed = True no_body._cp_config = {'request.process_request_body': False} def post_multipart(self, file): """Return a summary ("a * 65536\nb * 65536") of the uploaded file.""" contents = file.file.read() summary = [] curchar = "" count = 0 for c in contents: if c == curchar: count += 1 else: if count: summary.append("%s * %d" % (curchar, count)) count = 1 curchar = c if count: summary.append("%s * %d" % (curchar, count)) return ", ".join(summary) post_multipart.exposed = True cherrypy.tree.mount(Root()) cherrypy.config.update({'server.max_request_body_size': 30000000}) setup_server = staticmethod(setup_server) def test_no_content_length(self): # "The presence of a message-body in a request is signaled by the # inclusion of a Content-Length or Transfer-Encoding header field in # the request's message-headers." # # Send a message with neither header and no body. 
Even though # the request is of method POST, this should be OK because we set # request.process_request_body to False for our handler. if self.scheme == "https": c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) c.request("POST", "/no_body") response = c.getresponse() self.body = response.fp.read() self.status = str(response.status) self.assertStatus(200) self.assertBody(ntob('Hello world!')) # Now send a message that has no Content-Length, but does send a body. # Verify that CP times out the socket and responds # with 411 Length Required. if self.scheme == "https": c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) c.request("POST", "/") response = c.getresponse() self.body = response.fp.read() self.status = str(response.status) self.assertStatus(411) def test_post_multipart(self): alphabet = "abcdefghijklmnopqrstuvwxyz" # generate file contents for a large post contents = "".join([c * 65536 for c in alphabet]) # encode as multipart form data files=[('file', 'file.txt', contents)] content_type, body = encode_multipart_formdata(files) body = body.encode('Latin-1') # post file if self.scheme == 'https': c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) c.putrequest('POST', '/post_multipart') c.putheader('Content-Type', content_type) c.putheader('Content-Length', str(len(body))) c.endheaders() c.send(body) response = c.getresponse() self.body = response.fp.read() self.status = str(response.status) self.assertStatus(200) self.assertBody(", ".join(["%s * 65536" % c for c in alphabet])) def test_malformed_request_line(self): if getattr(cherrypy.server, "using_apache", False): return self.skip("skipped due to known Apache differences...") # Test missing version in Request-Line if self.scheme == 'https': c = HTTPSConnection('%s:%s' % 
(self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) c._output(ntob('GET /')) c._send_output() if hasattr(c, 'strict'): response = c.response_class(c.sock, strict=c.strict, method='GET') else: # Python 3.2 removed the 'strict' feature, saying: # "http.client now always assumes HTTP/1.x compliant servers." response = c.response_class(c.sock, method='GET') response.begin() self.assertEqual(response.status, 400) self.assertEqual(response.fp.read(22), ntob("Malformed Request-Line")) c.close() def test_malformed_header(self): if self.scheme == 'https': c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT)) else: c = HTTPConnection('%s:%s' % (self.interface(), self.PORT)) c.putrequest('GET', '/') c.putheader('Content-Type', 'text/plain') # See http://www.cherrypy.org/ticket/941 c._output(ntob('Re, 1.2.3.4#015#012')) c.endheaders() response = c.getresponse() self.status = str(response.status) self.assertStatus(400) self.body = response.fp.read(20) self.assertBody("Illegal header line.")
6,423
Python
.py
143
33.846154
85
0.574769
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,936
test_bus.py
evilhero_mylar/lib/cherrypy/test/test_bus.py
import threading import time import unittest import cherrypy from cherrypy._cpcompat import get_daemon, set from cherrypy.process import wspbus msg = "Listener %d on channel %s: %s." class PublishSubscribeTests(unittest.TestCase): def get_listener(self, channel, index): def listener(arg=None): self.responses.append(msg % (index, channel, arg)) return listener def test_builtin_channels(self): b = wspbus.Bus() self.responses, expected = [], [] for channel in b.listeners: for index, priority in enumerate([100, 50, 0, 51]): b.subscribe(channel, self.get_listener(channel, index), priority) for channel in b.listeners: b.publish(channel) expected.extend([msg % (i, channel, None) for i in (2, 1, 3, 0)]) b.publish(channel, arg=79347) expected.extend([msg % (i, channel, 79347) for i in (2, 1, 3, 0)]) self.assertEqual(self.responses, expected) def test_custom_channels(self): b = wspbus.Bus() self.responses, expected = [], [] custom_listeners = ('hugh', 'louis', 'dewey') for channel in custom_listeners: for index, priority in enumerate([None, 10, 60, 40]): b.subscribe(channel, self.get_listener(channel, index), priority) for channel in custom_listeners: b.publish(channel, 'ah so') expected.extend([msg % (i, channel, 'ah so') for i in (1, 3, 0, 2)]) b.publish(channel) expected.extend([msg % (i, channel, None) for i in (1, 3, 0, 2)]) self.assertEqual(self.responses, expected) def test_listener_errors(self): b = wspbus.Bus() self.responses, expected = [], [] channels = [c for c in b.listeners if c != 'log'] for channel in channels: b.subscribe(channel, self.get_listener(channel, 1)) # This will break since the lambda takes no args. 
b.subscribe(channel, lambda: None, priority=20) for channel in channels: self.assertRaises(wspbus.ChannelFailures, b.publish, channel, 123) expected.append(msg % (1, channel, 123)) self.assertEqual(self.responses, expected) class BusMethodTests(unittest.TestCase): def log(self, bus): self._log_entries = [] def logit(msg, level): self._log_entries.append(msg) bus.subscribe('log', logit) def assertLog(self, entries): self.assertEqual(self._log_entries, entries) def get_listener(self, channel, index): def listener(arg=None): self.responses.append(msg % (index, channel, arg)) return listener def test_start(self): b = wspbus.Bus() self.log(b) self.responses = [] num = 3 for index in range(num): b.subscribe('start', self.get_listener('start', index)) b.start() try: # The start method MUST call all 'start' listeners. self.assertEqual(set(self.responses), set([msg % (i, 'start', None) for i in range(num)])) # The start method MUST move the state to STARTED # (or EXITING, if errors occur) self.assertEqual(b.state, b.states.STARTED) # The start method MUST log its states. self.assertLog(['Bus STARTING', 'Bus STARTED']) finally: # Exit so the atexit handler doesn't complain. b.exit() def test_stop(self): b = wspbus.Bus() self.log(b) self.responses = [] num = 3 for index in range(num): b.subscribe('stop', self.get_listener('stop', index)) b.stop() # The stop method MUST call all 'stop' listeners. self.assertEqual(set(self.responses), set([msg % (i, 'stop', None) for i in range(num)])) # The stop method MUST move the state to STOPPED self.assertEqual(b.state, b.states.STOPPED) # The stop method MUST log its states. self.assertLog(['Bus STOPPING', 'Bus STOPPED']) def test_graceful(self): b = wspbus.Bus() self.log(b) self.responses = [] num = 3 for index in range(num): b.subscribe('graceful', self.get_listener('graceful', index)) b.graceful() # The graceful method MUST call all 'graceful' listeners. 
self.assertEqual(set(self.responses), set([msg % (i, 'graceful', None) for i in range(num)])) # The graceful method MUST log its states. self.assertLog(['Bus graceful']) def test_exit(self): b = wspbus.Bus() self.log(b) self.responses = [] num = 3 for index in range(num): b.subscribe('stop', self.get_listener('stop', index)) b.subscribe('exit', self.get_listener('exit', index)) b.exit() # The exit method MUST call all 'stop' listeners, # and then all 'exit' listeners. self.assertEqual(set(self.responses), set([msg % (i, 'stop', None) for i in range(num)] + [msg % (i, 'exit', None) for i in range(num)])) # The exit method MUST move the state to EXITING self.assertEqual(b.state, b.states.EXITING) # The exit method MUST log its states. self.assertLog(['Bus STOPPING', 'Bus STOPPED', 'Bus EXITING', 'Bus EXITED']) def test_wait(self): b = wspbus.Bus() def f(method): time.sleep(0.2) getattr(b, method)() for method, states in [('start', [b.states.STARTED]), ('stop', [b.states.STOPPED]), ('start', [b.states.STARTING, b.states.STARTED]), ('exit', [b.states.EXITING]), ]: threading.Thread(target=f, args=(method,)).start() b.wait(states) # The wait method MUST wait for the given state(s). if b.state not in states: self.fail("State %r not in %r" % (b.state, states)) def test_block(self): b = wspbus.Bus() self.log(b) def f(): time.sleep(0.2) b.exit() def g(): time.sleep(0.4) threading.Thread(target=f).start() threading.Thread(target=g).start() threads = [t for t in threading.enumerate() if not get_daemon(t)] self.assertEqual(len(threads), 3) b.block() # The block method MUST wait for the EXITING state. self.assertEqual(b.state, b.states.EXITING) # The block method MUST wait for ALL non-main, non-daemon threads to finish. 
threads = [t for t in threading.enumerate() if not get_daemon(t)] self.assertEqual(len(threads), 1) # The last message will mention an indeterminable thread name; ignore it self.assertEqual(self._log_entries[:-1], ['Bus STOPPING', 'Bus STOPPED', 'Bus EXITING', 'Bus EXITED', 'Waiting for child threads to terminate...']) def test_start_with_callback(self): b = wspbus.Bus() self.log(b) try: events = [] def f(*args, **kwargs): events.append(("f", args, kwargs)) def g(): events.append("g") b.subscribe("start", g) b.start_with_callback(f, (1, 3, 5), {"foo": "bar"}) # Give wait() time to run f() time.sleep(0.2) # The callback method MUST wait for the STARTED state. self.assertEqual(b.state, b.states.STARTED) # The callback method MUST run after all start methods. self.assertEqual(events, ["g", ("f", (1, 3, 5), {"foo": "bar"})]) finally: b.exit() def test_log(self): b = wspbus.Bus() self.log(b) self.assertLog([]) # Try a normal message. expected = [] for msg in ["O mah darlin'"] * 3 + ["Clementiiiiiiiine"]: b.log(msg) expected.append(msg) self.assertLog(expected) # Try an error message try: foo except NameError: b.log("You are lost and gone forever", traceback=True) lastmsg = self._log_entries[-1] if "Traceback" not in lastmsg or "NameError" not in lastmsg: self.fail("Last log message %r did not contain " "the expected traceback." % lastmsg) else: self.fail("NameError was not raised as expected.") if __name__ == "__main__": unittest.main()
8,871
Python
.py
207
31.63285
84
0.558086
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,937
__init__.py
evilhero_mylar/lib/cherrypy/scaffold/__init__.py
"""<MyProject>, a CherryPy application. Use this as a base for creating new CherryPy applications. When you want to make a new app, copy and paste this folder to some other location (maybe site-packages) and rename it to the name of your project, then tweak as desired. Even before any tweaking, this should serve a few demonstration pages. Change to this directory and run: ../cherryd -c site.conf """ import cherrypy from cherrypy import tools, url import os local_dir = os.path.join(os.getcwd(), os.path.dirname(__file__)) class Root: _cp_config = {'tools.log_tracebacks.on': True, } def index(self): return """<html> <body>Try some <a href='%s?a=7'>other</a> path, or a <a href='%s?n=14'>default</a> path.<br /> Or, just look at the pretty picture:<br /> <img src='%s' /> </body></html>""" % (url("other"), url("else"), url("files/made_with_cherrypy_small.png")) index.exposed = True def default(self, *args, **kwargs): return "args: %s kwargs: %s" % (args, kwargs) default.exposed = True def other(self, a=2, b='bananas', c=None): cherrypy.response.headers['Content-Type'] = 'text/plain' if c is None: return "Have %d %s." % (int(a), b) else: return "Have %d %s, %s." % (int(a), b, c) other.exposed = True files = cherrypy.tools.staticdir.handler( section="/files", dir=os.path.join(local_dir, "static"), # Ignore .php files, etc. match=r'\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$', ) root = Root() # Uncomment the following to use your own favicon instead of CP's default. #favicon_path = os.path.join(local_dir, "favicon.ico") #root.favicon_ico = tools.staticfile.handler(filename=favicon_path)
1,803
Python
.py
45
34.644444
74
0.641791
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,938
reprconf.py
evilhero_mylar/lib/cherrypy/lib/reprconf.py
"""Generic configuration system using unrepr. Configuration data may be supplied as a Python dictionary, as a filename, or as an open file object. When you supply a filename or file, Python's builtin ConfigParser is used (with some extensions). Namespaces ---------- Configuration keys are separated into namespaces by the first "." in the key. The only key that cannot exist in a namespace is the "environment" entry. This special entry 'imports' other config entries from a template stored in the Config.environments dict. You can define your own namespaces to be called when new config is merged by adding a named handler to Config.namespaces. The name can be any string, and the handler must be either a callable or a context manager. """ try: # Python 3.0+ from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser try: set except NameError: from sets import Set as set try: basestring except NameError: basestring = str try: # Python 3 import builtins except ImportError: # Python 2 import __builtin__ as builtins import operator as _operator import sys def as_dict(config): """Return a dict from 'config' whether it is a dict, file, or filename.""" if isinstance(config, basestring): config = Parser().dict_from_file(config) elif hasattr(config, 'read'): config = Parser().dict_from_file(config) return config class NamespaceSet(dict): """A dict of config namespace names and handlers. Each config entry should begin with a namespace name; the corresponding namespace handler will be called once for each config entry in that namespace, and will be passed two arguments: the config key (with the namespace removed) and the config value. Namespace handlers may be any Python callable; they may also be Python 2.5-style 'context managers', in which case their __enter__ method should return a callable to be used as the handler. See cherrypy.tools (the Toolbox class) for an example. 
""" def __call__(self, config): """Iterate through config and pass it to each namespace handler. config A flat dict, where keys use dots to separate namespaces, and values are arbitrary. The first name in each config key is used to look up the corresponding namespace handler. For example, a config entry of {'tools.gzip.on': v} will call the 'tools' namespace handler with the args: ('gzip.on', v) """ # Separate the given config into namespaces ns_confs = {} for k in config: if "." in k: ns, name = k.split(".", 1) bucket = ns_confs.setdefault(ns, {}) bucket[name] = config[k] # I chose __enter__ and __exit__ so someday this could be # rewritten using Python 2.5's 'with' statement: # for ns, handler in self.iteritems(): # with handler as callable: # for k, v in ns_confs.get(ns, {}).iteritems(): # callable(k, v) for ns, handler in self.items(): exit = getattr(handler, "__exit__", None) if exit: callable = handler.__enter__() no_exc = True try: try: for k, v in ns_confs.get(ns, {}).items(): callable(k, v) except: # The exceptional case is handled here no_exc = False if exit is None: raise if not exit(*sys.exc_info()): raise # The exception is swallowed if exit() returns true finally: # The normal and non-local-goto cases are handled here if no_exc and exit: exit(None, None, None) else: for k, v in ns_confs.get(ns, {}).items(): handler(k, v) def __repr__(self): return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, dict.__repr__(self)) def __copy__(self): newobj = self.__class__() newobj.update(self) return newobj copy = __copy__ class Config(dict): """A dict-like set of configuration data, with defaults and namespaces. May take a file, filename, or dict. 
""" defaults = {} environments = {} namespaces = NamespaceSet() def __init__(self, file=None, **kwargs): self.reset() if file is not None: self.update(file) if kwargs: self.update(kwargs) def reset(self): """Reset self to default values.""" self.clear() dict.update(self, self.defaults) def update(self, config): """Update self from a dict, file or filename.""" if isinstance(config, basestring): # Filename config = Parser().dict_from_file(config) elif hasattr(config, 'read'): # Open file object config = Parser().dict_from_file(config) else: config = config.copy() self._apply(config) def _apply(self, config): """Update self from a dict.""" which_env = config.get('environment') if which_env: env = self.environments[which_env] for k in env: if k not in config: config[k] = env[k] dict.update(self, config) self.namespaces(config) def __setitem__(self, k, v): dict.__setitem__(self, k, v) self.namespaces({k: v}) class Parser(ConfigParser): """Sub-class of ConfigParser that keeps the case of options and that raises an exception if the file cannot be read. """ def optionxform(self, optionstr): return optionstr def read(self, filenames): if isinstance(filenames, basestring): filenames = [filenames] for filename in filenames: # try: # fp = open(filename) # except IOError: # continue fp = open(filename) try: self._read(fp, filename) finally: fp.close() def as_dict(self, raw=False, vars=None): """Convert an INI file to a dictionary""" # Load INI file into a dict result = {} for section in self.sections(): if section not in result: result[section] = {} for option in self.options(section): value = self.get(section, option, raw=raw, vars=vars) try: value = unrepr(value) except Exception: x = sys.exc_info()[1] msg = ("Config error in section: %r, option: %r, " "value: %r. Config values must be valid Python." 
% (section, option, value)) raise ValueError(msg, x.__class__.__name__, x.args) result[section][option] = value return result def dict_from_file(self, file): if hasattr(file, 'read'): self.readfp(file) else: self.read(file) return self.as_dict() # public domain "unrepr" implementation, found on the web and then improved. class _Builder2: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python2 ast Node compiled from a string.""" try: import compiler except ImportError: # Fallback to eval when compiler package is not available, # e.g. IronPython 1.0. return eval(s) p = compiler.parse("__tempvalue__ = " + s) return p.getChildren()[1].getChildren()[0].getChildren()[1] def build_Subscript(self, o): expr, flags, subs = o.getChildren() expr = self.build(expr) subs = self.build(subs) return expr[subs] def build_CallFunc(self, o): children = o.getChildren() # Build callee from first child callee = self.build(children[0]) # Build args and kwargs from remaining children args = [] kwargs = {} for child in children[1:]: class_name = child.__class__.__name__ # None is ignored if class_name == 'NoneType': continue # Keywords become kwargs if class_name == 'Keyword': kwargs.update(self.build(child)) # Everything else becomes args else : args.append(self.build(child)) return callee(*args, **kwargs) def build_Keyword(self, o): key, value_obj = o.getChildren() value = self.build(value_obj) kw_dict = {key: value} return kw_dict def build_List(self, o): return map(self.build, o.getChildren()) def build_Const(self, o): return o.value def build_Dict(self, o): d = {} i = iter(map(self.build, o.getChildren())) for el in i: d[el] = i.next() return d def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.name if name == 'None': return None if name == 'True': return True if name == 'False': 
return False # See if the Name is a package or module. If it is, import it. try: return modules(name) except ImportError: pass # See if the Name is in builtins. try: return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_Add(self, o): left, right = map(self.build, o.getChildren()) return left + right def build_Mul(self, o): left, right = map(self.build, o.getChildren()) return left * right def build_Getattr(self, o): parent = self.build(o.expr) return getattr(parent, o.attrname) def build_NoneType(self, o): return None def build_UnarySub(self, o): return -self.build(o.getChildren()[0]) def build_UnaryAdd(self, o): return self.build(o.getChildren()[0]) class _Builder3: def build(self, o): m = getattr(self, 'build_' + o.__class__.__name__, None) if m is None: raise TypeError("unrepr does not recognize %s" % repr(o.__class__.__name__)) return m(o) def astnode(self, s): """Return a Python3 ast Node compiled from a string.""" try: import ast except ImportError: # Fallback to eval when ast package is not available, # e.g. IronPython 1.0. 
return eval(s) p = ast.parse("__tempvalue__ = " + s) return p.body[0].value def build_Subscript(self, o): return self.build(o.value)[self.build(o.slice)] def build_Index(self, o): return self.build(o.value) def build_Call(self, o): callee = self.build(o.func) if o.args is None: args = () else: args = tuple([self.build(a) for a in o.args]) if o.starargs is None: starargs = () else: starargs = self.build(o.starargs) if o.kwargs is None: kwargs = {} else: kwargs = self.build(o.kwargs) return callee(*(args + starargs), **kwargs) def build_List(self, o): return list(map(self.build, o.elts)) def build_Str(self, o): return o.s def build_Num(self, o): return o.n def build_Dict(self, o): return dict([(self.build(k), self.build(v)) for k, v in zip(o.keys, o.values)]) def build_Tuple(self, o): return tuple(self.build_List(o)) def build_Name(self, o): name = o.id if name == 'None': return None if name == 'True': return True if name == 'False': return False # See if the Name is a package or module. If it is, import it. try: return modules(name) except ImportError: pass # See if the Name is in builtins. 
try: import builtins return getattr(builtins, name) except AttributeError: pass raise TypeError("unrepr could not resolve the name %s" % repr(name)) def build_NameConstant(self, o): return o.value def build_UnaryOp(self, o): op, operand = map(self.build, [o.op, o.operand]) return op(operand) def build_BinOp(self, o): left, op, right = map(self.build, [o.left, o.op, o.right]) return op(left, right) def build_Add(self, o): return _operator.add def build_Mult(self, o): return _operator.mul def build_USub(self, o): return _operator.neg def build_Attribute(self, o): parent = self.build(o.value) return getattr(parent, o.attr) def build_NoneType(self, o): return None def unrepr(s): """Return a Python object compiled from a string.""" if not s: return s if sys.version_info < (3, 0): b = _Builder2() else: b = _Builder3() obj = b.astnode(s) return b.build(obj) def modules(modulePath): """Load a module and retrieve a reference to that module.""" __import__(modulePath) return sys.modules[modulePath] def attributes(full_attribute_name): """Load a module and retrieve an attribute of that module.""" # Parse out the path, module, and attribute last_dot = full_attribute_name.rfind(".") attr_name = full_attribute_name[last_dot + 1:] mod_path = full_attribute_name[:last_dot] mod = modules(mod_path) # Let an AttributeError propagate outward. try: attr = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) # Return a reference to the attribute. return attr
14,704
Python
.py
400
27.095
78
0.567425
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,939
sessions.py
evilhero_mylar/lib/cherrypy/lib/sessions.py
"""Session implementation for CherryPy. You need to edit your config file to use sessions. Here's an example:: [/] tools.sessions.on = True tools.sessions.storage_type = "file" tools.sessions.storage_path = "/home/site/sessions" tools.sessions.timeout = 60 This sets the session to be stored in files in the directory /home/site/sessions, and the session timeout to 60 minutes. If you omit ``storage_type`` the sessions will be saved in RAM. ``tools.sessions.on`` is the only required line for working sessions, the rest are optional. By default, the session ID is passed in a cookie, so the client's browser must have cookies enabled for your site. To set data for the current session, use ``cherrypy.session['fieldname'] = 'fieldvalue'``; to get data use ``cherrypy.session.get('fieldname')``. ================ Locking sessions ================ By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means the session is locked early and unlocked late. Be mindful of this default mode for any requests that take a long time to process (streaming responses, expensive calculations, database lookups, API calls, etc), as other concurrent requests that also utilize sessions will hang until the session is unlocked. If you want to control when the session data is locked and unlocked, set ``tools.sessions.locking = 'explicit'``. Then call ``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``. Regardless of which mode you use, the session is guaranteed to be unlocked when the request is complete. ================= Expiring Sessions ================= You can force a session to expire with :func:`cherrypy.lib.sessions.expire`. Simply call that function at the point you want the session to expire, and it will cause the session cookie to expire client-side. 
=========================== Session Fixation Protection =========================== If CherryPy receives, via a request cookie, a session id that it does not recognize, it will reject that id and create a new one to return in the response cookie. This `helps prevent session fixation attacks <http://en.wikipedia.org/wiki/Session_fixation#Regenerate_SID_on_each_request>`_. However, CherryPy "recognizes" a session id by looking up the saved session data for that id. Therefore, if you never save any session data, **you will get a new session id for every request**. ================ Sharing Sessions ================ If you run multiple instances of CherryPy (for example via mod_python behind Apache prefork), you most likely cannot use the RAM session backend, since each instance of CherryPy will have its own memory space. Use a different backend instead, and verify that all instances are pointing at the same file or db location. Alternately, you might try a load balancer which makes sessions "sticky". Google is your friend, there. ================ Expiration Dates ================ The response cookie will possess an expiration date to inform the client at which point to stop sending the cookie back in requests. If the server time and client time differ, expect sessions to be unreliable. **Make sure the system time of your server is accurate**. CherryPy defaults to a 60-minute session timeout, which also applies to the cookie which is sent to the client. Unfortunately, some versions of Safari ("4 public beta" on Windows XP at least) appear to have a bug in their parsing of the GMT expiration date--they appear to interpret the date as one hour in the past. Sixty minutes minus one hour is pretty close to zero, so you may experience this bug as a new session id for every request, unless the requests are less than one second apart. To fix, try increasing the session.timeout. 
On the other extreme, some users report Firefox sending cookies after their expiration date, although this was on a system with an inaccurate system time. Maybe FF doesn't trust system time. """ import sys import datetime import os import time import threading import types import cherrypy from cherrypy._cpcompat import copyitems, pickle, random20, unicodestr from cherrypy.lib import httputil from cherrypy.lib import lockfile from cherrypy.lib import locking from cherrypy.lib import is_iterator missing = object() class Session(object): """A CherryPy dict-like Session object (one per request).""" _id = None id_observers = None "A list of callbacks to which to pass new id's." def _get_id(self): return self._id def _set_id(self, value): self._id = value for o in self.id_observers: o(value) id = property(_get_id, _set_id, doc="The current session ID.") timeout = 60 "Number of minutes after which to delete session data." locked = False """ If True, this session instance has exclusive read/write access to session data.""" loaded = False """ If True, data has been retrieved from storage. This should happen automatically on the first attempt to access session data.""" clean_thread = None "Class-level Monitor which calls self.clean_up." clean_freq = 5 "The poll rate for expired session cleanup in minutes." originalid = None "The session id passed by the client. May be missing or unsafe." missing = False "True if the session requested by the client did not exist." regenerated = False """ True if the application called session.regenerate(). This is not set by internal calls to regenerate the session id.""" debug = False "If True, log debug information." 
# --------------------- Session management methods --------------------- # def __init__(self, id=None, **kwargs): self.id_observers = [] self._data = {} for k, v in kwargs.items(): setattr(self, k, v) self.originalid = id self.missing = False if id is None: if self.debug: cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS') self._regenerate() else: self.id = id if self._exists(): if self.debug: cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS') else: if self.debug: cherrypy.log('Expired or malicious session %r; ' 'making a new one' % id, 'TOOLS.SESSIONS') # Expired or malicious session. Make a new one. # See https://bitbucket.org/cherrypy/cherrypy/issue/709. self.id = None self.missing = True self._regenerate() def now(self): """Generate the session specific concept of 'now'. Other session providers can override this to use alternative, possibly timezone aware, versions of 'now'. """ return datetime.datetime.now() def regenerate(self): """Replace the current session (with a new id).""" self.regenerated = True self._regenerate() def _regenerate(self): if self.id is not None: if self.debug: cherrypy.log( 'Deleting the existing session %r before ' 'regeneration.' % self.id, 'TOOLS.SESSIONS') self.delete() old_session_was_locked = self.locked if old_session_was_locked: self.release_lock() if self.debug: cherrypy.log('Old lock released.', 'TOOLS.SESSIONS') self.id = None while self.id is None: self.id = self.generate_id() # Assert that the generated id is not already stored. if self._exists(): self.id = None if self.debug: cherrypy.log('Set id to generated %s.' 
% self.id, 'TOOLS.SESSIONS') if old_session_was_locked: self.acquire_lock() if self.debug: cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS') def clean_up(self): """Clean up expired sessions.""" pass def generate_id(self): """Return a new session id.""" return random20() def save(self): """Save session data.""" try: # If session data has never been loaded then it's never been # accessed: no need to save it if self.loaded: t = datetime.timedelta(seconds=self.timeout * 60) expiration_time = self.now() + t if self.debug: cherrypy.log('Saving session %r with expiry %s' % (self.id, expiration_time), 'TOOLS.SESSIONS') self._save(expiration_time) else: if self.debug: cherrypy.log( 'Skipping save of session %r (no session loaded).' % self.id, 'TOOLS.SESSIONS') finally: if self.locked: # Always release the lock if the user didn't release it self.release_lock() if self.debug: cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS') def load(self): """Copy stored session data into this session instance.""" data = self._load() # data is either None or a tuple (session_data, expiration_time) if data is None or data[1] < self.now(): if self.debug: cherrypy.log('Expired session %r, flushing data.' % self.id, 'TOOLS.SESSIONS') self._data = {} else: if self.debug: cherrypy.log('Data loaded for session %r.' % self.id, 'TOOLS.SESSIONS') self._data = data[0] self.loaded = True # Stick the clean_thread in the class, not the instance. # The instances are created and destroyed per-request. cls = self.__class__ if self.clean_freq and not cls.clean_thread: # clean_up is an instancemethod and not a classmethod, # so that tool config can be accessed inside the method. 
t = cherrypy.process.plugins.Monitor( cherrypy.engine, self.clean_up, self.clean_freq * 60, name='Session cleanup') t.subscribe() cls.clean_thread = t t.start() if self.debug: cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS') def delete(self): """Delete stored session data.""" self._delete() if self.debug: cherrypy.log('Deleted session %s.' % self.id, 'TOOLS.SESSIONS') # -------------------- Application accessor methods -------------------- # def __getitem__(self, key): if not self.loaded: self.load() return self._data[key] def __setitem__(self, key, value): if not self.loaded: self.load() self._data[key] = value def __delitem__(self, key): if not self.loaded: self.load() del self._data[key] def pop(self, key, default=missing): """Remove the specified key and return the corresponding value. If key is not found, default is returned if given, otherwise KeyError is raised. """ if not self.loaded: self.load() if default is missing: return self._data.pop(key) else: return self._data.pop(key, default) def __contains__(self, key): if not self.loaded: self.load() return key in self._data if hasattr({}, 'has_key'): def has_key(self, key): """D.has_key(k) -> True if D has a key k, else False.""" if not self.loaded: self.load() return key in self._data def get(self, key, default=None): """D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.""" if not self.loaded: self.load() return self._data.get(key, default) def update(self, d): """D.update(E) -> None. Update D from E: for k in E: D[k] = E[k].""" if not self.loaded: self.load() self._data.update(d) def setdefault(self, key, default=None): """D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D.""" if not self.loaded: self.load() return self._data.setdefault(key, default) def clear(self): """D.clear() -> None. 
Remove all items from D.""" if not self.loaded: self.load() self._data.clear() def keys(self): """D.keys() -> list of D's keys.""" if not self.loaded: self.load() return self._data.keys() def items(self): """D.items() -> list of D's (key, value) pairs, as 2-tuples.""" if not self.loaded: self.load() return self._data.items() def values(self): """D.values() -> list of D's values.""" if not self.loaded: self.load() return self._data.values() class RamSession(Session): # Class-level objects. Don't rebind these! cache = {} locks = {} def clean_up(self): """Clean up expired sessions.""" now = self.now() for _id, (data, expiration_time) in copyitems(self.cache): if expiration_time <= now: try: del self.cache[_id] except KeyError: pass try: if self.locks[_id].acquire(blocking=False): lock = self.locks.pop(_id) lock.release() except KeyError: pass # added to remove obsolete lock objects for _id in list(self.locks): if _id not in self.cache and self.locks[_id].acquire(blocking=False): lock = self.locks.pop(_id) lock.release() def _exists(self): return self.id in self.cache def _load(self): return self.cache.get(self.id) def _save(self, expiration_time): self.cache[self.id] = (self._data, expiration_time) def _delete(self): self.cache.pop(self.id, None) def acquire_lock(self): """Acquire an exclusive lock on the currently-loaded session data.""" self.locked = True self.locks.setdefault(self.id, threading.RLock()).acquire() def release_lock(self): """Release the lock on the currently-loaded session data.""" self.locks[self.id].release() self.locked = False def __len__(self): """Return the number of active sessions.""" return len(self.cache) class FileSession(Session): """Implementation of the File backend for sessions storage_path The folder where session data will be saved. Each session will be saved as pickle.dump(data, expiration_time) in its own file; the filename will be self.SESSION_PREFIX + self.id. 
lock_timeout A timedelta or numeric seconds indicating how long to block acquiring a lock. If None (default), acquiring a lock will block indefinitely. """ SESSION_PREFIX = 'session-' LOCK_SUFFIX = '.lock' pickle_protocol = pickle.HIGHEST_PROTOCOL def __init__(self, id=None, **kwargs): # The 'storage_path' arg is required for file-based sessions. kwargs['storage_path'] = os.path.abspath(kwargs['storage_path']) kwargs.setdefault('lock_timeout', None) Session.__init__(self, id=id, **kwargs) # validate self.lock_timeout if isinstance(self.lock_timeout, (int, float)): self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout) if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))): raise ValueError("Lock timeout must be numeric seconds or " "a timedelta instance.") def setup(cls, **kwargs): """Set up the storage system for file-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does). """ # The 'storage_path' arg is required for file-based sessions. kwargs['storage_path'] = os.path.abspath(kwargs['storage_path']) for k, v in kwargs.items(): setattr(cls, k, v) setup = classmethod(setup) def _get_file_path(self): f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id) if not os.path.abspath(f).startswith(self.storage_path): raise cherrypy.HTTPError(400, "Invalid session id in cookie.") return f def _exists(self): path = self._get_file_path() return os.path.exists(path) def _load(self, path=None): assert self.locked, ("The session load without being locked. 
" "Check your tools' priority levels.") if path is None: path = self._get_file_path() try: f = open(path, "rb") try: return pickle.load(f) finally: f.close() except (IOError, EOFError): e = sys.exc_info()[1] if self.debug: cherrypy.log("Error loading the session pickle: %s" % e, 'TOOLS.SESSIONS') return None def _save(self, expiration_time): assert self.locked, ("The session was saved without being locked. " "Check your tools' priority levels.") f = open(self._get_file_path(), "wb") try: pickle.dump((self._data, expiration_time), f, self.pickle_protocol) finally: f.close() def _delete(self): assert self.locked, ("The session deletion without being locked. " "Check your tools' priority levels.") try: os.unlink(self._get_file_path()) except OSError: pass def acquire_lock(self, path=None): """Acquire an exclusive lock on the currently-loaded session data.""" if path is None: path = self._get_file_path() path += self.LOCK_SUFFIX checker = locking.LockChecker(self.id, self.lock_timeout) while not checker.expired(): try: self.lock = lockfile.LockFile(path) except lockfile.LockError: time.sleep(0.1) else: break self.locked = True if self.debug: cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS') def release_lock(self, path=None): """Release the lock on the currently-loaded session data.""" self.lock.release() self.lock.remove() self.locked = False def clean_up(self): """Clean up expired sessions.""" now = self.now() # Iterate over all session files in self.storage_path for fname in os.listdir(self.storage_path): if (fname.startswith(self.SESSION_PREFIX) and not fname.endswith(self.LOCK_SUFFIX)): # We have a session file: lock and load it and check # if it's expired. If it fails, nevermind. 
path = os.path.join(self.storage_path, fname) self.acquire_lock(path) if self.debug: # This is a bit of a hack, since we're calling clean_up # on the first instance rather than the entire class, # so depending on whether you have "debug" set on the # path of the first session called, this may not run. cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS') try: contents = self._load(path) # _load returns None on IOError if contents is not None: data, expiration_time = contents if expiration_time < now: # Session expired: deleting it os.unlink(path) finally: self.release_lock(path) def __len__(self): """Return the number of active sessions.""" return len([fname for fname in os.listdir(self.storage_path) if (fname.startswith(self.SESSION_PREFIX) and not fname.endswith(self.LOCK_SUFFIX))]) class PostgresqlSession(Session): """ Implementation of the PostgreSQL backend for sessions. It assumes a table like this:: create table session ( id varchar(40), data text, expiration_time timestamp ) You must provide your own get_db function. """ pickle_protocol = pickle.HIGHEST_PROTOCOL def __init__(self, id=None, **kwargs): Session.__init__(self, id, **kwargs) self.cursor = self.db.cursor() def setup(cls, **kwargs): """Set up the storage system for Postgres-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does). 
""" for k, v in kwargs.items(): setattr(cls, k, v) self.db = self.get_db() setup = classmethod(setup) def __del__(self): if self.cursor: self.cursor.close() self.db.commit() def _exists(self): # Select session data from table self.cursor.execute('select data, expiration_time from session ' 'where id=%s', (self.id,)) rows = self.cursor.fetchall() return bool(rows) def _load(self): # Select session data from table self.cursor.execute('select data, expiration_time from session ' 'where id=%s', (self.id,)) rows = self.cursor.fetchall() if not rows: return None pickled_data, expiration_time = rows[0] data = pickle.loads(pickled_data) return data, expiration_time def _save(self, expiration_time): pickled_data = pickle.dumps(self._data, self.pickle_protocol) self.cursor.execute('update session set data = %s, ' 'expiration_time = %s where id = %s', (pickled_data, expiration_time, self.id)) def _delete(self): self.cursor.execute('delete from session where id=%s', (self.id,)) def acquire_lock(self): """Acquire an exclusive lock on the currently-loaded session data.""" # We use the "for update" clause to lock the row self.locked = True self.cursor.execute('select id from session where id=%s for update', (self.id,)) if self.debug: cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS') def release_lock(self): """Release the lock on the currently-loaded session data.""" # We just close the cursor and that will remove the lock # introduced by the "for update" clause self.cursor.close() self.locked = False def clean_up(self): """Clean up expired sessions.""" self.cursor.execute('delete from session where expiration_time < %s', (self.now(),)) class MemcachedSession(Session): # The most popular memcached client for Python isn't thread-safe. # Wrap all .get and .set operations in a single lock. mc_lock = threading.RLock() # This is a seperate set of locks per session id. 
locks = {} servers = ['127.0.0.1:11211'] def setup(cls, **kwargs): """Set up the storage system for memcached-based sessions. This should only be called once per process; this will be done automatically when using sessions.init (as the built-in Tool does). """ for k, v in kwargs.items(): setattr(cls, k, v) import memcache cls.cache = memcache.Client(cls.servers) setup = classmethod(setup) def _get_id(self): return self._id def _set_id(self, value): # This encode() call is where we differ from the superclass. # Memcache keys MUST be byte strings, not unicode. if isinstance(value, unicodestr): value = value.encode('utf-8') self._id = value for o in self.id_observers: o(value) id = property(_get_id, _set_id, doc="The current session ID.") def _exists(self): self.mc_lock.acquire() try: return bool(self.cache.get(self.id)) finally: self.mc_lock.release() def _load(self): self.mc_lock.acquire() try: return self.cache.get(self.id) finally: self.mc_lock.release() def _save(self, expiration_time): # Send the expiration time as "Unix time" (seconds since 1/1/1970) td = int(time.mktime(expiration_time.timetuple())) self.mc_lock.acquire() try: if not self.cache.set(self.id, (self._data, expiration_time), td): raise AssertionError( "Session data for id %r not set." 
% self.id) finally: self.mc_lock.release() def _delete(self): self.cache.delete(self.id) def acquire_lock(self): """Acquire an exclusive lock on the currently-loaded session data.""" self.locked = True self.locks.setdefault(self.id, threading.RLock()).acquire() if self.debug: cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS') def release_lock(self): """Release the lock on the currently-loaded session data.""" self.locks[self.id].release() self.locked = False def __len__(self): """Return the number of active sessions.""" raise NotImplementedError # Hook functions (for CherryPy tools) def save(): """Save any changed session data.""" if not hasattr(cherrypy.serving, "session"): return request = cherrypy.serving.request response = cherrypy.serving.response # Guard against running twice if hasattr(request, "_sessionsaved"): return request._sessionsaved = True if response.stream: # If the body is being streamed, we have to save the data # *after* the response has been written out request.hooks.attach('on_end_request', cherrypy.session.save) else: # If the body is not being streamed, we save the data now # (so we can release the lock). if is_iterator(response.body): response.collapse_body() cherrypy.session.save() save.failsafe = True def close(): """Close the session object for this request.""" sess = getattr(cherrypy.serving, "session", None) if getattr(sess, "locked", False): # If the session is still locked we release the lock sess.release_lock() if sess.debug: cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS') close.failsafe = True close.priority = 90 def init(storage_type='ram', path=None, path_header=None, name='session_id', timeout=60, domain=None, secure=False, clean_freq=5, persistent=True, httponly=False, debug=False, **kwargs): """Initialize session object (using cookies). storage_type One of 'ram', 'file', 'postgresql', 'memcached'. This will be used to look up the corresponding class in cherrypy.lib.sessions globals. 
For example, 'file' will use the FileSession class. path The 'path' value to stick in the response cookie metadata. path_header If 'path' is None (the default), then the response cookie 'path' will be pulled from request.headers[path_header]. name The name of the cookie. timeout The expiration timeout (in minutes) for the stored session data. If 'persistent' is True (the default), this is also the timeout for the cookie. domain The cookie domain. secure If False (the default) the cookie 'secure' value will not be set. If True, the cookie 'secure' value will be set (to 1). clean_freq (minutes) The poll rate for expired session cleanup. persistent If True (the default), the 'timeout' argument will be used to expire the cookie. If False, the cookie will not have an expiry, and the cookie will be a "session cookie" which expires when the browser is closed. httponly If False (the default) the cookie 'httponly' value will not be set. If True, the cookie 'httponly' value will be set (to 1). Any additional kwargs will be bound to the new Session instance, and may be specific to the storage type. See the subclass of Session you're using for more information. """ request = cherrypy.serving.request # Guard against running twice if hasattr(request, "_session_init_flag"): return request._session_init_flag = True # Check if request came with a session ID id = None if name in request.cookie: id = request.cookie[name].value if debug: cherrypy.log('ID obtained from request.cookie: %r' % id, 'TOOLS.SESSIONS') # Find the storage class and call setup (first time only). storage_class = storage_type.title() + 'Session' storage_class = globals()[storage_class] if not hasattr(cherrypy, "session"): if hasattr(storage_class, "setup"): storage_class.setup(**kwargs) # Create and attach a new Session instance to cherrypy.serving. # It will possess a reference to (and lock, and lazily load) # the requested session data. 
kwargs['timeout'] = timeout kwargs['clean_freq'] = clean_freq cherrypy.serving.session = sess = storage_class(id, **kwargs) sess.debug = debug def update_cookie(id): """Update the cookie every time the session id changes.""" cherrypy.serving.response.cookie[name] = id sess.id_observers.append(update_cookie) # Create cherrypy.session which will proxy to cherrypy.serving.session if not hasattr(cherrypy, "session"): cherrypy.session = cherrypy._ThreadLocalProxy('session') if persistent: cookie_timeout = timeout else: # See http://support.microsoft.com/kb/223799/EN-US/ # and http://support.mozilla.com/en-US/kb/Cookies cookie_timeout = None set_response_cookie(path=path, path_header=path_header, name=name, timeout=cookie_timeout, domain=domain, secure=secure, httponly=httponly) def set_response_cookie(path=None, path_header=None, name='session_id', timeout=60, domain=None, secure=False, httponly=False): """Set a response cookie for the client. path the 'path' value to stick in the response cookie metadata. path_header if 'path' is None (the default), then the response cookie 'path' will be pulled from request.headers[path_header]. name the name of the cookie. timeout the expiration timeout for the cookie. If 0 or other boolean False, no 'expires' param will be set, and the cookie will be a "session cookie" which expires when the browser is closed. domain the cookie domain. secure if False (the default) the cookie 'secure' value will not be set. If True, the cookie 'secure' value will be set (to 1). httponly If False (the default) the cookie 'httponly' value will not be set. If True, the cookie 'httponly' value will be set (to 1). 
""" # Set response cookie cookie = cherrypy.serving.response.cookie cookie[name] = cherrypy.serving.session.id cookie[name]['path'] = ( path or cherrypy.serving.request.headers.get(path_header) or '/' ) # We'd like to use the "max-age" param as indicated in # http://www.faqs.org/rfcs/rfc2109.html but IE doesn't # save it to disk and the session is lost if people close # the browser. So we have to use the old "expires" ... sigh ... ## cookie[name]['max-age'] = timeout * 60 if timeout: e = time.time() + (timeout * 60) cookie[name]['expires'] = httputil.HTTPDate(e) if domain is not None: cookie[name]['domain'] = domain if secure: cookie[name]['secure'] = 1 if httponly: if not cookie[name].isReservedKey('httponly'): raise ValueError("The httponly cookie token is not supported.") cookie[name]['httponly'] = 1 def expire(): """Expire the current session cookie.""" name = cherrypy.serving.request.config.get( 'tools.sessions.name', 'session_id') one_year = 60 * 60 * 24 * 365 e = time.time() - one_year cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
33,435
Python
.py
787
33.170267
81
0.608002
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,940
locking.py
evilhero_mylar/lib/cherrypy/lib/locking.py
import datetime class NeverExpires(object): def expired(self): return False class Timer(object): """ A simple timer that will indicate when an expiration time has passed. """ def __init__(self, expiration): "Create a timer that expires at `expiration` (UTC datetime)" self.expiration = expiration @classmethod def after(cls, elapsed): """ Return a timer that will expire after `elapsed` passes. """ return cls(datetime.datetime.utcnow() + elapsed) def expired(self): return datetime.datetime.utcnow() >= self.expiration class LockTimeout(Exception): "An exception when a lock could not be acquired before a timeout period" class LockChecker(object): """ Keep track of the time and detect if a timeout has expired """ def __init__(self, session_id, timeout): self.session_id = session_id if timeout: self.timer = Timer.after(timeout) else: self.timer = NeverExpires() def expired(self): if self.timer.expired(): raise LockTimeout( "Timeout acquiring lock for %(session_id)s" % vars(self)) return False
1,224
Python
.py
36
26.805556
76
0.640612
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,941
profiler.py
evilhero_mylar/lib/cherrypy/lib/profiler.py
"""Profiler tools for CherryPy. CherryPy users ============== You can profile any of your pages as follows:: from cherrypy.lib import profiler class Root: p = profile.Profiler("/path/to/profile/dir") def index(self): self.p.run(self._index) index.exposed = True def _index(self): return "Hello, world!" cherrypy.tree.mount(Root()) You can also turn on profiling for all requests using the ``make_app`` function as WSGI middleware. CherryPy developers =================== This module can be used whenever you make changes to CherryPy, to get a quick sanity-check on overall CP performance. Use the ``--profile`` flag when running the test suite. Then, use the ``serve()`` function to browse the results in a web browser. If you run this module from the command line, it will call ``serve()`` for you. """ def new_func_strip_path(func_name): """Make profiler output more readable by adding `__init__` modules' parents """ filename, line, name = func_name if filename.endswith("__init__.py"): return os.path.basename(filename[:-12]) + filename[-12:], line, name return os.path.basename(filename), line, name try: import profile import pstats pstats.func_strip_path = new_func_strip_path except ImportError: profile = None pstats = None import os import os.path import sys import warnings from cherrypy._cpcompat import StringIO _count = 0 class Profiler(object): def __init__(self, path=None): if not path: path = os.path.join(os.path.dirname(__file__), "profile") self.path = path if not os.path.exists(path): os.makedirs(path) def run(self, func, *args, **params): """Dump profile data into self.path.""" global _count c = _count = _count + 1 path = os.path.join(self.path, "cp_%04d.prof" % c) prof = profile.Profile() result = prof.runcall(func, *args, **params) prof.dump_stats(path) return result def statfiles(self): """:rtype: list of available profiles. 
""" return [f for f in os.listdir(self.path) if f.startswith("cp_") and f.endswith(".prof")] def stats(self, filename, sortby='cumulative'): """:rtype stats(index): output of print_stats() for the given profile. """ sio = StringIO() if sys.version_info >= (2, 5): s = pstats.Stats(os.path.join(self.path, filename), stream=sio) s.strip_dirs() s.sort_stats(sortby) s.print_stats() else: # pstats.Stats before Python 2.5 didn't take a 'stream' arg, # but just printed to stdout. So re-route stdout. s = pstats.Stats(os.path.join(self.path, filename)) s.strip_dirs() s.sort_stats(sortby) oldout = sys.stdout try: sys.stdout = sio s.print_stats() finally: sys.stdout = oldout response = sio.getvalue() sio.close() return response def index(self): return """<html> <head><title>CherryPy profile data</title></head> <frameset cols='200, 1*'> <frame src='menu' /> <frame name='main' src='' /> </frameset> </html> """ index.exposed = True def menu(self): yield "<h2>Profiling runs</h2>" yield "<p>Click on one of the runs below to see profiling data.</p>" runs = self.statfiles() runs.sort() for i in runs: yield "<a href='report?filename=%s' target='main'>%s</a><br />" % ( i, i) menu.exposed = True def report(self, filename): import cherrypy cherrypy.response.headers['Content-Type'] = 'text/plain' return self.stats(filename) report.exposed = True class ProfileAggregator(Profiler): def __init__(self, path=None): Profiler.__init__(self, path) global _count self.count = _count = _count + 1 self.profiler = profile.Profile() def run(self, func, *args, **params): path = os.path.join(self.path, "cp_%04d.prof" % self.count) result = self.profiler.runcall(func, *args, **params) self.profiler.dump_stats(path) return result class make_app: def __init__(self, nextapp, path=None, aggregate=False): """Make a WSGI middleware app which wraps 'nextapp' with profiling. nextapp the WSGI application to wrap, usually an instance of cherrypy.Application. path where to dump the profiling output. 
aggregate if True, profile data for all HTTP requests will go in a single file. If False (the default), each HTTP request will dump its profile data into a separate file. """ if profile is None or pstats is None: msg = ("Your installation of Python does not have a profile " "module. If you're on Debian, try " "`sudo apt-get install python-profiler`. " "See http://www.cherrypy.org/wiki/ProfilingOnDebian " "for details.") warnings.warn(msg) self.nextapp = nextapp self.aggregate = aggregate if aggregate: self.profiler = ProfileAggregator(path) else: self.profiler = Profiler(path) def __call__(self, environ, start_response): def gather(): result = [] for line in self.nextapp(environ, start_response): result.append(line) return result return self.profiler.run(gather) def serve(path=None, port=8080): if profile is None or pstats is None: msg = ("Your installation of Python does not have a profile module. " "If you're on Debian, try " "`sudo apt-get install python-profiler`. " "See http://www.cherrypy.org/wiki/ProfilingOnDebian " "for details.") warnings.warn(msg) import cherrypy cherrypy.config.update({'server.socket_port': int(port), 'server.thread_pool': 10, 'environment': "production", }) cherrypy.quickstart(Profiler(path)) if __name__ == "__main__": serve(*tuple(sys.argv[1:]))
6,514
Python
.py
172
28.872093
79
0.588282
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,942
httpauth.py
evilhero_mylar/lib/cherrypy/lib/httpauth.py
""" This module defines functions to implement HTTP Digest Authentication (:rfc:`2617`). This has full compliance with 'Digest' and 'Basic' authentication methods. In 'Digest' it supports both MD5 and MD5-sess algorithms. Usage: First use 'doAuth' to request the client authentication for a certain resource. You should send an httplib.UNAUTHORIZED response to the client so he knows he has to authenticate itself. Then use 'parseAuthorization' to retrieve the 'auth_map' used in 'checkResponse'. To use 'checkResponse' you must have already verified the password associated with the 'username' key in 'auth_map' dict. Then you use the 'checkResponse' function to verify if the password matches the one sent by the client. SUPPORTED_ALGORITHM - list of supported 'Digest' algorithms SUPPORTED_QOP - list of supported 'Digest' 'qop'. """ __version__ = 1, 0, 1 __author__ = "Tiago Cogumbreiro <cogumbreiro@users.sf.net>" __credits__ = """ Peter van Kampen for its recipe which implement most of Digest authentication: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302378 """ __license__ = """ Copyright (c) 2005, Tiago Cogumbreiro <cogumbreiro@users.sf.net> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sylvain Hellegouarch nor the names of his contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ __all__ = ("digestAuth", "basicAuth", "doAuth", "checkResponse", "parseAuthorization", "SUPPORTED_ALGORITHM", "md5SessionKey", "calculateNonce", "SUPPORTED_QOP") ########################################################################## import time from cherrypy._cpcompat import base64_decode, ntob, md5 from cherrypy._cpcompat import parse_http_list, parse_keqv_list MD5 = "MD5" MD5_SESS = "MD5-sess" AUTH = "auth" AUTH_INT = "auth-int" SUPPORTED_ALGORITHM = (MD5, MD5_SESS) SUPPORTED_QOP = (AUTH, AUTH_INT) ########################################################################## # doAuth # DIGEST_AUTH_ENCODERS = { MD5: lambda val: md5(ntob(val)).hexdigest(), MD5_SESS: lambda val: md5(ntob(val)).hexdigest(), # SHA: lambda val: sha.new(ntob(val)).hexdigest (), } def calculateNonce(realm, algorithm=MD5): """This is an auxaliary function that calculates 'nonce' value. 
It is used to handle sessions.""" global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS assert algorithm in SUPPORTED_ALGORITHM try: encoder = DIGEST_AUTH_ENCODERS[algorithm] except KeyError: raise NotImplementedError("The chosen algorithm (%s) does not have " "an implementation yet" % algorithm) return encoder("%d:%s" % (time.time(), realm)) def digestAuth(realm, algorithm=MD5, nonce=None, qop=AUTH): """Challenges the client for a Digest authentication.""" global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS, SUPPORTED_QOP assert algorithm in SUPPORTED_ALGORITHM assert qop in SUPPORTED_QOP if nonce is None: nonce = calculateNonce(realm, algorithm) return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % ( realm, nonce, algorithm, qop ) def basicAuth(realm): """Challengenes the client for a Basic authentication.""" assert '"' not in realm, "Realms cannot contain the \" (quote) character." return 'Basic realm="%s"' % realm def doAuth(realm): """'doAuth' function returns the challenge string b giving priority over Digest and fallback to Basic authentication when the browser doesn't support the first one. 
This should be set in the HTTP header under the key 'WWW-Authenticate'.""" return digestAuth(realm) + " " + basicAuth(realm) ########################################################################## # Parse authorization parameters # def _parseDigestAuthorization(auth_params): # Convert the auth params to a dict items = parse_http_list(auth_params) params = parse_keqv_list(items) # Now validate the params # Check for required parameters required = ["username", "realm", "nonce", "uri", "response"] for k in required: if k not in params: return None # If qop is sent then cnonce and nc MUST be present if "qop" in params and not ("cnonce" in params and "nc" in params): return None # If qop is not sent, neither cnonce nor nc can be present if ("cnonce" in params or "nc" in params) and \ "qop" not in params: return None return params def _parseBasicAuthorization(auth_params): username, password = base64_decode(auth_params).split(":", 1) return {"username": username, "password": password} AUTH_SCHEMES = { "basic": _parseBasicAuthorization, "digest": _parseDigestAuthorization, } def parseAuthorization(credentials): """parseAuthorization will convert the value of the 'Authorization' key in the HTTP header to a map itself. If the parsing fails 'None' is returned. """ global AUTH_SCHEMES auth_scheme, auth_params = credentials.split(" ", 1) auth_scheme = auth_scheme.lower() parser = AUTH_SCHEMES[auth_scheme] params = parser(auth_params) if params is None: return assert "auth_scheme" not in params params["auth_scheme"] = auth_scheme return params ########################################################################## # Check provided response for a valid password # def md5SessionKey(params, password): """ If the "algorithm" directive's value is "MD5-sess", then A1 [the session key] is calculated only once - on the first request by the client following receipt of a WWW-Authenticate challenge from the server. 
This creates a 'session key' for the authentication of subsequent requests and responses which is different for each "authentication session", thus limiting the amount of material hashed with any one key. Because the server need only use the hash of the user credentials in order to create the A1 value, this construction could be used in conjunction with a third party authentication service so that the web server would not need the actual password value. The specification of such a protocol is beyond the scope of this specification. """ keys = ("username", "realm", "nonce", "cnonce") params_copy = {} for key in keys: params_copy[key] = params[key] params_copy["algorithm"] = MD5_SESS return _A1(params_copy, password) def _A1(params, password): algorithm = params.get("algorithm", MD5) H = DIGEST_AUTH_ENCODERS[algorithm] if algorithm == MD5: # If the "algorithm" directive's value is "MD5" or is # unspecified, then A1 is: # A1 = unq(username-value) ":" unq(realm-value) ":" passwd return "%s:%s:%s" % (params["username"], params["realm"], password) elif algorithm == MD5_SESS: # This is A1 if qop is set # A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd ) # ":" unq(nonce-value) ":" unq(cnonce-value) h_a1 = H("%s:%s:%s" % (params["username"], params["realm"], password)) return "%s:%s:%s" % (h_a1, params["nonce"], params["cnonce"]) def _A2(params, method, kwargs): # If the "qop" directive's value is "auth" or is unspecified, then A2 is: # A2 = Method ":" digest-uri-value qop = params.get("qop", "auth") if qop == "auth": return method + ":" + params["uri"] elif qop == "auth-int": # If the "qop" value is "auth-int", then A2 is: # A2 = Method ":" digest-uri-value ":" H(entity-body) entity_body = kwargs.get("entity_body", "") H = kwargs["H"] return "%s:%s:%s" % ( method, params["uri"], H(entity_body) ) else: raise NotImplementedError("The 'qop' method is unknown: %s" % qop) def _computeDigestResponse(auth_map, password, method="GET", A1=None, **kwargs): """ Generates 
a response respecting the algorithm defined in RFC 2617 """ params = auth_map algorithm = params.get("algorithm", MD5) H = DIGEST_AUTH_ENCODERS[algorithm] KD = lambda secret, data: H(secret + ":" + data) qop = params.get("qop", None) H_A2 = H(_A2(params, method, kwargs)) if algorithm == MD5_SESS and A1 is not None: H_A1 = H(A1) else: H_A1 = H(_A1(params, password)) if qop in ("auth", "auth-int"): # If the "qop" value is "auth" or "auth-int": # request-digest = <"> < KD ( H(A1), unq(nonce-value) # ":" nc-value # ":" unq(cnonce-value) # ":" unq(qop-value) # ":" H(A2) # ) <"> request = "%s:%s:%s:%s:%s" % ( params["nonce"], params["nc"], params["cnonce"], params["qop"], H_A2, ) elif qop is None: # If the "qop" directive is not present (this construction is # for compatibility with RFC 2069): # request-digest = # <"> < KD ( H(A1), unq(nonce-value) ":" H(A2) ) > <"> request = "%s:%s" % (params["nonce"], H_A2) return KD(H_A1, request) def _checkDigestResponse(auth_map, password, method="GET", A1=None, **kwargs): """This function is used to verify the response given by the client when he tries to authenticate. Optional arguments: entity_body - when 'qop' is set to 'auth-int' you MUST provide the raw data you are going to send to the client (usually the HTML page. request_uri - the uri from the request line compared with the 'uri' directive of the authorization map. They must represent the same resource (unused at this time). 
""" if auth_map['realm'] != kwargs.get('realm', None): return False response = _computeDigestResponse( auth_map, password, method, A1, **kwargs) return response == auth_map["response"] def _checkBasicResponse(auth_map, password, method='GET', encrypt=None, **kwargs): # Note that the Basic response doesn't provide the realm value so we cannot # test it pass_through = lambda password, username=None: password encrypt = encrypt or pass_through try: candidate = encrypt(auth_map["password"], auth_map["username"]) except TypeError: # if encrypt only takes one parameter, it's the password candidate = encrypt(auth_map["password"]) return candidate == password AUTH_RESPONSES = { "basic": _checkBasicResponse, "digest": _checkDigestResponse, } def checkResponse(auth_map, password, method="GET", encrypt=None, **kwargs): """'checkResponse' compares the auth_map with the password and optionally other arguments that each implementation might need. If the response is of type 'Basic' then the function has the following signature:: checkBasicResponse(auth_map, password) -> bool If the response is of type 'Digest' then the function has the following signature:: checkDigestResponse(auth_map, password, method='GET', A1=None) -> bool The 'A1' argument is only used in MD5_SESS algorithm based responses. Check md5SessionKey() for more info. """ checker = AUTH_RESPONSES[auth_map["auth_scheme"]] return checker(auth_map, password, method=method, encrypt=encrypt, **kwargs)
13,030
Python
.py
285
39.740351
79
0.6525
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,943
xmlrpcutil.py
evilhero_mylar/lib/cherrypy/lib/xmlrpcutil.py
import sys import cherrypy from cherrypy._cpcompat import ntob def get_xmlrpclib(): try: import xmlrpc.client as x except ImportError: import xmlrpclib as x return x def process_body(): """Return (params, method) from request body.""" try: return get_xmlrpclib().loads(cherrypy.request.body.read()) except Exception: return ('ERROR PARAMS', ), 'ERRORMETHOD' def patched_path(path): """Return 'path', doctored for RPC.""" if not path.endswith('/'): path += '/' if path.startswith('/RPC2/'): # strip the first /rpc2 path = path[5:] return path def _set_response(body): # The XML-RPC spec (http://www.xmlrpc.com/spec) says: # "Unless there's a lower-level error, always return 200 OK." # Since Python's xmlrpclib interprets a non-200 response # as a "Protocol Error", we'll just return 200 every time. response = cherrypy.response response.status = '200 OK' response.body = ntob(body, 'utf-8') response.headers['Content-Type'] = 'text/xml' response.headers['Content-Length'] = len(body) def respond(body, encoding='utf-8', allow_none=0): xmlrpclib = get_xmlrpclib() if not isinstance(body, xmlrpclib.Fault): body = (body,) _set_response(xmlrpclib.dumps(body, methodresponse=1, encoding=encoding, allow_none=allow_none)) def on_error(*args, **kwargs): body = str(sys.exc_info()[1]) xmlrpclib = get_xmlrpclib() _set_response(xmlrpclib.dumps(xmlrpclib.Fault(1, body)))
1,608
Python
.py
44
29.977273
66
0.640877
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,944
httputil.py
evilhero_mylar/lib/cherrypy/lib/httputil.py
"""HTTP library functions. This module contains functions for building an HTTP application framework: any one, not just one whose name starts with "Ch". ;) If you reference any modules from some popular framework inside *this* module, FuManChu will personally hang you up by your thumbs and submit you to a public caning. """ from binascii import b2a_base64 from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou from cherrypy._cpcompat import basestring, bytestr, iteritems, nativestr from cherrypy._cpcompat import reversed, sorted, unicodestr, unquote_qs response_codes = BaseHTTPRequestHandler.responses.copy() # From https://bitbucket.org/cherrypy/cherrypy/issue/361 response_codes[500] = ('Internal Server Error', 'The server encountered an unexpected condition ' 'which prevented it from fulfilling the request.') response_codes[503] = ('Service Unavailable', 'The server is currently unable to handle the ' 'request due to a temporary overloading or ' 'maintenance of the server.') import re import urllib def urljoin(*atoms): """Return the given path \*atoms, joined into a single URL. This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank. """ url = "/".join([x for x in atoms if x]) while "//" in url: url = url.replace("//", "/") # Special-case the final url of "", and return "/" instead. return url or "/" def urljoin_bytes(*atoms): """Return the given path *atoms, joined into a single URL. This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank. """ url = ntob("/").join([x for x in atoms if x]) while ntob("//") in url: url = url.replace(ntob("//"), ntob("/")) # Special-case the final url of "", and return "/" instead. 
    return url or ntob("/")


def protocol_from_http(protocol_str):
    """Return a protocol tuple from the given 'HTTP/x.y' string."""
    # e.g. "HTTP/1.1" -> (1, 1); relies on the fixed positions of the
    # major/minor digits in the protocol token.
    return int(protocol_str[5]), int(protocol_str[7])


def get_ranges(headervalue, content_length):
    """Return a list of (start, stop) indices from a Range header, or None.

    Each (start, stop) tuple will be composed of two ints, which are suitable
    for use in a slicing operation. That is, the header "Range: bytes=3-6",
    if applied against a Python string, is requesting resource[3:7]. This
    function will return the list [(3, 7)].

    If this function returns an empty list, you should return HTTP 416.
    """

    if not headervalue:
        return None

    result = []
    # Header shape: "bytes=0-99,200-299"; the unit before '=' is ignored here.
    bytesunit, byteranges = headervalue.split("=", 1)
    for brange in byteranges.split(","):
        start, stop = [x.strip() for x in brange.split("-", 1)]
        if start:
            if not stop:
                # Open-ended range ("500-") runs to the end of the entity.
                stop = content_length - 1
            start, stop = int(start), int(stop)
            if start >= content_length:
                # From rfc 2616 sec 14.16:
                # "If the server receives a request (other than one
                # including an If-Range request-header field) with an
                # unsatisfiable Range request-header field (that is,
                # all of whose byte-range-spec values have a first-byte-pos
                # value greater than the current length of the selected
                # resource), it SHOULD return a response code of 416
                # (Requested range not satisfiable)."
                continue
            if stop < start:
                # From rfc 2616 sec 14.16:
                # "If the server ignores a byte-range-spec because it
                # is syntactically invalid, the server SHOULD treat
                # the request as if the invalid Range header field
                # did not exist. (Normally, this means return a 200
                # response containing the full entity)."
                return None
            # stop + 1 converts the inclusive byte position into an
            # exclusive Python slice bound.
            result.append((start, stop + 1))
        else:
            if not stop:
                # See rfc quote above.
                return None
            # Negative subscript (last N bytes)
            #
            # RFC 2616 Section 14.35.1:
            #   If the entity is shorter than the specified suffix-length,
            #   the entire entity-body is used.
            if int(stop) > content_length:
                result.append((0, content_length))
            else:
                result.append((content_length - int(stop), content_length))

    return result


class HeaderElement(object):

    """An element (with parameters) from an HTTP header's element list."""

    def __init__(self, value, params=None):
        self.value = value
        if params is None:
            params = {}
        self.params = params

    def __cmp__(self, other):
        # Python 2 ordering; Python 3 uses __lt__ below.
        return cmp(self.value, other.value)

    def __lt__(self, other):
        return self.value < other.value

    def __str__(self):
        p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
        return str("%s%s" % (self.value, "".join(p)))

    def __bytes__(self):
        return ntob(self.__str__())

    def __unicode__(self):
        return ntou(self.__str__())

    def parse(elementstr):
        """Transform 'token;key=val' to ('token', {'key': 'val'})."""
        # Split the element into a value and parameters. The 'value' may
        # be of the form, "token=token", but we don't split that here.
        atoms = [x.strip() for x in elementstr.split(";") if x.strip()]
        if not atoms:
            initial_value = ''
        else:
            initial_value = atoms.pop(0).strip()
        params = {}
        for atom in atoms:
            atom = [x.strip() for x in atom.split("=", 1) if x.strip()]
            key = atom.pop(0)
            if atom:
                val = atom[0]
            else:
                # Parameter without a value ("token;flag") maps to "".
                val = ""
            params[key] = val
        return initial_value, params
    parse = staticmethod(parse)

    def from_str(cls, elementstr):
        """Construct an instance from a string of the form 'token;key=val'."""
        ival, params = cls.parse(elementstr)
        return cls(ival, params)
    from_str = classmethod(from_str)


# Matches the "; q=" separator between a media-range and its quality value.
q_separator = re.compile(r'; *q *=')


class AcceptElement(HeaderElement):

    """An element (with parameters) from an Accept* header's element list.

    AcceptElement objects are comparable; the more-preferred object will be
    "less than" the less-preferred object. They are also therefore sortable;
    if you sort a list of AcceptElement objects, they will be listed in
    priority order; the most preferred value will be first. Yes, it should
    have been the other way around, but it's too late to fix now.
    """

    def from_str(cls, elementstr):
        qvalue = None
        # The first "q" parameter (if any) separates the initial
        # media-range parameter(s) (if any) from the accept-params.
        atoms = q_separator.split(elementstr, 1)
        media_range = atoms.pop(0).strip()
        if atoms:
            # The qvalue for an Accept header can have extensions. The other
            # headers cannot, but it's easier to parse them as if they did.
            qvalue = HeaderElement.from_str(atoms[0].strip())

        media_type, params = cls.parse(media_range)
        if qvalue is not None:
            params["q"] = qvalue
        return cls(media_type, params)
    from_str = classmethod(from_str)

    def qvalue(self):
        # Missing "q" means maximum preference, per the Accept header rules.
        val = self.params.get("q", "1")
        if isinstance(val, HeaderElement):
            val = val.value
        return float(val)
    qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")

    def __cmp__(self, other):
        diff = cmp(self.qvalue, other.qvalue)
        if diff == 0:
            # Tie-break on the string form for a stable ordering.
            diff = cmp(str(self), str(other))
        return diff

    def __lt__(self, other):
        if self.qvalue == other.qvalue:
            return str(self) < str(other)
        else:
            return self.qvalue < other.qvalue


# Split header element lists on commas, but not commas inside quoted strings.
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')


def header_elements(fieldname, fieldvalue):
    """Return a sorted HeaderElement list from a comma-separated header string.
    """
    if not fieldvalue:
        return []

    result = []
    for element in RE_HEADER_SPLIT.split(fieldvalue):
        if fieldname.startswith("Accept") or fieldname == 'TE':
            # Accept*/TE headers carry qvalues and sort by preference.
            hv = AcceptElement.from_str(element)
        else:
            hv = HeaderElement.from_str(element)
        result.append(hv)

    # Most-preferred element first (see AcceptElement's inverted ordering).
    return list(reversed(sorted(result)))


def decode_TEXT(value):
    r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr")."""
    try:
        # Python 3
        from email.header import decode_header
    except ImportError:
        from email.Header import decode_header
    atoms = decode_header(value)
    decodedvalue = ""
    for atom, charset in atoms:
        if charset is not None:
            atom = atom.decode(charset)
        decodedvalue += atom
    return decodedvalue


def valid_status(status):
    """Return legal HTTP status Code, Reason-phrase and Message.

    The status arg must be an int, or a str that begins with an int.

    If status is an int, or a str and no reason-phrase is supplied,
    a default reason-phrase will be provided.
    """

    if not status:
        status = 200

    status = str(status)
    parts = status.split(" ", 1)
    if len(parts) == 1:
        # No reason supplied.
        code, = parts
        reason = None
    else:
        code, reason = parts
        reason = reason.strip()

    try:
        code = int(code)
    except ValueError:
        raise ValueError("Illegal response status from server "
                         "(%s is non-numeric)." % repr(code))

    if code < 100 or code > 599:
        raise ValueError("Illegal response status from server "
                         "(%s is out of range)." % repr(code))

    if code not in response_codes:
        # code is unknown but not illegal
        default_reason, message = "", ""
    else:
        default_reason, message = response_codes[code]

    if reason is None:
        reason = default_reason

    return code, reason, message


# NOTE: the parse_qs functions that follow are modified version of those
# in the python3.0 source - we need to pass through an encoding to the unquote
# method, but the default parse_qs function doesn't allow us to. These do.

def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings. A
        true value indicates that blanks should be retained as blank
        strings. The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a dict, as G-d intended.
    """
    # Both '&' and ';' are accepted as pair separators.
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    d = {}
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote_qs(nv[0], encoding)
            value = unquote_qs(nv[1], encoding)
            if name in d:
                # Repeated key: promote the value to a list and append.
                if not isinstance(d[name], list):
                    d[name] = [d[name]]
                d[name].append(value)
            else:
                d[name] = value
    return d


# Matches the "x,y" coordinate form sent by server-side image maps.
image_map_pattern = re.compile(r"[0-9]+,[0-9]+")


def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
    """Build a params dictionary from a query_string.

    Duplicate key/value pairs in the provided query_string will be
    returned as {'key': [val1, val2, ...]}. Single key/values will
    be returned as strings: {'key': 'value'}.
    """
    if image_map_pattern.match(query_string):
        # Server-side image map. Map the coords to 'x' and 'y'
        # (like CGI::Request does).
        pm = query_string.split(",")
        pm = {'x': int(pm[0]), 'y': int(pm[1])}
    else:
        pm = _parse_qs(query_string, keep_blank_values, encoding=encoding)
    return pm


class CaseInsensitiveDict(dict):

    """A case-insensitive dict subclass.

    Each key is changed on entry to str(key).title().
    """

    def __getitem__(self, key):
        return dict.__getitem__(self, str(key).title())

    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key).title(), value)

    def __delitem__(self, key):
        dict.__delitem__(self, str(key).title())

    def __contains__(self, key):
        return dict.__contains__(self, str(key).title())

    def get(self, key, default=None):
        return dict.get(self, str(key).title(), default)

    # Only defined on Python 2, where plain dicts still have has_key().
    if hasattr({}, 'has_key'):
        def has_key(self, key):
            return str(key).title() in self

    def update(self, E):
        for k in E.keys():
            self[str(k).title()] = E[k]

    def fromkeys(cls, seq, value=None):
        newdict = cls()
        for k in seq:
            newdict[str(k).title()] = value
        return newdict
    fromkeys = classmethod(fromkeys)

    def setdefault(self, key, x=None):
        key = str(key).title()
        try:
            return self[key]
        except KeyError:
            self[key] = x
            return x

    def pop(self, key, default):
        # NOTE(review): unlike dict.pop, 'default' is required here —
        # confirm no caller relies on the one-argument KeyError form.
        return dict.pop(self, str(key).title(), default)


#   TEXT = <any OCTET except CTLs, but including LWS>
#
# A CRLF is allowed in the definition of TEXT only as part of a header
# field continuation. It is expected that the folding LWS will be
# replaced with a single SP before interpretation of the TEXT value."
if nativestr == bytestr:
    # Python 2: str.translate takes a 256-char table plus deletechars.
    header_translate_table = ''.join([chr(i) for i in xrange(256)])
    header_translate_deletechars = ''.join(
        [chr(i) for i in xrange(32)]) + chr(127)
else:
    # Python 3: bytes.translate accepts None plus a bytes of chars to delete.
    header_translate_table = None
    header_translate_deletechars = bytes(range(32)) + bytes([127])


class HeaderMap(CaseInsensitiveDict):

    """A dict subclass for HTTP request and response headers.

    Each key is changed on entry to str(key).title(). This allows headers
    to be case-insensitive and avoid duplicates.

    Values are header values (decoded according to :rfc:`2047` if necessary).
    """

    protocol = (1, 1)
    encodings = ["ISO-8859-1"]

    # Someday, when http-bis is done, this will probably get dropped
    # since few servers, clients, or intermediaries do it. But until then,
    # we're going to obey the spec as is.
    # "Words of *TEXT MAY contain characters from character sets other than
    # ISO-8859-1 only when encoded according to the rules of RFC 2047."
    use_rfc_2047 = True

    def elements(self, key):
        """Return a sorted list of HeaderElements for the given header."""
        key = str(key).title()
        value = self.get(key)
        return header_elements(key, value)

    def values(self, key):
        """Return a sorted list of HeaderElement.value for the given header."""
        # NOTE(review): shadows dict.values() with an incompatible
        # signature; long-standing CherryPy API, so left as-is.
        return [e.value for e in self.elements(key)]

    def output(self):
        """Transform self into a list of (name, value) tuples."""
        return list(self.encode_header_items(self.items()))

    def encode_header_items(cls, header_items):
        """
        Prepare the sequence of name, value tuples into a form suitable for
        transmitting on the wire for HTTP.
        """
        for k, v in header_items:
            if isinstance(k, unicodestr):
                k = cls.encode(k)

            if not isinstance(v, basestring):
                v = str(v)

            if isinstance(v, unicodestr):
                v = cls.encode(v)

            # See header_translate_* constants above.
            # Replace only if you really know what you're doing.
            k = k.translate(header_translate_table,
                            header_translate_deletechars)
            v = v.translate(header_translate_table,
                            header_translate_deletechars)

            yield (k, v)
    encode_header_items = classmethod(encode_header_items)

    def encode(cls, v):
        """Return the given header name or value, encoded for HTTP output."""
        for enc in cls.encodings:
            try:
                return v.encode(enc)
            except UnicodeEncodeError:
                continue

        if cls.protocol == (1, 1) and cls.use_rfc_2047:
            # Encode RFC-2047 TEXT
            # (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
            # We do our own here instead of using the email module
            # because we never want to fold lines--folding has
            # been deprecated by the HTTP working group.
            v = b2a_base64(v.encode('utf-8'))
            return (ntob('=?utf-8?b?') + v.strip(ntob('\n')) + ntob('?='))

        raise ValueError("Could not encode header part %r using "
                         "any of the encodings %r." %
                         (v, cls.encodings))
    encode = classmethod(encode)


class Host(object):

    """An internet address.

    name
        Should be the client's host name. If not available (because no DNS
        lookup is performed), the IP address should be used instead.
    """

    ip = "0.0.0.0"
    port = 80
    name = "unknown.tld"

    def __init__(self, ip, port, name=None):
        self.ip = ip
        self.port = port
        if name is None:
            # Fall back to the IP when no host name was resolved.
            name = ip
        self.name = name

    def __repr__(self):
        return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
18,204
Python
.py
424
34.200472
79
0.608388
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,945
auth_digest.py
evilhero_mylar/lib/cherrypy/lib/auth_digest.py
# This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8

__doc__ = """An implementation of the server-side of HTTP Digest Access
Authentication, which is described in :rfc:`2617`.

Example usage, using the built-in get_ha1_dict_plain function which uses a dict
of plaintext passwords as the credentials store::

    userpassdict = {'alice' : '4x5istwelve'}
    get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
    digest_auth = {'tools.auth_digest.on': True,
                   'tools.auth_digest.realm': 'wonderland',
                   'tools.auth_digest.get_ha1': get_ha1,
                   'tools.auth_digest.key': 'a565c27146791cfb',
    }
    app_config = { '/' : digest_auth }
"""

__author__ = 'visteya'
__date__ = 'April 2009'


import time
from cherrypy._cpcompat import parse_http_list, parse_keqv_list

import cherrypy
from cherrypy._cpcompat import md5, ntob

# Hex MD5 digest of a (native) string.
md5_hex = lambda s: md5(ntob(s)).hexdigest()

qop_auth = 'auth'
qop_auth_int = 'auth-int'
valid_qops = (qop_auth, qop_auth_int)

valid_algorithms = ('MD5', 'MD5-sess')


def TRACE(msg):
    """Log a debug message under the auth_digest tool's context."""
    cherrypy.log(msg, context='TOOLS.AUTH_DIGEST')

# Three helper functions for users of the tool, providing three variants
# of get_ha1() functions for three different kinds of credential stores.


def get_ha1_dict_plain(user_password_dict):
    """Returns a get_ha1 function which obtains a plaintext password from a
    dictionary of the form: {username : password}.

    If you want a simple dictionary-based authentication scheme, with plaintext
    passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the
    get_ha1 argument to digest_auth().
    """
    def get_ha1(realm, username):
        password = user_password_dict.get(username)
        if password:
            # HA1 = MD5(username:realm:password), per RFC 2617 3.2.2.2.
            return md5_hex('%s:%s:%s' % (username, realm, password))
        return None

    return get_ha1


def get_ha1_dict(user_ha1_dict):
    """Returns a get_ha1 function which obtains a HA1 password hash from a
    dictionary of the form: {username : HA1}.

    If you want a dictionary-based authentication scheme, but with
    pre-computed HA1 hashes instead of plain-text passwords, use
    get_ha1_dict(my_userha1_dict) as the value for the get_ha1
    argument to digest_auth().
    """
    def get_ha1(realm, username):
        return user_ha1_dict.get(username)

    return get_ha1


def get_ha1_file_htdigest(filename):
    """Returns a get_ha1 function which obtains a HA1 password hash from a
    flat file with lines of the same format as that produced by the Apache
    htdigest utility. For example, for realm 'wonderland', username 'alice',
    and password '4x5istwelve', the htdigest line would be::

        alice:wonderland:3238cdfe91a8b2ed8e39646921a02d4c

    If you want to use an Apache htdigest file as the credentials store,
    then use get_ha1_file_htdigest(my_htdigest_file) as the value for the
    get_ha1 argument to digest_auth(). It is recommended that the filename
    argument be an absolute path, to avoid problems.
    """
    def get_ha1(realm, username):
        result = None
        # Use a context manager so the file is closed even if a malformed
        # line raises while iterating (the previous open()/close() pair
        # leaked the handle on error).
        with open(filename, 'r') as f:
            for line in f:
                u, r, ha1 = line.rstrip().split(':')
                if u == username and r == realm:
                    result = ha1
                    break
        return result

    return get_ha1


def synthesize_nonce(s, key, timestamp=None):
    """Synthesize a nonce value which resists spoofing and can be checked
    for staleness. Returns a string suitable as the value for 'nonce' in
    the www-authenticate header.

    s
        A string related to the resource, such as the hostname of the server.

    key
        A secret string known only to the server.

    timestamp
        An integer seconds-since-the-epoch timestamp

    """
    if timestamp is None:
        timestamp = int(time.time())
    # The hash binds the timestamp to the secret key, so a client cannot
    # forge a fresh-looking nonce without knowing the key.
    h = md5_hex('%s:%s:%s' % (timestamp, s, key))
    nonce = '%s:%s' % (timestamp, h)
    return nonce


def H(s):
    """The hash function H"""
    return md5_hex(s)


class HttpDigestAuthorization (object):

    """Class to parse a Digest Authorization header and perform
    re-calculation of the digest.
    """

    def errmsg(self, s):
        return 'Digest Authorization header: %s' % s

    def __init__(self, auth_header, http_method, debug=False):
        self.http_method = http_method
        self.debug = debug
        scheme, params = auth_header.split(" ", 1)
        self.scheme = scheme.lower()
        if self.scheme != 'digest':
            raise ValueError('Authorization scheme is not "Digest"')

        self.auth_header = auth_header

        # make a dict of the params
        items = parse_http_list(params)
        paramsd = parse_keqv_list(items)

        self.realm = paramsd.get('realm')
        self.username = paramsd.get('username')
        self.nonce = paramsd.get('nonce')
        self.uri = paramsd.get('uri')
        self.method = paramsd.get('method')
        self.response = paramsd.get('response')  # the response digest
        self.algorithm = paramsd.get('algorithm', 'MD5').upper()
        self.cnonce = paramsd.get('cnonce')
        self.opaque = paramsd.get('opaque')
        self.qop = paramsd.get('qop')  # qop
        self.nc = paramsd.get('nc')  # nonce count

        # perform some correctness checks
        if self.algorithm not in valid_algorithms:
            raise ValueError(
                self.errmsg("Unsupported value for algorithm: '%s'" %
                            self.algorithm))

        has_reqd = (
            self.username and
            self.realm and
            self.nonce and
            self.uri and
            self.response
        )
        if not has_reqd:
            raise ValueError(
                self.errmsg("Not all required parameters are present."))

        if self.qop:
            if self.qop not in valid_qops:
                raise ValueError(
                    self.errmsg("Unsupported value for qop: '%s'" % self.qop))
            if not (self.cnonce and self.nc):
                raise ValueError(
                    self.errmsg("If qop is sent then "
                                "cnonce and nc MUST be present"))
        else:
            if self.cnonce or self.nc:
                raise ValueError(
                    self.errmsg("If qop is not sent, "
                                "neither cnonce nor nc can be present"))

    def __str__(self):
        return 'authorization : %s' % self.auth_header

    def validate_nonce(self, s, key):
        """Validate the nonce.
        Returns True if nonce was generated by synthesize_nonce() and the
        timestamp is not spoofed, else returns False.

        s
            A string related to the resource, such as the hostname of
            the server.

        key
            A secret string known only to the server.

        Both s and key must be the same values which were used to synthesize
        the nonce we are trying to validate.
        """
        try:
            timestamp, hashpart = self.nonce.split(':', 1)
            # Re-synthesize using the claimed timestamp; the hash parts match
            # only if the client did not tamper with the nonce.
            s_timestamp, s_hashpart = synthesize_nonce(
                s, key, timestamp).split(':', 1)
            is_valid = s_hashpart == hashpart
            if self.debug:
                TRACE('validate_nonce: %s' % is_valid)
            return is_valid
        except ValueError:  # split() error
            pass
        return False

    def is_nonce_stale(self, max_age_seconds=600):
        """Returns True if a validated nonce is stale. The nonce contains a
        timestamp in plaintext and also a secure hash of the timestamp.
        You should first validate the nonce to ensure the plaintext
        timestamp is not spoofed.
        """
        try:
            timestamp, hashpart = self.nonce.split(':', 1)
            if int(timestamp) + max_age_seconds > int(time.time()):
                return False
        except ValueError:  # int() error
            pass
        if self.debug:
            TRACE("nonce is stale")
        return True

    def HA2(self, entity_body=''):
        """Returns the H(A2) string. See :rfc:`2617` section 3.2.2.3."""
        # RFC 2617 3.2.2.3
        # If the "qop" directive's value is "auth" or is unspecified,
        # then A2 is:
        #    A2 = method ":" digest-uri-value
        #
        # If the "qop" value is "auth-int", then A2 is:
        #    A2 = method ":" digest-uri-value ":" H(entity-body)
        if self.qop is None or self.qop == "auth":
            a2 = '%s:%s' % (self.http_method, self.uri)
        elif self.qop == "auth-int":
            a2 = "%s:%s:%s" % (self.http_method, self.uri, H(entity_body))
        else:
            # in theory, this should never happen, since I validate qop in
            # __init__()
            raise ValueError(self.errmsg("Unrecognized value for qop!"))
        return H(a2)

    def request_digest(self, ha1, entity_body=''):
        """Calculates the Request-Digest. See :rfc:`2617` section 3.2.2.1.

        ha1
            The HA1 string obtained from the credentials store.

        entity_body
            If 'qop' is set to 'auth-int', then A2 includes a hash
            of the "entity body".  The entity body is the part of the
            message which follows the HTTP headers. See :rfc:`2617` section
            4.3. This refers to the entity the user agent sent in the
            request which has the Authorization header. Typically GET
            requests don't have an entity, and POST requests do.

        """
        ha2 = self.HA2(entity_body)
        # Request-Digest -- RFC 2617 3.2.2.1
        if self.qop:
            req = "%s:%s:%s:%s:%s" % (
                self.nonce, self.nc, self.cnonce, self.qop, ha2)
        else:
            req = "%s:%s" % (self.nonce, ha2)

        # RFC 2617 3.2.2.2
        #
        # If the "algorithm" directive's value is "MD5" or is unspecified,
        # then A1 is:
        #    A1 = unq(username-value) ":" unq(realm-value) ":" passwd
        #
        # If the "algorithm" directive's value is "MD5-sess", then A1 is
        # calculated only once - on the first request by the client following
        # receipt of a WWW-Authenticate challenge from the server.
        #    A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
        #         ":" unq(nonce-value) ":" unq(cnonce-value)
        if self.algorithm == 'MD5-sess':
            ha1 = H('%s:%s:%s' % (ha1, self.nonce, self.cnonce))

        digest = H('%s:%s' % (ha1, req))
        return digest


def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth,
                     stale=False):
    """Constructs a WWW-Authenticate header for Digest authentication."""
    if qop not in valid_qops:
        raise ValueError("Unsupported value for qop: '%s'" % qop)
    if algorithm not in valid_algorithms:
        raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)

    if nonce is None:
        nonce = synthesize_nonce(realm, key)
    s = 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
        realm, nonce, algorithm, qop)
    if stale:
        s += ', stale="true"'
    return s


def digest_auth(realm, get_ha1, key, debug=False):
    """A CherryPy tool which hooks at before_handler to perform
    HTTP Digest Access Authentication, as specified in :rfc:`2617`.

    If the request has an 'authorization' header with a 'Digest' scheme,
    this tool authenticates the credentials supplied in that header.
    If the request has no 'authorization' header, or if it does but the
    scheme is not "Digest", or if authentication fails, the tool sends
    a 401 response with a 'WWW-Authenticate' Digest header.

    realm
        A string containing the authentication realm.

    get_ha1
        A callable which looks up a username in a credentials store
        and returns the HA1 string, which is defined in the RFC to be
        MD5(username : realm : password).  The function's signature is:
        ``get_ha1(realm, username)``
        where username is obtained from the request's 'authorization' header.
        If username is not found in the credentials store, get_ha1() returns
        None.

    key
        A secret string known only to the server, used in the synthesis
        of nonces.

    """
    request = cherrypy.serving.request

    auth_header = request.headers.get('authorization')
    nonce_is_stale = False
    if auth_header is not None:
        try:
            auth = HttpDigestAuthorization(
                auth_header, request.method, debug=debug)
        except ValueError:
            raise cherrypy.HTTPError(
                400, "The Authorization header could not be parsed.")

        if debug:
            TRACE(str(auth))

        if auth.validate_nonce(realm, key):
            ha1 = get_ha1(realm, auth.username)
            if ha1 is not None:
                # note that for request.body to be available we need to
                # hook in at before_handler, not on_start_resource like
                # 3.1.x digest_auth does.
                digest = auth.request_digest(ha1, entity_body=request.body)
                if digest == auth.response:  # authenticated
                    if debug:
                        TRACE("digest matches auth.response")
                    # Now check if nonce is stale.
                    # The choice of ten minutes' lifetime for nonce is somewhat
                    # arbitrary
                    nonce_is_stale = auth.is_nonce_stale(max_age_seconds=600)
                    if not nonce_is_stale:
                        request.login = auth.username
                        if debug:
                            TRACE("authentication of %s successful" %
                                  auth.username)
                        return

    # Respond with 401 status and a WWW-Authenticate header
    header = www_authenticate(realm, key, stale=nonce_is_stale)
    if debug:
        TRACE(header)
    cherrypy.serving.response.headers['WWW-Authenticate'] = header
    raise cherrypy.HTTPError(
        401, "You are not authorized to access that resource")
14,185
Python
.py
319
34.968652
79
0.610294
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,946
encoding.py
evilhero_mylar/lib/cherrypy/lib/encoding.py
import struct import time import cherrypy from cherrypy._cpcompat import basestring, BytesIO, ntob, set, unicodestr from cherrypy.lib import file_generator from cherrypy.lib import is_closable_iterator from cherrypy.lib import set_vary_header def decode(encoding=None, default_encoding='utf-8'): """Replace or extend the list of charsets used to decode a request entity. Either argument may be a single string or a list of strings. encoding If not None, restricts the set of charsets attempted while decoding a request entity to the given set (even if a different charset is given in the Content-Type request header). default_encoding Only in effect if the 'encoding' argument is not given. If given, the set of charsets attempted while decoding a request entity is *extended* with the given value(s). """ body = cherrypy.request.body if encoding is not None: if not isinstance(encoding, list): encoding = [encoding] body.attempt_charsets = encoding elif default_encoding: if not isinstance(default_encoding, list): default_encoding = [default_encoding] body.attempt_charsets = body.attempt_charsets + default_encoding class UTF8StreamEncoder: def __init__(self, iterator): self._iterator = iterator def __iter__(self): return self def next(self): return self.__next__() def __next__(self): res = next(self._iterator) if isinstance(res, unicodestr): res = res.encode('utf-8') return res def close(self): if is_closable_iterator(self._iterator): self._iterator.close() def __getattr__(self, attr): if attr.startswith('__'): raise AttributeError(self, attr) return getattr(self._iterator, attr) class ResponseEncoder: default_encoding = 'utf-8' failmsg = "Response body could not be encoded with %r." 
encoding = None errors = 'strict' text_only = True add_charset = True debug = False def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) self.attempted_charsets = set() request = cherrypy.serving.request if request.handler is not None: # Replace request.handler with self if self.debug: cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE') self.oldhandler = request.handler request.handler = self def encode_stream(self, encoding): """Encode a streaming response body. Use a generator wrapper, and just pray it works as the stream is being written out. """ if encoding in self.attempted_charsets: return False self.attempted_charsets.add(encoding) def encoder(body): for chunk in body: if isinstance(chunk, unicodestr): chunk = chunk.encode(encoding, self.errors) yield chunk self.body = encoder(self.body) return True def encode_string(self, encoding): """Encode a buffered response body.""" if encoding in self.attempted_charsets: return False self.attempted_charsets.add(encoding) body = [] for chunk in self.body: if isinstance(chunk, unicodestr): try: chunk = chunk.encode(encoding, self.errors) except (LookupError, UnicodeError): return False body.append(chunk) self.body = body return True def find_acceptable_charset(self): request = cherrypy.serving.request response = cherrypy.serving.response if self.debug: cherrypy.log('response.stream %r' % response.stream, 'TOOLS.ENCODE') if response.stream: encoder = self.encode_stream else: encoder = self.encode_string if "Content-Length" in response.headers: # Delete Content-Length header so finalize() recalcs it. # Encoded strings may be of different lengths from their # unicode equivalents, and even from each other. For example: # >>> t = u"\u7007\u3040" # >>> len(t) # 2 # >>> len(t.encode("UTF-8")) # 6 # >>> len(t.encode("utf7")) # 8 del response.headers["Content-Length"] # Parse the Accept-Charset request header, and try to provide one # of the requested charsets (in order of user preference). 
encs = request.headers.elements('Accept-Charset') charsets = [enc.value.lower() for enc in encs] if self.debug: cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE') if self.encoding is not None: # If specified, force this encoding to be used, or fail. encoding = self.encoding.lower() if self.debug: cherrypy.log('Specified encoding %r' % encoding, 'TOOLS.ENCODE') if (not charsets) or "*" in charsets or encoding in charsets: if self.debug: cherrypy.log('Attempting encoding %r' % encoding, 'TOOLS.ENCODE') if encoder(encoding): return encoding else: if not encs: if self.debug: cherrypy.log('Attempting default encoding %r' % self.default_encoding, 'TOOLS.ENCODE') # Any character-set is acceptable. if encoder(self.default_encoding): return self.default_encoding else: raise cherrypy.HTTPError(500, self.failmsg % self.default_encoding) else: for element in encs: if element.qvalue > 0: if element.value == "*": # Matches any charset. Try our default. if self.debug: cherrypy.log('Attempting default encoding due ' 'to %r' % element, 'TOOLS.ENCODE') if encoder(self.default_encoding): return self.default_encoding else: encoding = element.value if self.debug: cherrypy.log('Attempting encoding %s (qvalue >' '0)' % element, 'TOOLS.ENCODE') if encoder(encoding): return encoding if "*" not in charsets: # If no "*" is present in an Accept-Charset field, then all # character sets not explicitly mentioned get a quality # value of 0, except for ISO-8859-1, which gets a quality # value of 1 if not explicitly mentioned. iso = 'iso-8859-1' if iso not in charsets: if self.debug: cherrypy.log('Attempting ISO-8859-1 encoding', 'TOOLS.ENCODE') if encoder(iso): return iso # No suitable encoding found. ac = request.headers.get('Accept-Charset') if ac is None: msg = "Your client did not send an Accept-Charset header." else: msg = "Your client sent this Accept-Charset header: %s." % ac _charsets = ", ".join(sorted(self.attempted_charsets)) msg += " We tried these charsets: %s." 
% (_charsets,) raise cherrypy.HTTPError(406, msg) def __call__(self, *args, **kwargs): response = cherrypy.serving.response self.body = self.oldhandler(*args, **kwargs) if isinstance(self.body, basestring): # strings get wrapped in a list because iterating over a single # item list is much faster than iterating over every character # in a long string. if self.body: self.body = [self.body] else: # [''] doesn't evaluate to False, so replace it with []. self.body = [] elif hasattr(self.body, 'read'): self.body = file_generator(self.body) elif self.body is None: self.body = [] ct = response.headers.elements("Content-Type") if self.debug: cherrypy.log('Content-Type: %r' % [str(h) for h in ct], 'TOOLS.ENCODE') if ct and self.add_charset: ct = ct[0] if self.text_only: if ct.value.lower().startswith("text/"): if self.debug: cherrypy.log( 'Content-Type %s starts with "text/"' % ct, 'TOOLS.ENCODE') do_find = True else: if self.debug: cherrypy.log('Not finding because Content-Type %s ' 'does not start with "text/"' % ct, 'TOOLS.ENCODE') do_find = False else: if self.debug: cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE') do_find = True if do_find: # Set "charset=..." 
param on response Content-Type header ct.params['charset'] = self.find_acceptable_charset() if self.debug: cherrypy.log('Setting Content-Type %s' % ct, 'TOOLS.ENCODE') response.headers["Content-Type"] = str(ct) return self.body # GZIP def compress(body, compress_level): """Compress 'body' at the given compress_level.""" import zlib # See http://www.gzip.org/zlib/rfc-gzip.html yield ntob('\x1f\x8b') # ID1 and ID2: gzip marker yield ntob('\x08') # CM: compression method yield ntob('\x00') # FLG: none set # MTIME: 4 bytes yield struct.pack("<L", int(time.time()) & int('FFFFFFFF', 16)) yield ntob('\x02') # XFL: max compression, slowest algo yield ntob('\xff') # OS: unknown crc = zlib.crc32(ntob("")) size = 0 zobj = zlib.compressobj(compress_level, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) for line in body: size += len(line) crc = zlib.crc32(line, crc) yield zobj.compress(line) yield zobj.flush() # CRC32: 4 bytes yield struct.pack("<L", crc & int('FFFFFFFF', 16)) # ISIZE: 4 bytes yield struct.pack("<L", size & int('FFFFFFFF', 16)) def decompress(body): import gzip zbuf = BytesIO() zbuf.write(body) zbuf.seek(0) zfile = gzip.GzipFile(mode='rb', fileobj=zbuf) data = zfile.read() zfile.close() return data def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False): """Try to gzip the response body if Content-Type in mime_types. cherrypy.response.headers['Content-Type'] must be set to one of the values in the mime_types arg before calling this function. The provided list of mime-types must be of one of the following form: * type/subtype * type/* * type/*+subtype No compression is performed if any of the following hold: * The client sends no Accept-Encoding request header * No 'gzip' or 'x-gzip' is present in the Accept-Encoding header * No 'gzip' or 'x-gzip' with a qvalue > 0 is present * The 'identity' value is given with a qvalue > 0. 
""" request = cherrypy.serving.request response = cherrypy.serving.response set_vary_header(response, "Accept-Encoding") if not response.body: # Response body is empty (might be a 304 for instance) if debug: cherrypy.log('No response body', context='TOOLS.GZIP') return # If returning cached content (which should already have been gzipped), # don't re-zip. if getattr(request, "cached", False): if debug: cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP') return acceptable = request.headers.elements('Accept-Encoding') if not acceptable: # If no Accept-Encoding field is present in a request, # the server MAY assume that the client will accept any # content coding. In this case, if "identity" is one of # the available content-codings, then the server SHOULD use # the "identity" content-coding, unless it has additional # information that a different content-coding is meaningful # to the client. if debug: cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP') return ct = response.headers.get('Content-Type', '').split(';')[0] for coding in acceptable: if coding.value == 'identity' and coding.qvalue != 0: if debug: cherrypy.log('Non-zero identity qvalue: %s' % coding, context='TOOLS.GZIP') return if coding.value in ('gzip', 'x-gzip'): if coding.qvalue == 0: if debug: cherrypy.log('Zero gzip qvalue: %s' % coding, context='TOOLS.GZIP') return if ct not in mime_types: # If the list of provided mime-types contains tokens # such as 'text/*' or 'application/*+xml', # we go through them and find the most appropriate one # based on the given content-type. # The pattern matching is only caring about the most # common cases, as stated above, and doesn't support # for extra parameters. 
found = False if '/' in ct: ct_media_type, ct_sub_type = ct.split('/') for mime_type in mime_types: if '/' in mime_type: media_type, sub_type = mime_type.split('/') if ct_media_type == media_type: if sub_type == '*': found = True break elif '+' in sub_type and '+' in ct_sub_type: ct_left, ct_right = ct_sub_type.split('+') left, right = sub_type.split('+') if left == '*' and ct_right == right: found = True break if not found: if debug: cherrypy.log('Content-Type %s not in mime_types %r' % (ct, mime_types), context='TOOLS.GZIP') return if debug: cherrypy.log('Gzipping', context='TOOLS.GZIP') # Return a generator that compresses the page response.headers['Content-Encoding'] = 'gzip' response.body = compress(response.body, compress_level) if "Content-Length" in response.headers: # Delete Content-Length header so finalize() recalcs it. del response.headers["Content-Length"] return if debug: cherrypy.log('No acceptable encoding found.', context='GZIP') cherrypy.HTTPError(406, "identity, gzip").set_response()
16,226
Python
.py
363
30.77686
79
0.536096
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,947
jsontools.py
evilhero_mylar/lib/cherrypy/lib/jsontools.py
import cherrypy from cherrypy._cpcompat import basestring, ntou, json_encode, json_decode def json_processor(entity): """Read application/json data into request.json.""" if not entity.headers.get(ntou("Content-Length"), ntou("")): raise cherrypy.HTTPError(411) body = entity.fp.read() try: cherrypy.serving.request.json = json_decode(body.decode('utf-8')) except ValueError: raise cherrypy.HTTPError(400, 'Invalid JSON document') def json_in(content_type=[ntou('application/json'), ntou('text/javascript')], force=True, debug=False, processor=json_processor): """Add a processor to parse JSON request entities: The default processor places the parsed data into request.json. Incoming request entities which match the given content_type(s) will be deserialized from JSON to the Python equivalent, and the result stored at cherrypy.request.json. The 'content_type' argument may be a Content-Type string or a list of allowable Content-Type strings. If the 'force' argument is True (the default), then entities of other content types will not be allowed; "415 Unsupported Media Type" is raised instead. Supply your own processor to use a custom decoder, or to handle the parsed data differently. The processor can be configured via tools.json_in.processor or via the decorator method. Note that the deserializer requires the client send a Content-Length request header, or it will raise "411 Length Required". If for any other reason the request entity cannot be deserialized from JSON, it will raise "400 Bad Request: Invalid JSON document". You must be using Python 2.6 or greater, or have the 'simplejson' package importable; otherwise, ValueError is raised during processing. 
""" request = cherrypy.serving.request if isinstance(content_type, basestring): content_type = [content_type] if force: if debug: cherrypy.log('Removing body processors %s' % repr(request.body.processors.keys()), 'TOOLS.JSON_IN') request.body.processors.clear() request.body.default_proc = cherrypy.HTTPError( 415, 'Expected an entity of content type %s' % ', '.join(content_type)) for ct in content_type: if debug: cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN') request.body.processors[ct] = processor def json_handler(*args, **kwargs): value = cherrypy.serving.request._json_inner_handler(*args, **kwargs) return json_encode(value) def json_out(content_type='application/json', debug=False, handler=json_handler): """Wrap request.handler to serialize its output to JSON. Sets Content-Type. If the given content_type is None, the Content-Type response header is not set. Provide your own handler to use a custom encoder. For example cherrypy.config['tools.json_out.handler'] = <function>, or @json_out(handler=function). You must be using Python 2.6 or greater, or have the 'simplejson' package importable; otherwise, ValueError is raised during processing. """ request = cherrypy.serving.request # request.handler may be set to None by e.g. the caching tool # to signal to all components that a response body has already # been attached, in which case we don't need to wrap anything. if request.handler is None: return if debug: cherrypy.log('Replacing %s with JSON handler' % request.handler, 'TOOLS.JSON_OUT') request._json_inner_handler = request.handler request.handler = handler if content_type is not None: if debug: cherrypy.log('Setting Content-Type to %s' % content_type, 'TOOLS.JSON_OUT') cherrypy.serving.response.headers['Content-Type'] = content_type
3,935
Python
.py
77
43.987013
79
0.699401
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,948
auth.py
evilhero_mylar/lib/cherrypy/lib/auth.py
import cherrypy from cherrypy.lib import httpauth def check_auth(users, encrypt=None, realm=None): """If an authorization header contains credentials, return True or False. """ request = cherrypy.serving.request if 'authorization' in request.headers: # make sure the provided credentials are correctly set ah = httpauth.parseAuthorization(request.headers['authorization']) if ah is None: raise cherrypy.HTTPError(400, 'Bad Request') if not encrypt: encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5] if hasattr(users, '__call__'): try: # backward compatibility users = users() # expect it to return a dictionary if not isinstance(users, dict): raise ValueError( "Authentication users must be a dictionary") # fetch the user password password = users.get(ah["username"], None) except TypeError: # returns a password (encrypted or clear text) password = users(ah["username"]) else: if not isinstance(users, dict): raise ValueError("Authentication users must be a dictionary") # fetch the user password password = users.get(ah["username"], None) # validate the authorization by re-computing it here # and compare it with what the user-agent provided if httpauth.checkResponse(ah, password, method=request.method, encrypt=encrypt, realm=realm): request.login = ah["username"] return True request.login = False return False def basic_auth(realm, users, encrypt=None, debug=False): """If auth fails, raise 401 with a basic authentication header. realm A string containing the authentication realm. users A dict of the form: {username: password} or a callable returning a dict. encrypt callable used to encrypt the password returned from the user-agent. if None it defaults to a md5 encryption. 
""" if check_auth(users, encrypt): if debug: cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH') return # inform the user-agent this path is protected cherrypy.serving.response.headers[ 'www-authenticate'] = httpauth.basicAuth(realm) raise cherrypy.HTTPError( 401, "You are not authorized to access that resource") def digest_auth(realm, users, debug=False): """If auth fails, raise 401 with a digest authentication header. realm A string containing the authentication realm. users A dict of the form: {username: password} or a callable returning a dict. """ if check_auth(users, realm=realm): if debug: cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH') return # inform the user-agent this path is protected cherrypy.serving.response.headers[ 'www-authenticate'] = httpauth.digestAuth(realm) raise cherrypy.HTTPError( 401, "You are not authorized to access that resource")
3,224
Python
.py
75
33.24
77
0.637992
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,949
__init__.py
evilhero_mylar/lib/cherrypy/lib/__init__.py
"""CherryPy Library""" # Deprecated in CherryPy 3.2 -- remove in CherryPy 3.3 from cherrypy.lib.reprconf import unrepr, modules, attributes def is_iterator(obj): '''Returns a boolean indicating if the object provided implements the iterator protocol (i.e. like a generator). This will return false for objects which iterable, but not iterators themselves.''' from types import GeneratorType if isinstance(obj, GeneratorType): return True elif not hasattr(obj, '__iter__'): return False else: # Types which implement the protocol must return themselves when # invoking 'iter' upon them. return iter(obj) is obj def is_closable_iterator(obj): # Not an iterator. if not is_iterator(obj): return False # A generator - the easiest thing to deal with. import inspect if inspect.isgenerator(obj): return True # A custom iterator. Look for a close method... if not (hasattr(obj, 'close') and callable(obj.close)): return False # ... which doesn't require any arguments. try: inspect.getcallargs(obj.close) except TypeError: return False else: return True class file_generator(object): """Yield the given input (a file object) in chunks (default 64k). (Core)""" def __init__(self, input, chunkSize=65536): self.input = input self.chunkSize = chunkSize def __iter__(self): return self def __next__(self): chunk = self.input.read(self.chunkSize) if chunk: return chunk else: if hasattr(self.input, 'close'): self.input.close() raise StopIteration() next = __next__ def file_generator_limited(fileobj, count, chunk_size=65536): """Yield the given file object in chunks, stopping after `count` bytes has been emitted. Default chunk size is 64kB. 
(Core) """ remaining = count while remaining > 0: chunk = fileobj.read(min(chunk_size, remaining)) chunklen = len(chunk) if chunklen == 0: return remaining -= chunklen yield chunk def set_vary_header(response, header_name): "Add a Vary header to a response" varies = response.headers.get("Vary", "") varies = [x.strip() for x in varies.split(",") if x.strip()] if header_name not in varies: varies.append(header_name) response.headers['Vary'] = ", ".join(varies)
2,520
Python
.py
69
29.405797
79
0.64662
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,950
static.py
evilhero_mylar/lib/cherrypy/lib/static.py
import os
import re
import stat
import mimetypes

try:
    from io import UnsupportedOperation
except ImportError:
    UnsupportedOperation = object()

import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited

mimetypes.init()
mimetypes.types_map['.dwg'] = 'image/x-dwg'
mimetypes.types_map['.ico'] = 'image/x-icon'
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
mimetypes.types_map['.gz'] = 'application/x-gzip'


def serve_file(path, content_type=None, disposition=None, name=None,
               debug=False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.
    """
    response = cherrypy.serving.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    try:
        st = os.stat(path)
    except OSError:
        # Missing/unreadable file -> fall through to dynamic handlers.
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory; let the caller deal with it as they like.
    if stat.S_ISDIR(st.st_mode):
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)


def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.

    CAUTION: If the request contains a 'Range' header, one or more seek()s
    will be performed on the file object. This may cause undesired behavior
    if the file object is not seekable. It could also produce undesired
    results if the caller set the read position of the file object prior to
    calling serve_fileobj(), expecting that the data would be served starting
    from that position.
    """
    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not a real file (no fileno); size is unknown.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = None
    except UnsupportedOperation:
        # e.g. io.BytesIO: fileno() exists but raises.
        content_length = None
    else:
        # Set the Last-Modified response header, so that
        # modified-since validation code can work.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)


def _serve_fileobj(fileobj, content_type, content_length, debug=False):
    """Internal. Set response.body to the given file object, perhaps ranged."""
    response = cherrypy.serving.response

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    request = cherrypy.serving.request
    if request.protocol >= (1, 1):
        response.headers["Accept-Ranges"] = "bytes"
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        if r == []:
            # get_ranges returns [] (not None) for a syntactically valid but
            # unsatisfiable Range header -> 416 per RFC 7233.
            response.headers['Content-Range'] = "bytes */%s" % content_length
            message = ("Invalid Range (first-byte-pos greater than "
                       "Content-Length)")
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > content_length:
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log(
                        'Single part; start: %r, stop: %r' % (start, stop),
                        'TOOLS.STATIC')
                response.status = "206 Partial Content"
                response.headers['Content-Range'] = (
                    "bytes %s-%s/%s" % (start, stop - 1, content_length))
                response.headers['Content-Length'] = r_len
                fileobj.seek(start)
                response.body = file_generator_limited(fileobj, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = "206 Partial Content"
                try:
                    # Python 3
                    from email.generator import _make_boundary as make_boundary
                except ImportError:
                    # Python 2
                    from mimetools import choose_boundary as make_boundary
                boundary = make_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Apache compatibility:
                    yield ntob("\r\n")

                    for start, stop in r:
                        if debug:
                            cherrypy.log(
                                'Multipart; start: %r, stop: %r' % (
                                    start, stop),
                                'TOOLS.STATIC')
                        yield ntob("--" + boundary, 'ascii')
                        yield ntob("\r\nContent-type: %s" % content_type,
                                   'ascii')
                        yield ntob(
                            "\r\nContent-range: bytes %s-%s/%s\r\n\r\n" % (
                                start, stop - 1, content_length),
                            'ascii')
                        fileobj.seek(start)
                        gen = file_generator_limited(fileobj, stop - start)
                        for chunk in gen:
                            yield chunk
                        yield ntob("\r\n")
                    # Final boundary
                    yield ntob("--" + boundary + "--", 'ascii')

                    # Apache compatibility:
                    yield ntob("\r\n")
                response.body = file_ranges()
            return response.body
        else:
            if debug:
                cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body


def serve_download(path, name=None):
    """Serve 'path' as an application/x-download attachment."""
    # This is such a common idiom I felt it deserved its own wrapper.
    return serve_file(path, "application/x-download", "attachment", name)


def _attempt(filename, content_types, debug=False):
    """Try to serve `filename`; return True on success, False on NotFound."""
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' %
                     (filename, content_types), 'TOOLS.STATICDIR')
    try:
        # you can set the content types for a
        # complete directory per extension
        content_type = None
        if content_types:
            r, ext = os.path.splitext(filename)
            content_type = content_types.get(ext[1:], None)
        serve_file(filename, content_type=content_type, debug=debug)
        return True
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False


def staticdir(section, dir, root="", match="", content_types=None, index="",
              debug=False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
        '/home/me', the Request-URI is 'myapp', and the index arg is
        'index.html', the file '/home/me/myapp/index.html' will be sought.
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

    # If dir is relative, make absolute using "root".
    if not os.path.isabs(dir):
        if not root:
            msg = "Static dir requires an absolute dir (or root)."
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
        section = "/"
    section = section.rstrip(r"\/")
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip(r"\/"))

    # If branch is "", filename will end in a slash
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' %
                     (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir. A bare startswith() prefix test would
    # wrongly accept sibling paths such as '/srv/www-secret' for a dir of
    # '/srv/www', so require either an exact match or a separator boundary.
    ndir = os.path.normpath(dir)
    nfile = os.path.normpath(filename)
    prefix = ndir if ndir.endswith(os.sep) else ndir + os.sep
    if nfile != ndir and not nfile.startswith(prefix):
        raise cherrypy.HTTPError(403)  # Forbidden

    # Propagate the debug flag so 'Attempting ...' diagnostics appear when
    # tools.staticdir.debug is on (previously the flag was dropped here).
    handled = _attempt(filename, content_types, debug=debug)
    if not handled:
        # Check for an index file if a folder was requested.
        if index:
            handled = _attempt(os.path.join(filename, index), content_types,
                               debug=debug)
            if handled:
                request.is_index = filename[-1] in (r"\/")
    return handled


def staticfile(filename, root=None, match="", content_types=None, debug=False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICFILE')
        return False

    # If filename is relative, make absolute using "root".
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % (
                filename,)
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
14,778
Python
.py
320
35.871875
79
0.607778
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,951
gctools.py
evilhero_mylar/lib/cherrypy/lib/gctools.py
"""Tools for inspecting garbage-collector state and hunting reference leaks.

Used by CherryPy's test suite (via the GCRoot page handler) to verify that
Request/Response objects are not kept alive between requests.
"""

import gc
import inspect
import os
import sys
import time

try:
    import objgraph
except ImportError:
    # objgraph is optional; when absent, no backref graphs are rendered.
    objgraph = None

import cherrypy
from cherrypy import _cprequest, _cpwsgi
from cherrypy.process.plugins import SimplePlugin


class ReferrerTree(object):

    """An object which gathers all referrers of an object to a given depth."""

    # Maximum length of a repr() shown in the report (see peek()).
    peek_length = 40

    def __init__(self, ignore=None, maxdepth=2, maxparents=10):
        self.ignore = ignore or []
        # Ignore the caller's frame so the tree doesn't include this call.
        self.ignore.append(inspect.currentframe().f_back)
        self.maxdepth = maxdepth
        self.maxparents = maxparents

    def ascend(self, obj, depth=1):
        """Return a nested list containing referrers of the given object."""
        depth += 1
        parents = []

        # Gather all referrers in one step to minimize
        # cascading references due to repr() logic.
        refs = gc.get_referrers(obj)
        # The refs list itself now refers to obj; exclude it from later scans.
        self.ignore.append(refs)
        if len(refs) > self.maxparents:
            return [("[%s referrers]" % len(refs), [])]

        try:
            ascendcode = self.ascend.__code__
        except AttributeError:
            # Python 2 spelling of the underlying code object.
            ascendcode = self.ascend.im_func.func_code
        for parent in refs:
            # Skip our own recursion frames (identified by code-object id).
            if inspect.isframe(parent) and parent.f_code is ascendcode:
                continue
            if parent in self.ignore:
                continue
            if depth <= self.maxdepth:
                parents.append((parent, self.ascend(parent, depth)))
            else:
                parents.append((parent, []))

        return parents

    def peek(self, s):
        """Return s, restricted to a sane length."""
        if len(s) > (self.peek_length + 3):
            half = self.peek_length // 2
            # Keep the head and tail, elide the middle.
            return s[:half] + '...' + s[-half:]
        else:
            return s

    def _format(self, obj, descend=True):
        """Return a string representation of a single object."""
        if inspect.isframe(obj):
            filename, lineno, func, context, index = inspect.getframeinfo(obj)
            return "<frame of function '%s'>" % func

        if not descend:
            return self.peek(repr(obj))

        # One level of container expansion; children are shown abbreviated.
        if isinstance(obj, dict):
            return "{" + ", ".join(["%s: %s" % (self._format(k, descend=False),
                                                self._format(v, descend=False))
                                    for k, v in obj.items()]) + "}"
        elif isinstance(obj, list):
            return "[" + ", ".join([self._format(item, descend=False)
                                    for item in obj]) + "]"
        elif isinstance(obj, tuple):
            return "(" + ", ".join([self._format(item, descend=False)
                                    for item in obj]) + ")"

        r = self.peek(repr(obj))
        if isinstance(obj, (str, int, float)):
            return r
        return "%s: %s" % (type(obj), r)

    def format(self, tree):
        """Return a list of string reprs from a nested list of referrers."""
        output = []

        def ascend(branch, depth=1):
            # Indentation encodes depth in the referrer tree.
            for parent, grandparents in branch:
                output.append((" " * depth) + self._format(parent))
                if grandparents:
                    ascend(grandparents, depth + 1)
        ascend(tree)
        return output


def get_instances(cls):
    """Return all live, GC-tracked instances of *cls*."""
    return [x for x in gc.get_objects() if isinstance(x, cls)]


class RequestCounter(SimplePlugin):

    """Engine plugin counting requests currently in flight."""

    def start(self):
        self.count = 0

    def before_request(self):
        self.count += 1

    def after_request(self):
        self.count -= 1
# Module-level singleton, subscribed to the global engine at import time.
request_counter = RequestCounter(cherrypy.engine)
request_counter.subscribe()


def get_context(obj):
    """Return a short context string describing a leaked object."""
    if isinstance(obj, _cprequest.Request):
        return "path=%s;stage=%s" % (obj.path_info, obj.stage)
    elif isinstance(obj, _cprequest.Response):
        return "status=%s" % obj.status
    elif isinstance(obj, _cpwsgi.AppResponse):
        return "PATH_INFO=%s" % obj.environ.get('PATH_INFO', '')
    elif hasattr(obj, "tb_lineno"):
        return "tb_lineno=%s" % obj.tb_lineno
    return ""


class GCRoot(object):

    """A CherryPy page handler for testing reference leaks."""

    # (class, min expected instances, max expected instances, explanation)
    classes = [(_cprequest.Request, 2, 2,
                "Should be 1 in this request thread and 1 in the main thread."),
               (_cprequest.Response, 2, 2,
                "Should be 1 in this request thread and 1 in the main thread."),
               (_cpwsgi.AppResponse, 1, 1,
                "Should be 1 in this request thread only."),
               ]

    def index(self):
        return "Hello, world!"
    index.exposed = True

    def stats(self):
        """Collect garbage and report unreachable/leaked objects as text."""
        output = ["Statistics:"]

        # Wait (up to ~5s) for this very request to be counted as open.
        for trial in range(10):
            if request_counter.count > 0:
                break
            time.sleep(0.5)
        else:
            output.append("\nNot all requests closed properly.")

        # gc_collect isn't perfectly synchronous, because it may
        # break reference cycles that then take time to fully
        # finalize. Call it thrice and hope for the best.
        gc.collect()
        gc.collect()
        unreachable = gc.collect()
        if unreachable:
            if objgraph is not None:
                final = objgraph.by_type('Nondestructible')
                if final:
                    objgraph.show_backrefs(final, filename='finalizers.png')

            # Histogram of unreachable objects by type.
            trash = {}
            for x in gc.garbage:
                trash[type(x)] = trash.get(type(x), 0) + 1
            if trash:
                output.insert(0, "\n%s unreachable objects:" % unreachable)
                trash = [(v, k) for k, v in trash.items()]
                trash.sort()
                for pair in trash:
                    output.append("    " + repr(pair))

        # Check declared classes to verify uncollected instances.
        # These don't have to be part of a cycle; they can be
        # any objects that have unanticipated referrers that keep
        # them from being collected.
        allobjs = {}
        for cls, minobj, maxobj, msg in self.classes:
            allobjs[cls] = get_instances(cls)

        for cls, minobj, maxobj, msg in self.classes:
            objs = allobjs[cls]
            lenobj = len(objs)
            if lenobj < minobj or lenobj > maxobj:
                if minobj == maxobj:
                    output.append(
                        "\nExpected %s %r references, got %s." %
                        (minobj, cls, lenobj))
                else:
                    output.append(
                        "\nExpected %s to %s %r references, got %s." %
                        (minobj, maxobj, cls, lenobj))

                for obj in objs:
                    if objgraph is not None:
                        # Ignore the bookkeeping lists/frames we created here.
                        ig = [id(objs), id(inspect.currentframe())]
                        fname = "graph_%s_%s.png" % (cls.__name__, id(obj))
                        objgraph.show_backrefs(
                            obj, extra_ignore=ig, max_depth=4, too_many=20,
                            filename=fname, extra_info=get_context)
                    output.append("\nReferrers for %s (refcount=%s):" %
                                  (repr(obj), sys.getrefcount(obj)))
                    t = ReferrerTree(ignore=[objs], maxdepth=3)
                    tree = t.ascend(obj)
                    output.extend(t.format(tree))

        return "\n".join(output)
    stats.exposed = True
7,362
Python
.py
177
29.875706
79
0.545836
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,952
cptools.py
evilhero_mylar/lib/cherrypy/lib/cptools.py
"""Functions for builtin CherryPy tools.""" import logging import re import cherrypy from cherrypy._cpcompat import basestring, md5, set, unicodestr from cherrypy.lib import httputil as _httputil from cherrypy.lib import is_iterator # Conditional HTTP request support # def validate_etags(autotags=False, debug=False): """Validate the current ETag against If-Match, If-None-Match headers. If autotags is True, an ETag response-header value will be provided from an MD5 hash of the response body (unless some other code has already provided an ETag header). If False (the default), the ETag will not be automatic. WARNING: the autotags feature is not designed for URL's which allow methods other than GET. For example, if a POST to the same URL returns no content, the automatic ETag will be incorrect, breaking a fundamental use for entity tags in a possibly destructive fashion. Likewise, if you raise 304 Not Modified, the response body will be empty, the ETag hash will be incorrect, and your application will break. See :rfc:`2616` Section 14.24. """ response = cherrypy.serving.response # Guard against being run twice. if hasattr(response, "ETag"): return status, reason, msg = _httputil.valid_status(response.status) etag = response.headers.get('ETag') # Automatic ETag generation. See warning in docstring. if etag: if debug: cherrypy.log('ETag already set: %s' % etag, 'TOOLS.ETAGS') elif not autotags: if debug: cherrypy.log('Autotags off', 'TOOLS.ETAGS') elif status != 200: if debug: cherrypy.log('Status not 200', 'TOOLS.ETAGS') else: etag = response.collapse_body() etag = '"%s"' % md5(etag).hexdigest() if debug: cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS') response.headers['ETag'] = etag response.ETag = etag # "If the request would, without the If-Match header field, result in # anything other than a 2xx or 412 status, then the If-Match header # MUST be ignored." 
if debug: cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS') if status >= 200 and status <= 299: request = cherrypy.serving.request conditions = request.headers.elements('If-Match') or [] conditions = [str(x) for x in conditions] if debug: cherrypy.log('If-Match conditions: %s' % repr(conditions), 'TOOLS.ETAGS') if conditions and not (conditions == ["*"] or etag in conditions): raise cherrypy.HTTPError(412, "If-Match failed: ETag %r did " "not match %r" % (etag, conditions)) conditions = request.headers.elements('If-None-Match') or [] conditions = [str(x) for x in conditions] if debug: cherrypy.log('If-None-Match conditions: %s' % repr(conditions), 'TOOLS.ETAGS') if conditions == ["*"] or etag in conditions: if debug: cherrypy.log('request.method: %s' % request.method, 'TOOLS.ETAGS') if request.method in ("GET", "HEAD"): raise cherrypy.HTTPRedirect([], 304) else: raise cherrypy.HTTPError(412, "If-None-Match failed: ETag %r " "matched %r" % (etag, conditions)) def validate_since(): """Validate the current Last-Modified against If-Modified-Since headers. If no code has set the Last-Modified response header, then no validation will be performed. """ response = cherrypy.serving.response lastmod = response.headers.get('Last-Modified') if lastmod: status, reason, msg = _httputil.valid_status(response.status) request = cherrypy.serving.request since = request.headers.get('If-Unmodified-Since') if since and since != lastmod: if (status >= 200 and status <= 299) or status == 412: raise cherrypy.HTTPError(412) since = request.headers.get('If-Modified-Since') if since and since == lastmod: if (status >= 200 and status <= 299) or status == 304: if request.method in ("GET", "HEAD"): raise cherrypy.HTTPRedirect([], 304) else: raise cherrypy.HTTPError(412) # Tool code # def allow(methods=None, debug=False): """Raise 405 if request.method not in methods (default ['GET', 'HEAD']). The given methods are case-insensitive, and may be in any order. 
If only one method is allowed, you may supply a single string; if more than one, supply a list of strings. Regardless of whether the current method is allowed or not, this also emits an 'Allow' response header, containing the given methods. """ if not isinstance(methods, (tuple, list)): methods = [methods] methods = [m.upper() for m in methods if m] if not methods: methods = ['GET', 'HEAD'] elif 'GET' in methods and 'HEAD' not in methods: methods.append('HEAD') cherrypy.response.headers['Allow'] = ', '.join(methods) if cherrypy.request.method not in methods: if debug: cherrypy.log('request.method %r not in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW') raise cherrypy.HTTPError(405) else: if debug: cherrypy.log('request.method %r in methods %r' % (cherrypy.request.method, methods), 'TOOLS.ALLOW') def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For', scheme='X-Forwarded-Proto', debug=False): """Change the base URL (scheme://host[:port][/path]). For running a CP server behind Apache, lighttpd, or other HTTP server. For Apache and lighttpd, you should leave the 'local' argument at the default value of 'X-Forwarded-Host'. For Squid, you probably want to set tools.proxy.local = 'Origin'. If you want the new request.base to include path info (not just the host), you must explicitly set base to the full base path, and ALSO set 'local' to '', so that the X-Forwarded-Host request header (which never includes path info) does not override it. Regardless, the value for 'base' MUST NOT end in a slash. cherrypy.request.remote.ip (the IP address of the client) will be rewritten if the header specified by the 'remote' arg is valid. By default, 'remote' is set to 'X-Forwarded-For'. If you do not want to rewrite remote.ip, set the 'remote' arg to an empty string. 
""" request = cherrypy.serving.request if scheme: s = request.headers.get(scheme, None) if debug: cherrypy.log('Testing scheme %r:%r' % (scheme, s), 'TOOLS.PROXY') if s == 'on' and 'ssl' in scheme.lower(): # This handles e.g. webfaction's 'X-Forwarded-Ssl: on' header scheme = 'https' else: # This is for lighttpd/pound/Mongrel's 'X-Forwarded-Proto: https' scheme = s if not scheme: scheme = request.base[:request.base.find("://")] if local: lbase = request.headers.get(local, None) if debug: cherrypy.log('Testing local %r:%r' % (local, lbase), 'TOOLS.PROXY') if lbase is not None: base = lbase.split(',')[0] if not base: port = request.local.port if port == 80: base = '127.0.0.1' else: base = '127.0.0.1:%s' % port if base.find("://") == -1: # add http:// or https:// if needed base = scheme + "://" + base request.base = base if remote: xff = request.headers.get(remote) if debug: cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY') if xff: if remote == 'X-Forwarded-For': #Bug #1268 xff = xff.split(',')[0].strip() request.remote.ip = xff def ignore_headers(headers=('Range',), debug=False): """Delete request headers whose field names are included in 'headers'. This is a useful tool for working behind certain HTTP servers; for example, Apache duplicates the work that CP does for 'Range' headers, and will doubly-truncate the response. 
""" request = cherrypy.serving.request for name in headers: if name in request.headers: if debug: cherrypy.log('Ignoring request header %r' % name, 'TOOLS.IGNORE_HEADERS') del request.headers[name] def response_headers(headers=None, debug=False): """Set headers on the response.""" if debug: cherrypy.log('Setting response headers: %s' % repr(headers), 'TOOLS.RESPONSE_HEADERS') for name, value in (headers or []): cherrypy.serving.response.headers[name] = value response_headers.failsafe = True def referer(pattern, accept=True, accept_missing=False, error=403, message='Forbidden Referer header.', debug=False): """Raise HTTPError if Referer header does/does not match the given pattern. pattern A regular expression pattern to test against the Referer. accept If True, the Referer must match the pattern; if False, the Referer must NOT match the pattern. accept_missing If True, permit requests with no Referer header. error The HTTP error code to return to the client on failure. message A string to include in the response body on failure. 
""" try: ref = cherrypy.serving.request.headers['Referer'] match = bool(re.match(pattern, ref)) if debug: cherrypy.log('Referer %r matches %r' % (ref, pattern), 'TOOLS.REFERER') if accept == match: return except KeyError: if debug: cherrypy.log('No Referer header', 'TOOLS.REFERER') if accept_missing: return raise cherrypy.HTTPError(error, message) class SessionAuth(object): """Assert that the user is logged in.""" session_key = "username" debug = False def check_username_and_password(self, username, password): pass def anonymous(self): """Provide a temporary user name for anonymous users.""" pass def on_login(self, username): pass def on_logout(self, username): pass def on_check(self, username): pass def login_screen(self, from_page='..', username='', error_msg='', **kwargs): return (unicodestr("""<html><body> Message: %(error_msg)s <form method="post" action="do_login"> Login: <input type="text" name="username" value="%(username)s" size="10" /> <br /> Password: <input type="password" name="password" size="10" /> <br /> <input type="hidden" name="from_page" value="%(from_page)s" /> <br /> <input type="submit" /> </form> </body></html>""") % vars()).encode("utf-8") def do_login(self, username, password, from_page='..', **kwargs): """Login. May raise redirect, or return True if request handled.""" response = cherrypy.serving.response error_msg = self.check_username_and_password(username, password) if error_msg: body = self.login_screen(from_page, username, error_msg) response.body = body if "Content-Length" in response.headers: # Delete Content-Length header so finalize() recalcs it. del response.headers["Content-Length"] return True else: cherrypy.serving.request.login = username cherrypy.session[self.session_key] = username self.on_login(username) raise cherrypy.HTTPRedirect(from_page or "/") def do_logout(self, from_page='..', **kwargs): """Logout. 
May raise redirect, or return True if request handled.""" sess = cherrypy.session username = sess.get(self.session_key) sess[self.session_key] = None if username: cherrypy.serving.request.login = None self.on_logout(username) raise cherrypy.HTTPRedirect(from_page) def do_check(self): """Assert username. Raise redirect, or return True if request handled. """ sess = cherrypy.session request = cherrypy.serving.request response = cherrypy.serving.response username = sess.get(self.session_key) if not username: sess[self.session_key] = username = self.anonymous() self._debug_message('No session[username], trying anonymous') if not username: url = cherrypy.url(qs=request.query_string) self._debug_message( 'No username, routing to login_screen with from_page %(url)r', locals(), ) response.body = self.login_screen(url) if "Content-Length" in response.headers: # Delete Content-Length header so finalize() recalcs it. del response.headers["Content-Length"] return True self._debug_message('Setting request.login to %(username)r', locals()) request.login = username self.on_check(username) def _debug_message(self, template, context={}): if not self.debug: return cherrypy.log(template % context, 'TOOLS.SESSAUTH') def run(self): request = cherrypy.serving.request response = cherrypy.serving.response path = request.path_info if path.endswith('login_screen'): self._debug_message('routing %(path)r to login_screen', locals()) response.body = self.login_screen() return True elif path.endswith('do_login'): if request.method != 'POST': response.headers['Allow'] = "POST" self._debug_message('do_login requires POST') raise cherrypy.HTTPError(405) self._debug_message('routing %(path)r to do_login', locals()) return self.do_login(**request.params) elif path.endswith('do_logout'): if request.method != 'POST': response.headers['Allow'] = "POST" raise cherrypy.HTTPError(405) self._debug_message('routing %(path)r to do_logout', locals()) return self.do_logout(**request.params) else: 
self._debug_message('No special path, running do_check') return self.do_check() def session_auth(**kwargs): sa = SessionAuth() for k, v in kwargs.items(): setattr(sa, k, v) return sa.run() session_auth.__doc__ = """Session authentication hook. Any attribute of the SessionAuth class may be overridden via a keyword arg to this function: """ + "\n".join(["%s: %s" % (k, type(getattr(SessionAuth, k)).__name__) for k in dir(SessionAuth) if not k.startswith("__")]) def log_traceback(severity=logging.ERROR, debug=False): """Write the last error's traceback to the cherrypy error log.""" cherrypy.log("", "HTTP", severity=severity, traceback=True) def log_request_headers(debug=False): """Write request headers to the cherrypy error log.""" h = [" %s: %s" % (k, v) for k, v in cherrypy.serving.request.header_list] cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), "HTTP") def log_hooks(debug=False): """Write request.hooks to the cherrypy error log.""" request = cherrypy.serving.request msg = [] # Sort by the standard points if possible. 
from cherrypy import _cprequest points = _cprequest.hookpoints for k in request.hooks.keys(): if k not in points: points.append(k) for k in points: msg.append(" %s:" % k) v = request.hooks.get(k, []) v.sort() for h in v: msg.append(" %r" % h) cherrypy.log('\nRequest Hooks for ' + cherrypy.url() + ':\n' + '\n'.join(msg), "HTTP") def redirect(url='', internal=True, debug=False): """Raise InternalRedirect or HTTPRedirect to the given url.""" if debug: cherrypy.log('Redirecting %sto: %s' % ({True: 'internal ', False: ''}[internal], url), 'TOOLS.REDIRECT') if internal: raise cherrypy.InternalRedirect(url) else: raise cherrypy.HTTPRedirect(url) def trailing_slash(missing=True, extra=False, status=None, debug=False): """Redirect if path_info has (missing|extra) trailing slash.""" request = cherrypy.serving.request pi = request.path_info if debug: cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' % (request.is_index, missing, extra, pi), 'TOOLS.TRAILING_SLASH') if request.is_index is True: if missing: if not pi.endswith('/'): new_url = cherrypy.url(pi + '/', request.query_string) raise cherrypy.HTTPRedirect(new_url, status=status or 301) elif request.is_index is False: if extra: # If pi == '/', don't redirect to ''! if pi.endswith('/') and pi != '/': new_url = cherrypy.url(pi[:-1], request.query_string) raise cherrypy.HTTPRedirect(new_url, status=status or 301) def flatten(debug=False): """Wrap response.body in a generator that recursively iterates over body. This allows cherrypy.response.body to consist of 'nested generators'; that is, a set of generators that yield generators. 
""" def flattener(input): numchunks = 0 for x in input: if not is_iterator(x): numchunks += 1 yield x else: for y in flattener(x): numchunks += 1 yield y if debug: cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN') response = cherrypy.serving.response response.body = flattener(response.body) def accept(media=None, debug=False): """Return the client's preferred media-type (from the given Content-Types). If 'media' is None (the default), no test will be performed. If 'media' is provided, it should be the Content-Type value (as a string) or values (as a list or tuple of strings) which the current resource can emit. The client's acceptable media ranges (as declared in the Accept request header) will be matched in order to these Content-Type values; the first such string is returned. That is, the return value will always be one of the strings provided in the 'media' arg (or None if 'media' is None). If no match is found, then HTTPError 406 (Not Acceptable) is raised. Note that most web browsers send */* as a (low-quality) acceptable media range, which should match any Content-Type. In addition, "...if no Accept header field is present, then it is assumed that the client accepts all media types." Matching types are checked in order of client preference first, and then in the order of the given 'media' values. Note that this function does not honor accept-params (other than "q"). """ if not media: return if isinstance(media, basestring): media = [media] request = cherrypy.serving.request # Parse the Accept request header, and try to match one # of the requested media-ranges (in order of preference). ranges = request.headers.elements('Accept') if not ranges: # Any media type is acceptable. 
if debug: cherrypy.log('No Accept header elements', 'TOOLS.ACCEPT') return media[0] else: # Note that 'ranges' is sorted in order of preference for element in ranges: if element.qvalue > 0: if element.value == "*/*": # Matches any type or subtype if debug: cherrypy.log('Match due to */*', 'TOOLS.ACCEPT') return media[0] elif element.value.endswith("/*"): # Matches any subtype mtype = element.value[:-1] # Keep the slash for m in media: if m.startswith(mtype): if debug: cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT') return m else: # Matches exact value if element.value in media: if debug: cherrypy.log('Match due to %s' % element.value, 'TOOLS.ACCEPT') return element.value # No suitable media-range found. ah = request.headers.get('Accept') if ah is None: msg = "Your client did not send an Accept header." else: msg = "Your client sent this Accept header: %s." % ah msg += (" But this resource only emits these media types: %s." % ", ".join(media)) raise cherrypy.HTTPError(406, msg) class MonitoredHeaderMap(_httputil.HeaderMap): def __init__(self): self.accessed_headers = set() def __getitem__(self, key): self.accessed_headers.add(key) return _httputil.HeaderMap.__getitem__(self, key) def __contains__(self, key): self.accessed_headers.add(key) return _httputil.HeaderMap.__contains__(self, key) def get(self, key, default=None): self.accessed_headers.add(key) return _httputil.HeaderMap.get(self, key, default=default) if hasattr({}, 'has_key'): # Python 2 def has_key(self, key): self.accessed_headers.add(key) return _httputil.HeaderMap.has_key(self, key) def autovary(ignore=None, debug=False): """Auto-populate the Vary response header based on request.header access. 
""" request = cherrypy.serving.request req_h = request.headers request.headers = MonitoredHeaderMap() request.headers.update(req_h) if ignore is None: ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type']) def set_response_header(): resp_h = cherrypy.serving.response.headers v = set([e.value for e in resp_h.elements('Vary')]) if debug: cherrypy.log( 'Accessed headers: %s' % request.headers.accessed_headers, 'TOOLS.AUTOVARY') v = v.union(request.headers.accessed_headers) v = v.difference(ignore) v = list(v) v.sort() resp_h['Vary'] = ', '.join(v) request.hooks.attach('before_finalize', set_response_header, 95)
23,188
Python
.py
520
35.094231
79
0.607368
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,953
caching.py
evilhero_mylar/lib/cherrypy/lib/caching.py
""" CherryPy implements a simple caching system as a pluggable Tool. This tool tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there yet, but it's probably good enough for most sites. In general, GET responses are cached (along with selecting headers) and, if another request arrives for the same resource, the caching Tool will return 304 Not Modified if possible, or serve the cached response otherwise. It also sets request.cached to True if serving a cached representation, and sets request.cacheable to False (so it doesn't get cached again). If POST, PUT, or DELETE requests are made for a cached resource, they invalidate (delete) any cached response. Usage ===== Configuration file example:: [/] tools.caching.on = True tools.caching.delay = 3600 You may use a class other than the default :class:`MemoryCache<cherrypy.lib.caching.MemoryCache>` by supplying the config entry ``cache_class``; supply the full dotted name of the replacement class as the config value. It must implement the basic methods ``get``, ``put``, ``delete``, and ``clear``. You may set any attribute, including overriding methods, on the cache instance by providing them in config. The above sets the :attr:`delay<cherrypy.lib.caching.MemoryCache.delay>` attribute, for example. 
""" import datetime import sys import threading import time import cherrypy from cherrypy.lib import cptools, httputil from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted, Event class Cache(object): """Base class for Cache implementations.""" def get(self): """Return the current variant if in the cache, else None.""" raise NotImplemented def put(self, obj, size): """Store the current variant in the cache.""" raise NotImplemented def delete(self): """Remove ALL cached variants of the current resource.""" raise NotImplemented def clear(self): """Reset the cache to its initial, empty state.""" raise NotImplemented # ------------------------------ Memory Cache ------------------------------- # class AntiStampedeCache(dict): """A storage system for cached items which reduces stampede collisions.""" def wait(self, key, timeout=5, debug=False): """Return the cached value for the given key, or None. If timeout is not None, and the value is already being calculated by another thread, wait until the given timeout has elapsed. If the value is available before the timeout expires, it is returned. If not, None is returned, and a sentinel placed in the cache to signal other threads to wait. If timeout is None, no waiting is performed nor sentinels used. """ value = self.get(key) if isinstance(value, Event): if timeout is None: # Ignore the other thread and recalc it ourselves. if debug: cherrypy.log('No timeout', 'TOOLS.CACHING') return None # Wait until it's done or times out. if debug: cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING') value.wait(timeout) if value.result is not None: # The other thread finished its calculation. Use it. if debug: cherrypy.log('Result!', 'TOOLS.CACHING') return value.result # Timed out. Stick an Event in the slot so other threads wait # on this one to finish calculating the value. 
if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return None elif value is None: # Stick an Event in the slot so other threads wait # on this one to finish calculating the value. if debug: cherrypy.log('Timed out', 'TOOLS.CACHING') e = threading.Event() e.result = None dict.__setitem__(self, key, e) return value def __setitem__(self, key, value): """Set the cached value for the given key.""" existing = self.get(key) dict.__setitem__(self, key, value) if isinstance(existing, Event): # Set Event.result so other threads waiting on it have # immediate access without needing to poll the cache again. existing.result = value existing.set() class MemoryCache(Cache): """An in-memory cache for varying response content. Each key in self.store is a URI, and each value is an AntiStampedeCache. The response for any given URI may vary based on the values of "selecting request headers"; that is, those named in the Vary response header. We assume the list of header names to be constant for each URI throughout the lifetime of the application, and store that list in ``self.store[uri].selecting_headers``. The items contained in ``self.store[uri]`` have keys which are tuples of request header values (in the same order as the names in its selecting_headers), and values which are the actual responses. """ maxobjects = 1000 """The maximum number of cached objects; defaults to 1000.""" maxobj_size = 100000 """The maximum size of each cached object in bytes; defaults to 100 KB.""" maxsize = 10000000 """The maximum size of the entire cache in bytes; defaults to 10 MB.""" delay = 600 """Seconds until the cached content expires; defaults to 600 (10 minutes). """ antistampede_timeout = 5 """Seconds to wait for other threads to release a cache lock.""" expire_freq = 0.1 """Seconds to sleep between cache expiration sweeps.""" debug = False def __init__(self): self.clear() # Run self.expire_cache in a separate daemon thread. 
t = threading.Thread(target=self.expire_cache, name='expire_cache') self.expiration_thread = t set_daemon(t, True) t.start() def clear(self): """Reset the cache to its initial, empty state.""" self.store = {} self.expirations = {} self.tot_puts = 0 self.tot_gets = 0 self.tot_hist = 0 self.tot_expires = 0 self.tot_non_modified = 0 self.cursize = 0 def expire_cache(self): """Continuously examine cached objects, expiring stale ones. This function is designed to be run in its own daemon thread, referenced at ``self.expiration_thread``. """ # It's possible that "time" will be set to None # arbitrarily, so we check "while time" to avoid exceptions. # See tickets #99 and #180 for more information. while time: now = time.time() # Must make a copy of expirations so it doesn't change size # during iteration for expiration_time, objects in copyitems(self.expirations): if expiration_time <= now: for obj_size, uri, sel_header_values in objects: try: del self.store[uri][tuple(sel_header_values)] self.tot_expires += 1 self.cursize -= obj_size except KeyError: # the key may have been deleted elsewhere pass del self.expirations[expiration_time] time.sleep(self.expire_freq) def get(self): """Return the current variant if in the cache, else None.""" request = cherrypy.serving.request self.tot_gets += 1 uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: return None header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] variant = uricache.wait(key=tuple(sorted(header_values)), timeout=self.antistampede_timeout, debug=self.debug) if variant is not None: self.tot_hist += 1 return variant def put(self, variant, size): """Store the current variant in the cache.""" request = cherrypy.serving.request response = cherrypy.serving.response uri = cherrypy.url(qs=request.query_string) uricache = self.store.get(uri) if uricache is None: uricache = AntiStampedeCache() uricache.selecting_headers = [ e.value for e in 
response.headers.elements('Vary')] self.store[uri] = uricache if len(self.store) < self.maxobjects: total_size = self.cursize + size # checks if there's space for the object if (size < self.maxobj_size and total_size < self.maxsize): # add to the expirations list expiration_time = response.time + self.delay bucket = self.expirations.setdefault(expiration_time, []) bucket.append((size, uri, uricache.selecting_headers)) # add to the cache header_values = [request.headers.get(h, '') for h in uricache.selecting_headers] uricache[tuple(sorted(header_values))] = variant self.tot_puts += 1 self.cursize = total_size def delete(self): """Remove ALL cached variants of the current resource.""" uri = cherrypy.url(qs=cherrypy.serving.request.query_string) self.store.pop(uri, None) def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs): """Try to obtain cached output. If fresh enough, raise HTTPError(304). If POST, PUT, or DELETE: * invalidates (deletes) any cached response for this resource * sets request.cached = False * sets request.cacheable = False else if a cached copy exists: * sets request.cached = True * sets request.cacheable = False * sets response.headers to the cached values * checks the cached Last-Modified response header against the current If-(Un)Modified-Since request headers; raises 304 if necessary. * sets response.status and response.body to the cached values * returns True otherwise: * sets request.cached = False * sets request.cacheable = True * returns False """ request = cherrypy.serving.request response = cherrypy.serving.response if not hasattr(cherrypy, "_cache"): # Make a process-wide Cache object. cherrypy._cache = kwargs.pop("cache_class", MemoryCache)() # Take all remaining kwargs and set them on the Cache object. for k, v in kwargs.items(): setattr(cherrypy._cache, k, v) cherrypy._cache.debug = debug # POST, PUT, DELETE should invalidate (delete) the cached copy. 
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10. if request.method in invalid_methods: if debug: cherrypy.log('request.method %r in invalid_methods %r' % (request.method, invalid_methods), 'TOOLS.CACHING') cherrypy._cache.delete() request.cached = False request.cacheable = False return False if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]: request.cached = False request.cacheable = True return False cache_data = cherrypy._cache.get() request.cached = bool(cache_data) request.cacheable = not request.cached if request.cached: # Serve the cached copy. max_age = cherrypy._cache.delay for v in [e.value for e in request.headers.elements('Cache-Control')]: atoms = v.split('=', 1) directive = atoms.pop(0) if directive == 'max-age': if len(atoms) != 1 or not atoms[0].isdigit(): raise cherrypy.HTTPError( 400, "Invalid Cache-Control header") max_age = int(atoms[0]) break elif directive == 'no-cache': if debug: cherrypy.log( 'Ignoring cache due to Cache-Control: no-cache', 'TOOLS.CACHING') request.cached = False request.cacheable = True return False if debug: cherrypy.log('Reading response from cache', 'TOOLS.CACHING') s, h, b, create_time = cache_data age = int(response.time - create_time) if (age > max_age): if debug: cherrypy.log('Ignoring cache due to age > %d' % max_age, 'TOOLS.CACHING') request.cached = False request.cacheable = True return False # Copy the response headers. See # https://bitbucket.org/cherrypy/cherrypy/issue/721. response.headers = rh = httputil.HeaderMap() for k in h: dict.__setitem__(rh, k, dict.__getitem__(h, k)) # Add the required Age header response.headers["Age"] = str(age) try: # Note that validate_since depends on a Last-Modified header; # this was put into the cached copy, and should have been # resurrected just above (response.headers = cache_data[1]). 
cptools.validate_since() except cherrypy.HTTPRedirect: x = sys.exc_info()[1] if x.status == 304: cherrypy._cache.tot_non_modified += 1 raise # serve it & get out from the request response.status = s response.body = b else: if debug: cherrypy.log('request is not cached', 'TOOLS.CACHING') return request.cached def tee_output(): """Tee response output to cache storage. Internal.""" # Used by CachingTool by attaching to request.hooks request = cherrypy.serving.request if 'no-store' in request.headers.values('Cache-Control'): return def tee(body): """Tee response.body into a list.""" if ('no-cache' in response.headers.values('Pragma') or 'no-store' in response.headers.values('Cache-Control')): for chunk in body: yield chunk return output = [] for chunk in body: output.append(chunk) yield chunk # save the cache data body = ntob('').join(output) cherrypy._cache.put((response.status, response.headers or {}, body, response.time), len(body)) response = cherrypy.serving.response response.body = tee(response.body) def expires(secs=0, force=False, debug=False): """Tool for influencing cache mechanisms using the 'Expires' header. secs Must be either an int or a datetime.timedelta, and indicates the number of seconds between response.time and when the response should expire. The 'Expires' header will be set to response.time + secs. If secs is zero, the 'Expires' header is set one year in the past, and the following "cache prevention" headers are also set: * Pragma: no-cache * Cache-Control': no-cache, must-revalidate force If False, the following headers are checked: * Etag * Last-Modified * Age * Expires If any are already present, none of the above response headers are set. 
""" response = cherrypy.serving.response headers = response.headers cacheable = False if not force: # some header names that indicate that the response can be cached for indicator in ('Etag', 'Last-Modified', 'Age', 'Expires'): if indicator in headers: cacheable = True break if not cacheable and not force: if debug: cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES') else: if debug: cherrypy.log('request is cacheable', 'TOOLS.EXPIRES') if isinstance(secs, datetime.timedelta): secs = (86400 * secs.days) + secs.seconds if secs == 0: if force or ("Pragma" not in headers): headers["Pragma"] = "no-cache" if cherrypy.serving.request.protocol >= (1, 1): if force or "Cache-Control" not in headers: headers["Cache-Control"] = "no-cache, must-revalidate" # Set an explicit Expires date in the past. expiry = httputil.HTTPDate(1169942400.0) else: expiry = httputil.HTTPDate(response.time + secs) if force or "Expires" not in headers: headers["Expires"] = expiry
17,149
Python
.py
385
34.246753
79
0.60843
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,954
covercp.py
evilhero_mylar/lib/cherrypy/lib/covercp.py
"""Code-coverage tools for CherryPy. To use this module, or the coverage tools in the test suite, you need to download 'coverage.py', either Gareth Rees' `original implementation <http://www.garethrees.org/2001/12/04/python-coverage/>`_ or Ned Batchelder's `enhanced version: <http://www.nedbatchelder.com/code/modules/coverage.html>`_ To turn on coverage tracing, use the following code:: cherrypy.engine.subscribe('start', covercp.start) DO NOT subscribe anything on the 'start_thread' channel, as previously recommended. Calling start once in the main thread should be sufficient to start coverage on all threads. Calling start again in each thread effectively clears any coverage data gathered up to that point. Run your code, then use the ``covercp.serve()`` function to browse the results in a web browser. If you run this module from the command line, it will call ``serve()`` for you. """ import re import sys import cgi from cherrypy._cpcompat import quote_plus import os import os.path localFile = os.path.join(os.path.dirname(__file__), "coverage.cache") the_coverage = None try: from coverage import coverage the_coverage = coverage(data_file=localFile) def start(): the_coverage.start() except ImportError: # Setting the_coverage to None will raise errors # that need to be trapped downstream. 
the_coverage = None import warnings warnings.warn( "No code coverage will be performed; " "coverage.py could not be imported.") def start(): pass start.priority = 20 TEMPLATE_MENU = """<html> <head> <title>CherryPy Coverage Menu</title> <style> body {font: 9pt Arial, serif;} #tree { font-size: 8pt; font-family: Andale Mono, monospace; white-space: pre; } #tree a:active, a:focus { background-color: black; padding: 1px; color: white; border: 0px solid #9999FF; -moz-outline-style: none; } .fail { color: red;} .pass { color: #888;} #pct { text-align: right;} h3 { font-size: small; font-weight: bold; font-style: italic; margin-top: 5px; } input { border: 1px solid #ccc; padding: 2px; } .directory { color: #933; font-style: italic; font-weight: bold; font-size: 10pt; } .file { color: #400; } a { text-decoration: none; } #crumbs { color: white; font-size: 8pt; font-family: Andale Mono, monospace; width: 100%; background-color: black; } #crumbs a { color: #f88; } #options { line-height: 2.3em; border: 1px solid black; background-color: #eee; padding: 4px; } #exclude { width: 100%; margin-bottom: 3px; border: 1px solid #999; } #submit { background-color: black; color: white; border: 0; margin-bottom: -9px; } </style> </head> <body> <h2>CherryPy Coverage</h2>""" TEMPLATE_FORM = """ <div id="options"> <form action='menu' method=GET> <input type='hidden' name='base' value='%(base)s' /> Show percentages <input type='checkbox' %(showpct)s name='showpct' value='checked' /><br /> Hide files over <input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br /> Exclude files matching<br /> <input type='text' id='exclude' name='exclude' value='%(exclude)s' size='20' /> <br /> <input type='submit' value='Change view' id="submit"/> </form> </div>""" TEMPLATE_FRAMESET = """<html> <head><title>CherryPy coverage data</title></head> <frameset cols='250, 1*'> <frame src='menu?base=%s' /> <frame name='main' src='' /> </frameset> </html> """ TEMPLATE_COVERAGE = """<html> <head> 
<title>Coverage for %(name)s</title> <style> h2 { margin-bottom: .25em; } p { margin: .25em; } .covered { color: #000; background-color: #fff; } .notcovered { color: #fee; background-color: #500; } .excluded { color: #00f; background-color: #fff; } table .covered, table .notcovered, table .excluded { font-family: Andale Mono, monospace; font-size: 10pt; white-space: pre; } .lineno { background-color: #eee;} .notcovered .lineno { background-color: #000;} table { border-collapse: collapse; </style> </head> <body> <h2>%(name)s</h2> <p>%(fullpath)s</p> <p>Coverage: %(pc)s%%</p>""" TEMPLATE_LOC_COVERED = """<tr class="covered"> <td class="lineno">%s&nbsp;</td> <td>%s</td> </tr>\n""" TEMPLATE_LOC_NOT_COVERED = """<tr class="notcovered"> <td class="lineno">%s&nbsp;</td> <td>%s</td> </tr>\n""" TEMPLATE_LOC_EXCLUDED = """<tr class="excluded"> <td class="lineno">%s&nbsp;</td> <td>%s</td> </tr>\n""" TEMPLATE_ITEM = ( "%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n" ) def _percent(statements, missing): s = len(statements) e = s - len(missing) if s > 0: return int(round(100.0 * e / s)) return 0 def _show_branch(root, base, path, pct=0, showpct=False, exclude="", coverage=the_coverage): # Show the directory name and any of our children dirs = [k for k, v in root.items() if v] dirs.sort() for name in dirs: newpath = os.path.join(path, name) if newpath.lower().startswith(base): relpath = newpath[len(base):] yield "| " * relpath.count(os.sep) yield ( "<a class='directory' " "href='menu?base=%s&exclude=%s'>%s</a>\n" % (newpath, quote_plus(exclude), name) ) for chunk in _show_branch( root[name], base, newpath, pct, showpct, exclude, coverage=coverage ): yield chunk # Now list the files if path.lower().startswith(base): relpath = path[len(base):] files = [k for k, v in root.items() if not v] files.sort() for name in files: newpath = os.path.join(path, name) pc_str = "" if showpct: try: _, statements, _, missing, _ = coverage.analysis2(newpath) except: # Yes, we 
really want to pass on all errors. pass else: pc = _percent(statements, missing) pc_str = ("%3d%% " % pc).replace(' ', '&nbsp;') if pc < float(pct) or pc == -1: pc_str = "<span class='fail'>%s</span>" % pc_str else: pc_str = "<span class='pass'>%s</span>" % pc_str yield TEMPLATE_ITEM % ("| " * (relpath.count(os.sep) + 1), pc_str, newpath, name) def _skip_file(path, exclude): if exclude: return bool(re.search(exclude, path)) def _graft(path, tree): d = tree p = path atoms = [] while True: p, tail = os.path.split(p) if not tail: break atoms.append(tail) atoms.append(p) if p != "/": atoms.append("/") atoms.reverse() for node in atoms: if node: d = d.setdefault(node, {}) def get_tree(base, exclude, coverage=the_coverage): """Return covered module names as a nested dict.""" tree = {} runs = coverage.data.executed_files() for path in runs: if not _skip_file(path, exclude) and not os.path.isdir(path): _graft(path, tree) return tree class CoverStats(object): def __init__(self, coverage, root=None): self.coverage = coverage if root is None: # Guess initial depth. Files outside this path will not be # reachable from the web interface. import cherrypy root = os.path.dirname(cherrypy.__file__) self.root = root def index(self): return TEMPLATE_FRAMESET % self.root.lower() index.exposed = True def menu(self, base="/", pct="50", showpct="", exclude=r'python\d\.\d|test|tut\d|tutorial'): # The coverage module uses all-lower-case names. 
base = base.lower().rstrip(os.sep) yield TEMPLATE_MENU yield TEMPLATE_FORM % locals() # Start by showing links for parent paths yield "<div id='crumbs'>" path = "" atoms = base.split(os.sep) atoms.pop() for atom in atoms: path += atom + os.sep yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s" % (path, quote_plus(exclude), atom, os.sep)) yield "</div>" yield "<div id='tree'>" # Then display the tree tree = get_tree(base, exclude, self.coverage) if not tree: yield "<p>No modules covered.</p>" else: for chunk in _show_branch(tree, base, "/", pct, showpct == 'checked', exclude, coverage=self.coverage): yield chunk yield "</div>" yield "</body></html>" menu.exposed = True def annotated_file(self, filename, statements, excluded, missing): source = open(filename, 'r') buffer = [] for lineno, line in enumerate(source.readlines()): lineno += 1 line = line.strip("\n\r") empty_the_buffer = True if lineno in excluded: template = TEMPLATE_LOC_EXCLUDED elif lineno in missing: template = TEMPLATE_LOC_NOT_COVERED elif lineno in statements: template = TEMPLATE_LOC_COVERED else: empty_the_buffer = False buffer.append((lineno, line)) if empty_the_buffer: for lno, pastline in buffer: yield template % (lno, cgi.escape(pastline)) buffer = [] yield template % (lineno, cgi.escape(line)) def report(self, name): filename, statements, excluded, missing, _ = self.coverage.analysis2( name) pc = _percent(statements, missing) yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name), fullpath=name, pc=pc) yield '<table>\n' for line in self.annotated_file(filename, statements, excluded, missing): yield line yield '</table>' yield '</body>' yield '</html>' report.exposed = True def serve(path=localFile, port=8080, root=None): if coverage is None: raise ImportError("The coverage module could not be imported.") from coverage import coverage cov = coverage(data_file=path) cov.load() import cherrypy cherrypy.config.update({'server.socket_port': int(port), 'server.thread_pool': 10, 'environment': 
"production", }) cherrypy.quickstart(CoverStats(cov, root)) if __name__ == "__main__": serve(*tuple(sys.argv[1:]))
11,592
Python
.py
334
25.502994
78
0.551183
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,955
auth_basic.py
evilhero_mylar/lib/cherrypy/lib/auth_basic.py
# This file is part of CherryPy <http://www.cherrypy.org/> # -*- coding: utf-8 -*- # vim:ts=4:sw=4:expandtab:fileencoding=utf-8 __doc__ = """This module provides a CherryPy 3.x tool which implements the server-side of HTTP Basic Access Authentication, as described in :rfc:`2617`. Example usage, using the built-in checkpassword_dict function which uses a dict as the credentials store:: userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'} checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict) basic_auth = {'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'earth', 'tools.auth_basic.checkpassword': checkpassword, } app_config = { '/' : basic_auth } """ __author__ = 'visteya' __date__ = 'April 2009' import binascii from cherrypy._cpcompat import base64_decode import cherrypy def checkpassword_dict(user_password_dict): """Returns a checkpassword function which checks credentials against a dictionary of the form: {username : password}. If you want a simple dictionary-based authentication scheme, use checkpassword_dict(my_credentials_dict) as the value for the checkpassword argument to basic_auth(). """ def checkpassword(realm, user, password): p = user_password_dict.get(user) return p and p == password or False return checkpassword def basic_auth(realm, checkpassword, debug=False): """A CherryPy tool which hooks at before_handler to perform HTTP Basic Access Authentication, as specified in :rfc:`2617`. If the request has an 'authorization' header with a 'Basic' scheme, this tool attempts to authenticate the credentials supplied in that header. If the request has no 'authorization' header, or if it does but the scheme is not 'Basic', or if authentication fails, the tool sends a 401 response with a 'WWW-Authenticate' Basic header. realm A string containing the authentication realm. checkpassword A callable which checks the authentication credentials. Its signature is checkpassword(realm, username, password). 
where username and password are the values obtained from the request's 'authorization' header. If authentication succeeds, checkpassword returns True, else it returns False. """ if '"' in realm: raise ValueError('Realm cannot contain the " (quote) character.') request = cherrypy.serving.request auth_header = request.headers.get('authorization') if auth_header is not None: try: scheme, params = auth_header.split(' ', 1) if scheme.lower() == 'basic': username, password = base64_decode(params).split(':', 1) if checkpassword(realm, username, password): if debug: cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC') request.login = username return # successful authentication # split() error, base64.decodestring() error except (ValueError, binascii.Error): raise cherrypy.HTTPError(400, 'Bad Request') # Respond with 401 status and a WWW-Authenticate header cherrypy.serving.response.headers[ 'www-authenticate'] = 'Basic realm="%s"' % realm raise cherrypy.HTTPError( 401, "You are not authorized to access that resource")
3,426
Python
.py
71
41.070423
79
0.685252
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,956
xmlrpc.py
evilhero_mylar/lib/cherrypy/lib/xmlrpc.py
import sys import cherrypy def process_body(): """Return (params, method) from request body.""" try: import xmlrpclib return xmlrpclib.loads(cherrypy.request.body.read()) except Exception: return ('ERROR PARAMS', ), 'ERRORMETHOD' def patched_path(path): """Return 'path', doctored for RPC.""" if not path.endswith('/'): path += '/' if path.startswith('/RPC2/'): # strip the first /rpc2 path = path[5:] return path def _set_response(body): # The XML-RPC spec (http://www.xmlrpc.com/spec) says: # "Unless there's a lower-level error, always return 200 OK." # Since Python's xmlrpclib interprets a non-200 response # as a "Protocol Error", we'll just return 200 every time. response = cherrypy.response response.status = '200 OK' response.body = body response.headers['Content-Type'] = 'text/xml' response.headers['Content-Length'] = len(body) def respond(body, encoding='utf-8', allow_none=0): from xmlrpclib import Fault, dumps if not isinstance(body, Fault): body = (body,) _set_response(dumps(body, methodresponse=1, encoding=encoding, allow_none=allow_none)) def on_error(*args, **kwargs): body = str(sys.exc_info()[1]) from xmlrpclib import Fault, dumps _set_response(dumps(Fault(1, body)))
1,397
Python
.py
38
30.421053
65
0.643175
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,957
cpstats.py
evilhero_mylar/lib/cherrypy/lib/cpstats.py
"""CPStats, a package for collecting and reporting on program statistics. Overview ======== Statistics about program operation are an invaluable monitoring and debugging tool. Unfortunately, the gathering and reporting of these critical values is usually ad-hoc. This package aims to add a centralized place for gathering statistical performance data, a structure for recording that data which provides for extrapolation of that data into more useful information, and a method of serving that data to both human investigators and monitoring software. Let's examine each of those in more detail. Data Gathering -------------- Just as Python's `logging` module provides a common importable for gathering and sending messages, performance statistics would benefit from a similar common mechanism, and one that does *not* require each package which wishes to collect stats to import a third-party module. Therefore, we choose to re-use the `logging` module by adding a `statistics` object to it. That `logging.statistics` object is a nested dict. It is not a custom class, because that would: 1. require libraries and applications to import a third-party module in order to participate 2. inhibit innovation in extrapolation approaches and in reporting tools, and 3. be slow. There are, however, some specifications regarding the structure of the dict.:: { +----"SQLAlchemy": { | "Inserts": 4389745, | "Inserts per Second": | lambda s: s["Inserts"] / (time() - s["Start"]), | C +---"Table Statistics": { | o | "widgets": {-----------+ N | l | "Rows": 1.3M, | Record a | l | "Inserts": 400, | m | e | },---------------------+ e | c | "froobles": { s | t | "Rows": 7845, p | i | "Inserts": 0, a | o | }, c | n +---}, e | "Slow Queries": | [{"Query": "SELECT * FROM widgets;", | "Processing Time": 47.840923343, | }, | ], +----}, } The `logging.statistics` dict has four levels. The topmost level is nothing more than a set of names to introduce modularity, usually along the lines of package names. 
If the SQLAlchemy project wanted to participate, for example, it might populate the item `logging.statistics['SQLAlchemy']`, whose value would be a second-layer dict we call a "namespace". Namespaces help multiple packages to avoid collisions over key names, and make reports easier to read, to boot. The maintainers of SQLAlchemy should feel free to use more than one namespace if needed (such as 'SQLAlchemy ORM'). Note that there are no case or other syntax constraints on the namespace names; they should be chosen to be maximally readable by humans (neither too short nor too long). Each namespace, then, is a dict of named statistical values, such as 'Requests/sec' or 'Uptime'. You should choose names which will look good on a report: spaces and capitalization are just fine. In addition to scalars, values in a namespace MAY be a (third-layer) dict, or a list, called a "collection". For example, the CherryPy :class:`StatsTool` keeps track of what each request is doing (or has most recently done) in a 'Requests' collection, where each key is a thread ID; each value in the subdict MUST be a fourth dict (whew!) of statistical data about each thread. We call each subdict in the collection a "record". Similarly, the :class:`StatsTool` also keeps a list of slow queries, where each record contains data about each slow query, in order. Values in a namespace or record may also be functions, which brings us to: Extrapolation ------------- The collection of statistical data needs to be fast, as close to unnoticeable as possible to the host program. That requires us to minimize I/O, for example, but in Python it also means we need to minimize function calls. So when you are designing your namespace and record values, try to insert the most basic scalar values you already have on hand. When it comes time to report on the gathered data, however, we usually have much more freedom in what we can calculate. 
Therefore, whenever reporting tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents of `logging.statistics` for reporting, they first call `extrapolate_statistics` (passing the whole `statistics` dict as the only argument). This makes a deep copy of the statistics dict so that the reporting tool can both iterate over it and even change it without harming the original. But it also expands any functions in the dict by calling them. For example, you might have a 'Current Time' entry in the namespace with the value "lambda scope: time.time()". The "scope" parameter is the current namespace dict (or record, if we're currently expanding one of those instead), allowing you access to existing static entries. If you're truly evil, you can even modify more than one entry at a time. However, don't try to calculate an entry and then use its value in further extrapolations; the order in which the functions are called is not guaranteed. This can lead to a certain amount of duplicated work (or a redesign of your schema), but that's better than complicating the spec. After the whole thing has been extrapolated, it's time for: Reporting --------- The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates it all, and then transforms it to HTML for easy viewing. Each namespace gets its own header and attribute table, plus an extra table for each collection. This is NOT part of the statistics specification; other tools can format how they like. You can control which columns are output and how they are formatted by updating StatsPage.formatting, which is a dict that mirrors the keys and nesting of `logging.statistics`. The difference is that, instead of data values, it has formatting values. Use None for a given key to indicate to the StatsPage that a given column should not be output. Use a string with formatting (such as '%.3f') to interpolate the value(s), or use a callable (such as lambda v: v.isoformat()) for more advanced formatting. 
Any entry which is not mentioned in the formatting dict is output unchanged. Monitoring ---------- Although the HTML output takes pains to assign unique id's to each <td> with statistical data, you're probably better off fetching /cpstats/data, which outputs the whole (extrapolated) `logging.statistics` dict in JSON format. That is probably easier to parse, and doesn't have any formatting controls, so you get the "original" data in a consistently-serialized format. Note: there's no treatment yet for datetime objects. Try time.time() instead for now if you can. Nagios will probably thank you. Turning Collection Off ---------------------- It is recommended each namespace have an "Enabled" item which, if False, stops collection (but not reporting) of statistical data. Applications SHOULD provide controls to pause and resume collection by setting these entries to False or True, if present. Usage ===== To collect statistics on CherryPy applications:: from cherrypy.lib import cpstats appconfig['/']['tools.cpstats.on'] = True To collect statistics on your own code:: import logging # Initialize the repository if not hasattr(logging, 'statistics'): logging.statistics = {} # Initialize my namespace mystats = logging.statistics.setdefault('My Stuff', {}) # Initialize my namespace's scalars and collections mystats.update({ 'Enabled': True, 'Start Time': time.time(), 'Important Events': 0, 'Events/Second': lambda s: ( (s['Important Events'] / (time.time() - s['Start Time']))), }) ... for event in events: ... # Collect stats if mystats.get('Enabled', False): mystats['Important Events'] += 1 To report statistics:: root.cpstats = cpstats.StatsPage() To format statistics reports:: See 'Reporting', above. 
""" # ------------------------------- Statistics -------------------------------- # import logging if not hasattr(logging, 'statistics'): logging.statistics = {} def extrapolate_statistics(scope): """Return an extrapolated copy of the given scope.""" c = {} for k, v in list(scope.items()): if isinstance(v, dict): v = extrapolate_statistics(v) elif isinstance(v, (list, tuple)): v = [extrapolate_statistics(record) for record in v] elif hasattr(v, '__call__'): v = v(scope) c[k] = v return c # -------------------- CherryPy Applications Statistics --------------------- # import threading import time import cherrypy appstats = logging.statistics.setdefault('CherryPy Applications', {}) appstats.update({ 'Enabled': True, 'Bytes Read/Request': lambda s: ( s['Total Requests'] and (s['Total Bytes Read'] / float(s['Total Requests'])) or 0.0 ), 'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s), 'Bytes Written/Request': lambda s: ( s['Total Requests'] and (s['Total Bytes Written'] / float(s['Total Requests'])) or 0.0 ), 'Bytes Written/Second': lambda s: ( s['Total Bytes Written'] / s['Uptime'](s) ), 'Current Time': lambda s: time.time(), 'Current Requests': 0, 'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s), 'Server Version': cherrypy.__version__, 'Start Time': time.time(), 'Total Bytes Read': 0, 'Total Bytes Written': 0, 'Total Requests': 0, 'Total Time': 0, 'Uptime': lambda s: time.time() - s['Start Time'], 'Requests': {}, }) proc_time = lambda s: time.time() - s['Start Time'] class ByteCountWrapper(object): """Wraps a file-like object, counting the number of bytes read.""" def __init__(self, rfile): self.rfile = rfile self.bytes_read = 0 def read(self, size=-1): data = self.rfile.read(size) self.bytes_read += len(data) return data def readline(self, size=-1): data = self.rfile.readline(size) self.bytes_read += len(data) return data def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = 
self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def close(self): self.rfile.close() def __iter__(self): return self def next(self): data = self.rfile.next() self.bytes_read += len(data) return data average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0 class StatsTool(cherrypy.Tool): """Record various information about the current request.""" def __init__(self): cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop) def _setup(self): """Hook this tool into cherrypy.request. The standard CherryPy request object will automatically call this method when the tool is "turned on" in config. """ if appstats.get('Enabled', False): cherrypy.Tool._setup(self) self.record_start() def record_start(self): """Record the beginning of a request.""" request = cherrypy.serving.request if not hasattr(request.rfile, 'bytes_read'): request.rfile = ByteCountWrapper(request.rfile) request.body.fp = request.rfile r = request.remote appstats['Current Requests'] += 1 appstats['Total Requests'] += 1 appstats['Requests'][threading._get_ident()] = { 'Bytes Read': None, 'Bytes Written': None, # Use a lambda so the ip gets updated by tools.proxy later 'Client': lambda s: '%s:%s' % (r.ip, r.port), 'End Time': None, 'Processing Time': proc_time, 'Request-Line': request.request_line, 'Response Status': None, 'Start Time': time.time(), } def record_stop( self, uriset=None, slow_queries=1.0, slow_queries_count=100, debug=False, **kwargs): """Record the end of a request.""" resp = cherrypy.serving.response w = appstats['Requests'][threading._get_ident()] r = cherrypy.request.rfile.bytes_read w['Bytes Read'] = r appstats['Total Bytes Read'] += r if resp.stream: w['Bytes Written'] = 'chunked' else: cl = int(resp.headers.get('Content-Length', 0)) w['Bytes Written'] = cl appstats['Total Bytes Written'] += cl w['Response Status'] = getattr( resp, 'output_status', None) or resp.status w['End 
Time'] = time.time() p = w['End Time'] - w['Start Time'] w['Processing Time'] = p appstats['Total Time'] += p appstats['Current Requests'] -= 1 if debug: cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS') if uriset: rs = appstats.setdefault('URI Set Tracking', {}) r = rs.setdefault(uriset, { 'Min': None, 'Max': None, 'Count': 0, 'Sum': 0, 'Avg': average_uriset_time}) if r['Min'] is None or p < r['Min']: r['Min'] = p if r['Max'] is None or p > r['Max']: r['Max'] = p r['Count'] += 1 r['Sum'] += p if slow_queries and p > slow_queries: sq = appstats.setdefault('Slow Queries', []) sq.append(w.copy()) if len(sq) > slow_queries_count: sq.pop(0) import cherrypy cherrypy.tools.cpstats = StatsTool() # ---------------------- CherryPy Statistics Reporting ---------------------- # import os thisdir = os.path.abspath(os.path.dirname(__file__)) try: import json except ImportError: try: import simplejson as json except ImportError: json = None missing = object() locale_date = lambda v: time.strftime('%c', time.gmtime(v)) iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v)) def pause_resume(ns): def _pause_resume(enabled): pause_disabled = '' resume_disabled = '' if enabled: resume_disabled = 'disabled="disabled" ' else: pause_disabled = 'disabled="disabled" ' return """ <form action="pause" method="POST" style="display:inline"> <input type="hidden" name="namespace" value="%s" /> <input type="submit" value="Pause" %s/> </form> <form action="resume" method="POST" style="display:inline"> <input type="hidden" name="namespace" value="%s" /> <input type="submit" value="Resume" %s/> </form> """ % (ns, pause_disabled, ns, resume_disabled) return _pause_resume class StatsPage(object): formatting = { 'CherryPy Applications': { 'Enabled': pause_resume('CherryPy Applications'), 'Bytes Read/Request': '%.3f', 'Bytes Read/Second': '%.3f', 'Bytes Written/Request': '%.3f', 'Bytes Written/Second': '%.3f', 'Current Time': iso_format, 'Requests/Second': '%.3f', 'Start 
Time': iso_format, 'Total Time': '%.3f', 'Uptime': '%.3f', 'Slow Queries': { 'End Time': None, 'Processing Time': '%.3f', 'Start Time': iso_format, }, 'URI Set Tracking': { 'Avg': '%.3f', 'Max': '%.3f', 'Min': '%.3f', 'Sum': '%.3f', }, 'Requests': { 'Bytes Read': '%s', 'Bytes Written': '%s', 'End Time': None, 'Processing Time': '%.3f', 'Start Time': None, }, }, 'CherryPy WSGIServer': { 'Enabled': pause_resume('CherryPy WSGIServer'), 'Connections/second': '%.3f', 'Start time': iso_format, }, } def index(self): # Transform the raw data into pretty output for HTML yield """ <html> <head> <title>Statistics</title> <style> th, td { padding: 0.25em 0.5em; border: 1px solid #666699; } table { border-collapse: collapse; } table.stats1 { width: 100%; } table.stats1 th { font-weight: bold; text-align: right; background-color: #CCD5DD; } table.stats2, h2 { margin-left: 50px; } table.stats2 th { font-weight: bold; text-align: center; background-color: #CCD5DD; } </style> </head> <body> """ for title, scalars, collections in self.get_namespaces(): yield """ <h1>%s</h1> <table class='stats1'> <tbody> """ % title for i, (key, value) in enumerate(scalars): colnum = i % 3 if colnum == 0: yield """ <tr>""" yield ( """ <th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" % vars() ) if colnum == 2: yield """ </tr>""" if colnum == 0: yield """ <th></th><td></td> <th></th><td></td> </tr>""" elif colnum == 1: yield """ <th></th><td></td> </tr>""" yield """ </tbody> </table>""" for subtitle, headers, subrows in collections: yield """ <h2>%s</h2> <table class='stats2'> <thead> <tr>""" % subtitle for key in headers: yield """ <th>%s</th>""" % key yield """ </tr> </thead> <tbody>""" for subrow in subrows: yield """ <tr>""" for value in subrow: yield """ <td>%s</td>""" % value yield """ </tr>""" yield """ </tbody> </table>""" yield """ </body> </html> """ index.exposed = True def get_namespaces(self): """Yield (title, scalars, collections) for each namespace.""" s = 
extrapolate_statistics(logging.statistics) for title, ns in sorted(s.items()): scalars = [] collections = [] ns_fmt = self.formatting.get(title, {}) for k, v in sorted(ns.items()): fmt = ns_fmt.get(k, {}) if isinstance(v, dict): headers, subrows = self.get_dict_collection(v, fmt) collections.append((k, ['ID'] + headers, subrows)) elif isinstance(v, (list, tuple)): headers, subrows = self.get_list_collection(v, fmt) collections.append((k, headers, subrows)) else: format = ns_fmt.get(k, missing) if format is None: # Don't output this column. continue if hasattr(format, '__call__'): v = format(v) elif format is not missing: v = format % v scalars.append((k, v)) yield title, scalars, collections def get_dict_collection(self, v, formatting): """Return ([headers], [rows]) for the given collection.""" # E.g., the 'Requests' dict. headers = [] for record in v.itervalues(): for k3 in record: format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if k3 not in headers: headers.append(k3) headers.sort() subrows = [] for k2, record in sorted(v.items()): subrow = [k2] for k3 in headers: v3 = record.get(k3, '') format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if hasattr(format, '__call__'): v3 = format(v3) elif format is not missing: v3 = format % v3 subrow.append(v3) subrows.append(subrow) return headers, subrows def get_list_collection(self, v, formatting): """Return ([headers], [subrows]) for the given collection.""" # E.g., the 'Slow Queries' list. headers = [] for record in v: for k3 in record: format = formatting.get(k3, missing) if format is None: # Don't output this column. continue if k3 not in headers: headers.append(k3) headers.sort() subrows = [] for record in v: subrow = [] for k3 in headers: v3 = record.get(k3, '') format = formatting.get(k3, missing) if format is None: # Don't output this column. 
continue if hasattr(format, '__call__'): v3 = format(v3) elif format is not missing: v3 = format % v3 subrow.append(v3) subrows.append(subrow) return headers, subrows if json is not None: def data(self): s = extrapolate_statistics(logging.statistics) cherrypy.response.headers['Content-Type'] = 'application/json' return json.dumps(s, sort_keys=True, indent=4) data.exposed = True def pause(self, namespace): logging.statistics.get(namespace, {})['Enabled'] = False raise cherrypy.HTTPRedirect('./') pause.exposed = True pause.cp_config = {'tools.allow.on': True, 'tools.allow.methods': ['POST']} def resume(self, namespace): logging.statistics.get(namespace, {})['Enabled'] = True raise cherrypy.HTTPRedirect('./') resume.exposed = True resume.cp_config = {'tools.allow.on': True, 'tools.allow.methods': ['POST']}
22,770
Python
.py
577
31.058925
79
0.586016
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,958
http.py
evilhero_mylar/lib/cherrypy/lib/http.py
import warnings warnings.warn('cherrypy.lib.http has been deprecated and will be removed ' 'in CherryPy 3.3 use cherrypy.lib.httputil instead.', DeprecationWarning) from cherrypy.lib.httputil import *
230
Python
.py
5
39.2
74
0.723214
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,959
lockfile.py
evilhero_mylar/lib/cherrypy/lib/lockfile.py
""" Platform-independent file locking. Inspired by and modeled after zc.lockfile. """ import os try: import msvcrt except ImportError: pass try: import fcntl except ImportError: pass class LockError(Exception): "Could not obtain a lock" msg = "Unable to lock %r" def __init__(self, path): super(LockError, self).__init__(self.msg % path) class UnlockError(LockError): "Could not release a lock" msg = "Unable to unlock %r" # first, a default, naive locking implementation class LockFile(object): """ A default, naive locking implementation. Always fails if the file already exists. """ def __init__(self, path): self.path = path try: fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL) except OSError: raise LockError(self.path) os.close(fd) def release(self): os.remove(self.path) def remove(self): pass class SystemLockFile(object): """ An abstract base class for platform-specific locking. """ def __init__(self, path): self.path = path try: # Open lockfile for writing without truncation: self.fp = open(path, 'r+') except IOError: # If the file doesn't exist, IOError is raised; Use a+ instead. # Note that there may be a race here. Multiple processes # could fail on the r+ open and open the file a+, but only # one will get the the lock and write a pid. self.fp = open(path, 'a+') try: self._lock_file() except: self.fp.seek(1) self.fp.close() del self.fp raise self.fp.write(" %s\n" % os.getpid()) self.fp.truncate() self.fp.flush() def release(self): if not hasattr(self, 'fp'): return self._unlock_file() self.fp.close() del self.fp def remove(self): """ Attempt to remove the file """ try: os.remove(self.path) except: pass #@abc.abstract_method # def _lock_file(self): # """Attempt to obtain the lock on self.fp. Raise LockError if not # acquired.""" def _unlock_file(self): """Attempt to obtain the lock on self.fp. 
Raise UnlockError if not released.""" class WindowsLockFile(SystemLockFile): def _lock_file(self): # Lock just the first byte try: msvcrt.locking(self.fp.fileno(), msvcrt.LK_NBLCK, 1) except IOError: raise LockError(self.fp.name) def _unlock_file(self): try: self.fp.seek(0) msvcrt.locking(self.fp.fileno(), msvcrt.LK_UNLCK, 1) except IOError: raise UnlockError(self.fp.name) if 'msvcrt' in globals(): LockFile = WindowsLockFile class UnixLockFile(SystemLockFile): def _lock_file(self): flags = fcntl.LOCK_EX | fcntl.LOCK_NB try: fcntl.flock(self.fp.fileno(), flags) except IOError: raise LockError(self.fp.name) # no need to implement _unlock_file, it will be unlocked on close() if 'fcntl' in globals(): LockFile = UnixLockFile
3,301
Python
.py
108
22.796296
77
0.591947
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,960
win32.py
evilhero_mylar/lib/cherrypy/process/win32.py
"""Windows service. Requires pywin32.""" import os import win32api import win32con import win32event import win32service import win32serviceutil from cherrypy.process import wspbus, plugins class ConsoleCtrlHandler(plugins.SimplePlugin): """A WSPBus plugin for handling Win32 console events (like Ctrl-C).""" def __init__(self, bus): self.is_set = False plugins.SimplePlugin.__init__(self, bus) def start(self): if self.is_set: self.bus.log('Handler for console events already set.', level=40) return result = win32api.SetConsoleCtrlHandler(self.handle, 1) if result == 0: self.bus.log('Could not SetConsoleCtrlHandler (error %r)' % win32api.GetLastError(), level=40) else: self.bus.log('Set handler for console events.', level=40) self.is_set = True def stop(self): if not self.is_set: self.bus.log('Handler for console events already off.', level=40) return try: result = win32api.SetConsoleCtrlHandler(self.handle, 0) except ValueError: # "ValueError: The object has not been registered" result = 1 if result == 0: self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' % win32api.GetLastError(), level=40) else: self.bus.log('Removed handler for console events.', level=40) self.is_set = False def handle(self, event): """Handle console control events (like Ctrl-C).""" if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT, win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT, win32con.CTRL_CLOSE_EVENT): self.bus.log('Console event %s: shutting down bus' % event) # Remove self immediately so repeated Ctrl-C doesn't re-call it. try: self.stop() except ValueError: pass self.bus.exit() # 'First to return True stops the calls' return 1 return 0 class Win32Bus(wspbus.Bus): """A Web Site Process Bus implementation for Win32. Instead of time.sleep, this bus blocks using native win32event objects. 
""" def __init__(self): self.events = {} wspbus.Bus.__init__(self) def _get_state_event(self, state): """Return a win32event for the given state (creating it if needed).""" try: return self.events[state] except KeyError: event = win32event.CreateEvent(None, 0, 0, "WSPBus %s Event (pid=%r)" % (state.name, os.getpid())) self.events[state] = event return event def _get_state(self): return self._state def _set_state(self, value): self._state = value event = self._get_state_event(value) win32event.PulseEvent(event) state = property(_get_state, _set_state) def wait(self, state, interval=0.1, channel=None): """Wait for the given state(s), KeyboardInterrupt or SystemExit. Since this class uses native win32event objects, the interval argument is ignored. """ if isinstance(state, (tuple, list)): # Don't wait for an event that beat us to the punch ;) if self.state not in state: events = tuple([self._get_state_event(s) for s in state]) win32event.WaitForMultipleObjects( events, 0, win32event.INFINITE) else: # Don't wait for an event that beat us to the punch ;) if self.state != state: event = self._get_state_event(state) win32event.WaitForSingleObject(event, win32event.INFINITE) class _ControlCodes(dict): """Control codes used to "signal" a service via ControlService. User-defined control codes are in the range 128-255. We generally use the standard Python value for the Linux signal and add 128. 
Example: >>> signal.SIGUSR1 10 control_codes['graceful'] = 128 + 10 """ def key_for(self, obj): """For the given value, return its corresponding key.""" for key, val in self.items(): if val is obj: return key raise ValueError("The given object could not be found: %r" % obj) control_codes = _ControlCodes({'graceful': 138}) def signal_child(service, command): if command == 'stop': win32serviceutil.StopService(service) elif command == 'restart': win32serviceutil.RestartService(service) else: win32serviceutil.ControlService(service, control_codes[command]) class PyWebService(win32serviceutil.ServiceFramework): """Python Web Service.""" _svc_name_ = "Python Web Service" _svc_display_name_ = "Python Web Service" _svc_deps_ = None # sequence of service names on which this depends _exe_name_ = "pywebsvc" _exe_args_ = None # Default to no arguments # Only exists on Windows 2000 or later, ignored on windows NT _svc_description_ = "Python Web Service" def SvcDoRun(self): from cherrypy import process process.bus.start() process.bus.block() def SvcStop(self): from cherrypy import process self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) process.bus.exit() def SvcOther(self, control): process.bus.publish(control_codes.key_for(control)) if __name__ == '__main__': win32serviceutil.HandleCommandLine(PyWebService)
5,772
Python
.py
137
32.394161
78
0.614807
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,961
plugins.py
evilhero_mylar/lib/cherrypy/process/plugins.py
"""Site services for use with a Web Site Process Bus.""" import os import re import signal as _signal import sys import time import threading from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident from cherrypy._cpcompat import ntob, set, Timer, SetDaemonProperty # _module__file__base is used by Autoreload to make # absolute any filenames retrieved from sys.modules which are not # already absolute paths. This is to work around Python's quirk # of importing the startup script and using a relative filename # for it in sys.modules. # # Autoreload examines sys.modules afresh every time it runs. If an application # changes the current directory by executing os.chdir(), then the next time # Autoreload runs, it will not be able to find any filenames which are # not absolute paths, because the current directory is not the same as when the # module was first imported. Autoreload will then wrongly conclude the file # has "changed", and initiate the shutdown/re-exec sequence. # See ticket #917. # For this workaround to have a decent probability of success, this module # needs to be imported as early as possible, before the app has much chance # to change the working directory. _module__file__base = os.getcwd() class SimplePlugin(object): """Plugin base class which auto-subscribes methods for known channels.""" bus = None """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine. """ def __init__(self, bus): self.bus = bus def subscribe(self): """Register this object as a (multi-channel) listener on the bus.""" for channel in self.bus.listeners: # Subscribe self.start, self.exit, etc. if present. method = getattr(self, channel, None) if method is not None: self.bus.subscribe(channel, method) def unsubscribe(self): """Unregister this object as a listener on the bus.""" for channel in self.bus.listeners: # Unsubscribe self.start, self.exit, etc. if present. 
method = getattr(self, channel, None) if method is not None: self.bus.unsubscribe(channel, method) class SignalHandler(object): """Register bus channels (and listeners) for system signals. You can modify what signals your application listens for, and what it does when it receives signals, by modifying :attr:`SignalHandler.handlers`, a dict of {signal name: callback} pairs. The default set is:: handlers = {'SIGTERM': self.bus.exit, 'SIGHUP': self.handle_SIGHUP, 'SIGUSR1': self.bus.graceful, } The :func:`SignalHandler.handle_SIGHUP`` method calls :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>` if the process is daemonized, but :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>` if the process is attached to a TTY. This is because Unix window managers tend to send SIGHUP to terminal windows when the user closes them. Feel free to add signals which are not available on every platform. The :class:`SignalHandler` will ignore errors raised from attempting to register handlers for unknown signals. """ handlers = {} """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit).""" signals = {} """A map from signal numbers to names.""" for k, v in vars(_signal).items(): if k.startswith('SIG') and not k.startswith('SIG_'): signals[v] = k del k, v def __init__(self, bus): self.bus = bus # Set default handlers self.handlers = {'SIGTERM': self.bus.exit, 'SIGHUP': self.handle_SIGHUP, 'SIGUSR1': self.bus.graceful, } if sys.platform[:4] == 'java': del self.handlers['SIGUSR1'] self.handlers['SIGUSR2'] = self.bus.graceful self.bus.log("SIGUSR1 cannot be set on the JVM platform. 
" "Using SIGUSR2 instead.") self.handlers['SIGINT'] = self._jython_SIGINT_handler self._previous_handlers = {} def _jython_SIGINT_handler(self, signum=None, frame=None): # See http://bugs.jython.org/issue1313 self.bus.log('Keyboard Interrupt: shutting down bus') self.bus.exit() def subscribe(self): """Subscribe self.handlers to signals.""" for sig, func in self.handlers.items(): try: self.set_handler(sig, func) except ValueError: pass def unsubscribe(self): """Unsubscribe self.handlers from signals.""" for signum, handler in self._previous_handlers.items(): signame = self.signals[signum] if handler is None: self.bus.log("Restoring %s handler to SIG_DFL." % signame) handler = _signal.SIG_DFL else: self.bus.log("Restoring %s handler %r." % (signame, handler)) try: our_handler = _signal.signal(signum, handler) if our_handler is None: self.bus.log("Restored old %s handler %r, but our " "handler was not registered." % (signame, handler), level=30) except ValueError: self.bus.log("Unable to restore %s handler %r." % (signame, handler), level=40, traceback=True) def set_handler(self, signal, listener=None): """Subscribe a handler for the given signal (number or name). If the optional 'listener' argument is provided, it will be subscribed as a listener for the given signal's channel. If the given signal name or number is not available on the current platform, ValueError is raised. """ if isinstance(signal, basestring): signum = getattr(_signal, signal, None) if signum is None: raise ValueError("No such signal: %r" % signal) signame = signal else: try: signame = self.signals[signal] except KeyError: raise ValueError("No such signal: %r" % signal) signum = signal prev = _signal.signal(signum, self._handle_signal) self._previous_handlers[signum] = prev if listener is not None: self.bus.log("Listening for %s." 
% signame) self.bus.subscribe(signame, listener) def _handle_signal(self, signum=None, frame=None): """Python signal handler (self.set_handler subscribes it for you).""" signame = self.signals[signum] self.bus.log("Caught signal %s." % signame) self.bus.publish(signame) def handle_SIGHUP(self): """Restart if daemonized, else exit.""" if os.isatty(sys.stdin.fileno()): # not daemonized (may be foreground or background) self.bus.log("SIGHUP caught but not daemonized. Exiting.") self.bus.exit() else: self.bus.log("SIGHUP caught while daemonized. Restarting.") self.bus.restart() try: import pwd import grp except ImportError: pwd, grp = None, None class DropPrivileges(SimplePlugin): """Drop privileges. uid/gid arguments not available on Windows. Special thanks to `Gavin Baker <http://antonym.org/2005/12/dropping-privileges-in-python.html>`_ """ def __init__(self, bus, umask=None, uid=None, gid=None): SimplePlugin.__init__(self, bus) self.finalized = False self.uid = uid self.gid = gid self.umask = umask def _get_uid(self): return self._uid def _set_uid(self, val): if val is not None: if pwd is None: self.bus.log("pwd module not available; ignoring uid.", level=30) val = None elif isinstance(val, basestring): val = pwd.getpwnam(val)[2] self._uid = val uid = property(_get_uid, _set_uid, doc="The uid under which to run. Availability: Unix.") def _get_gid(self): return self._gid def _set_gid(self, val): if val is not None: if grp is None: self.bus.log("grp module not available; ignoring gid.", level=30) val = None elif isinstance(val, basestring): val = grp.getgrnam(val)[2] self._gid = val gid = property(_get_gid, _set_gid, doc="The gid under which to run. 
Availability: Unix.") def _get_umask(self): return self._umask def _set_umask(self, val): if val is not None: try: os.umask except AttributeError: self.bus.log("umask function not available; ignoring umask.", level=30) val = None self._umask = val umask = property( _get_umask, _set_umask, doc="""The default permission mode for newly created files and directories. Usually expressed in octal format, for example, ``0644``. Availability: Unix, Windows. """) def start(self): # uid/gid def current_ids(): """Return the current (uid, gid) if available.""" name, group = None, None if pwd: name = pwd.getpwuid(os.getuid())[0] if grp: group = grp.getgrgid(os.getgid())[0] return name, group if self.finalized: if not (self.uid is None and self.gid is None): self.bus.log('Already running as uid: %r gid: %r' % current_ids()) else: if self.uid is None and self.gid is None: if pwd or grp: self.bus.log('uid/gid not set', level=30) else: self.bus.log('Started as uid: %r gid: %r' % current_ids()) if self.gid is not None: os.setgid(self.gid) os.setgroups([]) if self.uid is not None: os.setuid(self.uid) self.bus.log('Running as uid: %r gid: %r' % current_ids()) # umask if self.finalized: if self.umask is not None: self.bus.log('umask already set to: %03o' % self.umask) else: if self.umask is None: self.bus.log('umask not set', level=30) else: old_umask = os.umask(self.umask) self.bus.log('umask old: %03o, new: %03o' % (old_umask, self.umask)) self.finalized = True # This is slightly higher than the priority for server.start # in order to facilitate the most common use: starting on a low # port (which requires root) and then dropping to another user. start.priority = 77 class Daemonizer(SimplePlugin): """Daemonize the running script. Use this with a Web Site Process Bus via:: Daemonizer(bus).subscribe() When this component finishes, the process is completely decoupled from the parent environment. 
Please note that when this component is used, the return code from the parent process will still be 0 if a startup error occurs in the forked children. Errors in the initial daemonizing process still return proper exit codes. Therefore, if you use this plugin to daemonize, don't use the return code as an accurate indicator of whether the process fully started. In fact, that return code only indicates if the process succesfully finished the first fork. """ def __init__(self, bus, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'): SimplePlugin.__init__(self, bus) self.stdin = stdin self.stdout = stdout self.stderr = stderr self.finalized = False def start(self): if self.finalized: self.bus.log('Already deamonized.') # forking has issues with threads: # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html # "The general problem with making fork() work in a multi-threaded # world is what to do with all of the threads..." # So we check for active threads: if threading.activeCount() != 1: self.bus.log('There are %r active threads. ' 'Daemonizing now may cause strange failures.' % threading.enumerate(), level=30) # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7) # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 # Finish up with the current stdout/stderr sys.stdout.flush() sys.stderr.flush() # Do first fork. try: pid = os.fork() if pid == 0: # This is the child process. Continue. pass else: # This is the first parent. Exit, now that we've forked. self.bus.log('Forking once.') os._exit(0) except OSError: # Python raises OSError rather than returning negative numbers. 
exc = sys.exc_info()[1] sys.exit("%s: fork #1 failed: (%d) %s\n" % (sys.argv[0], exc.errno, exc.strerror)) os.setsid() # Do second fork try: pid = os.fork() if pid > 0: self.bus.log('Forking twice.') os._exit(0) # Exit second parent except OSError: exc = sys.exc_info()[1] sys.exit("%s: fork #2 failed: (%d) %s\n" % (sys.argv[0], exc.errno, exc.strerror)) os.chdir("/") os.umask(0) si = open(self.stdin, "r") so = open(self.stdout, "a+") se = open(self.stderr, "a+") # os.dup2(fd, fd2) will close fd2 if necessary, # so we don't explicitly close stdin/out/err. # See http://docs.python.org/lib/os-fd-ops.html os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) self.bus.log('Daemonized to PID: %s' % os.getpid()) self.finalized = True start.priority = 65 class PIDFile(SimplePlugin): """Maintain a PID file via a WSPBus.""" def __init__(self, bus, pidfile): SimplePlugin.__init__(self, bus) self.pidfile = pidfile self.finalized = False def start(self): pid = os.getpid() if self.finalized: self.bus.log('PID %r already written to %r.' % (pid, self.pidfile)) else: open(self.pidfile, "wb").write(ntob("%s\n" % pid, 'utf8')) self.bus.log('PID %r written to %r.' % (pid, self.pidfile)) self.finalized = True start.priority = 70 def exit(self): try: os.remove(self.pidfile) self.bus.log('PID file removed: %r.' % self.pidfile) except (KeyboardInterrupt, SystemExit): raise except: pass class PerpetualTimer(Timer): """A responsive subclass of threading.Timer whose run() method repeats. Use this timer only when you really need a very interruptible timer; this checks its 'finished' condition up to 20 times a second, which can results in pretty high CPU usage """ def __init__(self, *args, **kwargs): "Override parent constructor to allow 'bus' to be provided." 
self.bus = kwargs.pop('bus', None) super(PerpetualTimer, self).__init__(*args, **kwargs) def run(self): while True: self.finished.wait(self.interval) if self.finished.isSet(): return try: self.function(*self.args, **self.kwargs) except Exception: if self.bus: self.bus.log( "Error in perpetual timer thread function %r." % self.function, level=40, traceback=True) # Quit on first error to avoid massive logs. raise class BackgroundTask(SetDaemonProperty, threading.Thread): """A subclass of threading.Thread whose run() method repeats. Use this class for most repeating tasks. It uses time.sleep() to wait for each interval, which isn't very responsive; that is, even if you call self.cancel(), you'll have to wait until the sleep() call finishes before the thread stops. To compensate, it defaults to being daemonic, which means it won't delay stopping the whole process. """ def __init__(self, interval, function, args=[], kwargs={}, bus=None): threading.Thread.__init__(self) self.interval = interval self.function = function self.args = args self.kwargs = kwargs self.running = False self.bus = bus # default to daemonic self.daemon = True def cancel(self): self.running = False def run(self): self.running = True while self.running: time.sleep(self.interval) if not self.running: return try: self.function(*self.args, **self.kwargs) except Exception: if self.bus: self.bus.log("Error in background task thread function %r." % self.function, level=40, traceback=True) # Quit on first error to avoid massive logs. raise class Monitor(SimplePlugin): """WSPBus listener to periodically run a callback in its own thread.""" callback = None """The function to call at intervals.""" frequency = 60 """The time in seconds between callback runs.""" thread = None """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread. 
""" def __init__(self, bus, callback, frequency=60, name=None): SimplePlugin.__init__(self, bus) self.callback = callback self.frequency = frequency self.thread = None self.name = name def start(self): """Start our callback in its own background thread.""" if self.frequency > 0: threadname = self.name or self.__class__.__name__ if self.thread is None: self.thread = BackgroundTask(self.frequency, self.callback, bus=self.bus) self.thread.setName(threadname) self.thread.start() self.bus.log("Started monitor thread %r." % threadname) else: self.bus.log("Monitor thread %r already started." % threadname) start.priority = 70 def stop(self): """Stop our callback's background task thread.""" if self.thread is None: self.bus.log("No thread running for %s." % self.name or self.__class__.__name__) else: if self.thread is not threading.currentThread(): name = self.thread.getName() self.thread.cancel() if not get_daemon(self.thread): self.bus.log("Joining %r" % name) self.thread.join() self.bus.log("Stopped thread %r." % name) self.thread = None def graceful(self): """Stop the callback's background task thread and restart it.""" self.stop() self.start() class Autoreloader(Monitor): """Monitor which re-executes the process when files change. This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`) if any of the files it monitors change (or is deleted). By default, the autoreloader monitors all imported modules; you can add to the set by adding to ``autoreload.files``:: cherrypy.engine.autoreload.files.add(myFile) If there are imported files you do *not* wish to monitor, you can adjust the ``match`` attribute, a regular expression. For example, to stop monitoring cherrypy itself:: cherrypy.engine.autoreload.match = r'^(?!cherrypy).+' Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins, the autoreload plugin takes a ``frequency`` argument. The default is 1 second; that is, the autoreloader will examine files once each second. 
""" files = None """The set of files to poll for modifications.""" frequency = 1 """The interval in seconds at which to poll for modified files.""" match = '.*' """A regular expression by which to match filenames.""" def __init__(self, bus, frequency=1, match='.*'): self.mtimes = {} self.files = set() self.match = match Monitor.__init__(self, bus, self.run, frequency) def start(self): """Start our own background task thread for self.run.""" if self.thread is None: self.mtimes = {} Monitor.start(self) start.priority = 70 def sysfiles(self): """Return a Set of sys.modules filenames to monitor.""" files = set() for k, m in list(sys.modules.items()): if re.match(self.match, k): if ( hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive') ): f = m.__loader__.archive else: f = getattr(m, '__file__', None) if f is not None and not os.path.isabs(f): # ensure absolute paths so a os.chdir() in the app # doesn't break me f = os.path.normpath( os.path.join(_module__file__base, f)) files.add(f) return files def run(self): """Reload the process if registered files have been modified.""" for filename in self.sysfiles() | self.files: if filename: if filename.endswith('.pyc'): filename = filename[:-1] oldtime = self.mtimes.get(filename, 0) if oldtime is None: # Module with no .py file. Skip it. continue try: mtime = os.stat(filename).st_mtime except OSError: # Either a module with no .py file, or it's been deleted. mtime = None if filename not in self.mtimes: # If a module has no .py file, this will be None. self.mtimes[filename] = mtime else: if mtime is None or mtime > oldtime: # The file has been deleted or modified. self.bus.log("Restarting because %s changed." % filename) self.thread.cancel() self.bus.log("Stopped thread %r." % self.thread.getName()) self.bus.restart() return class ThreadManager(SimplePlugin): """Manager for HTTP request threads. 
If you have control over thread creation and destruction, publish to the 'acquire_thread' and 'release_thread' channels (for each thread). This will register/unregister the current thread and publish to 'start_thread' and 'stop_thread' listeners in the bus as needed. If threads are created and destroyed by code you do not control (e.g., Apache), then, at the beginning of every HTTP request, publish to 'acquire_thread' only. You should not publish to 'release_thread' in this case, since you do not know whether the thread will be re-used or not. The bus will call 'stop_thread' listeners for you when it stops. """ threads = None """A map of {thread ident: index number} pairs.""" def __init__(self, bus): self.threads = {} SimplePlugin.__init__(self, bus) self.bus.listeners.setdefault('acquire_thread', set()) self.bus.listeners.setdefault('start_thread', set()) self.bus.listeners.setdefault('release_thread', set()) self.bus.listeners.setdefault('stop_thread', set()) def acquire_thread(self): """Run 'start_thread' listeners for the current thread. If the current thread has already been seen, any 'start_thread' listeners will not be run again. """ thread_ident = get_thread_ident() if thread_ident not in self.threads: # We can't just use get_ident as the thread ID # because some platforms reuse thread ID's. i = len(self.threads) + 1 self.threads[thread_ident] = i self.bus.publish('start_thread', i) def release_thread(self): """Release the current thread and run 'stop_thread' listeners.""" thread_ident = get_thread_ident() i = self.threads.pop(thread_ident, None) if i is not None: self.bus.publish('stop_thread', i) def stop(self): """Release all threads and run all 'stop_thread' listeners.""" for thread_ident, i in self.threads.items(): self.bus.publish('stop_thread', i) self.threads.clear() graceful = stop
25,833
Python
.py
587
33.042589
101
0.584687
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,962
servers.py
evilhero_mylar/lib/cherrypy/process/servers.py
""" Starting in CherryPy 3.1, cherrypy.server is implemented as an :ref:`Engine Plugin<plugins>`. It's an instance of :class:`cherrypy._cpserver.Server`, which is a subclass of :class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class is designed to control other servers, as well. Multiple servers/ports ====================== If you need to start more than one HTTP server (to serve on multiple ports, or protocols, etc.), you can manually register each one and then start them all with engine.start:: s1 = ServerAdapter(cherrypy.engine, MyWSGIServer(host='0.0.0.0', port=80)) s2 = ServerAdapter(cherrypy.engine, another.HTTPServer(host='127.0.0.1', SSL=True)) s1.subscribe() s2.subscribe() cherrypy.engine.start() .. index:: SCGI FastCGI/SCGI ============ There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in :mod:`cherrypy.process.servers`. To start an fcgi server, for example, wrap an instance of it in a ServerAdapter:: addr = ('0.0.0.0', 4000) f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr) s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr) s.subscribe() The :doc:`cherryd</deployguide/cherryd>` startup script will do the above for you via its `-f` flag. Note that you need to download and install `flup <http://trac.saddi.com/flup>`_ yourself, whether you use ``cherryd`` or not. .. _fastcgi: .. index:: FastCGI FastCGI ------- A very simple setup lets your cherry run with FastCGI. You just need the flup library, plus a running Apache server (with ``mod_fastcgi``) or lighttpd server. CherryPy code ^^^^^^^^^^^^^ hello.py:: #!/usr/bin/python import cherrypy class HelloWorld: \"""Sample request handler class.\""" def index(self): return "Hello world!" 
index.exposed = True cherrypy.tree.mount(HelloWorld()) # CherryPy autoreload must be disabled for the flup server to work cherrypy.config.update({'engine.autoreload.on':False}) Then run :doc:`/deployguide/cherryd` with the '-f' arg:: cherryd -c <myconfig> -d -f -i hello.py Apache ^^^^^^ At the top level in httpd.conf:: FastCgiIpcDir /tmp FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4 And inside the relevant VirtualHost section:: # FastCGI config AddHandler fastcgi-script .fcgi ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1 Lighttpd ^^^^^^^^ For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is active within ``server.modules``. Then, within your ``$HTTP["host"]`` directive, configure your fastcgi script like the following:: $HTTP["url"] =~ "" { fastcgi.server = ( "/" => ( "script.fcgi" => ( "bin-path" => "/path/to/your/script.fcgi", "socket" => "/tmp/script.sock", "check-local" => "disable", "disable-time" => 1, "min-procs" => 1, "max-procs" => 1, # adjust as needed ), ), ) } # end of $HTTP["url"] =~ "^/" Please see `Lighttpd FastCGI Docs <http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for an explanation of the possible configuration options. """ import sys import time import warnings class ServerAdapter(object): """Adapter for an HTTP server. 
If you need to start more than one HTTP server (to serve on multiple ports, or protocols, etc.), you can manually register each one and then start them all with bus.start: s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80)) s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True)) s1.subscribe() s2.subscribe() bus.start() """ def __init__(self, bus, httpserver=None, bind_addr=None): self.bus = bus self.httpserver = httpserver self.bind_addr = bind_addr self.interrupt = None self.running = False def subscribe(self): self.bus.subscribe('start', self.start) self.bus.subscribe('stop', self.stop) def unsubscribe(self): self.bus.unsubscribe('start', self.start) self.bus.unsubscribe('stop', self.stop) def start(self): """Start the HTTP server.""" if self.bind_addr is None: on_what = "unknown interface (dynamic?)" elif isinstance(self.bind_addr, tuple): on_what = self._get_base() else: on_what = "socket file: %s" % self.bind_addr if self.running: self.bus.log("Already serving on %s" % on_what) return self.interrupt = None if not self.httpserver: raise ValueError("No HTTP server has been created.") # Start the httpserver in a new thread. if isinstance(self.bind_addr, tuple): wait_for_free_port(*self.bind_addr) import threading t = threading.Thread(target=self._start_http_thread) t.setName("HTTPServer " + t.getName()) t.start() self.wait() self.running = True self.bus.log("Serving on %s" % on_what) start.priority = 75 def _get_base(self): if not self.httpserver: return '' host, port = self.bind_addr if getattr(self.httpserver, 'ssl_certificate', None): scheme = "https" if port != 443: host += ":%s" % port else: scheme = "http" if port != 80: host += ":%s" % port return "%s://%s" % (scheme, host) def _start_http_thread(self): """HTTP servers MUST be running in new threads, so that the main thread persists to receive KeyboardInterrupt's. 
If an exception is raised in the httpserver's thread then it's trapped here, and the bus (and therefore our httpserver) are shut down. """ try: self.httpserver.start() except KeyboardInterrupt: self.bus.log("<Ctrl-C> hit: shutting down HTTP server") self.interrupt = sys.exc_info()[1] self.bus.exit() except SystemExit: self.bus.log("SystemExit raised: shutting down HTTP server") self.interrupt = sys.exc_info()[1] self.bus.exit() raise except: self.interrupt = sys.exc_info()[1] self.bus.log("Error in HTTP server: shutting down", traceback=True, level=40) self.bus.exit() raise def wait(self): """Wait until the HTTP server is ready to receive requests.""" while not getattr(self.httpserver, "ready", False): if self.interrupt: raise self.interrupt time.sleep(.1) # Wait for port to be occupied if isinstance(self.bind_addr, tuple): host, port = self.bind_addr wait_for_occupied_port(host, port) def stop(self): """Stop the HTTP server.""" if self.running: # stop() MUST block until the server is *truly* stopped. self.httpserver.stop() # Wait for the socket to be truly freed. if isinstance(self.bind_addr, tuple): wait_for_free_port(*self.bind_addr) self.running = False self.bus.log("HTTP Server %s shut down" % self.httpserver) else: self.bus.log("HTTP Server %s already shut down" % self.httpserver) stop.priority = 25 def restart(self): """Restart the HTTP server.""" self.stop() self.start() class FlupCGIServer(object): """Adapter for a flup.server.cgi.WSGIServer.""" def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.ready = False def start(self): """Start the CGI server.""" # We have to instantiate the server class here because its __init__ # starts a threadpool. If we do it too early, daemonize won't work. 
from flup.server.cgi import WSGIServer self.cgiserver = WSGIServer(*self.args, **self.kwargs) self.ready = True self.cgiserver.run() def stop(self): """Stop the HTTP server.""" self.ready = False class FlupFCGIServer(object): """Adapter for a flup.server.fcgi.WSGIServer.""" def __init__(self, *args, **kwargs): if kwargs.get('bindAddress', None) is None: import socket if not hasattr(socket, 'fromfd'): raise ValueError( 'Dynamic FCGI server not available on this platform. ' 'You must use a static or external one by providing a ' 'legal bindAddress.') self.args = args self.kwargs = kwargs self.ready = False def start(self): """Start the FCGI server.""" # We have to instantiate the server class here because its __init__ # starts a threadpool. If we do it too early, daemonize won't work. from flup.server.fcgi import WSGIServer self.fcgiserver = WSGIServer(*self.args, **self.kwargs) # TODO: report this bug upstream to flup. # If we don't set _oldSIGs on Windows, we get: # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py", # line 108, in run # self._restoreSignalHandlers() # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py", # line 156, in _restoreSignalHandlers # for signum,handler in self._oldSIGs: # AttributeError: 'WSGIServer' object has no attribute '_oldSIGs' self.fcgiserver._installSignalHandlers = lambda: None self.fcgiserver._oldSIGs = [] self.ready = True self.fcgiserver.run() def stop(self): """Stop the HTTP server.""" # Forcibly stop the fcgi server main event loop. self.fcgiserver._keepGoing = False # Force all worker threads to die off. 
self.fcgiserver._threadPool.maxSpare = ( self.fcgiserver._threadPool._idleCount) self.ready = False class FlupSCGIServer(object): """Adapter for a flup.server.scgi.WSGIServer.""" def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.ready = False def start(self): """Start the SCGI server.""" # We have to instantiate the server class here because its __init__ # starts a threadpool. If we do it too early, daemonize won't work. from flup.server.scgi import WSGIServer self.scgiserver = WSGIServer(*self.args, **self.kwargs) # TODO: report this bug upstream to flup. # If we don't set _oldSIGs on Windows, we get: # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py", # line 108, in run # self._restoreSignalHandlers() # File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py", # line 156, in _restoreSignalHandlers # for signum,handler in self._oldSIGs: # AttributeError: 'WSGIServer' object has no attribute '_oldSIGs' self.scgiserver._installSignalHandlers = lambda: None self.scgiserver._oldSIGs = [] self.ready = True self.scgiserver.run() def stop(self): """Stop the HTTP server.""" self.ready = False # Forcibly stop the scgi server main event loop. self.scgiserver._keepGoing = False # Force all worker threads to die off. self.scgiserver._threadPool.maxSpare = 0 def client_host(server_host): """Return the host on which a client can connect to the given listener.""" if server_host == '0.0.0.0': # 0.0.0.0 is INADDR_ANY, which should answer on localhost. return '127.0.0.1' if server_host in ('::', '::0', '::0.0.0.0'): # :: is IN6ADDR_ANY, which should answer on localhost. # ::0 and ::0.0.0.0 are non-canonical but common # ways to write IN6ADDR_ANY. 
return '::1' return server_host def check_port(host, port, timeout=1.0): """Raise an error if the given port is not free on the given host.""" if not host: raise ValueError("Host values of '' or None are not allowed.") host = client_host(host) port = int(port) import socket # AF_INET or AF_INET6 socket # Get the correct address family for our host (allows IPv6 addresses) try: info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM) except socket.gaierror: if ':' in host: info = [( socket.AF_INET6, socket.SOCK_STREAM, 0, "", (host, port, 0, 0) )] else: info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", (host, port))] for res in info: af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(timeout) s.connect((host, port)) s.close() except socket.error: if s: s.close() else: raise IOError("Port %s is in use on %s; perhaps the previous " "httpserver did not shut down properly." % (repr(port), repr(host))) # Feel free to increase these defaults on slow systems: free_port_timeout = 0.1 occupied_port_timeout = 1.0 def wait_for_free_port(host, port, timeout=None): """Wait for the specified port to become free (drop requests).""" if not host: raise ValueError("Host values of '' or None are not allowed.") if timeout is None: timeout = free_port_timeout for trial in range(50): try: # we are expecting a free port, so reduce the timeout check_port(host, port, timeout=timeout) except IOError: # Give the old server thread time to free the port. 
time.sleep(timeout) else: return raise IOError("Port %r not free on %r" % (port, host)) def wait_for_occupied_port(host, port, timeout=None): """Wait for the specified port to become active (receive requests).""" if not host: raise ValueError("Host values of '' or None are not allowed.") if timeout is None: timeout = occupied_port_timeout for trial in range(50): try: check_port(host, port, timeout=timeout) except IOError: # port is occupied return else: time.sleep(timeout) if host == client_host(host): raise IOError("Port %r not bound on %r" % (port, host)) # On systems where a loopback interface is not available and the # server is bound to all interfaces, it's difficult to determine # whether the server is in fact occupying the port. In this case, # just issue a warning and move on. See issue #1100. msg = "Unable to verify that the server is bound on %r" % port warnings.warn(msg)
15,443
Python
.py
379
32.424802
79
0.611897
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,963
__init__.py
evilhero_mylar/lib/cherrypy/process/__init__.py
"""Site container for an HTTP server. A Web Site Process Bus object is used to connect applications, servers, and frameworks with site-wide services such as daemonization, process reload, signal handling, drop privileges, PID file management, logging for all of these, and many more. The 'plugins' module defines a few abstract and concrete services for use with the bus. Some use tool-specific channels; see the documentation for each class. """ from cherrypy.process.wspbus import bus from cherrypy.process import plugins, servers
536
Python
.py
11
47.454545
72
0.819923
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,964
wspbus.py
evilhero_mylar/lib/cherrypy/process/wspbus.py
"""An implementation of the Web Site Process Bus. This module is completely standalone, depending only on the stdlib. Web Site Process Bus -------------------- A Bus object is used to contain and manage site-wide behavior: daemonization, HTTP server start/stop, process reload, signal handling, drop privileges, PID file management, logging for all of these, and many more. In addition, a Bus object provides a place for each web framework to register code that runs in response to site-wide events (like process start and stop), or which controls or otherwise interacts with the site-wide components mentioned above. For example, a framework which uses file-based templates would add known template filenames to an autoreload component. Ideally, a Bus object will be flexible enough to be useful in a variety of invocation scenarios: 1. The deployer starts a site from the command line via a framework-neutral deployment script; applications from multiple frameworks are mixed in a single site. Command-line arguments and configuration files are used to define site-wide components such as the HTTP server, WSGI component graph, autoreload behavior, signal handling, etc. 2. The deployer starts a site via some other process, such as Apache; applications from multiple frameworks are mixed in a single site. Autoreload and signal handling (from Python at least) are disabled. 3. The deployer starts a site via a framework-specific mechanism; for example, when running tests, exploring tutorials, or deploying single applications from a single framework. The framework controls which site-wide components are enabled as it sees fit. The Bus object in this package uses topic-based publish-subscribe messaging to accomplish all this. A few topic channels are built in ('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and site containers are free to define their own. If a message is sent to a channel that has not been defined or has no listeners, there is no effect. 
In general, there should only ever be a single Bus object per process. Frameworks and site containers share a single Bus object by publishing messages and subscribing listeners. The Bus object works as a finite state machine which models the current state of the process. Bus methods move it from one state to another; those methods then publish to subscribed listeners on the channel for the new state.:: O | V STOPPING --> STOPPED --> EXITING -> X A A | | \___ | | \ | | V V STARTED <-- STARTING """ import atexit import os import sys import threading import time import traceback as _traceback import warnings from cherrypy._cpcompat import set # Here I save the value of os.getcwd(), which, if I am imported early enough, # will be the directory from which the startup script was run. This is needed # by _do_execv(), to change back to the original directory before execv()ing a # new process. This is a defense against the application having changed the # current working directory (which could make sys.executable "not found" if # sys.executable is a relative-path, and/or cause other problems). _startup_cwd = os.getcwd() class ChannelFailures(Exception): """Exception raised when errors occur in a listener during Bus.publish(). """ delimiter = '\n' def __init__(self, *args, **kwargs): # Don't use 'super' here; Exceptions are old-style in Py2.4 # See https://bitbucket.org/cherrypy/cherrypy/issue/959 Exception.__init__(self, *args, **kwargs) self._exceptions = list() def handle_exception(self): """Append the current exception to self.""" self._exceptions.append(sys.exc_info()[1]) def get_instances(self): """Return a list of seen exception instances.""" return self._exceptions[:] def __str__(self): exception_strings = map(repr, self.get_instances()) return self.delimiter.join(exception_strings) __repr__ = __str__ def __bool__(self): return bool(self._exceptions) __nonzero__ = __bool__ # Use a flag to indicate the state of the bus. 
class _StateEnum(object):
    """Namespace whose State-valued attributes know their own names."""

    class State(object):
        # Set by _StateEnum.__setattr__ when the State is assigned.
        name = None

        def __repr__(self):
            return "states.%s" % self.name

    def __setattr__(self, key, value):
        # Stamp each State instance with the attribute name it is bound to,
        # so repr(states.STARTED) reads "states.STARTED".
        if isinstance(value, self.State):
            value.name = key
        object.__setattr__(self, key, value)

states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()


# Determine the OS max-open-files limit, used by Bus._set_cloexec().
# fcntl is POSIX-only; without it CLOEXEC handling is disabled (0).
try:
    import fcntl
except ImportError:
    max_files = 0
else:
    try:
        max_files = os.sysconf('SC_OPEN_MAX')
    except AttributeError:
        max_files = 1024


class Bus(object):
    """Process state-machine and messenger for HTTP site deployment.

    All listeners for a given channel are guaranteed to be called even
    if others at the same channel fail. Each failure is logged, but
    execution proceeds on to the next listener. The only way to stop all
    processing from inside a listener is to raise SystemExit and stop the
    whole server.
    """

    states = states
    # Current FSM state; see the diagram in the module docstring.
    state = states.STOPPED
    # When True, block() re-execs the process after all threads finish.
    execv = False
    # Upper bound of fds scanned by _set_cloexec(); 0 disables the scan.
    max_cloexec_files = max_files

    def __init__(self):
        self.execv = False
        self.state = states.STOPPED
        # channel name -> set of listener callables.
        self.listeners = dict(
            [(channel, set()) for channel
             in ('start', 'stop', 'exit', 'graceful', 'log', 'main')])
        # (channel, callback) -> numeric priority (lower runs first).
        self._priorities = {}

    def subscribe(self, channel, callback, priority=None):
        """Add the given callback at the given channel (if not present)."""
        if channel not in self.listeners:
            self.listeners[channel] = set()
        self.listeners[channel].add(callback)

        if priority is None:
            # Callbacks may carry their own 'priority' attribute; default 50.
            priority = getattr(callback, 'priority', 50)
        self._priorities[(channel, callback)] = priority

    def unsubscribe(self, channel, callback):
        """Discard the given callback (if present)."""
        listeners = self.listeners.get(channel)
        if listeners and callback in listeners:
            listeners.discard(callback)
            del self._priorities[(channel, callback)]

    def publish(self, channel, *args, **kwargs):
        """Return output of all subscribers for the given channel.

        Listeners run in ascending priority order. Ordinary listener
        errors are collected into a ChannelFailures raised afterwards;
        KeyboardInterrupt and SystemExit propagate immediately.
        """
        if channel not in self.listeners:
            return []

        exc = ChannelFailures()
        output = []

        items = [(self._priorities[(channel, listener)], listener)
                 for listener in self.listeners[channel]]
        try:
            items.sort(key=lambda item: item[0])
        except TypeError:
            # Python 2.3 had no 'key' arg, but that doesn't matter
            # since it could sort dissimilar types just fine.
            items.sort()
        for priority, listener in items:
            try:
                output.append(listener(*args, **kwargs))
            except KeyboardInterrupt:
                raise
            except SystemExit:
                e = sys.exc_info()[1]
                # If we have previous errors ensure the exit code is non-zero
                if exc and e.code == 0:
                    e.code = 1
                raise
            except:
                exc.handle_exception()
                if channel == 'log':
                    # Assume any further messages to 'log' will fail.
                    pass
                else:
                    self.log("Error in %r listener %r" % (channel, listener),
                             level=40, traceback=True)
        if exc:
            raise exc
        return output

    def _clean_exit(self):
        """An atexit handler which asserts the Bus is not running."""
        if self.state != states.EXITING:
            warnings.warn(
                "The main thread is exiting, but the Bus is in the %r state; "
                "shutting it down automatically now. You must either call "
                "bus.block() after start(), or call bus.exit() before the "
                "main thread exits." % self.state, RuntimeWarning)
            self.exit()

    def start(self):
        """Start all services (publish to the 'start' channel)."""
        atexit.register(self._clean_exit)

        self.state = states.STARTING
        self.log('Bus STARTING')
        try:
            self.publish('start')
            self.state = states.STARTED
            self.log('Bus STARTED')
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.log("Shutting down due to error in start listener:",
                     level=40, traceback=True)
            e_info = sys.exc_info()[1]
            try:
                self.exit()
            except:
                # Any stop/exit errors will be logged inside publish().
                pass
            # Re-raise the original error
            raise e_info

    def exit(self):
        """Stop all services and prepare to exit the process."""
        exitstate = self.state
        try:
            self.stop()

            self.state = states.EXITING
            self.log('Bus EXITING')
            self.publish('exit')
            # This isn't strictly necessary, but it's better than seeing
            # "Waiting for child threads to terminate..." and then nothing.
            self.log('Bus EXITED')
        except:
            # This method is often called asynchronously (whether thread,
            # signal handler, console handler, or atexit handler), so we
            # can't just let exceptions propagate out unhandled.
            # Assume it's been logged and just die.
            os._exit(70)  # EX_SOFTWARE

        if exitstate == states.STARTING:
            # exit() was called before start() finished, possibly due to
            # Ctrl-C because a start listener got stuck. In this case,
            # we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os.exit here.
            os._exit(70)  # EX_SOFTWARE

    def restart(self):
        """Restart the process (may close connections).

        This method does not restart the process from the calling thread;
        instead, it stops the bus and asks the main thread to call execv.
        """
        self.execv = True
        self.exit()

    def graceful(self):
        """Advise all services to reload (publish to 'graceful')."""
        self.log('Bus graceful')
        self.publish('graceful')

    def block(self, interval=0.1):
        """Wait for the EXITING state, KeyboardInterrupt or SystemExit.

        This function is intended to be called only by the main thread.
        After waiting for the EXITING state, it also waits for all threads
        to terminate, and then calls os.execv if self.execv is True. This
        design allows another thread to call bus.restart, yet have the main
        thread perform the actual execv call (required on some platforms).
        """
        try:
            self.wait(states.EXITING, interval=interval, channel='main')
        except (KeyboardInterrupt, IOError):
            # The time.sleep call might raise
            # "IOError: [Errno 4] Interrupted function call" on KBInt.
            self.log('Keyboard Interrupt: shutting down bus')
            self.exit()
        except SystemExit:
            self.log('SystemExit raised: shutting down bus')
            self.exit()
            raise

        # Waiting for ALL child threads to finish is necessary on OS X.
        # See https://bitbucket.org/cherrypy/cherrypy/issue/581.
        # It's also good to let them all shut down before allowing
        # the main thread to call atexit handlers.
        # See https://bitbucket.org/cherrypy/cherrypy/issue/751.
        self.log("Waiting for child threads to terminate...")
        for t in threading.enumerate():
            # Validate the we're not trying to join the MainThread
            # that will cause a deadlock and the case exist when
            # implemented as a windows service and in any other case
            # that another thread executes cherrypy.engine.exit()
            if (
                    t != threading.currentThread() and
                    t.isAlive() and
                    not isinstance(t, threading._MainThread)
            ):
                # Note that any dummy (external) threads are
                # always daemonic.
                if hasattr(threading.Thread, "daemon"):
                    # Python 2.6+
                    d = t.daemon
                else:
                    d = t.isDaemon()
                if not d:
                    # Only non-daemon threads are joined; daemons are
                    # allowed to die with the process.
                    self.log("Waiting for thread %s." % t.getName())
                    t.join()

        if self.execv:
            self._do_execv()

    def wait(self, state, interval=0.1, channel=None):
        """Poll for the given state(s) at intervals; publish to channel."""
        if isinstance(state, (tuple, list)):
            states = state
        else:
            states = [state]

        def _wait():
            # Spin until another thread moves the bus into a target state,
            # publishing to `channel` (e.g. 'main') on each tick.
            while self.state not in states:
                time.sleep(interval)
                self.publish(channel)

        # From http://psyco.sourceforge.net/psycoguide/bugs.html:
        # "The compiled machine code does not include the regular polling
        # done by Python, meaning that a KeyboardInterrupt will not be
        # detected before execution comes back to the regular Python
        # interpreter. Your program cannot be interrupted if caught
        # into an infinite Psyco-compiled loop."
        try:
            sys.modules['psyco'].cannotcompile(_wait)
        except (KeyError, AttributeError):
            pass

        _wait()

    def _do_execv(self):
        """Re-execute the current process.

        This must be called from the main thread, because certain platforms
        (OS X) don't allow execv to be called in a child thread very well.
        """
        args = sys.argv[:]
        self.log('Re-spawning %s' % ' '.join(args))

        if sys.platform[:4] == 'java':
            # Jython cannot execv; delegate to its restart mechanism.
            from _systemrestart import SystemRestart
            raise SystemRestart
        else:
            args.insert(0, sys.executable)
            if sys.platform == 'win32':
                # Windows requires re-quoting of each argv element.
                args = ['"%s"' % arg for arg in args]

            # Restore the startup cwd in case the app chdir'd away,
            # which could break a relative sys.executable.
            os.chdir(_startup_cwd)
            if self.max_cloexec_files:
                self._set_cloexec()
            os.execv(sys.executable, args)

    def _set_cloexec(self):
        """Set the CLOEXEC flag on all open files (except stdin/out/err).

        If self.max_cloexec_files is an integer (the default), then on
        platforms which support it, it represents the max open files setting
        for the operating system. This function will be called just before
        the process is restarted via os.execv() to prevent open files
        from persisting into the new process.

        Set self.max_cloexec_files to 0 to disable this behavior.
        """
        for fd in range(3, self.max_cloexec_files):  # skip stdin/out/err
            try:
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            except IOError:
                # fd is not an open file; skip it.
                continue
            fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)

    def stop(self):
        """Stop all services (publish to the 'stop' channel)."""
        self.state = states.STOPPING
        self.log('Bus STOPPING')
        self.publish('stop')
        self.state = states.STOPPED
        self.log('Bus STOPPED')

    def start_with_callback(self, func, args=None, kwargs=None):
        """Start 'func' in a new thread T, then start self (and return T)."""
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        args = (func,) + args

        def _callback(func, *a, **kw):
            # Delay running func until the bus has fully STARTED.
            self.wait(states.STARTED)
            func(*a, **kw)
        t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
        t.setName('Bus Callback ' + t.getName())
        t.start()

        self.start()
        return t

    def log(self, msg="", level=20, traceback=False):
        """Log the given message. Append the last traceback if requested."""
        if traceback:
            msg += "\n" + "".join(_traceback.format_exception(*sys.exc_info()))
        self.publish('log', msg, level)

bus = Bus()
16,432
Python
.py
374
34.508021
79
0.619494
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,965
wsgiserver2.py
evilhero_mylar/lib/cherrypy/wsgiserver/wsgiserver2.py
"""A high-speed, production ready, thread pooled, generic HTTP server. Simplest example on how to use this module directly (without using CherryPy's application machinery):: from cherrypy import wsgiserver def my_crazy_app(environ, start_response): status = '200 OK' response_headers = [('Content-type','text/plain')] start_response(status, response_headers) return ['Hello world!'] server = wsgiserver.CherryPyWSGIServer( ('0.0.0.0', 8070), my_crazy_app, server_name='www.cherrypy.example') server.start() The CherryPy WSGI server can serve as many WSGI applications as you want in one instance by using a WSGIPathInfoDispatcher:: d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. This won't call the CherryPy engine (application side) at all, only the HTTP server, which is independent from the rest of CherryPy. Don't let the name "CherryPyWSGIServer" throw you; the name merely reflects its origin, not its coupling. For those of you wanting to understand internals of this module, here's the basic call flow. The server's listening thread runs a very tight loop, sticking incoming connections onto a Queue:: server = CherryPyWSGIServer(...) server.start() while True: tick() # This blocks until a request comes in: child = socket.accept() conn = HTTPConnection(child, ...) server.requests.put(conn) Worker threads are kept in a pool and poll the Queue, popping off and then handling each connection in turn. Each connection can consist of an arbitrary number of requests and their responses, so we run a nested loop:: while True: conn = server.requests.get() conn.communicate() -> while True: req = HTTPRequest(...) req.parse_request() -> # Read the Request-Line, e.g. "GET /page HTTP/1.1" req.rfile.readline() read_headers(req.rfile, req.inheaders) req.respond() -> response = app(...) 
try: for chunk in response: if chunk: req.write(chunk) finally: if hasattr(response, "close"): response.close() if req.close_connection: return """ __all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 'CP_fileobject', 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 'WorkerThread', 'ThreadPool', 'SSLAdapter', 'CherryPyWSGIServer', 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] import os try: import queue except: import Queue as queue import re import rfc822 import socket import sys if 'win' in sys.platform and hasattr(socket, "AF_INET6"): if not hasattr(socket, 'IPPROTO_IPV6'): socket.IPPROTO_IPV6 = 41 if not hasattr(socket, 'IPV6_V6ONLY'): socket.IPV6_V6ONLY = 27 try: import cStringIO as StringIO except ImportError: import StringIO DEFAULT_BUFFER_SIZE = -1 class FauxSocket(object): """Faux socket with the minimal interface required by pypy""" def _reuse(self): pass _fileobject_uses_str_type = isinstance( socket._fileobject(FauxSocket())._rbuf, basestring) del FauxSocket # this class is not longer required for anything. import threading import time import traceback def format_exc(limit=None): """Like print_exc() but return a string. Backport for Python 2.3.""" try: etype, value, tb = sys.exc_info() return ''.join(traceback.format_exception(etype, value, tb, limit)) finally: etype = value = tb = None import operator from urllib import unquote import warnings if sys.version_info >= (3, 0): bytestr = bytes unicodestr = str basestring = (bytes, str) def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. """ # In Python 3, the native string type is unicode return n.encode(encoding) else: bytestr = str unicodestr = unicode basestring = basestring def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. 
""" # In Python 2, the native string type is bytes. Assume it's already # in the given encoding, which for ISO-8859-1 is almost always what # was intended. return n LF = ntob('\n') CRLF = ntob('\r\n') TAB = ntob('\t') SPACE = ntob(' ') COLON = ntob(':') SEMICOLON = ntob(';') EMPTY = ntob('') NUMBER_SIGN = ntob('#') QUESTION_MARK = ntob('?') ASTERISK = ntob('*') FORWARD_SLASH = ntob('/') quoted_slash = re.compile(ntob("(?i)%2F")) import errno def plat_specific_errors(*errnames): """Return error numbers for all errors in errnames on this platform. The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names. """ errno_names = dir(errno) nums = [getattr(errno, k) for k in errnames if k in errno_names] # de-dupe the list return list(dict.fromkeys(nums).keys()) socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR") socket_errors_to_ignore = plat_specific_errors( "EPIPE", "EBADF", "WSAEBADF", "ENOTSOCK", "WSAENOTSOCK", "ETIMEDOUT", "WSAETIMEDOUT", "ECONNREFUSED", "WSAECONNREFUSED", "ECONNRESET", "WSAECONNRESET", "ECONNABORTED", "WSAECONNABORTED", "ENETRESET", "WSAENETRESET", "EHOSTDOWN", "EHOSTUNREACH", ) socket_errors_to_ignore.append("timed out") socket_errors_to_ignore.append("The read operation timed out") socket_errors_nonblocking = plat_specific_errors( 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK') comma_separated_headers = [ ntob(h) for h in ['Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection', 'Content-Encoding', 'Content-Language', 'Expect', 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE', 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'WWW-Authenticate'] ] import logging if not hasattr(logging, 'statistics'): logging.statistics = {} def read_headers(rfile, hdict=None): """Read headers from the given stream into the given 
header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. """ if hdict is None: hdict = {} while True: line = rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") if line[0] in (SPACE, TAB): # It's a continuation line. v = line.strip() else: try: k, v = line.split(COLON, 1) except ValueError: raise ValueError("Illegal header line.") # TODO: what about TE and WWW-Authenticate? k = k.strip().title() v = v.strip() hname = k if k in comma_separated_headers: existing = hdict.get(hname) if existing: v = ", ".join((existing, v)) hdict[hname] = v return hdict class MaxSizeExceeded(Exception): pass class SizeCheckWrapper(object): """Wraps a file-like object, raising MaxSizeExceeded if too large.""" def __init__(self, rfile, maxlen): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 def _check_length(self): if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded() def read(self, size=None): data = self.rfile.read(size) self.bytes_read += len(data) self._check_length() return data def readline(self, size=None): if size is not None: data = self.rfile.readline(size) self.bytes_read += len(data) self._check_length() return data # User didn't specify a size ... # We read the line in chunks to make sure it's not a 100MB line ! 
res = [] while True: data = self.rfile.readline(256) self.bytes_read += len(data) self._check_length() res.append(data) # See https://bitbucket.org/cherrypy/cherrypy/issue/421 if len(data) < 256 or data[-1:] == LF: return EMPTY.join(res) def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.bytes_read += len(data) self._check_length() return data def next(self): data = self.rfile.next() self.bytes_read += len(data) self._check_length() return data class KnownLengthRFile(object): """Wraps a file-like object, returning an empty string when exhausted.""" def __init__(self, rfile, content_length): self.rfile = rfile self.remaining = content_length def read(self, size=None): if self.remaining == 0: return '' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.read(size) self.remaining -= len(data) return data def readline(self, size=None): if self.remaining == 0: return '' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.readline(size) self.remaining -= len(data) return data def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.remaining -= len(data) return data class ChunkedRFile(object): """Wraps a file-like object, returning an empty string when exhausted. 
This class is intended to provide a conforming wsgi.input value for request entities that have been encoded with the 'chunked' transfer encoding. """ def __init__(self, rfile, maxlen, bufsize=8192): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 self.buffer = EMPTY self.bufsize = bufsize self.closed = False def _fetch(self): if self.closed: return line = self.rfile.readline() self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) line = line.strip().split(SEMICOLON, 1) try: chunk_size = line.pop(0) chunk_size = int(chunk_size, 16) except ValueError: raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) if chunk_size <= 0: self.closed = True return ## if line: chunk_extension = line[0] if self.maxlen and self.bytes_read + chunk_size > self.maxlen: raise IOError("Request Entity Too Large") chunk = self.rfile.read(chunk_size) self.bytes_read += len(chunk) self.buffer += chunk crlf = self.rfile.read(2) if crlf != CRLF: raise ValueError( "Bad chunked transfer coding (expected '\\r\\n', " "got " + repr(crlf) + ")") def read(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data if size: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: data += self.buffer def readline(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data newline_pos = self.buffer.find(LF) if size: if newline_pos == -1: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: remaining = min(size - len(data), newline_pos) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: if newline_pos == -1: data += self.buffer else: data += self.buffer[:newline_pos] self.buffer = 
self.buffer[newline_pos:] def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def read_trailer_lines(self): if not self.closed: raise ValueError( "Cannot read trailers until the request body has been read.") while True: line = self.rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise IOError("Request Entity Too Large") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") yield line def close(self): self.rfile.close() def __iter__(self): # Shamelessly stolen from StringIO total = 0 line = self.readline(sizehint) while line: yield line total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) class HTTPRequest(object): """An HTTP Request (and response). A single HTTP connection may consist of multiple request/response pairs. """ server = None """The HTTPServer object which is receiving this request.""" conn = None """The HTTPConnection object on which this request connected.""" inheaders = {} """A dict of request headers.""" outheaders = [] """A list of header tuples to write in the response.""" ready = False """When True, the request has been parsed and is ready to begin generating the response. When False, signals the calling Connection that the response should not be generated and the connection should close.""" close_connection = False """Signals the calling Connection that the request should close. This does not imply an error! The client and/or server may each request that the connection be closed.""" chunked_write = False """If True, output will be encoded with the "chunked" transfer-coding. 
This value is set automatically inside send_headers.""" def __init__(self, server, conn): self.server = server self.conn = conn self.ready = False self.started_request = False self.scheme = ntob("http") if self.server.ssl_adapter is not None: self.scheme = ntob("https") # Use the lowest-common protocol in case read_request_line errors. self.response_protocol = 'HTTP/1.0' self.inheaders = {} self.status = "" self.outheaders = [] self.sent_headers = False self.close_connection = self.__class__.close_connection self.chunked_read = False self.chunked_write = self.__class__.chunked_write def parse_request(self): """Parse the next HTTP request start-line and message-headers.""" self.rfile = SizeCheckWrapper(self.conn.rfile, self.server.max_request_header_size) try: success = self.read_request_line() except MaxSizeExceeded: self.simple_response( "414 Request-URI Too Long", "The Request-URI sent with the request exceeds the maximum " "allowed bytes.") return else: if not success: return try: success = self.read_request_headers() except MaxSizeExceeded: self.simple_response( "413 Request Entity Too Large", "The headers sent with the request exceed the maximum " "allowed bytes.") return else: if not success: return self.ready = True def read_request_line(self): # HTTP/1.1 connections are persistent by default. If a client # requests a page, then idles (leaves the connection open), # then rfile.readline() will raise socket.error("timed out"). # Note that it does this based on the value given to settimeout(), # and doesn't need the client to request or acknowledge the close # (although your TCP stack might suffer for it: cf Apache's history # with FIN_WAIT_2). request_line = self.rfile.readline() # Set started_request to True so communicate() knows to send 408 # from here on out. 
self.started_request = True if not request_line: return False if request_line == CRLF: # RFC 2616 sec 4.1: "...if the server is reading the protocol # stream at the beginning of a message and receives a CRLF # first, it should ignore the CRLF." # But only ignore one leading line! else we enable a DoS. request_line = self.rfile.readline() if not request_line: return False if not request_line.endswith(CRLF): self.simple_response( "400 Bad Request", "HTTP requires CRLF terminators") return False try: method, uri, req_protocol = request_line.strip().split(SPACE, 2) rp = int(req_protocol[5]), int(req_protocol[7]) except (ValueError, IndexError): self.simple_response("400 Bad Request", "Malformed Request-Line") return False self.uri = uri self.method = method # uri may be an abs_path (including "http://host.domain.tld"); scheme, authority, path = self.parse_request_uri(uri) if NUMBER_SIGN in path: self.simple_response("400 Bad Request", "Illegal #fragment in Request-URI.") return False if scheme: self.scheme = scheme qs = EMPTY if QUESTION_MARK in path: path, qs = path.split(QUESTION_MARK, 1) # Unquote the path+params (e.g. "/this%20path" -> "/this path"). # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 # # But note that "...a URI must be separated into its components # before the escaped characters within those components can be # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2 # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path". try: atoms = [unquote(x) for x in quoted_slash.split(path)] except ValueError: ex = sys.exc_info()[1] self.simple_response("400 Bad Request", ex.args[0]) return False path = "%2F".join(atoms) self.path = path # Note that, like wsgiref and most other HTTP servers, # we "% HEX HEX"-unquote the path but not the query string. self.qs = qs # Compare request and server HTTP protocol versions, in case our # server does not support the requested protocol. Limit our output # to min(req, server). 
We want the following output: # request server actual written supported response # protocol protocol response protocol feature set # a 1.0 1.0 1.0 1.0 # b 1.0 1.1 1.1 1.0 # c 1.1 1.0 1.0 1.0 # d 1.1 1.1 1.1 1.1 # Notice that, in (b), the response will be "HTTP/1.1" even though # the client only understands 1.0. RFC 2616 10.5.6 says we should # only return 505 if the _major_ version is different. sp = int(self.server.protocol[5]), int(self.server.protocol[7]) if sp[0] != rp[0]: self.simple_response("505 HTTP Version Not Supported") return False self.request_protocol = req_protocol self.response_protocol = "HTTP/%s.%s" % min(rp, sp) return True def read_request_headers(self): """Read self.rfile into self.inheaders. Return success.""" # then all the http headers try: read_headers(self.rfile, self.inheaders) except ValueError: ex = sys.exc_info()[1] self.simple_response("400 Bad Request", ex.args[0]) return False mrbs = self.server.max_request_body_size if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs: self.simple_response( "413 Request Entity Too Large", "The entity sent with the request exceeds the maximum " "allowed bytes.") return False # Persistent connection support if self.response_protocol == "HTTP/1.1": # Both server and client are HTTP/1.1 if self.inheaders.get("Connection", "") == "close": self.close_connection = True else: # Either the server or client (or both) are HTTP/1.0 if self.inheaders.get("Connection", "") != "Keep-Alive": self.close_connection = True # Transfer-Encoding support te = None if self.response_protocol == "HTTP/1.1": te = self.inheaders.get("Transfer-Encoding") if te: te = [x.strip().lower() for x in te.split(",") if x.strip()] self.chunked_read = False if te: for enc in te: if enc == "chunked": self.chunked_read = True else: # Note that, even if we see "chunked", we must reject # if there is an extension we don't recognize. 
                    # Unrecognized Transfer-Encoding extension: refuse the
                    # request rather than misinterpret the body framing.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See
            # https://bitbucket.org/cherrypy/cherrypy/issue/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error:
                x = sys.exc_info()[1]
                # Ignore "benign" disconnect errors; re-raise anything else.
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path = "/" path_segments
            path_segments = segment *( "/" segment )
            segment = *pchar *( ";" param )
            param = *pchar
        """
        if uri == ASTERISK:
            # "OPTIONS *" style request; no scheme/authority/path to split.
            return None, None, uri

        i = uri.find('://')
        if i > 0 and QUESTION_MARK not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query
            # ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            authority, path = remainder.split(FORWARD_SLASH, 1)
            path = FORWARD_SLASH + path
            return scheme, authority, path

        if uri.startswith(FORWARD_SLASH):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output.

        Wraps the connection's rfile in a ChunkedRFile or KnownLengthRFile
        before dispatching so the app cannot read past the request body.
        """
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                # Declared body exceeds the configured limit; refuse early,
                # before reading any of it.
                if not self.sent_headers:
                    self.simple_response(
                        "413 Request Entity Too Large",
                        "The entity sent with the request exceeds the maximum "
                        "allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            # The app produced no output before finishing; flush headers now.
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminate the chunked transfer-coding with the zero-size chunk.
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client.

        Used for error responses emitted before/instead of the gateway.
        """
        status = str(status)
        buf = [self.server.protocol + SPACE +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append("Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                # NOTE(review): buf[0] was already built with the original
                # status above, so this reassignment appears to never reach
                # the client -- looks like a latent bug; confirm upstream.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicodestr):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client.

        When the chunked transfer-coding is in effect, frame the chunk with
        its hex length and CRLF delimiters; otherwise write it raw.
        """
        if self.chunked_write and chunk:
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall(EMPTY.join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                        and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0) if remaining > 0: self.rfile.read(remaining) if "date" not in hkeys: self.outheaders.append(("Date", rfc822.formatdate())) if "server" not in hkeys: self.outheaders.append(("Server", self.server.server_name)) buf = [self.server.protocol + SPACE + self.status + CRLF] for k, v in self.outheaders: buf.append(k + COLON + SPACE + v + CRLF) buf.append(CRLF) self.conn.wfile.sendall(EMPTY.join(buf)) class NoSSLError(Exception): """Exception raised when a client speaks HTTP to an HTTPS socket.""" pass class FatalSSLAlert(Exception): """Exception raised when the SSL implementation signals a fatal alert.""" pass class CP_fileobject(socket._fileobject): """Faux file object attached to a socket object.""" def __init__(self, *args, **kwargs): self.bytes_read = 0 self.bytes_written = 0 socket._fileobject.__init__(self, *args, **kwargs) def sendall(self, data): """Sendall for non-blocking sockets.""" while data: try: bytes_sent = self.send(data) data = data[bytes_sent:] except socket.error, e: if e.args[0] not in socket_errors_nonblocking: raise def send(self, data): bytes_sent = self._sock.send(data) self.bytes_written += bytes_sent return bytes_sent def flush(self): if self._wbuf: buffer = "".join(self._wbuf) self._wbuf = [] self.sendall(buffer) def recv(self, size): while True: try: data = self._sock.recv(size) self.bytes_read += len(data) return data except socket.error, e: if (e.args[0] not in socket_errors_nonblocking and e.args[0] not in socket_error_eintr): raise if not _fileobject_uses_str_type: def read(self, size=-1): # Use max, disallow tiny reads in a loop as they are very # inefficient. # We never leave read() with any leftover data from a new recv() # call in our internal buffer. 
rbufsize = max(self._rbufsize, self.default_bufsize) # Our use of StringIO rather than lists of string objects returned # by recv() minimizes memory usage and fragmentation that occurs # when rbufsize is large compared to the typical return value of # recv(). buf = self._rbuf buf.seek(0, 2) # seek end if size < 0: # Read until EOF # reset _rbuf. we consume it via buf. self._rbuf = StringIO.StringIO() while True: data = self.recv(rbufsize) if not data: break buf.write(data) return buf.getvalue() else: # Read until size bytes or EOF seen, whichever comes first buf_len = buf.tell() if buf_len >= size: # Already have size bytes in our buffer? Extract and # return. buf.seek(0) rv = buf.read(size) self._rbuf = StringIO.StringIO() self._rbuf.write(buf.read()) return rv # reset _rbuf. we consume it via buf. self._rbuf = StringIO.StringIO() while True: left = size - buf_len # recv() will malloc the amount of memory given as its # parameter even though it often returns much less data # than that. The returned data string is short lived # as we copy it into a StringIO and free it. This avoids # fragmentation issues on many platforms. data = self.recv(left) if not data: break n = len(data) if n == size and not buf_len: # Shortcut. Avoid buffer data copies when: # - We have no data in our buffer. # AND # - Our call to recv returned exactly the # number of bytes we were asked to read. 
return data if n == left: buf.write(data) del data # explicit free break assert n <= left, "recv(%d) returned %d bytes" % (left, n) buf.write(data) buf_len += n del data # explicit free #assert buf_len == buf.tell() return buf.getvalue() def readline(self, size=-1): buf = self._rbuf buf.seek(0, 2) # seek end if buf.tell() > 0: # check if we already have it in our buffer buf.seek(0) bline = buf.readline(size) if bline.endswith('\n') or len(bline) == size: self._rbuf = StringIO.StringIO() self._rbuf.write(buf.read()) return bline del bline if size < 0: # Read until \n or EOF, whichever comes first if self._rbufsize <= 1: # Speed up unbuffered case buf.seek(0) buffers = [buf.read()] # reset _rbuf. we consume it via buf. self._rbuf = StringIO.StringIO() data = None recv = self.recv while data != "\n": data = recv(1) if not data: break buffers.append(data) return "".join(buffers) buf.seek(0, 2) # seek end # reset _rbuf. we consume it via buf. self._rbuf = StringIO.StringIO() while True: data = self.recv(self._rbufsize) if not data: break nl = data.find('\n') if nl >= 0: nl += 1 buf.write(data[:nl]) self._rbuf.write(data[nl:]) del data break buf.write(data) return buf.getvalue() else: # Read until size bytes or \n or EOF seen, whichever comes # first buf.seek(0, 2) # seek end buf_len = buf.tell() if buf_len >= size: buf.seek(0) rv = buf.read(size) self._rbuf = StringIO.StringIO() self._rbuf.write(buf.read()) return rv # reset _rbuf. we consume it via buf. self._rbuf = StringIO.StringIO() while True: data = self.recv(self._rbufsize) if not data: break left = size - buf_len # did we just receive a newline? nl = data.find('\n', 0, left) if nl >= 0: nl += 1 # save the excess data to _rbuf self._rbuf.write(data[nl:]) if buf_len: buf.write(data[:nl]) break else: # Shortcut. Avoid data copy through buf when # returning a substring of our first recv(). return data[:nl] n = len(data) if n == size and not buf_len: # Shortcut. 
Avoid data copy through buf when # returning exactly all of our first recv(). return data if n >= left: buf.write(data[:left]) self._rbuf.write(data[left:]) break buf.write(data) buf_len += n #assert buf_len == buf.tell() return buf.getvalue() else: def read(self, size=-1): if size < 0: # Read until EOF buffers = [self._rbuf] self._rbuf = "" if self._rbufsize <= 1: recv_size = self.default_bufsize else: recv_size = self._rbufsize while True: data = self.recv(recv_size) if not data: break buffers.append(data) return "".join(buffers) else: # Read until size bytes or EOF seen, whichever comes first data = self._rbuf buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: left = size - buf_len recv_size = max(self._rbufsize, left) data = self.recv(recv_size) if not data: break buffers.append(data) n = len(data) if n >= left: self._rbuf = data[left:] buffers[-1] = data[:left] break buf_len += n return "".join(buffers) def readline(self, size=-1): data = self._rbuf if size < 0: # Read until \n or EOF, whichever comes first if self._rbufsize <= 1: # Speed up unbuffered case assert data == "" buffers = [] while data != "\n": data = self.recv(1) if not data: break buffers.append(data) return "".join(buffers) nl = data.find('\n') if nl >= 0: nl += 1 self._rbuf = data[nl:] return data[:nl] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: data = self.recv(self._rbufsize) if not data: break buffers.append(data) nl = data.find('\n') if nl >= 0: nl += 1 self._rbuf = data[nl:] buffers[-1] = data[:nl] break return "".join(buffers) else: # Read until size bytes or \n or EOF seen, whichever comes # first nl = data.find('\n', 0, size) if nl >= 0: nl += 1 self._rbuf = data[nl:] return data[:nl] buf_len = len(data) if buf_len >= size: self._rbuf = data[size:] return data[:size] buffers = [] if data: buffers.append(data) self._rbuf = "" while True: data = 
self.recv(self._rbufsize) if not data: break buffers.append(data) left = size - buf_len nl = data.find('\n', 0, left) if nl >= 0: nl += 1 self._rbuf = data[nl:] buffers[-1] = data[:nl] break n = len(data) if n >= left: self._rbuf = data[left:] buffers[-1] = data[:left] break buf_len += n return "".join(buffers) class HTTPConnection(object): """An HTTP connection (active socket). server: the Server object which received this connection. socket: the raw socket object (usually TCP) for this connection. makefile: a fileobject class for reading from the socket. """ remote_addr = None remote_port = None ssl_env = None rbufsize = DEFAULT_BUFFER_SIZE wbufsize = DEFAULT_BUFFER_SIZE RequestHandlerClass = HTTPRequest def __init__(self, server, sock, makefile=CP_fileobject): self.server = server self.socket = sock self.rfile = makefile(sock, "rb", self.rbufsize) self.wfile = makefile(sock, "wb", self.wbufsize) self.requests_seen = 0 def communicate(self): """Read each request and respond appropriately.""" request_seen = False try: while True: # (re)set req to None so that if something goes wrong in # the RequestHandlerClass constructor, the error doesn't # get written to the previous request. req = None req = self.RequestHandlerClass(self.server, self) # This order of operations should guarantee correct pipelining. req.parse_request() if self.server.stats['Enabled']: self.requests_seen += 1 if not req.ready: # Something went wrong in the parsing (and the server has # probably already made a simple_response). Return and # let the conn close. return request_seen = True req.respond() if req.close_connection: return except socket.error: e = sys.exc_info()[1] errnum = e.args[0] # sadly SSL sockets return a different (longer) time out string if ( errnum == 'timed out' or errnum == 'The read operation timed out' ): # Don't error if we're between requests; only error # if 1) no request has been started at all, or 2) we're # in the middle of a request. 
                # See https://bitbucket.org/cherrypy/cherrypy/issue/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                self.server.error_log("socket.error %s" % repr(errnum),
                                      level=logging.WARNING, traceback=True)
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error")
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile
                self.wfile = CP_fileobject(
                    self.socket._sock, "wb", self.wbufsize)
                req.simple_response(
                    "400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            e = sys.exc_info()[1]
            self.server.error_log(repr(e), level=logging.ERROR,
                                  traceback=True)
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error")
                except FatalSSLAlert:
                    # Close the connection.
                    return

    # Class-level default; set to True when we must let the client finish
    # reading our response before the FIN (see close()).
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel
            # socket when you call socket.close(). We do so manually here
            # because we want this server to send a FIN TCP segment
            # immediately. Note this must be called *before* calling
            # socket.close(), because the latter drops its reference to
            # the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass


class TrueyZero(object):

    """An object which equals and does math like the integer 0 but evals True.
    """

    def __add__(self, other):
        return other

    def __radd__(self, other):
        return other
trueyzero = TrueyZero()


# Sentinel placed on the request queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None


class WorkerThread(threading.Thread):

    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""

    def __init__(self, server):
        self.ready = False
        self.server = server

        # Per-thread statistics; while a connection is active the lambdas
        # below add the live connection's counters on top of these totals.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        self.stats = {
            'Requests': lambda s: self.requests_seen + (
                (self.start_time is None) and
                trueyzero or
                self.conn.requests_seen
            ),
            'Bytes Read': lambda s: self.bytes_read + (
                (self.start_time is None) and
                trueyzero or
                self.conn.rfile.bytes_read
            ),
            'Bytes Written': lambda s: self.bytes_written + (
                (self.start_time is None) and
                trueyzero or
                self.conn.wfile.bytes_written
            ),
            'Work Time': lambda s: self.work_time + (
                (self.start_time is None) and
                trueyzero or
                time.time() - self.start_time
            ),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (
                s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (
                s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Register this thread's stats dict with the server, then loop
        # pulling connections off the shared queue until shutdown.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    conn.close()
                    if self.server.stats['Enabled']:
                        # Fold the finished connection's counters into this
                        # thread's running totals.
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit):
            exc = sys.exc_info()[1]
            # Hand the interrupt to the server's main loop.
            self.server.interrupt = exc


class ThreadPool(object):

    """A Request Queue for an HTTPServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1, accepted_queue_size=-1,
                 accepted_queue_timeout=10):
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = queue.Queue(maxsize=accepted_queue_size)
        self._queue_put_timeout = accepted_queue_timeout
        # Expose the queue's get directly; workers call pool.get().
        self.get = self._queue.get

    def start(self):
        """Start the pool of threads."""
        for i in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP Server " + worker.getName())
            worker.start()
        # Block until every worker is polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        if self.max > 0:
            budget = max(self.max - len(self._threads), 0)
        else:
            # self.max <= 0 indicates no maximum
            budget = float('inf')

        n_new = min(amount, budget)

        workers = [self._spawn_worker() for i in range(n_new)]
        # Wait for all new workers to come up before registering them.
        while not self._all(operator.attrgetter('ready'), workers):
            time.sleep(.1)
        self._threads.extend(workers)

    def _spawn_worker(self):
        # Create, name and start a single WorkerThread.
        worker = WorkerThread(self.server)
        worker.setName("CP Server " + worker.getName())
        worker.start()
        return worker

    def _all(func, items):
        # True iff func(item) is truthy for every item (vacuously True).
        results = [func(item) for item in items]
        return reduce(operator.and_, results, True)
    _all = staticmethod(_all)

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Grow/shrink the pool if necessary.
        # Remove any dead threads from our list
        for t in self._threads:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        # calculate the number of threads above the minimum
        n_extra = max(len(self._threads) - self.min, 0)

        # don't remove more than amount
        n_to_remove = min(amount, n_extra)

        # put shutdown requests on the queue equal to the number of threads
        # to remove. As each request is processed by a worker, that worker
        # will terminate and be culled from the list.
        for n in range(n_to_remove):
            self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        if timeout and timeout >= 0:
            endtime = time.time() + timeout
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        remaining_time = endtime - time.time()
                        if remaining_time > 0:
                            worker.join(remaining_time)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                try:
                                    c.socket.shutdown(socket.SHUT_RD)
                                except TypeError:
                                    # pyOpenSSL sockets don't take an arg
                                    c.socket.shutdown()
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See
                        # https://bitbucket.org/cherrypy/cherrypy/issue/691.
                        KeyboardInterrupt):
                    pass

    def _get_qsize(self):
        return self._queue.qsize()
    qsize = property(_get_qsize)


# Choose a platform-appropriate implementation of
# prevent_socket_inheritance: fcntl/FD_CLOEXEC on POSIX,
# SetHandleInformation via ctypes on Windows, or a no-op when
# neither facility is available.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
        import ctypes.wintypes
        _SetHandleInformation = windll.kernel32.SetHandleInformation
        _SetHandleInformation.argtypes = [
            ctypes.wintypes.HANDLE,
            ctypes.wintypes.DWORD,
            ctypes.wintypes.DWORD,
        ]
        _SetHandleInformation.restype = ctypes.wintypes.BOOL
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            if not _SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)


class SSLAdapter(object):

    """Base class for SSL driver library adapters.
Required methods: * ``wrap(sock) -> (wrapped socket, ssl environ dict)`` * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object`` """ def __init__(self, certificate, private_key, certificate_chain=None): self.certificate = certificate self.private_key = private_key self.certificate_chain = certificate_chain def wrap(self, sock): raise NotImplemented def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): raise NotImplemented class HTTPServer(object): """An HTTP server.""" _bind_addr = "127.0.0.1" _interrupt = None gateway = None """A Gateway instance.""" minthreads = None """The minimum number of worker threads to create (default 10).""" maxthreads = None """The maximum number of worker threads to create (default -1 = no limit). """ server_name = None """The name of the server; defaults to socket.gethostname().""" protocol = "HTTP/1.1" """The version string to write in the Status-Line of all HTTP responses. For example, "HTTP/1.1" is the default. This also limits the supported features used in the response.""" request_queue_size = 5 """The 'backlog' arg to socket.listen(); max queued connections (default 5). """ shutdown_timeout = 5 """The total time, in seconds, to wait for worker threads to cleanly exit. """ timeout = 10 """The timeout in seconds for accepted connections (default 10).""" version = "CherryPy/3.6.0" """A version string for the HTTPServer.""" software = None """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. 
If None, this defaults to ``'%s Server' % self.version``.""" ready = False """An internal flag which marks whether the socket is accepting connections """ max_request_header_size = 0 """The maximum size, in bytes, for request headers, or 0 for no limit.""" max_request_body_size = 0 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" nodelay = True """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" ConnectionClass = HTTPConnection """The class to use for handling HTTP connections.""" ssl_adapter = None """An instance of SSLAdapter (or a subclass). You must have the corresponding SSL driver library installed.""" def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, server_name=None): self.bind_addr = bind_addr self.gateway = gateway self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) if not server_name: server_name = socket.gethostname() self.server_name = server_name self.clear_stats() def clear_stats(self): self._start_time = None self._run_time = 0 self.stats = { 'Enabled': False, 'Bind Address': lambda s: repr(self.bind_addr), 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 'Accepts': 0, 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 'Queue': lambda s: getattr(self.requests, "qsize", None), 'Threads': lambda s: len(getattr(self.requests, "_threads", [])), 'Threads Idle': lambda s: getattr(self.requests, "idle", None), 'Socket Errors': 0, 'Requests': lambda s: (not s['Enabled']) and -1 or sum( [w['Requests'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) for w in s['Worker Threads'].values()], 0), 'Work Time': lambda s: (not s['Enabled']) and -1 or sum( [w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 'Read Throughput': lambda s: (not 
s['Enabled']) and -1 or sum( [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Worker Threads': {}, } logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats def runtime(self): if self._start_time is None: return self._run_time else: return self._run_time + (time.time() - self._start_time) def __str__(self): return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, self.bind_addr) def _get_bind_addr(self): return self._bind_addr def _set_bind_addr(self, value): if isinstance(value, tuple) and value[0] in ('', None): # Despite the socket module docs, using '' does not # allow AI_PASSIVE to work. Passing None instead # returns '0.0.0.0' like we want. In other words: # host AI_PASSIVE result # '' Y 192.168.x.y # '' N 192.168.x.y # None Y 0.0.0.0 # None N 127.0.0.1 # But since you can get the same effect with an explicit # '0.0.0.0', we deny both the empty string and None as values. raise ValueError("Host values of '' or None are not allowed. " "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " "to listen on all active interfaces.") self._bind_addr = value bind_addr = property( _get_bind_addr, _set_bind_addr, doc="""The interface on which to listen for connections. For TCP sockets, a (host, port) tuple. Host values may be any IPv4 or IPv6 address, or any valid hostname. The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). The string '0.0.0.0' is a special IPv4 entry meaning "any active interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for IPv6. The empty string or None are not allowed. 
For UNIX sockets, supply the filename as a string.""") def start(self): """Run the server forever.""" # We don't have to trap KeyboardInterrupt or SystemExit here, # because cherrpy.server already does so, calling self.stop() for us. # If you're using this server with another framework, you should # trap those exceptions in whatever code block calls start(). self._interrupt = None if self.software is None: self.software = "%s Server" % self.version # SSL backward compatibility if (self.ssl_adapter is None and getattr(self, 'ssl_certificate', None) and getattr(self, 'ssl_private_key', None)): warnings.warn( "SSL attributes are deprecated in CherryPy 3.2, and will " "be removed in CherryPy 3.3. Use an ssl_adapter attribute " "instead.", DeprecationWarning ) try: from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter except ImportError: pass else: self.ssl_adapter = pyOpenSSLAdapter( self.ssl_certificate, self.ssl_private_key, getattr(self, 'ssl_certificate_chain', None)) # Select the appropriate socket if isinstance(self.bind_addr, basestring): # AF_UNIX socket # So we can reuse the socket... try: os.unlink(self.bind_addr) except: pass # So everyone can access the socket... 
try: os.chmod(self.bind_addr, 511) # 0777 except: pass info = [ (socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] else: # AF_INET or AF_INET6 socket # Get the correct address family for our host (allows IPv6 # addresses) host, port = self.bind_addr try: info = socket.getaddrinfo( host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) except socket.gaierror: if ':' in self.bind_addr[0]: info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", self.bind_addr + (0, 0))] else: info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)] self.socket = None msg = "No socket could be created" for res in info: af, socktype, proto, canonname, sa = res try: self.bind(af, socktype, proto) except socket.error, serr: msg = "%s -- (%s: %s)" % (msg, sa, serr) if self.socket: self.socket.close() self.socket = None continue break if not self.socket: raise socket.error(msg) # Timeout so KeyboardInterrupt can be caught on Win32 self.socket.settimeout(1) self.socket.listen(self.request_queue_size) # Create worker threads self.requests.start() self.ready = True self._start_time = time.time() while self.ready: try: self.tick() except (KeyboardInterrupt, SystemExit): raise except: self.error_log("Error in HTTPServer.tick", level=logging.ERROR, traceback=True) if self.interrupt: while self.interrupt is True: # Wait for self.stop() to complete. See _set_interrupt. 
time.sleep(0.1) if self.interrupt: raise self.interrupt def error_log(self, msg="", level=20, traceback=False): # Override this in subclasses as desired sys.stderr.write(msg + '\n') sys.stderr.flush() if traceback: tblines = format_exc() sys.stderr.write(tblines) sys.stderr.flush() def bind(self, family, type, proto=0): """Create (or recreate) the actual socket object.""" self.socket = socket.socket(family, type, proto) prevent_socket_inheritance(self.socket) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if self.nodelay and not isinstance(self.bind_addr, str): self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.ssl_adapter is not None: self.socket = self.ssl_adapter.bind(self.socket) # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), # activate dual-stack. See # https://bitbucket.org/cherrypy/cherrypy/issue/871. if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): try: self.socket.setsockopt( socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except (AttributeError, socket.error): # Apparently, the socket option is not available in # this machine's TCP stack pass self.socket.bind(self.bind_addr) def tick(self): """Accept a new connection and put it on the Queue.""" try: s, addr = self.socket.accept() if self.stats['Enabled']: self.stats['Accepts'] += 1 if not self.ready: return prevent_socket_inheritance(s) if hasattr(s, 'settimeout'): s.settimeout(self.timeout) makefile = CP_fileobject ssl_env = {} # if ssl cert and key are set, we try to be a secure HTTP server if self.ssl_adapter is not None: try: s, ssl_env = self.ssl_adapter.wrap(s) except NoSSLError: msg = ("The client sent a plain HTTP request, but " "this server only speaks HTTPS on this port.") buf = ["%s 400 Bad Request\r\n" % self.protocol, "Content-Length: %s\r\n" % len(msg), "Content-Type: text/plain\r\n\r\n", msg] wfile = makefile(s._sock, "wb", DEFAULT_BUFFER_SIZE) try: 
wfile.sendall("".join(buf)) except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: raise return if not s: return makefile = self.ssl_adapter.makefile # Re-apply our timeout since we may have a new socket object if hasattr(s, 'settimeout'): s.settimeout(self.timeout) conn = self.ConnectionClass(self, s, makefile) if not isinstance(self.bind_addr, basestring): # optional values # Until we do DNS lookups, omit REMOTE_HOST if addr is None: # sometimes this can happen # figure out if AF_INET or AF_INET6. if len(s.getsockname()) == 2: # AF_INET addr = ('0.0.0.0', 0) else: # AF_INET6 addr = ('::', 0) conn.remote_addr = addr[0] conn.remote_port = addr[1] conn.ssl_env = ssl_env try: self.requests.put(conn) except queue.Full: # Just drop the conn. TODO: write 503 back? conn.close() return except socket.timeout: # The only reason for the timeout in start() is so we can # notice keyboard interrupts on Win32, which don't interrupt # accept() by default return except socket.error: x = sys.exc_info()[1] if self.stats['Enabled']: self.stats['Socket Errors'] += 1 if x.args[0] in socket_error_eintr: # I *think* this is right. EINTR should occur when a signal # is received during the accept() call; all docs say retry # the call, and I *think* I'm reading it right that Python # will then go ahead and poll for and handle the signal # elsewhere. See # https://bitbucket.org/cherrypy/cherrypy/issue/707. return if x.args[0] in socket_errors_nonblocking: # Just try again. See # https://bitbucket.org/cherrypy/cherrypy/issue/479. return if x.args[0] in socket_errors_to_ignore: # Our socket was closed. # See https://bitbucket.org/cherrypy/cherrypy/issue/686. 
return raise def _get_interrupt(self): return self._interrupt def _set_interrupt(self, interrupt): self._interrupt = True self.stop() self._interrupt = interrupt interrupt = property(_get_interrupt, _set_interrupt, doc="Set this to an Exception instance to " "interrupt the server.") def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False if self._start_time is not None: self._run_time += (time.time() - self._start_time) self._start_time = None sock = getattr(self, "socket", None) if sock: if not isinstance(self.bind_addr, basestring): # Touch our own socket to make accept() return immediately. try: host, port = sock.getsockname()[:2] except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: # Changed to use error code and not message # See # https://bitbucket.org/cherrypy/cherrypy/issue/860. raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it will if we bound to '0.0.0.0' (INADDR_ANY). for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See # http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, "close"): sock.close() self.socket = None self.requests.stop(self.shutdown_timeout) class Gateway(object): """A base class to interface HTTPServer with other systems, such as WSGI. """ def __init__(self, req): self.req = req def respond(self): """Process the current request. Must be overridden in a subclass.""" raise NotImplemented # These may either be wsgiserver.SSLAdapter subclasses or the string names # of such classes (in which case they will be lazily loaded). 
ssl_adapters = { 'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter', 'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter', } def get_ssl_adapter_class(name='pyopenssl'): """Return an SSL adapter class for the given name.""" adapter = ssl_adapters[name.lower()] if isinstance(adapter, basestring): last_dot = adapter.rfind(".") attr_name = adapter[last_dot + 1:] mod_path = adapter[:last_dot] try: mod = sys.modules[mod_path] if mod is None: raise KeyError() except KeyError: # The last [''] is important. mod = __import__(mod_path, globals(), locals(), ['']) # Let an AttributeError propagate outward. try: adapter = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) return adapter # ------------------------------- WSGI Stuff -------------------------------- # class CherryPyWSGIServer(HTTPServer): """A subclass of HTTPServer which calls a WSGI application.""" wsgi_version = (1, 0) """The version of WSGI to produce.""" def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None, max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5, accepted_queue_size=-1, accepted_queue_timeout=10): self.requests = ThreadPool(self, min=numthreads or 1, max=max, accepted_queue_size=accepted_queue_size, accepted_queue_timeout=accepted_queue_timeout) self.wsgi_app = wsgi_app self.gateway = wsgi_gateways[self.wsgi_version] self.bind_addr = bind_addr if not server_name: server_name = socket.gethostname() self.server_name = server_name self.request_queue_size = request_queue_size self.timeout = timeout self.shutdown_timeout = shutdown_timeout self.clear_stats() def _get_numthreads(self): return self.requests.min def _set_numthreads(self, value): self.requests.min = value numthreads = property(_get_numthreads, _set_numthreads) class WSGIGateway(Gateway): """A base class to interface HTTPServer with WSGI.""" def __init__(self, req): self.req = req self.started_response = False 
self.env = self.get_environ() self.remaining_bytes_out = None def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" raise NotImplemented def respond(self): """Process the current request.""" response = self.req.server.wsgi_app(self.env, self.start_response) try: for chunk in response: # "The start_response callable must not actually transmit # the response headers. Instead, it must store them for the # server or gateway to transmit only after the first # iteration of the application return value that yields # a NON-EMPTY string, or upon the application's first # invocation of the write() callable." (PEP 333) if chunk: if isinstance(chunk, unicodestr): chunk = chunk.encode('ISO-8859-1') self.write(chunk) finally: if hasattr(response, "close"): response.close() def start_response(self, status, headers, exc_info=None): """WSGI callable to begin the HTTP response.""" # "The application may call start_response more than once, # if and only if the exc_info argument is provided." if self.started_response and not exc_info: raise AssertionError("WSGI start_response called a second " "time with no exc_info.") self.started_response = True # "if exc_info is provided, and the HTTP headers have already been # sent, start_response must raise an error, and should raise the # exc_info tuple." if self.req.sent_headers: try: raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None self.req.status = status for k, v in headers: if not isinstance(k, str): raise TypeError( "WSGI response header key %r is not of type str." % k) if not isinstance(v, str): raise TypeError( "WSGI response header value %r is not of type str." % v) if k.lower() == 'content-length': self.remaining_bytes_out = int(v) self.req.outheaders.extend(headers) return self.write def write(self, chunk): """WSGI callable to write unbuffered data to the client. 
This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). """ if not self.started_response: raise AssertionError("WSGI write called before start_response.") chunklen = len(chunk) rbo = self.remaining_bytes_out if rbo is not None and chunklen > rbo: if not self.req.sent_headers: # Whew. We can send a 500 to the client. self.req.simple_response( "500 Internal Server Error", "The requested resource returned more bytes than the " "declared Content-Length.") else: # Dang. We have probably already sent data. Truncate the chunk # to fit (so the client doesn't hang) and raise an error later. chunk = chunk[:rbo] if not self.req.sent_headers: self.req.sent_headers = True self.req.send_headers() self.req.write(chunk) if rbo is not None: rbo -= chunklen if rbo < 0: raise ValueError( "Response body exceeds the declared Content-Length.") class WSGIGateway_10(WSGIGateway): """A Gateway class to interface HTTPServer with WSGI 1.0.x.""" def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env = { # set a non-standard environ entry so the WSGI app can know what # the *real* server protocol is (and what features to support). # See http://www.faqs.org/rfcs/rfc2145.html. 'ACTUAL_SERVER_PROTOCOL': req.server.protocol, 'PATH_INFO': req.path, 'QUERY_STRING': req.qs, 'REMOTE_ADDR': req.conn.remote_addr or '', 'REMOTE_PORT': str(req.conn.remote_port or ''), 'REQUEST_METHOD': req.method, 'REQUEST_URI': req.uri, 'SCRIPT_NAME': '', 'SERVER_NAME': req.server.server_name, # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol. 'SERVER_PROTOCOL': req.request_protocol, 'SERVER_SOFTWARE': req.server.software, 'wsgi.errors': sys.stderr, 'wsgi.input': req.rfile, 'wsgi.multiprocess': False, 'wsgi.multithread': True, 'wsgi.run_once': False, 'wsgi.url_scheme': req.scheme, 'wsgi.version': (1, 0), } if isinstance(req.server.bind_addr, basestring): # AF_UNIX. 
This isn't really allowed by WSGI, which doesn't # address unix domain sockets. But it's better than nothing. env["SERVER_PORT"] = "" else: env["SERVER_PORT"] = str(req.server.bind_addr[1]) # Request headers for k, v in req.inheaders.iteritems(): env["HTTP_" + k.upper().replace("-", "_")] = v # CONTENT_TYPE/CONTENT_LENGTH ct = env.pop("HTTP_CONTENT_TYPE", None) if ct is not None: env["CONTENT_TYPE"] = ct cl = env.pop("HTTP_CONTENT_LENGTH", None) if cl is not None: env["CONTENT_LENGTH"] = cl if req.conn.ssl_env: env.update(req.conn.ssl_env) return env class WSGIGateway_u0(WSGIGateway_10): """A Gateway class to interface HTTPServer with WSGI u.0. WSGI u.0 is an experimental protocol, which uses unicode for keys and values in both Python 2 and Python 3. """ def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env_10 = WSGIGateway_10.get_environ(self) env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()]) env[u'wsgi.version'] = ('u', 0) # Request-URI env.setdefault(u'wsgi.url_encoding', u'utf-8') try: for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]: env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding']) except UnicodeDecodeError: # Fall back to latin 1 so apps can transcode if needed. env[u'wsgi.url_encoding'] = u'ISO-8859-1' for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]: env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding']) for k, v in sorted(env.items()): if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'): env[k] = v.decode('ISO-8859-1') return env wsgi_gateways = { (1, 0): WSGIGateway_10, ('u', 0): WSGIGateway_u0, } class WSGIPathInfoDispatcher(object): """A WSGI dispatcher for dispatch based on the PATH_INFO. apps: a dict or list of (path_prefix, app) pairs. 
""" def __init__(self, apps): try: apps = list(apps.items()) except AttributeError: pass # Sort the apps by len(path), descending apps.sort(cmp=lambda x, y: cmp(len(x[0]), len(y[0]))) apps.reverse() # The path_prefix strings must start, but not end, with a slash. # Use "" instead of "/". self.apps = [(p.rstrip("/"), a) for p, a in apps] def __call__(self, environ, start_response): path = environ["PATH_INFO"] or "/" for p, app in self.apps: # The apps list should be sorted by length, descending. if path.startswith(p + "/") or path == p: environ = environ.copy() environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p environ["PATH_INFO"] = path[len(p):] return app(environ, start_response) start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', '0')]) return ['']
90,364
Python
.py
2,103
30.21826
79
0.540184
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,966
ssl_pyopenssl.py
evilhero_mylar/lib/cherrypy/wsgiserver/ssl_pyopenssl.py
"""A library for integrating pyOpenSSL with CherryPy. The OpenSSL module must be importable for SSL functionality. You can obtain it from `here <https://launchpad.net/pyopenssl>`_. To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of SSLAdapter. There are two ways to use SSL: Method One ---------- * ``ssl_adapter.context``: an instance of SSL.Context. If this is not None, it is assumed to be an SSL.Context instance, and will be passed to SSL.Connection on bind(). The developer is responsible for forming a valid Context object. This approach is to be preferred for more flexibility, e.g. if the cert and key are streams instead of files, or need decryption, or SSL.SSLv3_METHOD is desired instead of the default SSL.SSLv23_METHOD, etc. Consult the pyOpenSSL documentation for complete options. Method Two (shortcut) --------------------- * ``ssl_adapter.certificate``: the filename of the server SSL certificate. * ``ssl_adapter.private_key``: the filename of the server's private key file. Both are None by default. If ssl_adapter.context is None, but .private_key and .certificate are both given and valid, they will be read, and the context will be automatically created from them. """ import socket import threading import time from cherrypy import wsgiserver try: from OpenSSL import SSL from OpenSSL import crypto except ImportError: SSL = None class SSL_fileobject(wsgiserver.CP_fileobject): """SSL file object attached to a socket object.""" ssl_timeout = 3 ssl_retry = .01 def _safe_call(self, is_reader, call, *args, **kwargs): """Wrap the given call with SSL error-trapping. is_reader: if False EOF errors will be raised. If True, EOF errors will return "" (to emulate normal sockets). """ start = time.time() while True: try: return call(*args, **kwargs) except SSL.WantReadError: # Sleep and try again. This is dangerous, because it means # the rest of the stack has no way of differentiating # between a "new handshake" error and "client dropped". 
# Note this isn't an endless loop: there's a timeout below. time.sleep(self.ssl_retry) except SSL.WantWriteError: time.sleep(self.ssl_retry) except SSL.SysCallError, e: if is_reader and e.args == (-1, 'Unexpected EOF'): return "" errnum = e.args[0] if is_reader and errnum in wsgiserver.socket_errors_to_ignore: return "" raise socket.error(errnum) except SSL.Error, e: if is_reader and e.args == (-1, 'Unexpected EOF'): return "" thirdarg = None try: thirdarg = e.args[0][0][2] except IndexError: pass if thirdarg == 'http request': # The client is talking HTTP to an HTTPS server. raise wsgiserver.NoSSLError() raise wsgiserver.FatalSSLAlert(*e.args) except: raise if time.time() - start > self.ssl_timeout: raise socket.timeout("timed out") def recv(self, size): return self._safe_call(True, super(SSL_fileobject, self).recv, size) def sendall(self, *args, **kwargs): return self._safe_call(False, super(SSL_fileobject, self).sendall, *args, **kwargs) def send(self, *args, **kwargs): return self._safe_call(False, super(SSL_fileobject, self).send, *args, **kwargs) class SSLConnection: """A thread-safe wrapper for an SSL.Connection. ``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``. 
""" def __init__(self, *args): self._ssl_conn = SSL.Connection(*args) self._lock = threading.RLock() for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read', 'renegotiate', 'bind', 'listen', 'connect', 'accept', 'setblocking', 'fileno', 'close', 'get_cipher_list', 'getpeername', 'getsockname', 'getsockopt', 'setsockopt', 'makefile', 'get_app_data', 'set_app_data', 'state_string', 'sock_shutdown', 'get_peer_certificate', 'want_read', 'want_write', 'set_connect_state', 'set_accept_state', 'connect_ex', 'sendall', 'settimeout', 'gettimeout'): exec("""def %s(self, *args): self._lock.acquire() try: return self._ssl_conn.%s(*args) finally: self._lock.release() """ % (f, f)) def shutdown(self, *args): self._lock.acquire() try: # pyOpenSSL.socket.shutdown takes no args return self._ssl_conn.shutdown() finally: self._lock.release() class pyOpenSSLAdapter(wsgiserver.SSLAdapter): """A wrapper for integrating pyOpenSSL with CherryPy.""" context = None """An instance of SSL.Context.""" certificate = None """The filename of the server SSL certificate.""" private_key = None """The filename of the server's private key file.""" certificate_chain = None """Optional. The filename of CA's intermediate certificate bundle. 
This is needed for cheaper "chained root" SSL certificates, and should be left as None if not required.""" def __init__(self, certificate, private_key, certificate_chain=None): if SSL is None: raise ImportError("You must install pyOpenSSL to use HTTPS.") self.context = None self.certificate = certificate self.private_key = private_key self.certificate_chain = certificate_chain self._environ = None def bind(self, sock): """Wrap and return the given socket.""" if self.context is None: self.context = self.get_context() conn = SSLConnection(self.context, sock) self._environ = self.get_environ() return conn def wrap(self, sock): """Wrap and return the given socket, plus WSGI environ entries.""" return sock, self._environ.copy() def get_context(self): """Return an SSL.Context from self attributes.""" # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473 c = SSL.Context(SSL.SSLv23_METHOD) c.use_privatekey_file(self.private_key) if self.certificate_chain: c.load_verify_locations(self.certificate_chain) c.use_certificate_file(self.certificate) return c def get_environ(self): """Return WSGI environ entries to be merged into each request.""" ssl_environ = { "HTTPS": "on", # pyOpenSSL doesn't provide access to any of these AFAICT # 'SSL_PROTOCOL': 'SSLv2', # SSL_CIPHER string The cipher specification name # SSL_VERSION_INTERFACE string The mod_ssl program version # SSL_VERSION_LIBRARY string The OpenSSL program version } if self.certificate: # Server certificate attributes cert = open(self.certificate, 'rb').read() cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) ssl_environ.update({ 'SSL_SERVER_M_VERSION': cert.get_version(), 'SSL_SERVER_M_SERIAL': cert.get_serial_number(), # 'SSL_SERVER_V_START': # Validity of server's certificate (start time), # 'SSL_SERVER_V_END': # Validity of server's certificate (end time), }) for prefix, dn in [("I", cert.get_issuer()), ("S", cert.get_subject())]: # X509Name objects don't seem to have a way to get the # complete 
DN string. Use str() and slice it instead, # because str(dn) == "<X509Name object '/C=US/ST=...'>" dnstr = str(dn)[18:-2] wsgikey = 'SSL_SERVER_%s_DN' % prefix ssl_environ[wsgikey] = dnstr # The DN should be of the form: /k1=v1/k2=v2, but we must allow # for any value to contain slashes itself (in a URL). while dnstr: pos = dnstr.rfind("=") dnstr, value = dnstr[:pos], dnstr[pos + 1:] pos = dnstr.rfind("/") dnstr, key = dnstr[:pos], dnstr[pos + 1:] if key and value: wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key) ssl_environ[wsgikey] = value return ssl_environ def makefile(self, sock, mode='r', bufsize=-1): if SSL and isinstance(sock, SSL.ConnectionType): timeout = sock.gettimeout() f = SSL_fileobject(sock, mode, bufsize) f.ssl_timeout = timeout return f else: return wsgiserver.CP_fileobject(sock, mode, bufsize)
9,219
Python
.py
201
35.144279
79
0.59168
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,967
ssl_builtin.py
evilhero_mylar/lib/cherrypy/wsgiserver/ssl_builtin.py
"""A library for integrating Python's builtin ``ssl`` library with CherryPy. The ssl module must be importable for SSL functionality. To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of ``BuiltinSSLAdapter``. """ try: import ssl except ImportError: ssl = None try: from _pyio import DEFAULT_BUFFER_SIZE except ImportError: try: from io import DEFAULT_BUFFER_SIZE except ImportError: DEFAULT_BUFFER_SIZE = -1 import sys from cherrypy import wsgiserver class BuiltinSSLAdapter(wsgiserver.SSLAdapter): """A wrapper for integrating Python's builtin ssl module with CherryPy.""" certificate = None """The filename of the server SSL certificate.""" private_key = None """The filename of the server's private key file.""" def __init__(self, certificate, private_key, certificate_chain=None): if ssl is None: raise ImportError("You must install the ssl module to use HTTPS.") self.certificate = certificate self.private_key = private_key self.certificate_chain = certificate_chain def bind(self, sock): """Wrap and return the given socket.""" return sock def wrap(self, sock): """Wrap and return the given socket, plus WSGI environ entries.""" try: s = ssl.wrap_socket(sock, do_handshake_on_connect=True, server_side=True, certfile=self.certificate, keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23) except ssl.SSLError: e = sys.exc_info()[1] if e.errno == ssl.SSL_ERROR_EOF: # This is almost certainly due to the cherrypy engine # 'pinging' the socket to assert it's connectable; # the 'ping' isn't SSL. return None, {} elif e.errno == ssl.SSL_ERROR_SSL: if e.args[1].endswith('http request'): # The client is speaking HTTP to an HTTPS server. raise wsgiserver.NoSSLError elif e.args[1].endswith('unknown protocol'): # The client is speaking some non-HTTP protocol. # Drop the conn. 
return None, {} raise return s, self.get_environ(s) # TODO: fill this out more with mod ssl env def get_environ(self, sock): """Create WSGI environ entries to be merged into each request.""" cipher = sock.cipher() ssl_environ = { "wsgi.url_scheme": "https", "HTTPS": "on", 'SSL_PROTOCOL': cipher[1], 'SSL_CIPHER': cipher[0] # SSL_VERSION_INTERFACE string The mod_ssl program version # SSL_VERSION_LIBRARY string The OpenSSL program version } return ssl_environ if sys.version_info >= (3, 0): def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): return wsgiserver.CP_makefile(sock, mode, bufsize) else: def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): return wsgiserver.CP_fileobject(sock, mode, bufsize)
3,242
Python
.py
76
32.5
78
0.606349
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,968
wsgiserver3.py
evilhero_mylar/lib/cherrypy/wsgiserver/wsgiserver3.py
"""A high-speed, production ready, thread pooled, generic HTTP server. Simplest example on how to use this module directly (without using CherryPy's application machinery):: from cherrypy import wsgiserver def my_crazy_app(environ, start_response): status = '200 OK' response_headers = [('Content-type','text/plain')] start_response(status, response_headers) return ['Hello world!'] server = wsgiserver.CherryPyWSGIServer( ('0.0.0.0', 8070), my_crazy_app, server_name='www.cherrypy.example') server.start() The CherryPy WSGI server can serve as many WSGI applications as you want in one instance by using a WSGIPathInfoDispatcher:: d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. This won't call the CherryPy engine (application side) at all, only the HTTP server, which is independent from the rest of CherryPy. Don't let the name "CherryPyWSGIServer" throw you; the name merely reflects its origin, not its coupling. For those of you wanting to understand internals of this module, here's the basic call flow. The server's listening thread runs a very tight loop, sticking incoming connections onto a Queue:: server = CherryPyWSGIServer(...) server.start() while True: tick() # This blocks until a request comes in: child = socket.accept() conn = HTTPConnection(child, ...) server.requests.put(conn) Worker threads are kept in a pool and poll the Queue, popping off and then handling each connection in turn. Each connection can consist of an arbitrary number of requests and their responses, so we run a nested loop:: while True: conn = server.requests.get() conn.communicate() -> while True: req = HTTPRequest(...) req.parse_request() -> # Read the Request-Line, e.g. "GET /page HTTP/1.1" req.rfile.readline() read_headers(req.rfile, req.inheaders) req.respond() -> response = app(...) 
try: for chunk in response: if chunk: req.write(chunk) finally: if hasattr(response, "close"): response.close() if req.close_connection: return """ __all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 'CP_makefile', 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 'WorkerThread', 'ThreadPool', 'SSLAdapter', 'CherryPyWSGIServer', 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] import os try: import queue except: import Queue as queue import re import email.utils import socket import sys if 'win' in sys.platform and hasattr(socket, "AF_INET6"): if not hasattr(socket, 'IPPROTO_IPV6'): socket.IPPROTO_IPV6 = 41 if not hasattr(socket, 'IPV6_V6ONLY'): socket.IPV6_V6ONLY = 27 if sys.version_info < (3, 1): import io else: import _pyio as io DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE import threading import time from traceback import format_exc if sys.version_info >= (3, 0): bytestr = bytes unicodestr = str basestring = (bytes, str) def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. """ # In Python 3, the native string type is unicode return n.encode(encoding) else: bytestr = str unicodestr = unicode basestring = basestring def ntob(n, encoding='ISO-8859-1'): """Return the given native string as a byte string in the given encoding. """ # In Python 2, the native string type is bytes. Assume it's already # in the given encoding, which for ISO-8859-1 is almost always what # was intended. return n LF = ntob('\n') CRLF = ntob('\r\n') TAB = ntob('\t') SPACE = ntob(' ') COLON = ntob(':') SEMICOLON = ntob(';') EMPTY = ntob('') NUMBER_SIGN = ntob('#') QUESTION_MARK = ntob('?') ASTERISK = ntob('*') FORWARD_SLASH = ntob('/') quoted_slash = re.compile(ntob("(?i)%2F")) import errno def plat_specific_errors(*errnames): """Return error numbers for all errors in errnames on this platform. 
The 'errno' module contains different global constants depending on the specific platform (OS). This function will return the list of numeric values for a given list of potential names. """ errno_names = dir(errno) nums = [getattr(errno, k) for k in errnames if k in errno_names] # de-dupe the list return list(dict.fromkeys(nums).keys()) socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR") socket_errors_to_ignore = plat_specific_errors( "EPIPE", "EBADF", "WSAEBADF", "ENOTSOCK", "WSAENOTSOCK", "ETIMEDOUT", "WSAETIMEDOUT", "ECONNREFUSED", "WSAECONNREFUSED", "ECONNRESET", "WSAECONNRESET", "ECONNABORTED", "WSAECONNABORTED", "ENETRESET", "WSAENETRESET", "EHOSTDOWN", "EHOSTUNREACH", ) socket_errors_to_ignore.append("timed out") socket_errors_to_ignore.append("The read operation timed out") socket_errors_nonblocking = plat_specific_errors( 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK') comma_separated_headers = [ ntob(h) for h in ['Accept', 'Accept-Charset', 'Accept-Encoding', 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection', 'Content-Encoding', 'Content-Language', 'Expect', 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE', 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'WWW-Authenticate'] ] import logging if not hasattr(logging, 'statistics'): logging.statistics = {} def read_headers(rfile, hdict=None): """Read headers from the given stream into the given header dict. If hdict is None, a new header dict is created. Returns the populated header dict. Headers which are repeated are folded together using a comma if their specification so dictates. This function raises ValueError when the read bytes violate the HTTP spec. You should probably return "400 Bad Request" if this happens. 
""" if hdict is None: hdict = {} while True: line = rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") if line[0] in (SPACE, TAB): # It's a continuation line. v = line.strip() else: try: k, v = line.split(COLON, 1) except ValueError: raise ValueError("Illegal header line.") # TODO: what about TE and WWW-Authenticate? k = k.strip().title() v = v.strip() hname = k if k in comma_separated_headers: existing = hdict.get(hname) if existing: v = b", ".join((existing, v)) hdict[hname] = v return hdict class MaxSizeExceeded(Exception): pass class SizeCheckWrapper(object): """Wraps a file-like object, raising MaxSizeExceeded if too large.""" def __init__(self, rfile, maxlen): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 def _check_length(self): if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded() def read(self, size=None): data = self.rfile.read(size) self.bytes_read += len(data) self._check_length() return data def readline(self, size=None): if size is not None: data = self.rfile.readline(size) self.bytes_read += len(data) self._check_length() return data # User didn't specify a size ... # We read the line in chunks to make sure it's not a 100MB line ! 
res = [] while True: data = self.rfile.readline(256) self.bytes_read += len(data) self._check_length() res.append(data) # See https://bitbucket.org/cherrypy/cherrypy/issue/421 if len(data) < 256 or data[-1:] == LF: return EMPTY.join(res) def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline() while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline() return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.bytes_read += len(data) self._check_length() return data def next(self): data = self.rfile.next() self.bytes_read += len(data) self._check_length() return data class KnownLengthRFile(object): """Wraps a file-like object, returning an empty string when exhausted.""" def __init__(self, rfile, content_length): self.rfile = rfile self.remaining = content_length def read(self, size=None): if self.remaining == 0: return b'' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.read(size) self.remaining -= len(data) return data def readline(self, size=None): if self.remaining == 0: return b'' if size is None: size = self.remaining else: size = min(size, self.remaining) data = self.rfile.readline(size) self.remaining -= len(data) return data def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def close(self): self.rfile.close() def __iter__(self): return self def __next__(self): data = next(self.rfile) self.remaining -= len(data) return data class ChunkedRFile(object): """Wraps a file-like object, returning an empty string when exhausted. 
This class is intended to provide a conforming wsgi.input value for request entities that have been encoded with the 'chunked' transfer encoding. """ def __init__(self, rfile, maxlen, bufsize=8192): self.rfile = rfile self.maxlen = maxlen self.bytes_read = 0 self.buffer = EMPTY self.bufsize = bufsize self.closed = False def _fetch(self): if self.closed: return line = self.rfile.readline() self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) line = line.strip().split(SEMICOLON, 1) try: chunk_size = line.pop(0) chunk_size = int(chunk_size, 16) except ValueError: raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) if chunk_size <= 0: self.closed = True return ## if line: chunk_extension = line[0] if self.maxlen and self.bytes_read + chunk_size > self.maxlen: raise IOError("Request Entity Too Large") chunk = self.rfile.read(chunk_size) self.bytes_read += len(chunk) self.buffer += chunk crlf = self.rfile.read(2) if crlf != CRLF: raise ValueError( "Bad chunked transfer coding (expected '\\r\\n', " "got " + repr(crlf) + ")") def read(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data if size: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: data += self.buffer def readline(self, size=None): data = EMPTY while True: if size and len(data) >= size: return data if not self.buffer: self._fetch() if not self.buffer: # EOF return data newline_pos = self.buffer.find(LF) if size: if newline_pos == -1: remaining = size - len(data) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: remaining = min(size - len(data), newline_pos) data += self.buffer[:remaining] self.buffer = self.buffer[remaining:] else: if newline_pos == -1: data += self.buffer else: data += self.buffer[:newline_pos] self.buffer = 
self.buffer[newline_pos:] def readlines(self, sizehint=0): # Shamelessly stolen from StringIO total = 0 lines = [] line = self.readline(sizehint) while line: lines.append(line) total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) return lines def read_trailer_lines(self): if not self.closed: raise ValueError( "Cannot read trailers until the request body has been read.") while True: line = self.rfile.readline() if not line: # No more data--illegal end of headers raise ValueError("Illegal end of headers.") self.bytes_read += len(line) if self.maxlen and self.bytes_read > self.maxlen: raise IOError("Request Entity Too Large") if line == CRLF: # Normal end of headers break if not line.endswith(CRLF): raise ValueError("HTTP requires CRLF terminators") yield line def close(self): self.rfile.close() def __iter__(self): # Shamelessly stolen from StringIO total = 0 line = self.readline(sizehint) while line: yield line total += len(line) if 0 < sizehint <= total: break line = self.readline(sizehint) class HTTPRequest(object): """An HTTP Request (and response). A single HTTP connection may consist of multiple request/response pairs. """ server = None """The HTTPServer object which is receiving this request.""" conn = None """The HTTPConnection object on which this request connected.""" inheaders = {} """A dict of request headers.""" outheaders = [] """A list of header tuples to write in the response.""" ready = False """When True, the request has been parsed and is ready to begin generating the response. When False, signals the calling Connection that the response should not be generated and the connection should close.""" close_connection = False """Signals the calling Connection that the request should close. This does not imply an error! The client and/or server may each request that the connection be closed.""" chunked_write = False """If True, output will be encoded with the "chunked" transfer-coding. 
    This value is set automatically inside send_headers."""

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = ntob("http")
        if self.server.ssl_adapter is not None:
            self.scheme = ntob("https")
        # Use the lowest-common protocol in case read_request_line errors.
        self.response_protocol = 'HTTP/1.0'
        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        # Read the class-level default via __class__ so each request starts
        # fresh (instances mutate these per-request).
        self.close_connection = self.__class__.close_connection
        self.chunked_read = False
        self.chunked_write = self.__class__.chunked_write

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Cap total header bytes read; SizeCheckWrapper raises
        # MaxSizeExceeded past the limit.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            success = self.read_request_line()
        except MaxSizeExceeded:
            self.simple_response(
                "414 Request-URI Too Long",
                "The Request-URI sent with the request exceeds the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        try:
            success = self.read_request_headers()
        except MaxSizeExceeded:
            self.simple_response(
                "413 Request Entity Too Large",
                "The headers sent with the request exceed the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        self.ready = True

    def read_request_line(self):
        """Read and parse the Request-Line. Return success (bool).

        On failure a simple_response has already been written.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            return False

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                return False

        if not request_line.endswith(CRLF):
            self.simple_response(
                "400 Bad Request", "HTTP requires CRLF terminators")
            return False

        try:
            method, uri, req_protocol = request_line.strip().split(SPACE, 2)
            # The [x:y] slicing is necessary for byte strings to avoid getting
            # ord's
            rp = int(req_protocol[5:6]), int(req_protocol[7:8])
        except ValueError:
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return False

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        if NUMBER_SIGN in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return False

        if scheme:
            self.scheme = scheme

        qs = EMPTY
        if QUESTION_MARK in path:
            path, qs = path.split(QUESTION_MARK, 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)]
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return False
        path = b"%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.

        # The [x:y] slicing is necessary for byte strings to avoid getting
        # ord's
        sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return False

        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

        return True

    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""
        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs:
            self.simple_response(
                "413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get(b"Connection", b"") == b"close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get(b"Connection", b"") != b"Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get(b"Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(b",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == b"chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get(b"Expect", b"") == b"100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See
            # https://bitbucket.org/cherrypy/cherrypy/issue/951
            msg = self.server.protocol.encode(
                'ascii') + b" 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.write(msg)
            except socket.error:
                x = sys.exc_info()[1]
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI    = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path      = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path      = "/"  path_segments
            path_segments = segment *( "/" segment )
            segment       = *pchar *( ";" param )
            param         = *pchar
        """
        if uri == ASTERISK:
            return None, None, uri

        scheme, sep, remainder = uri.partition(b'://')
        if sep and QUESTION_MARK not in scheme:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query
            # ]]
            authority, path_a, path_b = remainder.partition(FORWARD_SLASH)
            return scheme.lower(), authority, path_a + path_b

        if uri.startswith(FORWARD_SLASH):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def unquote_bytes(self, path):
        """takes quoted string and unquotes % encoded values"""
        res = path.split(b'%')

        for i in range(1, len(res)):
            item = res[i]
            try:
                # Each split piece starts with the two hex digits of the
                # escape; decode them to a single byte.
                res[i] = bytes([int(item[:2], 16)]) + item[2:]
            except ValueError:
                raise
        return b''.join(res)

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get(b"Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response(
                        "413 Request Entity Too Large",
                        "The entity sent with the request exceeds the "
                        "maximum allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminating zero-length chunk for the chunked response body.
            self.conn.wfile.write(b"0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [bytes(self.server.protocol, "ascii") + SPACE +
               bytes(status, "ISO-8859-1") + CRLF,
               bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"),
               b"Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append(b"Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            # NOTE(review): unicodestr is presumably the py2/py3 compat alias
            # defined at module level -- confirm against the file header.
            if isinstance(msg, unicodestr):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.write(b"".join(buf))
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # chunk-size in hex (without the '0x' prefix), then the data.
            buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
            self.conn.wfile.write(EMPTY.join(buf))
        else:
            self.conn.wfile.write(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif b"content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                        and self.method != b'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append((b"Transfer-Encoding", b"chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if b"connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append((b"Connection", b"close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append((b"Connection", b"Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if b"date" not in hkeys:
            self.outheaders.append((
                b"Date",
                email.utils.formatdate(usegmt=True).encode('ISO-8859-1')
            ))

        if b"server" not in hkeys:
            self.outheaders.append(
                (b"Server", self.server.server_name.encode('ISO-8859-1')))

        buf = [self.server.protocol.encode(
            'ascii') + SPACE + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + COLON + SPACE + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.write(EMPTY.join(buf))


class NoSSLError(Exception):

    """Exception raised when a client speaks HTTP to an HTTPS socket."""

    pass


class FatalSSLAlert(Exception):

    """Exception raised when the SSL implementation signals a fatal alert."""

    pass


class CP_BufferedWriter(io.BufferedWriter):

    """Faux file object attached to a socket object."""

    def write(self, b):
        self._checkClosed()
        if isinstance(b, str):
            raise TypeError("can't write str to binary stream")

        with self._write_lock:
            self._write_buf.extend(b)
            self._flush_unlocked()
            return len(b)

    def _flush_unlocked(self):
        self._checkClosed("flush of closed file")
        while self._write_buf:
            try:
                # ssl sockets only except 'bytes', not bytearrays
                # so perhaps we should conditionally wrap this for perf?
                n = self.raw.write(bytes(self._write_buf))
            except io.BlockingIOError as e:
                n = e.characters_written
            del self._write_buf[:n]


def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
    # Wrap the raw socket in a buffered reader or writer as appropriate.
    if 'r' in mode:
        return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
    else:
        return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize)


class HTTPConnection(object):

    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
""" remote_addr = None remote_port = None ssl_env = None rbufsize = DEFAULT_BUFFER_SIZE wbufsize = DEFAULT_BUFFER_SIZE RequestHandlerClass = HTTPRequest def __init__(self, server, sock, makefile=CP_makefile): self.server = server self.socket = sock self.rfile = makefile(sock, "rb", self.rbufsize) self.wfile = makefile(sock, "wb", self.wbufsize) self.requests_seen = 0 def communicate(self): """Read each request and respond appropriately.""" request_seen = False try: while True: # (re)set req to None so that if something goes wrong in # the RequestHandlerClass constructor, the error doesn't # get written to the previous request. req = None req = self.RequestHandlerClass(self.server, self) # This order of operations should guarantee correct pipelining. req.parse_request() if self.server.stats['Enabled']: self.requests_seen += 1 if not req.ready: # Something went wrong in the parsing (and the server has # probably already made a simple_response). Return and # let the conn close. return request_seen = True req.respond() if req.close_connection: return except socket.error: e = sys.exc_info()[1] errnum = e.args[0] # sadly SSL sockets return a different (longer) time out string if ( errnum == 'timed out' or errnum == 'The read operation timed out' ): # Don't error if we're between requests; only error # if 1) no request has been started at all, or 2) we're # in the middle of a request. # See https://bitbucket.org/cherrypy/cherrypy/issue/853 if (not request_seen) or (req and req.started_request): # Don't bother writing the 408 if the response # has already started being written. if req and not req.sent_headers: try: req.simple_response("408 Request Timeout") except FatalSSLAlert: # Close the connection. 
return elif errnum not in socket_errors_to_ignore: self.server.error_log("socket.error %s" % repr(errnum), level=logging.WARNING, traceback=True) if req and not req.sent_headers: try: req.simple_response("500 Internal Server Error") except FatalSSLAlert: # Close the connection. return return except (KeyboardInterrupt, SystemExit): raise except FatalSSLAlert: # Close the connection. return except NoSSLError: if req and not req.sent_headers: # Unwrap our wfile self.wfile = CP_makefile( self.socket._sock, "wb", self.wbufsize) req.simple_response( "400 Bad Request", "The client sent a plain HTTP request, but this server " "only speaks HTTPS on this port.") self.linger = True except Exception: e = sys.exc_info()[1] self.server.error_log(repr(e), level=logging.ERROR, traceback=True) if req and not req.sent_headers: try: req.simple_response("500 Internal Server Error") except FatalSSLAlert: # Close the connection. return linger = False def close(self): """Close the socket underlying this connection.""" self.rfile.close() if not self.linger: # Python's socket module does NOT call close on the kernel # socket when you call socket.close(). We do so manually here # because we want this server to send a FIN TCP segment # immediately. Note this must be called *before* calling # socket.close(), because the latter drops its reference to # the kernel socket. # Python 3 *probably* fixed this with socket._real_close; # hard to tell. # self.socket._sock.close() self.socket.close() else: # On the other hand, sometimes we want to hang around for a bit # to make sure the client has a chance to read our entire # response. Skipping the close() calls here delays the FIN # packet until the socket object is garbage-collected later. # Someday, perhaps, we'll do the full lingering_close that # Apache does, but not today. pass class TrueyZero(object): """An object which equals and does math like the integer 0 but evals True. 
""" def __add__(self, other): return other def __radd__(self, other): return other trueyzero = TrueyZero() _SHUTDOWNREQUEST = None class WorkerThread(threading.Thread): """Thread which continuously polls a Queue for Connection objects. Due to the timing issues of polling a Queue, a WorkerThread does not check its own 'ready' flag after it has started. To stop the thread, it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue (one for each running WorkerThread). """ conn = None """The current connection pulled off the Queue, or None.""" server = None """The HTTP Server which spawned this thread, and which owns the Queue and is placing active connections into it.""" ready = False """A simple flag for the calling server to know when this thread has begun polling the Queue.""" def __init__(self, server): self.ready = False self.server = server self.requests_seen = 0 self.bytes_read = 0 self.bytes_written = 0 self.start_time = None self.work_time = 0 self.stats = { 'Requests': lambda s: self.requests_seen + ( (self.start_time is None) and trueyzero or self.conn.requests_seen ), 'Bytes Read': lambda s: self.bytes_read + ( (self.start_time is None) and trueyzero or self.conn.rfile.bytes_read ), 'Bytes Written': lambda s: self.bytes_written + ( (self.start_time is None) and trueyzero or self.conn.wfile.bytes_written ), 'Work Time': lambda s: self.work_time + ( (self.start_time is None) and trueyzero or time.time() - self.start_time ), 'Read Throughput': lambda s: s['Bytes Read'](s) / ( s['Work Time'](s) or 1e-6), 'Write Throughput': lambda s: s['Bytes Written'](s) / ( s['Work Time'](s) or 1e-6), } threading.Thread.__init__(self) def run(self): self.server.stats['Worker Threads'][self.getName()] = self.stats try: self.ready = True while True: conn = self.server.requests.get() if conn is _SHUTDOWNREQUEST: return self.conn = conn if self.server.stats['Enabled']: self.start_time = time.time() try: conn.communicate() finally: conn.close() if 
self.server.stats['Enabled']: self.requests_seen += self.conn.requests_seen self.bytes_read += self.conn.rfile.bytes_read self.bytes_written += self.conn.wfile.bytes_written self.work_time += time.time() - self.start_time self.start_time = None self.conn = None except (KeyboardInterrupt, SystemExit): exc = sys.exc_info()[1] self.server.interrupt = exc class ThreadPool(object): """A Request Queue for an HTTPServer which pools threads. ThreadPool objects must provide min, get(), put(obj), start() and stop(timeout) attributes. """ def __init__(self, server, min=10, max=-1, accepted_queue_size=-1, accepted_queue_timeout=10): self.server = server self.min = min self.max = max self._threads = [] self._queue = queue.Queue(maxsize=accepted_queue_size) self._queue_put_timeout = accepted_queue_timeout self.get = self._queue.get def start(self): """Start the pool of threads.""" for i in range(self.min): self._threads.append(WorkerThread(self.server)) for worker in self._threads: worker.setName("CP Server " + worker.getName()) worker.start() for worker in self._threads: while not worker.ready: time.sleep(.1) def _get_idle(self): """Number of worker threads which are idle. 
Read-only.""" return len([t for t in self._threads if t.conn is None]) idle = property(_get_idle, doc=_get_idle.__doc__) def put(self, obj): self._queue.put(obj, block=True, timeout=self._queue_put_timeout) if obj is _SHUTDOWNREQUEST: return def grow(self, amount): """Spawn new worker threads (not above self.max).""" if self.max > 0: budget = max(self.max - len(self._threads), 0) else: # self.max <= 0 indicates no maximum budget = float('inf') n_new = min(amount, budget) workers = [self._spawn_worker() for i in range(n_new)] while not all(worker.ready for worker in workers): time.sleep(.1) self._threads.extend(workers) def _spawn_worker(self): worker = WorkerThread(self.server) worker.setName("CP Server " + worker.getName()) worker.start() return worker def shrink(self, amount): """Kill off worker threads (not below self.min).""" # Grow/shrink the pool if necessary. # Remove any dead threads from our list for t in self._threads: if not t.isAlive(): self._threads.remove(t) amount -= 1 # calculate the number of threads above the minimum n_extra = max(len(self._threads) - self.min, 0) # don't remove more than amount n_to_remove = min(amount, n_extra) # put shutdown requests on the queue equal to the number of threads # to remove. As each request is processed by a worker, that worker # will terminate and be culled from the list. for n in range(n_to_remove): self._queue.put(_SHUTDOWNREQUEST) def stop(self, timeout=5): # Must shut down threads here so the code that calls # this method can know when all threads are stopped. for worker in self._threads: self._queue.put(_SHUTDOWNREQUEST) # Don't join currentThread (when stop is called inside a request). 
current = threading.currentThread() if timeout and timeout >= 0: endtime = time.time() + timeout while self._threads: worker = self._threads.pop() if worker is not current and worker.isAlive(): try: if timeout is None or timeout < 0: worker.join() else: remaining_time = endtime - time.time() if remaining_time > 0: worker.join(remaining_time) if worker.isAlive(): # We exhausted the timeout. # Forcibly shut down the socket. c = worker.conn if c and not c.rfile.closed: try: c.socket.shutdown(socket.SHUT_RD) except TypeError: # pyOpenSSL sockets don't take an arg c.socket.shutdown() worker.join() except (AssertionError, # Ignore repeated Ctrl-C. # See # https://bitbucket.org/cherrypy/cherrypy/issue/691. KeyboardInterrupt): pass def _get_qsize(self): return self._queue.qsize() qsize = property(_get_qsize) try: import fcntl except ImportError: try: from ctypes import windll, WinError import ctypes.wintypes _SetHandleInformation = windll.kernel32.SetHandleInformation _SetHandleInformation.argtypes = [ ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ] _SetHandleInformation.restype = ctypes.wintypes.BOOL except ImportError: def prevent_socket_inheritance(sock): """Dummy function, since neither fcntl nor ctypes are available.""" pass else: def prevent_socket_inheritance(sock): """Mark the given socket fd as non-inheritable (Windows).""" if not _SetHandleInformation(sock.fileno(), 1, 0): raise WinError() else: def prevent_socket_inheritance(sock): """Mark the given socket fd as non-inheritable (POSIX).""" fd = sock.fileno() old_flags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC) class SSLAdapter(object): """Base class for SSL driver library adapters. 
Required methods: * ``wrap(sock) -> (wrapped socket, ssl environ dict)`` * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object`` """ def __init__(self, certificate, private_key, certificate_chain=None): self.certificate = certificate self.private_key = private_key self.certificate_chain = certificate_chain def wrap(self, sock): raise NotImplemented def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE): raise NotImplemented class HTTPServer(object): """An HTTP server.""" _bind_addr = "127.0.0.1" _interrupt = None gateway = None """A Gateway instance.""" minthreads = None """The minimum number of worker threads to create (default 10).""" maxthreads = None """The maximum number of worker threads to create (default -1 = no limit). """ server_name = None """The name of the server; defaults to socket.gethostname().""" protocol = "HTTP/1.1" """The version string to write in the Status-Line of all HTTP responses. For example, "HTTP/1.1" is the default. This also limits the supported features used in the response.""" request_queue_size = 5 """The 'backlog' arg to socket.listen(); max queued connections (default 5). """ shutdown_timeout = 5 """The total time, in seconds, to wait for worker threads to cleanly exit. """ timeout = 10 """The timeout in seconds for accepted connections (default 10).""" version = "CherryPy/3.6.0" """A version string for the HTTPServer.""" software = None """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. If None, this defaults to ``'%s Server' % self.version``.""" ready = False """An internal flag which marks whether the socket is accepting connections. 
""" max_request_header_size = 0 """The maximum size, in bytes, for request headers, or 0 for no limit.""" max_request_body_size = 0 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" nodelay = True """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" ConnectionClass = HTTPConnection """The class to use for handling HTTP connections.""" ssl_adapter = None """An instance of SSLAdapter (or a subclass). You must have the corresponding SSL driver library installed.""" def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, server_name=None): self.bind_addr = bind_addr self.gateway = gateway self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) if not server_name: server_name = socket.gethostname() self.server_name = server_name self.clear_stats() def clear_stats(self): self._start_time = None self._run_time = 0 self.stats = { 'Enabled': False, 'Bind Address': lambda s: repr(self.bind_addr), 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 'Accepts': 0, 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 'Queue': lambda s: getattr(self.requests, "qsize", None), 'Threads': lambda s: len(getattr(self.requests, "_threads", [])), 'Threads Idle': lambda s: getattr(self.requests, "idle", None), 'Socket Errors': 0, 'Requests': lambda s: (not s['Enabled']) and -1 or sum( [w['Requests'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Written'](w) for w in s['Worker Threads'].values()], 0), 'Work Time': lambda s: (not s['Enabled']) and -1 or sum( [w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum( [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Write Throughput': lambda s: (not 
s['Enabled']) and -1 or sum( [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) for w in s['Worker Threads'].values()], 0), 'Worker Threads': {}, } logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats def runtime(self): if self._start_time is None: return self._run_time else: return self._run_time + (time.time() - self._start_time) def __str__(self): return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, self.bind_addr) def _get_bind_addr(self): return self._bind_addr def _set_bind_addr(self, value): if isinstance(value, tuple) and value[0] in ('', None): # Despite the socket module docs, using '' does not # allow AI_PASSIVE to work. Passing None instead # returns '0.0.0.0' like we want. In other words: # host AI_PASSIVE result # '' Y 192.168.x.y # '' N 192.168.x.y # None Y 0.0.0.0 # None N 127.0.0.1 # But since you can get the same effect with an explicit # '0.0.0.0', we deny both the empty string and None as values. raise ValueError("Host values of '' or None are not allowed. " "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " "to listen on all active interfaces.") self._bind_addr = value bind_addr = property( _get_bind_addr, _set_bind_addr, doc="""The interface on which to listen for connections. For TCP sockets, a (host, port) tuple. Host values may be any IPv4 or IPv6 address, or any valid hostname. The string 'localhost' is a synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). The string '0.0.0.0' is a special IPv4 entry meaning "any active interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for IPv6. The empty string or None are not allowed. For UNIX sockets, supply the filename as a string.""") def start(self): """Run the server forever.""" # We don't have to trap KeyboardInterrupt or SystemExit here, # because cherrpy.server already does so, calling self.stop() for us. # If you're using this server with another framework, you should # trap those exceptions in whatever code block calls start(). 
self._interrupt = None if self.software is None: self.software = "%s Server" % self.version # Select the appropriate socket if isinstance(self.bind_addr, basestring): # AF_UNIX socket # So we can reuse the socket... try: os.unlink(self.bind_addr) except: pass # So everyone can access the socket... try: os.chmod(self.bind_addr, 511) # 0777 except: pass info = [ (socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] else: # AF_INET or AF_INET6 socket # Get the correct address family for our host (allows IPv6 # addresses) host, port = self.bind_addr try: info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) except socket.gaierror: if ':' in self.bind_addr[0]: info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, "", self.bind_addr + (0, 0))] else: info = [(socket.AF_INET, socket.SOCK_STREAM, 0, "", self.bind_addr)] self.socket = None msg = "No socket could be created" for res in info: af, socktype, proto, canonname, sa = res try: self.bind(af, socktype, proto) except socket.error as serr: msg = "%s -- (%s: %s)" % (msg, sa, serr) if self.socket: self.socket.close() self.socket = None continue break if not self.socket: raise socket.error(msg) # Timeout so KeyboardInterrupt can be caught on Win32 self.socket.settimeout(1) self.socket.listen(self.request_queue_size) # Create worker threads self.requests.start() self.ready = True self._start_time = time.time() while self.ready: try: self.tick() except (KeyboardInterrupt, SystemExit): raise except: self.error_log("Error in HTTPServer.tick", level=logging.ERROR, traceback=True) if self.interrupt: while self.interrupt is True: # Wait for self.stop() to complete. See _set_interrupt. 
time.sleep(0.1) if self.interrupt: raise self.interrupt def error_log(self, msg="", level=20, traceback=False): # Override this in subclasses as desired sys.stderr.write(msg + '\n') sys.stderr.flush() if traceback: tblines = format_exc() sys.stderr.write(tblines) sys.stderr.flush() def bind(self, family, type, proto=0): """Create (or recreate) the actual socket object.""" self.socket = socket.socket(family, type, proto) prevent_socket_inheritance(self.socket) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if self.nodelay and not isinstance(self.bind_addr, str): self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.ssl_adapter is not None: self.socket = self.ssl_adapter.bind(self.socket) # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), # activate dual-stack. See # https://bitbucket.org/cherrypy/cherrypy/issue/871. if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): try: self.socket.setsockopt( socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) except (AttributeError, socket.error): # Apparently, the socket option is not available in # this machine's TCP stack pass self.socket.bind(self.bind_addr) def tick(self): """Accept a new connection and put it on the Queue.""" try: s, addr = self.socket.accept() if self.stats['Enabled']: self.stats['Accepts'] += 1 if not self.ready: return prevent_socket_inheritance(s) if hasattr(s, 'settimeout'): s.settimeout(self.timeout) makefile = CP_makefile ssl_env = {} # if ssl cert and key are set, we try to be a secure HTTP server if self.ssl_adapter is not None: try: s, ssl_env = self.ssl_adapter.wrap(s) except NoSSLError: msg = ("The client sent a plain HTTP request, but " "this server only speaks HTTPS on this port.") buf = ["%s 400 Bad Request\r\n" % self.protocol, "Content-Length: %s\r\n" % len(msg), "Content-Type: text/plain\r\n\r\n", msg] wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE) try: 
wfile.write("".join(buf).encode('ISO-8859-1')) except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: raise return if not s: return makefile = self.ssl_adapter.makefile # Re-apply our timeout since we may have a new socket object if hasattr(s, 'settimeout'): s.settimeout(self.timeout) conn = self.ConnectionClass(self, s, makefile) if not isinstance(self.bind_addr, basestring): # optional values # Until we do DNS lookups, omit REMOTE_HOST if addr is None: # sometimes this can happen # figure out if AF_INET or AF_INET6. if len(s.getsockname()) == 2: # AF_INET addr = ('0.0.0.0', 0) else: # AF_INET6 addr = ('::', 0) conn.remote_addr = addr[0] conn.remote_port = addr[1] conn.ssl_env = ssl_env try: self.requests.put(conn) except queue.Full: # Just drop the conn. TODO: write 503 back? conn.close() return except socket.timeout: # The only reason for the timeout in start() is so we can # notice keyboard interrupts on Win32, which don't interrupt # accept() by default return except socket.error: x = sys.exc_info()[1] if self.stats['Enabled']: self.stats['Socket Errors'] += 1 if x.args[0] in socket_error_eintr: # I *think* this is right. EINTR should occur when a signal # is received during the accept() call; all docs say retry # the call, and I *think* I'm reading it right that Python # will then go ahead and poll for and handle the signal # elsewhere. See # https://bitbucket.org/cherrypy/cherrypy/issue/707. return if x.args[0] in socket_errors_nonblocking: # Just try again. See # https://bitbucket.org/cherrypy/cherrypy/issue/479. return if x.args[0] in socket_errors_to_ignore: # Our socket was closed. # See https://bitbucket.org/cherrypy/cherrypy/issue/686. 
return raise def _get_interrupt(self): return self._interrupt def _set_interrupt(self, interrupt): self._interrupt = True self.stop() self._interrupt = interrupt interrupt = property(_get_interrupt, _set_interrupt, doc="Set this to an Exception instance to " "interrupt the server.") def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False if self._start_time is not None: self._run_time += (time.time() - self._start_time) self._start_time = None sock = getattr(self, "socket", None) if sock: if not isinstance(self.bind_addr, basestring): # Touch our own socket to make accept() return immediately. try: host, port = sock.getsockname()[:2] except socket.error: x = sys.exc_info()[1] if x.args[0] not in socket_errors_to_ignore: # Changed to use error code and not message # See # https://bitbucket.org/cherrypy/cherrypy/issue/860. raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it will if we bound to '0.0.0.0' (INADDR_ANY). for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See # http://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, "close"): sock.close() self.socket = None self.requests.stop(self.shutdown_timeout) class Gateway(object): """A base class to interface HTTPServer with other systems, such as WSGI. """ def __init__(self, req): self.req = req def respond(self): """Process the current request. Must be overridden in a subclass.""" raise NotImplemented # These may either be wsgiserver.SSLAdapter subclasses or the string names # of such classes (in which case they will be lazily loaded). 
ssl_adapters = { 'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter', } def get_ssl_adapter_class(name='builtin'): """Return an SSL adapter class for the given name.""" adapter = ssl_adapters[name.lower()] if isinstance(adapter, basestring): last_dot = adapter.rfind(".") attr_name = adapter[last_dot + 1:] mod_path = adapter[:last_dot] try: mod = sys.modules[mod_path] if mod is None: raise KeyError() except KeyError: # The last [''] is important. mod = __import__(mod_path, globals(), locals(), ['']) # Let an AttributeError propagate outward. try: adapter = getattr(mod, attr_name) except AttributeError: raise AttributeError("'%s' object has no attribute '%s'" % (mod_path, attr_name)) return adapter # ------------------------------- WSGI Stuff -------------------------------- # class CherryPyWSGIServer(HTTPServer): """A subclass of HTTPServer which calls a WSGI application.""" wsgi_version = (1, 0) """The version of WSGI to produce.""" def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None, max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5, accepted_queue_size=-1, accepted_queue_timeout=10): self.requests = ThreadPool(self, min=numthreads or 1, max=max, accepted_queue_size=accepted_queue_size, accepted_queue_timeout=accepted_queue_timeout) self.wsgi_app = wsgi_app self.gateway = wsgi_gateways[self.wsgi_version] self.bind_addr = bind_addr if not server_name: server_name = socket.gethostname() self.server_name = server_name self.request_queue_size = request_queue_size self.timeout = timeout self.shutdown_timeout = shutdown_timeout self.clear_stats() def _get_numthreads(self): return self.requests.min def _set_numthreads(self, value): self.requests.min = value numthreads = property(_get_numthreads, _set_numthreads) class WSGIGateway(Gateway): """A base class to interface HTTPServer with WSGI.""" def __init__(self, req): self.req = req self.started_response = False self.env = self.get_environ() self.remaining_bytes_out = None def 
get_environ(self): """Return a new environ dict targeting the given wsgi.version""" raise NotImplemented def respond(self): """Process the current request.""" response = self.req.server.wsgi_app(self.env, self.start_response) try: for chunk in response: # "The start_response callable must not actually transmit # the response headers. Instead, it must store them for the # server or gateway to transmit only after the first # iteration of the application return value that yields # a NON-EMPTY string, or upon the application's first # invocation of the write() callable." (PEP 333) if chunk: if isinstance(chunk, unicodestr): chunk = chunk.encode('ISO-8859-1') self.write(chunk) finally: if hasattr(response, "close"): response.close() def start_response(self, status, headers, exc_info=None): """WSGI callable to begin the HTTP response.""" # "The application may call start_response more than once, # if and only if the exc_info argument is provided." if self.started_response and not exc_info: raise AssertionError("WSGI start_response called a second " "time with no exc_info.") self.started_response = True # "if exc_info is provided, and the HTTP headers have already been # sent, start_response must raise an error, and should raise the # exc_info tuple." if self.req.sent_headers: try: raise exc_info[0](exc_info[1]).with_traceback(exc_info[2]) finally: exc_info = None # According to PEP 3333, when using Python 3, the response status # and headers must be bytes masquerading as unicode; that is, they # must be of type "str" but are restricted to code points in the # "latin-1" set. if not isinstance(status, str): raise TypeError("WSGI response status is not of type str.") self.req.status = status.encode('ISO-8859-1') for k, v in headers: if not isinstance(k, str): raise TypeError( "WSGI response header key %r is not of type str." % k) if not isinstance(v, str): raise TypeError( "WSGI response header value %r is not of type str." 
% v) if k.lower() == 'content-length': self.remaining_bytes_out = int(v) self.req.outheaders.append( (k.encode('ISO-8859-1'), v.encode('ISO-8859-1'))) return self.write def write(self, chunk): """WSGI callable to write unbuffered data to the client. This method is also used internally by start_response (to write data from the iterable returned by the WSGI application). """ if not self.started_response: raise AssertionError("WSGI write called before start_response.") chunklen = len(chunk) rbo = self.remaining_bytes_out if rbo is not None and chunklen > rbo: if not self.req.sent_headers: # Whew. We can send a 500 to the client. self.req.simple_response("500 Internal Server Error", "The requested resource returned " "more bytes than the declared " "Content-Length.") else: # Dang. We have probably already sent data. Truncate the chunk # to fit (so the client doesn't hang) and raise an error later. chunk = chunk[:rbo] if not self.req.sent_headers: self.req.sent_headers = True self.req.send_headers() self.req.write(chunk) if rbo is not None: rbo -= chunklen if rbo < 0: raise ValueError( "Response body exceeds the declared Content-Length.") class WSGIGateway_10(WSGIGateway): """A Gateway class to interface HTTPServer with WSGI 1.0.x.""" def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env = { # set a non-standard environ entry so the WSGI app can know what # the *real* server protocol is (and what features to support). # See http://www.faqs.org/rfcs/rfc2145.html. 'ACTUAL_SERVER_PROTOCOL': req.server.protocol, 'PATH_INFO': req.path.decode('ISO-8859-1'), 'QUERY_STRING': req.qs.decode('ISO-8859-1'), 'REMOTE_ADDR': req.conn.remote_addr or '', 'REMOTE_PORT': str(req.conn.remote_port or ''), 'REQUEST_METHOD': req.method.decode('ISO-8859-1'), 'REQUEST_URI': req.uri.decode('ISO-8859-1'), 'SCRIPT_NAME': '', 'SERVER_NAME': req.server.server_name, # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol. 
'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'), 'SERVER_SOFTWARE': req.server.software, 'wsgi.errors': sys.stderr, 'wsgi.input': req.rfile, 'wsgi.multiprocess': False, 'wsgi.multithread': True, 'wsgi.run_once': False, 'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'), 'wsgi.version': (1, 0), } if isinstance(req.server.bind_addr, basestring): # AF_UNIX. This isn't really allowed by WSGI, which doesn't # address unix domain sockets. But it's better than nothing. env["SERVER_PORT"] = "" else: env["SERVER_PORT"] = str(req.server.bind_addr[1]) # Request headers for k, v in req.inheaders.items(): k = k.decode('ISO-8859-1').upper().replace("-", "_") env["HTTP_" + k] = v.decode('ISO-8859-1') # CONTENT_TYPE/CONTENT_LENGTH ct = env.pop("HTTP_CONTENT_TYPE", None) if ct is not None: env["CONTENT_TYPE"] = ct cl = env.pop("HTTP_CONTENT_LENGTH", None) if cl is not None: env["CONTENT_LENGTH"] = cl if req.conn.ssl_env: env.update(req.conn.ssl_env) return env class WSGIGateway_u0(WSGIGateway_10): """A Gateway class to interface HTTPServer with WSGI u.0. WSGI u.0 is an experimental protocol, which uses unicode for keys and values in both Python 2 and Python 3. """ def get_environ(self): """Return a new environ dict targeting the given wsgi.version""" req = self.req env_10 = WSGIGateway_10.get_environ(self) env = env_10.copy() env['wsgi.version'] = ('u', 0) # Request-URI env.setdefault('wsgi.url_encoding', 'utf-8') try: # SCRIPT_NAME is the empty string, who cares what encoding it is? env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding']) env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding']) except UnicodeDecodeError: # Fall back to latin 1 so apps can transcode if needed. 
env['wsgi.url_encoding'] = 'ISO-8859-1' env["PATH_INFO"] = env_10["PATH_INFO"] env["QUERY_STRING"] = env_10["QUERY_STRING"] return env wsgi_gateways = { (1, 0): WSGIGateway_10, ('u', 0): WSGIGateway_u0, } class WSGIPathInfoDispatcher(object): """A WSGI dispatcher for dispatch based on the PATH_INFO. apps: a dict or list of (path_prefix, app) pairs. """ def __init__(self, apps): try: apps = list(apps.items()) except AttributeError: pass # Sort the apps by len(path), descending apps.sort() apps.reverse() # The path_prefix strings must start, but not end, with a slash. # Use "" instead of "/". self.apps = [(p.rstrip("/"), a) for p, a in apps] def __call__(self, environ, start_response): path = environ["PATH_INFO"] or "/" for p, app in self.apps: # The apps list should be sorted by length, descending. if path.startswith(p + "/") or path == p: environ = environ.copy() environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p environ["PATH_INFO"] = path[len(p):] return app(environ, start_response) start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', '0')]) return ['']
79,001
Python
.py
1,816
31.726322
79
0.561848
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,969
__init__.py
evilhero_mylar/lib/cherrypy/wsgiserver/__init__.py
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 'WorkerThread', 'ThreadPool', 'SSLAdapter', 'CherryPyWSGIServer', 'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] import sys if sys.version_info < (3, 0): from wsgiserver2 import * else: # Le sigh. Boo for backward-incompatible syntax. exec('from .wsgiserver3 import *')
579
Python
.py
13
37.461538
72
0.653097
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,970
tzfile.py
evilhero_mylar/lib/pytz/tzfile.py
#!/usr/bin/env python ''' $Id: tzfile.py,v 1.8 2004/06/03 00:15:24 zenzen Exp $ ''' try: from cStringIO import StringIO except ImportError: from io import StringIO from datetime import datetime, timedelta from struct import unpack, calcsize from pytz.tzinfo import StaticTzInfo, DstTzInfo, memorized_ttinfo from pytz.tzinfo import memorized_datetime, memorized_timedelta def _byte_string(s): """Cast a string or byte string to an ASCII byte string.""" return s.encode('US-ASCII') _NULL = _byte_string('\0') def _std_string(s): """Cast a string or byte string to an ASCII string.""" return str(s.decode('US-ASCII')) def build_tzinfo(zone, fp): head_fmt = '>4s c 15x 6l' head_size = calcsize(head_fmt) (magic, format, ttisgmtcnt, ttisstdcnt,leapcnt, timecnt, typecnt, charcnt) = unpack(head_fmt, fp.read(head_size)) # Make sure it is a tzfile(5) file assert magic == _byte_string('TZif'), 'Got magic %s' % repr(magic) # Read out the transition times, localtime indices and ttinfo structures. data_fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict( timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt) data_size = calcsize(data_fmt) data = unpack(data_fmt, fp.read(data_size)) # make sure we unpacked the right number of values assert len(data) == 2 * timecnt + 3 * typecnt + 1 transitions = [memorized_datetime(trans) for trans in data[:timecnt]] lindexes = list(data[timecnt:2 * timecnt]) ttinfo_raw = data[2 * timecnt:-1] tznames_raw = data[-1] del data # Process ttinfo into separate structs ttinfo = [] tznames = {} i = 0 while i < len(ttinfo_raw): # have we looked up this timezone name yet? 
tzname_offset = ttinfo_raw[i+2] if tzname_offset not in tznames: nul = tznames_raw.find(_NULL, tzname_offset) if nul < 0: nul = len(tznames_raw) tznames[tzname_offset] = _std_string( tznames_raw[tzname_offset:nul]) ttinfo.append((ttinfo_raw[i], bool(ttinfo_raw[i+1]), tznames[tzname_offset])) i += 3 # Now build the timezone object if len(transitions) == 0: ttinfo[0][0], ttinfo[0][2] cls = type(zone, (StaticTzInfo,), dict( zone=zone, _utcoffset=memorized_timedelta(ttinfo[0][0]), _tzname=ttinfo[0][2])) else: # Early dates use the first standard time ttinfo i = 0 while ttinfo[i][1]: i += 1 if ttinfo[i] == ttinfo[lindexes[0]]: transitions[0] = datetime.min else: transitions.insert(0, datetime.min) lindexes.insert(0, i) # calculate transition info transition_info = [] for i in range(len(transitions)): inf = ttinfo[lindexes[i]] utcoffset = inf[0] if not inf[1]: dst = 0 else: for j in range(i-1, -1, -1): prev_inf = ttinfo[lindexes[j]] if not prev_inf[1]: break dst = inf[0] - prev_inf[0] # dst offset # Bad dst? Look further. DST > 24 hours happens when # a timzone has moved across the international dateline. if dst <= 0 or dst > 3600*3: for j in range(i+1, len(transitions)): stdinf = ttinfo[lindexes[j]] if not stdinf[1]: dst = inf[0] - stdinf[0] if dst > 0: break # Found a useful std time. tzname = inf[2] # Round utcoffset and dst to the nearest minute or the # datetime library will complain. Conversions to these timezones # might be up to plus or minus 30 seconds out, but it is # the best we can do. 
utcoffset = int((utcoffset + 30) // 60) * 60 dst = int((dst + 30) // 60) * 60 transition_info.append(memorized_ttinfo(utcoffset, dst, tzname)) cls = type(zone, (DstTzInfo,), dict( zone=zone, _utc_transition_times=transitions, _transition_info=transition_info)) return cls() if __name__ == '__main__': import os.path from pprint import pprint base = os.path.join(os.path.dirname(__file__), 'zoneinfo') tz = build_tzinfo('Australia/Melbourne', open(os.path.join(base,'Australia','Melbourne'), 'rb')) tz = build_tzinfo('US/Eastern', open(os.path.join(base,'US','Eastern'), 'rb')) pprint(tz._utc_transition_times) #print tz.asPython(4) #print tz.transitions_mapping
4,869
Python
.py
119
30.882353
77
0.570795
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,971
lazy.py
evilhero_mylar/lib/pytz/lazy.py
from threading import RLock try: from UserDict import DictMixin except ImportError: from collections import Mapping as DictMixin # With lazy loading, we might end up with multiple threads triggering # it at the same time. We need a lock. _fill_lock = RLock() class LazyDict(DictMixin): """Dictionary populated on first use.""" data = None def __getitem__(self, key): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return self.data[key.upper()] def __contains__(self, key): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return key in self.data def __iter__(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return iter(self.data) def __len__(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return len(self.data) def keys(self): if self.data is None: _fill_lock.acquire() try: if self.data is None: self._fill() finally: _fill_lock.release() return self.data.keys() class LazyList(list): """List populated on first use.""" _props = [ '__str__', '__repr__', '__unicode__', '__hash__', '__sizeof__', '__cmp__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', 'append', 'count', 'index', 'extend', 'insert', 'pop', 'remove', 'reverse', 'sort', '__add__', '__radd__', '__iadd__', '__mul__', '__rmul__', '__imul__', '__contains__', '__len__', '__nonzero__', '__getitem__', '__setitem__', '__delitem__', '__iter__', '__reversed__', '__getslice__', '__setslice__', '__delslice__'] def __new__(cls, fill_iter=None): if fill_iter is None: return list() # We need a new class as we will be dynamically messing with its # methods. 
class LazyList(list): pass fill_iter = [fill_iter] def lazy(name): def _lazy(self, *args, **kw): _fill_lock.acquire() try: if len(fill_iter) > 0: list.extend(self, fill_iter.pop()) for method_name in cls._props: delattr(LazyList, method_name) finally: _fill_lock.release() return getattr(list, name)(self, *args, **kw) return _lazy for name in cls._props: setattr(LazyList, name, lazy(name)) new_list = LazyList() return new_list # Not all versions of Python declare the same magic methods. # Filter out properties that don't exist in this version of Python # from the list. LazyList._props = [prop for prop in LazyList._props if hasattr(list, prop)] class LazySet(set): """Set populated on first use.""" _props = ( '__str__', '__repr__', '__unicode__', '__hash__', '__sizeof__', '__cmp__', '__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__', '__contains__', '__len__', '__nonzero__', '__getitem__', '__setitem__', '__delitem__', '__iter__', '__sub__', '__and__', '__xor__', '__or__', '__rsub__', '__rand__', '__rxor__', '__ror__', '__isub__', '__iand__', '__ixor__', '__ior__', 'add', 'clear', 'copy', 'difference', 'difference_update', 'discard', 'intersection', 'intersection_update', 'isdisjoint', 'issubset', 'issuperset', 'pop', 'remove', 'symmetric_difference', 'symmetric_difference_update', 'union', 'update') def __new__(cls, fill_iter=None): if fill_iter is None: return set() class LazySet(set): pass fill_iter = [fill_iter] def lazy(name): def _lazy(self, *args, **kw): _fill_lock.acquire() try: if len(fill_iter) > 0: for i in fill_iter.pop(): set.add(self, i) for method_name in cls._props: delattr(LazySet, method_name) finally: _fill_lock.release() return getattr(set, name)(self, *args, **kw) return _lazy for name in cls._props: setattr(LazySet, name, lazy(name)) new_set = LazySet() return new_set # Not all versions of Python declare the same magic methods. # Filter out properties that don't exist in this version of Python # from the list. 
LazySet._props = [prop for prop in LazySet._props if hasattr(set, prop)]
5,263
Python
.py
138
26.891304
75
0.485574
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,972
__init__.py
evilhero_mylar/lib/pytz/__init__.py
''' datetime.tzinfo timezone definitions generated from the Olson timezone database: ftp://elsie.nci.nih.gov/pub/tz*.tar.gz See the datetime section of the Python Library Reference for information on how to use these modules. ''' # The Olson database is updated several times a year. OLSON_VERSION = '2014j' VERSION = '2014.10' # Switching to pip compatible version numbering. __version__ = VERSION OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling __all__ = [ 'timezone', 'utc', 'country_timezones', 'country_names', 'AmbiguousTimeError', 'InvalidTimeError', 'NonExistentTimeError', 'UnknownTimeZoneError', 'all_timezones', 'all_timezones_set', 'common_timezones', 'common_timezones_set', ] import sys, datetime, os.path, gettext try: from pkg_resources import resource_stream except ImportError: resource_stream = None from pytz.exceptions import AmbiguousTimeError from pytz.exceptions import InvalidTimeError from pytz.exceptions import NonExistentTimeError from pytz.exceptions import UnknownTimeZoneError from pytz.lazy import LazyDict, LazyList, LazySet from pytz.tzinfo import unpickler from pytz.tzfile import build_tzinfo, _byte_string try: unicode except NameError: # Python 3.x # Python 3.x doesn't have unicode(), making writing code # for Python 2.3 and Python 3.x a pain. unicode = str def ascii(s): r""" >>> ascii('Hello') 'Hello' >>> ascii('\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnicodeEncodeError: ... """ s.encode('US-ASCII') # Raise an exception if not ASCII return s # But return the original string - not a byte string. else: # Python 2.x def ascii(s): r""" >>> ascii('Hello') 'Hello' >>> ascii(u'Hello') 'Hello' >>> ascii(u'\N{TRADE MARK SIGN}') #doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... UnicodeEncodeError: ... """ return s.encode('US-ASCII') def open_resource(name): """Open a resource from the zoneinfo subdir for reading. 
Uses the pkg_resources module if available and no standard file found at the calculated location. """ name_parts = name.lstrip('/').split('/') for part in name_parts: if part == os.path.pardir or os.path.sep in part: raise ValueError('Bad path segment: %r' % part) filename = os.path.join(os.path.dirname(__file__), 'zoneinfo', *name_parts) if not os.path.exists(filename) and resource_stream is not None: # http://bugs.launchpad.net/bugs/383171 - we avoid using this # unless absolutely necessary to help when a broken version of # pkg_resources is installed. return resource_stream(__name__, 'zoneinfo/' + name) return open(filename, 'rb') def resource_exists(name): """Return true if the given resource exists""" try: open_resource(name).close() return True except IOError: return False # Enable this when we get some translations? # We want an i18n API that is useful to programs using Python's gettext # module, as well as the Zope3 i18n package. Perhaps we should just provide # the POT file and translations, and leave it up to callers to make use # of them. 
# # t = gettext.translation( # 'pytz', os.path.join(os.path.dirname(__file__), 'locales'), # fallback=True # ) # def _(timezone_name): # """Translate a timezone name using the current locale, returning Unicode""" # return t.ugettext(timezone_name) _tzinfo_cache = {} def timezone(zone): r''' Return a datetime.tzinfo implementation for the given timezone >>> from datetime import datetime, timedelta >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> eastern.zone 'US/Eastern' >>> timezone(unicode('US/Eastern')) is eastern True >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) '2002-10-27 01:10:00 EST (-0500)' Raises UnknownTimeZoneError if passed an unknown zone. >>> try: ... timezone('Asia/Shangri-La') ... except UnknownTimeZoneError: ... print('Unknown') Unknown >>> try: ... timezone(unicode('\N{TRADE MARK SIGN}')) ... except UnknownTimeZoneError: ... print('Unknown') Unknown ''' if zone.upper() == 'UTC': return utc try: zone = ascii(zone) except UnicodeEncodeError: # All valid timezones are ASCII raise UnknownTimeZoneError(zone) zone = _unmunge_zone(zone) if zone not in _tzinfo_cache: if zone in all_timezones_set: fp = open_resource(zone) try: _tzinfo_cache[zone] = build_tzinfo(zone, fp) finally: fp.close() else: raise UnknownTimeZoneError(zone) return _tzinfo_cache[zone] def _unmunge_zone(zone): """Undo the time zone name munging done by older versions of pytz.""" return zone.replace('_plus_', '+').replace('_minus_', '-') ZERO = datetime.timedelta(0) HOUR = datetime.timedelta(hours=1) class UTC(datetime.tzinfo): """UTC Optimized UTC implementation. 
It unpickles using the single module global instance defined beneath this class declaration. """ zone = "UTC" _utcoffset = ZERO _dst = ZERO _tzname = zone def fromutc(self, dt): if dt.tzinfo is None: return self.localize(dt) return super(utc.__class__, self).fromutc(dt) def utcoffset(self, dt): return ZERO def tzname(self, dt): return "UTC" def dst(self, dt): return ZERO def __reduce__(self): return _UTC, () def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def __repr__(self): return "<UTC>" def __str__(self): return "UTC" UTC = utc = UTC() # UTC is a singleton def _UTC(): """Factory function for utc unpickling. Makes sure that unpickling a utc instance always returns the same module global. These examples belong in the UTC class above, but it is obscured; or in the README.txt, but we are not depending on Python 2.4 so integrating the README.txt examples with the unit tests is not trivial. >>> import datetime, pickle >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) >>> naive = dt.replace(tzinfo=None) >>> p = pickle.dumps(dt, 1) >>> naive_p = pickle.dumps(naive, 1) >>> len(p) - len(naive_p) 17 >>> new = pickle.loads(p) >>> new == dt True >>> new is dt False >>> new.tzinfo is dt.tzinfo True >>> utc is UTC is timezone('UTC') True >>> utc is timezone('GMT') False """ return utc _UTC.__safe_for_unpickling__ = True def _p(*args): """Factory function for unpickling pytz tzinfo instances. Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle by shortening the path. 
""" return unpickler(*args) _p.__safe_for_unpickling__ = True class _CountryTimezoneDict(LazyDict): """Map ISO 3166 country code to a list of timezone names commonly used in that country. iso3166_code is the two letter code used to identify the country. >>> def print_list(list_of_strings): ... 'We use a helper so doctests work under Python 2.3 -> 3.x' ... for s in list_of_strings: ... print(s) >>> print_list(country_timezones['nz']) Pacific/Auckland Pacific/Chatham >>> print_list(country_timezones['ch']) Europe/Zurich >>> print_list(country_timezones['CH']) Europe/Zurich >>> print_list(country_timezones[unicode('ch')]) Europe/Zurich >>> print_list(country_timezones['XXX']) Traceback (most recent call last): ... KeyError: 'XXX' Previously, this information was exposed as a function rather than a dictionary. This is still supported:: >>> print_list(country_timezones('nz')) Pacific/Auckland Pacific/Chatham """ def __call__(self, iso3166_code): """Backwards compatibility.""" return self[iso3166_code] def _fill(self): data = {} zone_tab = open_resource('zone.tab') try: for line in zone_tab: line = line.decode('US-ASCII') if line.startswith('#'): continue code, coordinates, zone = line.split(None, 4)[:3] if zone not in all_timezones_set: continue try: data[code].append(zone) except KeyError: data[code] = [zone] self.data = data finally: zone_tab.close() country_timezones = _CountryTimezoneDict() class _CountryNameDict(LazyDict): '''Dictionary proving ISO3166 code -> English name. 
>>> print(country_names['au']) Australia ''' def _fill(self): data = {} zone_tab = open_resource('iso3166.tab') try: for line in zone_tab.readlines(): line = line.decode('US-ASCII') if line.startswith('#'): continue code, name = line.split(None, 1) data[code] = name.strip() self.data = data finally: zone_tab.close() country_names = _CountryNameDict() # Time-zone info based solely on fixed offsets class _FixedOffset(datetime.tzinfo): zone = None # to match the standard pytz API def __init__(self, minutes): if abs(minutes) >= 1440: raise ValueError("absolute offset is too large", minutes) self._minutes = minutes self._offset = datetime.timedelta(minutes=minutes) def utcoffset(self, dt): return self._offset def __reduce__(self): return FixedOffset, (self._minutes, ) def dst(self, dt): return ZERO def tzname(self, dt): return None def __repr__(self): return 'pytz.FixedOffset(%d)' % self._minutes def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime''' if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.replace(tzinfo=self) def FixedOffset(offset, _tzinfos = {}): """return a fixed-offset timezone based off a number of minutes. >>> one = FixedOffset(-330) >>> one pytz.FixedOffset(-330) >>> one.utcoffset(datetime.datetime.now()) datetime.timedelta(-1, 66600) >>> one.dst(datetime.datetime.now()) datetime.timedelta(0) >>> two = FixedOffset(1380) >>> two pytz.FixedOffset(1380) >>> two.utcoffset(datetime.datetime.now()) datetime.timedelta(0, 82800) >>> two.dst(datetime.datetime.now()) datetime.timedelta(0) The datetime.timedelta must be between the range of -1 and 1 day, non-inclusive. >>> FixedOffset(1440) Traceback (most recent call last): ... 
ValueError: ('absolute offset is too large', 1440) >>> FixedOffset(-1440) Traceback (most recent call last): ... ValueError: ('absolute offset is too large', -1440) An offset of 0 is special-cased to return UTC. >>> FixedOffset(0) is UTC True There should always be only one instance of a FixedOffset per timedelta. This should be true for multiple creation calls. >>> FixedOffset(-330) is one True >>> FixedOffset(1380) is two True It should also be true for pickling. >>> import pickle >>> pickle.loads(pickle.dumps(one)) is one True >>> pickle.loads(pickle.dumps(two)) is two True """ if offset == 0: return UTC info = _tzinfos.get(offset) if info is None: # We haven't seen this one before. we need to save it. # Use setdefault to avoid a race condition and make sure we have # only one info = _tzinfos.setdefault(offset, _FixedOffset(offset)) return info FixedOffset.__safe_for_unpickling__ = True def _test(): import doctest, os, sys sys.path.insert(0, os.pardir) import pytz return doctest.testmod(pytz) if __name__ == '__main__': _test() all_timezones = \ ['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 'Africa/Asmera', 'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Juba', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 
'Africa/Timbuktu', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/ComodRivadavia', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Salta', 'America/Argentina/San_Juan', 'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Atka', 'America/Bahia', 'America/Bahia_Banderas', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Buenos_Aires', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Catamarca', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Coral_Harbour', 'America/Cordoba', 'America/Costa_Rica', 'America/Creston', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 'America/Ensenada', 'America/Fort_Wayne', 'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Indianapolis', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Jujuy', 'America/Juneau', 
'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Knox_IN', 'America/Kralendijk', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Louisville', 'America/Lower_Princes', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Matamoros', 'America/Mazatlan', 'America/Mendoza', 'America/Menominee', 'America/Merida', 'America/Metlakatla', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Beulah', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Ojinaga', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Acre', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Rosario', 'America/Santa_Isabel', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Shiprock', 'America/Sitka', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 'America/Tortola', 'America/Vancouver', 'America/Virgin', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Macquarie', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/South_Pole', 'Antarctica/Syowa', 'Antarctica/Troll', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 
'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Ashkhabad', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Calcutta', 'Asia/Chita', 'Asia/Choibalsan', 'Asia/Chongqing', 'Asia/Chungking', 'Asia/Colombo', 'Asia/Dacca', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Harbin', 'Asia/Hebron', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Istanbul', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kashgar', 'Asia/Kathmandu', 'Asia/Katmandu', 'Asia/Khandyga', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macao', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novokuznetsk', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Saigon', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Srednekolymsk', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Tel_Aviv', 'Asia/Thimbu', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ujung_Pandang', 'Asia/Ulaanbaatar', 'Asia/Ulan_Bator', 'Asia/Urumqi', 'Asia/Ust-Nera', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faeroe', 'Atlantic/Faroe', 'Atlantic/Jan_Mayen', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/ACT', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Canberra', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 'Australia/Hobart', 'Australia/LHI', 'Australia/Lindeman', 
'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/NSW', 'Australia/North', 'Australia/Perth', 'Australia/Queensland', 'Australia/South', 'Australia/Sydney', 'Australia/Tasmania', 'Australia/Victoria', 'Australia/West', 'Australia/Yancowinna', 'Brazil/Acre', 'Brazil/DeNoronha', 'Brazil/East', 'Brazil/West', 'CET', 'CST6CDT', 'Canada/Atlantic', 'Canada/Central', 'Canada/East-Saskatchewan', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Canada/Saskatchewan', 'Canada/Yukon', 'Chile/Continental', 'Chile/EasterIsland', 'Cuba', 'EET', 'EST', 'EST5EDT', 'Egypt', 'Eire', 'Etc/GMT', 'Etc/GMT+0', 'Etc/GMT+1', 'Etc/GMT+10', 'Etc/GMT+11', 'Etc/GMT+12', 'Etc/GMT+2', 'Etc/GMT+3', 'Etc/GMT+4', 'Etc/GMT+5', 'Etc/GMT+6', 'Etc/GMT+7', 'Etc/GMT+8', 'Etc/GMT+9', 'Etc/GMT-0', 'Etc/GMT-1', 'Etc/GMT-10', 'Etc/GMT-11', 'Etc/GMT-12', 'Etc/GMT-13', 'Etc/GMT-14', 'Etc/GMT-2', 'Etc/GMT-3', 'Etc/GMT-4', 'Etc/GMT-5', 'Etc/GMT-6', 'Etc/GMT-7', 'Etc/GMT-8', 'Etc/GMT-9', 'Etc/GMT0', 'Etc/Greenwich', 'Etc/UCT', 'Etc/UTC', 'Etc/Universal', 'Etc/Zulu', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 'Europe/Belfast', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Busingen', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Nicosia', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Tiraspol', 'Europe/Uzhgorod', 
'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GB', 'GB-Eire', 'GMT', 'GMT+0', 'GMT-0', 'GMT0', 'Greenwich', 'HST', 'Hongkong', 'Iceland', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Iran', 'Israel', 'Jamaica', 'Japan', 'Kwajalein', 'Libya', 'MET', 'MST', 'MST7MDT', 'Mexico/BajaNorte', 'Mexico/BajaSur', 'Mexico/General', 'NZ', 'NZ-CHAT', 'Navajo', 'PRC', 'PST8PDT', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Bougainville', 'Pacific/Chatham', 'Pacific/Chuuk', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Pohnpei', 'Pacific/Ponape', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Samoa', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Truk', 'Pacific/Wake', 'Pacific/Wallis', 'Pacific/Yap', 'Poland', 'Portugal', 'ROC', 'ROK', 'Singapore', 'Turkey', 'UCT', 'US/Alaska', 'US/Aleutian', 'US/Arizona', 'US/Central', 'US/East-Indiana', 'US/Eastern', 'US/Hawaii', 'US/Indiana-Starke', 'US/Michigan', 'US/Mountain', 'US/Pacific', 'US/Pacific-New', 'US/Samoa', 'UTC', 'Universal', 'W-SU', 'WET', 'Zulu'] all_timezones = LazyList( tz for tz in all_timezones if resource_exists(tz)) all_timezones_set = LazySet(all_timezones) common_timezones = \ ['Africa/Abidjan', 'Africa/Accra', 'Africa/Addis_Ababa', 'Africa/Algiers', 'Africa/Asmara', 
'Africa/Bamako', 'Africa/Bangui', 'Africa/Banjul', 'Africa/Bissau', 'Africa/Blantyre', 'Africa/Brazzaville', 'Africa/Bujumbura', 'Africa/Cairo', 'Africa/Casablanca', 'Africa/Ceuta', 'Africa/Conakry', 'Africa/Dakar', 'Africa/Dar_es_Salaam', 'Africa/Djibouti', 'Africa/Douala', 'Africa/El_Aaiun', 'Africa/Freetown', 'Africa/Gaborone', 'Africa/Harare', 'Africa/Johannesburg', 'Africa/Juba', 'Africa/Kampala', 'Africa/Khartoum', 'Africa/Kigali', 'Africa/Kinshasa', 'Africa/Lagos', 'Africa/Libreville', 'Africa/Lome', 'Africa/Luanda', 'Africa/Lubumbashi', 'Africa/Lusaka', 'Africa/Malabo', 'Africa/Maputo', 'Africa/Maseru', 'Africa/Mbabane', 'Africa/Mogadishu', 'Africa/Monrovia', 'Africa/Nairobi', 'Africa/Ndjamena', 'Africa/Niamey', 'Africa/Nouakchott', 'Africa/Ouagadougou', 'Africa/Porto-Novo', 'Africa/Sao_Tome', 'Africa/Tripoli', 'Africa/Tunis', 'Africa/Windhoek', 'America/Adak', 'America/Anchorage', 'America/Anguilla', 'America/Antigua', 'America/Araguaina', 'America/Argentina/Buenos_Aires', 'America/Argentina/Catamarca', 'America/Argentina/Cordoba', 'America/Argentina/Jujuy', 'America/Argentina/La_Rioja', 'America/Argentina/Mendoza', 'America/Argentina/Rio_Gallegos', 'America/Argentina/Salta', 'America/Argentina/San_Juan', 'America/Argentina/San_Luis', 'America/Argentina/Tucuman', 'America/Argentina/Ushuaia', 'America/Aruba', 'America/Asuncion', 'America/Atikokan', 'America/Bahia', 'America/Bahia_Banderas', 'America/Barbados', 'America/Belem', 'America/Belize', 'America/Blanc-Sablon', 'America/Boa_Vista', 'America/Bogota', 'America/Boise', 'America/Cambridge_Bay', 'America/Campo_Grande', 'America/Cancun', 'America/Caracas', 'America/Cayenne', 'America/Cayman', 'America/Chicago', 'America/Chihuahua', 'America/Costa_Rica', 'America/Creston', 'America/Cuiaba', 'America/Curacao', 'America/Danmarkshavn', 'America/Dawson', 'America/Dawson_Creek', 'America/Denver', 'America/Detroit', 'America/Dominica', 'America/Edmonton', 'America/Eirunepe', 'America/El_Salvador', 
'America/Fortaleza', 'America/Glace_Bay', 'America/Godthab', 'America/Goose_Bay', 'America/Grand_Turk', 'America/Grenada', 'America/Guadeloupe', 'America/Guatemala', 'America/Guayaquil', 'America/Guyana', 'America/Halifax', 'America/Havana', 'America/Hermosillo', 'America/Indiana/Indianapolis', 'America/Indiana/Knox', 'America/Indiana/Marengo', 'America/Indiana/Petersburg', 'America/Indiana/Tell_City', 'America/Indiana/Vevay', 'America/Indiana/Vincennes', 'America/Indiana/Winamac', 'America/Inuvik', 'America/Iqaluit', 'America/Jamaica', 'America/Juneau', 'America/Kentucky/Louisville', 'America/Kentucky/Monticello', 'America/Kralendijk', 'America/La_Paz', 'America/Lima', 'America/Los_Angeles', 'America/Lower_Princes', 'America/Maceio', 'America/Managua', 'America/Manaus', 'America/Marigot', 'America/Martinique', 'America/Matamoros', 'America/Mazatlan', 'America/Menominee', 'America/Merida', 'America/Metlakatla', 'America/Mexico_City', 'America/Miquelon', 'America/Moncton', 'America/Monterrey', 'America/Montevideo', 'America/Montreal', 'America/Montserrat', 'America/Nassau', 'America/New_York', 'America/Nipigon', 'America/Nome', 'America/Noronha', 'America/North_Dakota/Beulah', 'America/North_Dakota/Center', 'America/North_Dakota/New_Salem', 'America/Ojinaga', 'America/Panama', 'America/Pangnirtung', 'America/Paramaribo', 'America/Phoenix', 'America/Port-au-Prince', 'America/Port_of_Spain', 'America/Porto_Velho', 'America/Puerto_Rico', 'America/Rainy_River', 'America/Rankin_Inlet', 'America/Recife', 'America/Regina', 'America/Resolute', 'America/Rio_Branco', 'America/Santa_Isabel', 'America/Santarem', 'America/Santiago', 'America/Santo_Domingo', 'America/Sao_Paulo', 'America/Scoresbysund', 'America/Sitka', 'America/St_Barthelemy', 'America/St_Johns', 'America/St_Kitts', 'America/St_Lucia', 'America/St_Thomas', 'America/St_Vincent', 'America/Swift_Current', 'America/Tegucigalpa', 'America/Thule', 'America/Thunder_Bay', 'America/Tijuana', 'America/Toronto', 
'America/Tortola', 'America/Vancouver', 'America/Whitehorse', 'America/Winnipeg', 'America/Yakutat', 'America/Yellowknife', 'Antarctica/Casey', 'Antarctica/Davis', 'Antarctica/DumontDUrville', 'Antarctica/Macquarie', 'Antarctica/Mawson', 'Antarctica/McMurdo', 'Antarctica/Palmer', 'Antarctica/Rothera', 'Antarctica/Syowa', 'Antarctica/Troll', 'Antarctica/Vostok', 'Arctic/Longyearbyen', 'Asia/Aden', 'Asia/Almaty', 'Asia/Amman', 'Asia/Anadyr', 'Asia/Aqtau', 'Asia/Aqtobe', 'Asia/Ashgabat', 'Asia/Baghdad', 'Asia/Bahrain', 'Asia/Baku', 'Asia/Bangkok', 'Asia/Beirut', 'Asia/Bishkek', 'Asia/Brunei', 'Asia/Chita', 'Asia/Choibalsan', 'Asia/Colombo', 'Asia/Damascus', 'Asia/Dhaka', 'Asia/Dili', 'Asia/Dubai', 'Asia/Dushanbe', 'Asia/Gaza', 'Asia/Hebron', 'Asia/Ho_Chi_Minh', 'Asia/Hong_Kong', 'Asia/Hovd', 'Asia/Irkutsk', 'Asia/Jakarta', 'Asia/Jayapura', 'Asia/Jerusalem', 'Asia/Kabul', 'Asia/Kamchatka', 'Asia/Karachi', 'Asia/Kathmandu', 'Asia/Khandyga', 'Asia/Kolkata', 'Asia/Krasnoyarsk', 'Asia/Kuala_Lumpur', 'Asia/Kuching', 'Asia/Kuwait', 'Asia/Macau', 'Asia/Magadan', 'Asia/Makassar', 'Asia/Manila', 'Asia/Muscat', 'Asia/Nicosia', 'Asia/Novokuznetsk', 'Asia/Novosibirsk', 'Asia/Omsk', 'Asia/Oral', 'Asia/Phnom_Penh', 'Asia/Pontianak', 'Asia/Pyongyang', 'Asia/Qatar', 'Asia/Qyzylorda', 'Asia/Rangoon', 'Asia/Riyadh', 'Asia/Sakhalin', 'Asia/Samarkand', 'Asia/Seoul', 'Asia/Shanghai', 'Asia/Singapore', 'Asia/Srednekolymsk', 'Asia/Taipei', 'Asia/Tashkent', 'Asia/Tbilisi', 'Asia/Tehran', 'Asia/Thimphu', 'Asia/Tokyo', 'Asia/Ulaanbaatar', 'Asia/Urumqi', 'Asia/Ust-Nera', 'Asia/Vientiane', 'Asia/Vladivostok', 'Asia/Yakutsk', 'Asia/Yekaterinburg', 'Asia/Yerevan', 'Atlantic/Azores', 'Atlantic/Bermuda', 'Atlantic/Canary', 'Atlantic/Cape_Verde', 'Atlantic/Faroe', 'Atlantic/Madeira', 'Atlantic/Reykjavik', 'Atlantic/South_Georgia', 'Atlantic/St_Helena', 'Atlantic/Stanley', 'Australia/Adelaide', 'Australia/Brisbane', 'Australia/Broken_Hill', 'Australia/Currie', 'Australia/Darwin', 'Australia/Eucla', 
'Australia/Hobart', 'Australia/Lindeman', 'Australia/Lord_Howe', 'Australia/Melbourne', 'Australia/Perth', 'Australia/Sydney', 'Canada/Atlantic', 'Canada/Central', 'Canada/Eastern', 'Canada/Mountain', 'Canada/Newfoundland', 'Canada/Pacific', 'Europe/Amsterdam', 'Europe/Andorra', 'Europe/Athens', 'Europe/Belgrade', 'Europe/Berlin', 'Europe/Bratislava', 'Europe/Brussels', 'Europe/Bucharest', 'Europe/Budapest', 'Europe/Busingen', 'Europe/Chisinau', 'Europe/Copenhagen', 'Europe/Dublin', 'Europe/Gibraltar', 'Europe/Guernsey', 'Europe/Helsinki', 'Europe/Isle_of_Man', 'Europe/Istanbul', 'Europe/Jersey', 'Europe/Kaliningrad', 'Europe/Kiev', 'Europe/Lisbon', 'Europe/Ljubljana', 'Europe/London', 'Europe/Luxembourg', 'Europe/Madrid', 'Europe/Malta', 'Europe/Mariehamn', 'Europe/Minsk', 'Europe/Monaco', 'Europe/Moscow', 'Europe/Oslo', 'Europe/Paris', 'Europe/Podgorica', 'Europe/Prague', 'Europe/Riga', 'Europe/Rome', 'Europe/Samara', 'Europe/San_Marino', 'Europe/Sarajevo', 'Europe/Simferopol', 'Europe/Skopje', 'Europe/Sofia', 'Europe/Stockholm', 'Europe/Tallinn', 'Europe/Tirane', 'Europe/Uzhgorod', 'Europe/Vaduz', 'Europe/Vatican', 'Europe/Vienna', 'Europe/Vilnius', 'Europe/Volgograd', 'Europe/Warsaw', 'Europe/Zagreb', 'Europe/Zaporozhye', 'Europe/Zurich', 'GMT', 'Indian/Antananarivo', 'Indian/Chagos', 'Indian/Christmas', 'Indian/Cocos', 'Indian/Comoro', 'Indian/Kerguelen', 'Indian/Mahe', 'Indian/Maldives', 'Indian/Mauritius', 'Indian/Mayotte', 'Indian/Reunion', 'Pacific/Apia', 'Pacific/Auckland', 'Pacific/Bougainville', 'Pacific/Chatham', 'Pacific/Chuuk', 'Pacific/Easter', 'Pacific/Efate', 'Pacific/Enderbury', 'Pacific/Fakaofo', 'Pacific/Fiji', 'Pacific/Funafuti', 'Pacific/Galapagos', 'Pacific/Gambier', 'Pacific/Guadalcanal', 'Pacific/Guam', 'Pacific/Honolulu', 'Pacific/Johnston', 'Pacific/Kiritimati', 'Pacific/Kosrae', 'Pacific/Kwajalein', 'Pacific/Majuro', 'Pacific/Marquesas', 'Pacific/Midway', 'Pacific/Nauru', 'Pacific/Niue', 'Pacific/Norfolk', 'Pacific/Noumea', 
'Pacific/Pago_Pago', 'Pacific/Palau', 'Pacific/Pitcairn', 'Pacific/Pohnpei', 'Pacific/Port_Moresby', 'Pacific/Rarotonga', 'Pacific/Saipan', 'Pacific/Tahiti', 'Pacific/Tarawa', 'Pacific/Tongatapu', 'Pacific/Wake', 'Pacific/Wallis', 'US/Alaska', 'US/Arizona', 'US/Central', 'US/Eastern', 'US/Hawaii', 'US/Mountain', 'US/Pacific', 'UTC'] common_timezones = LazyList( tz for tz in common_timezones if tz in all_timezones) common_timezones_set = LazySet(common_timezones)
34,011
Python
.py
1,404
20.87963
81
0.683302
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,973
tzinfo.py
evilhero_mylar/lib/pytz/tzinfo.py
'''Base classes and helpers for building zone specific tzinfo classes''' from datetime import datetime, timedelta, tzinfo from bisect import bisect_right try: set except NameError: from sets import Set as set import pytz from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError __all__ = [] _timedelta_cache = {} def memorized_timedelta(seconds): '''Create only one instance of each distinct timedelta''' try: return _timedelta_cache[seconds] except KeyError: delta = timedelta(seconds=seconds) _timedelta_cache[seconds] = delta return delta _epoch = datetime.utcfromtimestamp(0) _datetime_cache = {0: _epoch} def memorized_datetime(seconds): '''Create only one instance of each distinct datetime''' try: return _datetime_cache[seconds] except KeyError: # NB. We can't just do datetime.utcfromtimestamp(seconds) as this # fails with negative values under Windows (Bug #90096) dt = _epoch + timedelta(seconds=seconds) _datetime_cache[seconds] = dt return dt _ttinfo_cache = {} def memorized_ttinfo(*args): '''Create only one instance of each distinct tuple''' try: return _ttinfo_cache[args] except KeyError: ttinfo = ( memorized_timedelta(args[0]), memorized_timedelta(args[1]), args[2] ) _ttinfo_cache[args] = ttinfo return ttinfo _notime = memorized_timedelta(0) def _to_seconds(td): '''Convert a timedelta to seconds''' return td.seconds + td.days * 24 * 60 * 60 class BaseTzInfo(tzinfo): # Overridden in subclass _utcoffset = None _tzname = None zone = None def __str__(self): return self.zone class StaticTzInfo(BaseTzInfo): '''A timezone that has a constant offset from UTC These timezones are rare, as most locations have changed their offset at some point in their history ''' def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if dt.tzinfo is not None and dt.tzinfo is not self: raise ValueError('fromutc: dt.tzinfo is not self') return (dt + self._utcoffset).replace(tzinfo=self) def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset is_dst is ignored 
for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return _notime def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname is_dst is ignored for StaticTzInfo, and exists only to retain compatibility with DstTzInfo. ''' return self._tzname def localize(self, dt, is_dst=False): '''Convert naive time to local time''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') return dt.replace(tzinfo=self) def normalize(self, dt, is_dst=False): '''Correct the timezone information on the given datetime. This is normally a no-op, as StaticTzInfo timezones never have ambiguous cases to correct: >>> from pytz import timezone >>> gmt = timezone('GMT') >>> isinstance(gmt, StaticTzInfo) True >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt) >>> gmt.normalize(dt) is dt True The supported method of converting between timezones is to use datetime.astimezone(). Currently normalize() also works: >>> la = timezone('America/Los_Angeles') >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> gmt.normalize(dt).strftime(fmt) '2011-05-07 08:02:03 GMT (+0000)' ''' if dt.tzinfo is self: return dt if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') return dt.astimezone(self) def __repr__(self): return '<StaticTzInfo %r>' % (self.zone,) def __reduce__(self): # Special pickle to zone remains a singleton and to cope with # database changes. return pytz._p, (self.zone,) class DstTzInfo(BaseTzInfo): '''A timezone that has a variable offset from UTC The offset might change if daylight saving time comes into effect, or at a point in history when the region decides to change their timezone definition. 
''' # Overridden in subclass _utc_transition_times = None # Sorted list of DST transition times in UTC _transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding # to _utc_transition_times entries zone = None # Set in __init__ _tzinfos = None _dst = None # DST offset def __init__(self, _inf=None, _tzinfos=None): if _inf: self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = _inf else: _tzinfos = {} self._tzinfos = _tzinfos self._utcoffset, self._dst, self._tzname = self._transition_info[0] _tzinfos[self._transition_info[0]] = self for inf in self._transition_info[1:]: if inf not in _tzinfos: _tzinfos[inf] = self.__class__(inf, _tzinfos) def fromutc(self, dt): '''See datetime.tzinfo.fromutc''' if (dt.tzinfo is not None and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos): raise ValueError('fromutc: dt.tzinfo is not self') dt = dt.replace(tzinfo=None) idx = max(0, bisect_right(self._utc_transition_times, dt) - 1) inf = self._transition_info[idx] return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf]) def normalize(self, dt): '''Correct the timezone information on the given datetime If date arithmetic crosses DST boundaries, the tzinfo is not magically adjusted. This method normalizes the tzinfo to the correct one. To test, first we need to do some setup >>> from pytz import timezone >>> utc = timezone('UTC') >>> eastern = timezone('US/Eastern') >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' We next create a datetime right on an end-of-DST transition point, the instant when the wallclocks are wound back one hour. >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) >>> loc_dt = utc_dt.astimezone(eastern) >>> loc_dt.strftime(fmt) '2002-10-27 01:00:00 EST (-0500)' Now, if we subtract a few minutes from it, note that the timezone information has not changed. 
>>> before = loc_dt - timedelta(minutes=10) >>> before.strftime(fmt) '2002-10-27 00:50:00 EST (-0500)' But we can fix that by calling the normalize method >>> before = eastern.normalize(before) >>> before.strftime(fmt) '2002-10-27 01:50:00 EDT (-0400)' The supported method of converting between timezones is to use datetime.astimezone(). Currently, normalize() also works: >>> th = timezone('Asia/Bangkok') >>> am = timezone('Europe/Amsterdam') >>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3)) >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> am.normalize(dt).strftime(fmt) '2011-05-06 20:02:03 CEST (+0200)' ''' if dt.tzinfo is None: raise ValueError('Naive time - no tzinfo set') # Convert dt in localtime to UTC offset = dt.tzinfo._utcoffset dt = dt.replace(tzinfo=None) dt = dt - offset # convert it back, and return it return self.fromutc(dt) def localize(self, dt, is_dst=False): '''Convert naive time to local time. This method should be used to construct localtimes, rather than passing a tzinfo argument to a datetime constructor. is_dst is used to determine the correct timezone in the ambigous period at the end of daylight saving time. >>> from pytz import timezone >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' >>> amdam = timezone('Europe/Amsterdam') >>> dt = datetime(2004, 10, 31, 2, 0, 0) >>> loc_dt1 = amdam.localize(dt, is_dst=True) >>> loc_dt2 = amdam.localize(dt, is_dst=False) >>> loc_dt1.strftime(fmt) '2004-10-31 02:00:00 CEST (+0200)' >>> loc_dt2.strftime(fmt) '2004-10-31 02:00:00 CET (+0100)' >>> str(loc_dt2 - loc_dt1) '1:00:00' Use is_dst=None to raise an AmbiguousTimeError for ambiguous times at the end of daylight saving time >>> try: ... loc_dt1 = amdam.localize(dt, is_dst=None) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous is_dst defaults to False >>> amdam.localize(dt) == amdam.localize(dt, False) True is_dst is also used to determine the correct timezone in the wallclock times jumped over at the start of daylight saving time. 
>>> pacific = timezone('US/Pacific') >>> dt = datetime(2008, 3, 9, 2, 0, 0) >>> ploc_dt1 = pacific.localize(dt, is_dst=True) >>> ploc_dt2 = pacific.localize(dt, is_dst=False) >>> ploc_dt1.strftime(fmt) '2008-03-09 02:00:00 PDT (-0700)' >>> ploc_dt2.strftime(fmt) '2008-03-09 02:00:00 PST (-0800)' >>> str(ploc_dt2 - ploc_dt1) '1:00:00' Use is_dst=None to raise a NonExistentTimeError for these skipped times. >>> try: ... loc_dt1 = pacific.localize(dt, is_dst=None) ... except NonExistentTimeError: ... print('Non-existent') Non-existent ''' if dt.tzinfo is not None: raise ValueError('Not naive datetime (tzinfo is already set)') # Find the two best possibilities. possible_loc_dt = set() for delta in [timedelta(days=-1), timedelta(days=1)]: loc_dt = dt + delta idx = max(0, bisect_right( self._utc_transition_times, loc_dt) - 1) inf = self._transition_info[idx] tzinfo = self._tzinfos[inf] loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo)) if loc_dt.replace(tzinfo=None) == dt: possible_loc_dt.add(loc_dt) if len(possible_loc_dt) == 1: return possible_loc_dt.pop() # If there are no possibly correct timezones, we are attempting # to convert a time that never happened - the time period jumped # during the start-of-DST transition period. if len(possible_loc_dt) == 0: # If we refuse to guess, raise an exception. if is_dst is None: raise NonExistentTimeError(dt) # If we are forcing the pre-DST side of the DST transition, we # obtain the correct timezone by winding the clock forward a few # hours. elif is_dst: return self.localize( dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6) # If we are forcing the post-DST side of the DST transition, we # obtain the correct timezone by winding the clock back. else: return self.localize( dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6) # If we get this far, we have multiple possible timezones - this # is an ambiguous case occuring during the end-of-DST transition. 
# If told to be strict, raise an exception since we have an # ambiguous case if is_dst is None: raise AmbiguousTimeError(dt) # Filter out the possiblilities that don't match the requested # is_dst filtered_possible_loc_dt = [ p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst ] # Hopefully we only have one possibility left. Return it. if len(filtered_possible_loc_dt) == 1: return filtered_possible_loc_dt[0] if len(filtered_possible_loc_dt) == 0: filtered_possible_loc_dt = list(possible_loc_dt) # If we get this far, we have in a wierd timezone transition # where the clocks have been wound back but is_dst is the same # in both (eg. Europe/Warsaw 1915 when they switched to CET). # At this point, we just have to guess unless we allow more # hints to be passed in (such as the UTC offset or abbreviation), # but that is just getting silly. # # Choose the earliest (by UTC) applicable timezone if is_dst=True # Choose the latest (by UTC) applicable timezone if is_dst=False # i.e., behave like end-of-DST transition dates = {} # utc -> local for local_dt in filtered_possible_loc_dt: utc_time = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset assert utc_time not in dates dates[utc_time] = local_dt return dates[[min, max][not is_dst](dates)] def utcoffset(self, dt, is_dst=None): '''See datetime.tzinfo.utcoffset The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.utcoffset(ambiguous, is_dst=False) datetime.timedelta(-1, 73800) >>> tz.utcoffset(ambiguous, is_dst=True) datetime.timedelta(-1, 77400) >>> try: ... tz.utcoffset(ambiguous) ... except AmbiguousTimeError: ... 
print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._utcoffset else: return self._utcoffset def dst(self, dt, is_dst=None): '''See datetime.tzinfo.dst The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> tz.dst(normal) datetime.timedelta(0, 3600) >>> tz.dst(normal, is_dst=False) datetime.timedelta(0, 3600) >>> tz.dst(normal, is_dst=True) datetime.timedelta(0, 3600) >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.dst(ambiguous, is_dst=False) datetime.timedelta(0) >>> tz.dst(ambiguous, is_dst=True) datetime.timedelta(0, 3600) >>> try: ... tz.dst(ambiguous) ... except AmbiguousTimeError: ... print('Ambiguous') Ambiguous ''' if dt is None: return None elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._dst else: return self._dst def tzname(self, dt, is_dst=None): '''See datetime.tzinfo.tzname The is_dst parameter may be used to remove ambiguity during DST transitions. >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> normal = datetime(2009, 9, 1) >>> tz.tzname(normal) 'NDT' >>> tz.tzname(normal, is_dst=False) 'NDT' >>> tz.tzname(normal, is_dst=True) 'NDT' >>> ambiguous = datetime(2009, 10, 31, 23, 30) >>> tz.tzname(ambiguous, is_dst=False) 'NST' >>> tz.tzname(ambiguous, is_dst=True) 'NDT' >>> try: ... tz.tzname(ambiguous) ... except AmbiguousTimeError: ... 
print('Ambiguous') Ambiguous ''' if dt is None: return self.zone elif dt.tzinfo is not self: dt = self.localize(dt, is_dst) return dt.tzinfo._tzname else: return self._tzname def __repr__(self): if self._dst: dst = 'DST' else: dst = 'STD' if self._utcoffset > _notime: return '<DstTzInfo %r %s+%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) else: return '<DstTzInfo %r %s%s %s>' % ( self.zone, self._tzname, self._utcoffset, dst ) def __reduce__(self): # Special pickle to zone remains a singleton and to cope with # database changes. return pytz._p, ( self.zone, _to_seconds(self._utcoffset), _to_seconds(self._dst), self._tzname ) def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None): """Factory function for unpickling pytz tzinfo instances. This is shared for both StaticTzInfo and DstTzInfo instances, because database changes could cause a zones implementation to switch between these two base classes and we can't break pickles on a pytz version upgrade. """ # Raises a KeyError if zone no longer exists, which should never happen # and would be a bug. tz = pytz.timezone(zone) # A StaticTzInfo - just return it if utcoffset is None: return tz # This pickle was created from a DstTzInfo. We need to # determine which of the list of tzinfo instances for this zone # to use in order to restore the state of any datetime instances using # it correctly. utcoffset = memorized_timedelta(utcoffset) dstoffset = memorized_timedelta(dstoffset) try: return tz._tzinfos[(utcoffset, dstoffset, tzname)] except KeyError: # The particular state requested in this timezone no longer exists. # This indicates a corrupt pickle, or the timezone database has been # corrected violently enough to make this particular # (utcoffset,dstoffset) no longer exist in the zone, or the # abbreviation has been changed. pass # See if we can find an entry differing only by tzname. 
Abbreviations # get changed from the initial guess by the database maintainers to # match reality when this information is discovered. for localized_tz in tz._tzinfos.values(): if (localized_tz._utcoffset == utcoffset and localized_tz._dst == dstoffset): return localized_tz # This (utcoffset, dstoffset) information has been removed from the # zone. Add it back. This might occur when the database maintainers have # corrected incorrect information. datetime instances using this # incorrect information will continue to do so, exactly as they were # before being pickled. This is purely an overly paranoid safety net - I # doubt this will ever been needed in real life. inf = (utcoffset, dstoffset, tzname) tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos) return tz._tzinfos[inf]
19,368
Python
.py
461
32.885033
81
0.603382
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,974
exceptions.py
evilhero_mylar/lib/pytz/exceptions.py
"""Custom exceptions raised by pytz."""

__all__ = [
    'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
    'NonExistentTimeError',
]


class UnknownTimeZoneError(KeyError):
    """Raised when pytz is asked for a timezone it does not know about.

    Subclasses KeyError (rather than LookupError directly) for backwards
    compatibility with code that relied on the undocumented behaviour of
    earlier pytz releases.

    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True
    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    """


class InvalidTimeError(Exception):
    """Base class for exceptions about impossible or ambiguous wallclock times."""


class AmbiguousTimeError(InvalidTimeError):
    """Raised when a wallclock time is ambiguous.

    At the end of a DST transition the clocks are set back, so a given
    wallclock time occurs twice (once before the change, once after).
    Unless told which occurrence is meant, pytz raises this error; see
    DstTzInfo.normalize() for more information.
    """


class NonExistentTimeError(InvalidTimeError):
    """Raised when a wallclock time never existed.

    At the start of a DST transition the clocks jump forward, so the
    wallclock instants that were skipped over never occur.
    """
1,333
Python
.py
33
35.909091
78
0.754086
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,975
reference.py
evilhero_mylar/lib/pytz/reference.py
'''
Reference tzinfo implementations from the Python docs.
Used for testing against as they are only correct for the years
1987 to 2006. Do not use these for real code.
'''

import time as _time
from datetime import tzinfo, timedelta, datetime

from pytz import utc, UTC, HOUR, ZERO


class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC.

    Note that FixedOffset(0, "UTC") is a different way to build a
    UTC tzinfo object.
    """

    def __init__(self, offset, name):
        # offset: minutes east of UTC; name: the string tzname() returns.
        self.__offset = timedelta(minutes=offset)
        self.__name = name

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        # Fixed-offset zones never observe DST.
        return ZERO


# Capture the platform's idea of local time, frozen at import time.
STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    DSTOFFSET = STDOFFSET

DSTDIFF = DSTOFFSET - STDOFFSET


class LocalTimezone(tzinfo):
    """A tzinfo capturing the platform's idea of local time."""

    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip through mktime()/localtime() so the C library decides
        # whether DST is in effect for this naive local datetime.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0

Local = LocalTimezone()


# A complete implementation of current DST rules for major US time zones.

def first_sunday_on_or_after(dt):
    """Return the first Sunday on or after *dt* (time of day preserved)."""
    days_to_go = 6 - dt.weekday()
    if days_to_go:
        dt += timedelta(days_to_go)
    return dt


# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct.
# which is the first Sunday on or after Oct 25.
DSTEND = datetime(1, 10, 25, 1)


class USTimeZone(tzinfo):
    """US timezone with the 1987-2006 DST rules: DST runs from the first
    Sunday in April (DSTSTART) to the first Sunday on or after Oct 25
    (DSTEND). Only correct for those years; reference/testing use only."""

    def __init__(self, hours, reprname, stdname, dstname):
        # hours: standard-time offset east of UTC (negative for US zones).
        # reprname: text for __repr__; stdname/dstname: tzname() strings.
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname

    def __repr__(self):
        return self.reprname

    def tzname(self, dt):
        # dst() returns a non-zero (truthy) timedelta while DST is in effect.
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname

    def utcoffset(self, dt):
        # Total offset = standard offset plus the current DST adjustment.
        return self.stdoffset + self.dst(dt)

    def dst(self, dt):
        """Return the DST adjustment (HOUR during DST, else ZERO) for *dt*."""
        if dt is None or dt.tzinfo is None:
            # An exception may be sensible here, in one or both cases.
            # It depends on how you want to treat them.  The default
            # fromutc() implementation (called by the default astimezone()
            # implementation) passes a datetime with dt.tzinfo is self.
            return ZERO
        assert dt.tzinfo is self

        # Find first Sunday in April & the last in October.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))

        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO


Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
3,649
Python
.py
97
30.989691
76
0.641965
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,976
comicvinetalker.py
evilhero_mylar/lib/comictaggerlib/comicvinetalker.py
"""A python class to manage communication with Comic Vine's REST API""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import urllib2 import urllib import re import time import datetime import sys import os from bs4 import BeautifulSoup try: from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest from PyQt4.QtCore import QUrl, pyqtSignal, QObject, QByteArray except ImportError: # No Qt, so define a few dummy QObjects to help us compile class QObject(): def __init__(self, *args): pass class pyqtSignal(): def __init__(self, *args): pass def emit(a, b, c): pass import ctversion import utils from comicvinecacher import ComicVineCacher from genericmetadata import GenericMetadata from issuestring import IssueString from settings import ComicTaggerSettings try: import requests except: try: lib_path = os.path.join(ComicTaggerSettings.baseDir(), '..') sys.path.append(lib_path) import requests except ImportError: print "Unable to use requests module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting." class CVTypeID: Volume = "4050" Issue = "4000" class ComicVineTalkerException(Exception): Unknown = -1 Network = -2 InvalidKey = 100 RateLimit = 107 def __init__(self, code=-1, desc=""): self.desc = desc self.code = code def __str__(self): if (self.code == ComicVineTalkerException.Unknown or self.code == ComicVineTalkerException.Network): return self.desc else: return "CV error #{0}: [{1}]. 
\n".format(self.code, self.desc) class ComicVineTalker(QObject): logo_url = "http://static.comicvine.com/bundles/comicvinesite/images/logo.png" api_key = "" @staticmethod def getRateLimitMessage(): if ComicVineTalker.api_key == "": return "Comic Vine rate limit exceeded. You should configue your own Comic Vine API key." else: return "Comic Vine rate limit exceeded. Please wait a bit." def __init__(self): QObject.__init__(self) self.api_base_url = "http://comicvine.gamespot.com/api" self.wait_for_rate_limit = False # key that is registered to comictagger default_api_key = '27431e6787042105bd3e47e169a624521f89f3a4' if ComicVineTalker.api_key == "": self.api_key = default_api_key else: self.api_key = ComicVineTalker.api_key self.cv_headers = {'User-Agent': 'ComicTagger ' + str(ctversion.version) + ' [' + ctversion.fork + ' / ' + ctversion.fork_tag + ']'} self.log_func = None def setLogFunc(self, log_func): self.log_func = log_func def writeLog(self, text): if self.log_func is None: # sys.stdout.write(text.encode(errors='replace')) # sys.stdout.flush() print >> sys.stderr, text else: self.log_func(text) def parseDateStr(self, date_str): day = None month = None year = None if date_str is not None: parts = date_str.split('-') year = parts[0] if len(parts) > 1: month = parts[1] if len(parts) > 2: day = parts[2] return day, month, year def testKey(self, key): test_url = self.api_base_url + "/issue/1/?api_key=" + \ key + "&format=json&field_list=name" r = requests.get(test_url, headers=self.cv_headers) cv_response = r.json() # Bogus request, but if the key is wrong, you get error 100: "Invalid # API Key" return cv_response['status_code'] != 100 """ Get the contect from the CV server. If we're in "wait mode" and status code is a rate limit error sleep for a bit and retry. 
""" def getCVContent(self, url): total_time_waited = 0 limit_wait_time = 1 counter = 0 wait_times = [1, 2, 3, 4] while True: cv_response = self.getUrlContent(url) if self.wait_for_rate_limit and cv_response[ 'status_code'] == ComicVineTalkerException.RateLimit: self.writeLog( "Rate limit encountered. Waiting for {0} minutes\n".format(limit_wait_time)) time.sleep(limit_wait_time * 60) total_time_waited += limit_wait_time limit_wait_time = wait_times[counter] if counter < 3: counter += 1 # don't wait much more than 20 minutes if total_time_waited < 20: continue if cv_response['status_code'] != 1: self.writeLog( "Comic Vine query failed with error #{0}: [{1}]. \n".format( cv_response['status_code'], cv_response['error'])) raise ComicVineTalkerException( cv_response['status_code'], cv_response['error']) else: # it's all good break return cv_response def getUrlContent(self, url): # connect to server: # if there is a 500 error, try a few more times before giving up # any other error, just bail # print "ATB---", url for tries in range(3): try: r = requests.get(url, headers=self.cv_headers) return r.json() except Exception as e: ecode = type(e).__name__ if ecode == 500: self.writeLog("Try #{0}: ".format(tries + 1)) time.sleep(1) self.writeLog(str(e) + "\n") if ecode != 500: break except Exception as e: self.writeLog(str(e) + "\n") raise ComicVineTalkerException( ComicVineTalkerException.Network, "Network Error!") raise ComicVineTalkerException( ComicVineTalkerException.Unknown, "Error on Comic Vine server") def searchForSeries(self, series_name, callback=None, refresh_cache=False): # remove cruft from the search string series_name = utils.removearticles(series_name).lower().strip() # before we search online, look in our cache, since we might have # done this same search recently cvc = ComicVineCacher() if not refresh_cache: cached_search_results = cvc.get_search_results(series_name) if len(cached_search_results) > 0: return cached_search_results original_series_name = 
series_name # We need to make the series name into an "AND"ed query list query_word_list = series_name.split() and_list = ['AND'] * (len(query_word_list) - 1) and_list.append('') # zipper up the two lists query_list = zip(query_word_list, and_list) # flatten the list query_list = [item for sublist in query_list for item in sublist] # convert back to a string query_string = " ".join(query_list).strip() # print "Query string = ", query_string query_string = urllib.quote_plus(query_string.encode("utf-8")) search_url = self.api_base_url + "/search/?api_key=" + self.api_key + "&format=json&resources=volume&query=" + \ query_string + \ "&field_list=name,id,start_year,publisher,image,description,count_of_issues" cv_response = self.getCVContent(search_url + "&page=1") search_results = list() # see http://api.comicvine.com/documentation/#handling_responses limit = cv_response['limit'] current_result_count = cv_response['number_of_page_results'] total_result_count = cv_response['number_of_total_results'] if callback is None: self.writeLog( "Found {0} of {1} results\n".format( cv_response['number_of_page_results'], cv_response['number_of_total_results'])) search_results.extend(cv_response['results']) page = 1 if callback is not None: callback(current_result_count, total_result_count) # see if we need to keep asking for more pages... 
while (current_result_count < total_result_count): if callback is None: self.writeLog( "getting another page of results {0} of {1}...\n".format( current_result_count, total_result_count)) page += 1 cv_response = self.getCVContent(search_url + "&page=" + str(page)) search_results.extend(cv_response['results']) current_result_count += cv_response['number_of_page_results'] if callback is not None: callback(current_result_count, total_result_count) # for record in search_results: #print(u"{0}: {1} ({2})".format(record['id'], record['name'] , record['start_year'])) # print(record) #record['count_of_issues'] = record['count_of_isssues'] #print(u"{0}: {1} ({2})".format(search_results['results'][0]['id'], search_results['results'][0]['name'] , search_results['results'][0]['start_year'])) # cache these search results cvc.add_search_results(original_series_name, search_results) return search_results def fetchVolumeData(self, series_id): # before we search online, look in our cache, since we might already # have this info cvc = ComicVineCacher() cached_volume_result = cvc.get_volume_info(series_id) if cached_volume_result is not None: return cached_volume_result volume_url = self.api_base_url + "/volume/" + CVTypeID.Volume + "-" + \ str(series_id) + "/?api_key=" + self.api_key + \ "&field_list=name,id,start_year,publisher,count_of_issues&format=json" cv_response = self.getCVContent(volume_url) volume_results = cv_response['results'] cvc.add_volume_info(volume_results) return volume_results def fetchIssuesByVolume(self, series_id): # before we search online, look in our cache, since we might already # have this info cvc = ComicVineCacher() cached_volume_issues_result = cvc.get_volume_issues_info(series_id) if cached_volume_issues_result is not None: return cached_volume_issues_result #--------------------------------- issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + "&filter=volume:" + \ str(series_id) + \ 
"&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json" cv_response = self.getCVContent(issues_url) #------------------------------------ limit = cv_response['limit'] current_result_count = cv_response['number_of_page_results'] total_result_count = cv_response['number_of_total_results'] # print "ATB total_result_count", total_result_count #print("ATB Found {0} of {1} results".format(cv_response['number_of_page_results'], cv_response['number_of_total_results'])) volume_issues_result = cv_response['results'] page = 1 offset = 0 # see if we need to keep asking for more pages... while (current_result_count < total_result_count): #print("ATB getting another page of issue results {0} of {1}...".format(current_result_count, total_result_count)) page += 1 offset += cv_response['number_of_page_results'] # print issues_url+ "&offset="+str(offset) cv_response = self.getCVContent( issues_url + "&offset=" + str(offset)) volume_issues_result.extend(cv_response['results']) current_result_count += cv_response['number_of_page_results'] self.repairUrls(volume_issues_result) cvc.add_volume_issues_info(series_id, volume_issues_result) return volume_issues_result def fetchIssuesByVolumeIssueNumAndYear( self, volume_id_list, issue_number, year): volume_filter = "volume:" for vid in volume_id_list: volume_filter += str(vid) + "|" year_filter = "" if year is not None and str(year).isdigit(): year_filter = ",cover_date:{0}-1-1|{1}-1-1".format( year, int(year) + 1) issue_number = urllib.quote_plus(unicode(issue_number).encode("utf-8")) filter = "&filter=" + volume_filter + \ year_filter + ",issue_number:" + issue_number issues_url = self.api_base_url + "/issues/" + "?api_key=" + self.api_key + filter + \ "&field_list=id,volume,issue_number,name,image,cover_date,site_detail_url,description&format=json" cv_response = self.getCVContent(issues_url) #------------------------------------ limit = cv_response['limit'] current_result_count = 
cv_response['number_of_page_results'] total_result_count = cv_response['number_of_total_results'] # print "ATB total_result_count", total_result_count #print("ATB Found {0} of {1} results\n".format(cv_response['number_of_page_results'], cv_response['number_of_total_results'])) filtered_issues_result = cv_response['results'] page = 1 offset = 0 # see if we need to keep asking for more pages... while (current_result_count < total_result_count): #print("ATB getting another page of issue results {0} of {1}...\n".format(current_result_count, total_result_count)) page += 1 offset += cv_response['number_of_page_results'] # print issues_url+ "&offset="+str(offset) cv_response = self.getCVContent( issues_url + "&offset=" + str(offset)) filtered_issues_result.extend(cv_response['results']) current_result_count += cv_response['number_of_page_results'] self.repairUrls(filtered_issues_result) return filtered_issues_result def fetchIssueData(self, series_id, issue_number, settings): volume_results = self.fetchVolumeData(series_id) issues_list_results = self.fetchIssuesByVolume(series_id) found = False for record in issues_list_results: if IssueString(issue_number).asString() is None: issue_number = 1 if IssueString(record['issue_number']).asString().lower() == IssueString( issue_number).asString().lower(): found = True break if (found): issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \ str(record['id']) + "/?api_key=" + \ self.api_key + "&format=json" cv_response = self.getCVContent(issue_url) issue_results = cv_response['results'] else: return None # Now, map the Comic Vine data to generic metadata return self.mapCVDataToMetadata( volume_results, issue_results, settings) def fetchIssueDataByIssueID(self, issue_id, settings): issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \ str(issue_id) + "/?api_key=" + self.api_key + "&format=json" cv_response = self.getCVContent(issue_url) issue_results = cv_response['results'] volume_results = 
self.fetchVolumeData(issue_results['volume']['id']) # Now, map the Comic Vine data to generic metadata md = self.mapCVDataToMetadata(volume_results, issue_results, settings) md.isEmpty = False return md def mapCVDataToMetadata(self, volume_results, issue_results, settings): # Now, map the Comic Vine data to generic metadata metadata = GenericMetadata() metadata.series = issue_results['volume']['name'] num_s = IssueString(issue_results['issue_number']).asString() metadata.issue = num_s metadata.title = issue_results['name'] metadata.publisher = volume_results['publisher']['name'] metadata.day, metadata.month, metadata.year = self.parseDateStr( issue_results['cover_date']) #metadata.issueCount = volume_results['count_of_issues'] metadata.comments = self.cleanup_html( issue_results['description'], settings.remove_html_tables) if settings.use_series_start_as_volume: metadata.volume = volume_results['start_year'] metadata.notes = "Tagged with the {0} fork of ComicTagger {1} using info from Comic Vine on {2}. [Issue ID {3}]".format( ctversion.fork, ctversion.version, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), issue_results['id']) #metadata.notes += issue_results['site_detail_url'] metadata.webLink = issue_results['site_detail_url'] person_credits = issue_results['person_credits'] for person in person_credits: if 'role' in person: roles = person['role'].split(',') for role in roles: # can we determine 'primary' from CV?? 
metadata.addCredit( person['name'], role.title().strip(), False) character_credits = issue_results['character_credits'] character_list = list() for character in character_credits: character_list.append(character['name']) metadata.characters = utils.listToString(character_list) team_credits = issue_results['team_credits'] team_list = list() for team in team_credits: team_list.append(team['name']) metadata.teams = utils.listToString(team_list) location_credits = issue_results['location_credits'] location_list = list() for location in location_credits: location_list.append(location['name']) metadata.locations = utils.listToString(location_list) story_arc_credits = issue_results['story_arc_credits'] arc_list = [] for arc in story_arc_credits: arc_list.append(arc['name']) if len(arc_list) > 0: metadata.storyArc = utils.listToString(arc_list) return metadata def cleanup_html(self, string, remove_html_tables): """ converter = html2text.HTML2Text() #converter.emphasis_mark = '*' #converter.ignore_links = True converter.body_width = 0 print(html2text.html2text(string)) return string #return converter.handle(string) """ if string is None: return "" # find any tables soup = BeautifulSoup(string, "html.parser") tables = soup.findAll('table') # remove all newlines first string = string.replace("\n", "") # put in our own string = string.replace("<br>", "\n") string = string.replace("</p>", "\n\n") string = string.replace("<h4>", "*") string = string.replace("</h4>", "*\n") # remove the tables p = re.compile(r'<table[^<]*?>.*?<\/table>') if remove_html_tables: string = p.sub('', string) string = string.replace("*List of covers and their creators:*", "") else: string = p.sub('{}', string) # now strip all other tags p = re.compile(r'<[^<]*?>') newstring = p.sub('', string) newstring = newstring.replace('&nbsp;', ' ') newstring = newstring.replace('&amp;', '&') newstring = newstring.strip() if not remove_html_tables: # now rebuild the tables into text from BSoup try: table_strings = 
[] for table in tables: rows = [] hdrs = [] col_widths = [] for hdr in table.findAll('th'): item = hdr.string.strip() hdrs.append(item) col_widths.append(len(item)) rows.append(hdrs) for row in table.findAll('tr'): cols = [] col = row.findAll('td') i = 0 for c in col: item = c.string.strip() cols.append(item) if len(item) > col_widths[i]: col_widths[i] = len(item) i += 1 if len(cols) != 0: rows.append(cols) # now we have the data, make it into text fmtstr = "" for w in col_widths: fmtstr += " {{:{}}}|".format(w + 1) width = sum(col_widths) + len(col_widths) * 2 print "width=", width table_text = "" counter = 0 for row in rows: table_text += fmtstr.format(*row) + "\n" if counter == 0 and len(hdrs) != 0: table_text += "-" * width + "\n" counter += 1 table_strings.append(table_text) newstring = newstring.format(*table_strings) except: # we caught an error rebuilding the table. # just bail and remove the formatting print("table parse error") newstring.replace("{}", "") return newstring def fetchIssueDate(self, issue_id): details = self.fetchIssueSelectDetails(issue_id) day, month, year = self.parseDateStr(details['cover_date']) return month, year def fetchIssueCoverURLs(self, issue_id): details = self.fetchIssueSelectDetails(issue_id) return details['image_url'], details['thumb_image_url'] def fetchIssuePageURL(self, issue_id): details = self.fetchIssueSelectDetails(issue_id) return details['site_detail_url'] def fetchIssueSelectDetails(self, issue_id): #cached_image_url,cached_thumb_url,cached_month,cached_year = self.fetchCachedIssueSelectDetails(issue_id) cached_details = self.fetchCachedIssueSelectDetails(issue_id) if cached_details['image_url'] is not None: return cached_details issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \ str(issue_id) + "/?api_key=" + self.api_key + \ "&format=json&field_list=image,cover_date,site_detail_url" details = dict() details['image_url'] = None details['thumb_image_url'] = None details['cover_date'] = None 
details['site_detail_url'] = None cv_response = self.getCVContent(issue_url) details['image_url'] = cv_response['results']['image']['super_url'] details['thumb_image_url'] = cv_response[ 'results']['image']['thumb_url'] details['cover_date'] = cv_response['results']['cover_date'] details['site_detail_url'] = cv_response['results']['site_detail_url'] if details['image_url'] is not None: self.cacheIssueSelectDetails(issue_id, details['image_url'], details['thumb_image_url'], details['cover_date'], details['site_detail_url']) # print(details['site_detail_url']) return details def fetchCachedIssueSelectDetails(self, issue_id): # before we search online, look in our cache, since we might already # have this info cvc = ComicVineCacher() return cvc.get_issue_select_details(issue_id) def cacheIssueSelectDetails( self, issue_id, image_url, thumb_url, cover_date, page_url): cvc = ComicVineCacher() cvc.add_issue_select_details( issue_id, image_url, thumb_url, cover_date, page_url) def fetchAlternateCoverURLs(self, issue_id, issue_page_url): url_list = self.fetchCachedAlternateCoverURLs(issue_id) if url_list is not None: return url_list # scrape the CV issue page URL to get the alternate cover URLs resp = urllib2.urlopen(issue_page_url) content = resp.read() alt_cover_url_list = self.parseOutAltCoverUrls(content) # cache this alt cover URL list self.cacheAlternateCoverURLs(issue_id, alt_cover_url_list) return alt_cover_url_list def parseOutAltCoverUrls(self, page_html): soup = BeautifulSoup(page_html, "html.parser") alt_cover_url_list = [] # Using knowledge of the layout of the Comic Vine issue page here: # look for the divs that are in the classes 'content-pod' and # 'alt-cover' div_list = soup.find_all('div') covers_found = 0 for d in div_list: if 'class' in d: c = d['class'] if 'imgboxart' in c and 'issue-cover' in c: covers_found += 1 if covers_found != 1: alt_cover_url_list.append(d.img['src']) return alt_cover_url_list def fetchCachedAlternateCoverURLs(self, issue_id): # 
before we search online, look in our cache, since we might already # have this info cvc = ComicVineCacher() url_list = cvc.get_alt_covers(issue_id) if url_list is not None: return url_list else: return None def cacheAlternateCoverURLs(self, issue_id, url_list): cvc = ComicVineCacher() cvc.add_alt_covers(issue_id, url_list) #------------------------------------------------------------------------- urlFetchComplete = pyqtSignal(str, str, int) def asyncFetchIssueCoverURLs(self, issue_id): self.issue_id = issue_id details = self.fetchCachedIssueSelectDetails(issue_id) if details['image_url'] is not None: self.urlFetchComplete.emit( details['image_url'], details['thumb_image_url'], self.issue_id) return issue_url = self.api_base_url + "/issue/" + CVTypeID.Issue + "-" + \ str(issue_id) + "/?api_key=" + self.api_key + \ "&format=json&field_list=image,cover_date,site_detail_url" self.nam = QNetworkAccessManager() self.nam.finished.connect(self.asyncFetchIssueCoverURLComplete) self.nam.get(QNetworkRequest(QUrl(issue_url))) def asyncFetchIssueCoverURLComplete(self, reply): # read in the response data = reply.readAll() try: cv_response = json.loads(str(data)) except: print >> sys.stderr, "Comic Vine query failed to get JSON data" print >> sys.stderr, str(data) return if cv_response['status_code'] != 1: print >> sys.stderr, "Comic Vine query failed with error: [{0}]. ".format( cv_response['error']) return image_url = cv_response['results']['image']['super_url'] thumb_url = cv_response['results']['image']['thumb_url'] cover_date = cv_response['results']['cover_date'] page_url = cv_response['results']['site_detail_url'] self.cacheIssueSelectDetails( self.issue_id, image_url, thumb_url, cover_date, page_url) self.urlFetchComplete.emit(image_url, thumb_url, self.issue_id) altUrlListFetchComplete = pyqtSignal(list, int) def asyncFetchAlternateCoverURLs(self, issue_id, issue_page_url): # This async version requires the issue page url to be provided! 
self.issue_id = issue_id url_list = self.fetchCachedAlternateCoverURLs(issue_id) if url_list is not None: self.altUrlListFetchComplete.emit(url_list, int(self.issue_id)) return self.nam = QNetworkAccessManager() self.nam.finished.connect(self.asyncFetchAlternateCoverURLsComplete) self.nam.get(QNetworkRequest(QUrl(str(issue_page_url)))) def asyncFetchAlternateCoverURLsComplete(self, reply): # read in the response html = str(reply.readAll()) alt_cover_url_list = self.parseOutAltCoverUrls(html) # cache this alt cover URL list self.cacheAlternateCoverURLs(self.issue_id, alt_cover_url_list) self.altUrlListFetchComplete.emit( alt_cover_url_list, int(self.issue_id)) def repairUrls(self, issue_list): # make sure there are URLs for the image fields for issue in issue_list: if issue['image'] is None: issue['image'] = dict() issue['image']['super_url'] = ComicVineTalker.logo_url issue['image']['thumb_url'] = ComicVineTalker.logo_url
29,991
Python
.py
632
36.003165
159
0.585121
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,977
optionalmsgdialog.py
evilhero_mylar/lib/comictaggerlib/optionalmsgdialog.py
"""A PyQt4 dialog to show a message and let the user check a box Example usage: checked = OptionalMessageDialog.msg(self, "Disclaimer", "This is beta software, and you are using it at your own risk!", ) said_yes, checked = OptionalMessageDialog.question(self, "Question", "Are you sure you wish to do this?", ) """ # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from PyQt4.QtCore import * from PyQt4.QtGui import * StyleMessage = 0 StyleQuestion = 1 class OptionalMessageDialog(QDialog): def __init__(self, parent, style, title, msg, check_state=Qt.Unchecked, check_text=None): QDialog.__init__(self, parent) self.setWindowTitle(title) self.was_accepted = False l = QVBoxLayout(self) self.theLabel = QLabel(msg) self.theLabel.setWordWrap(True) self.theLabel.setTextFormat(Qt.RichText) self.theLabel.setOpenExternalLinks(True) self.theLabel.setTextInteractionFlags( Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse | Qt.LinksAccessibleByKeyboard) l.addWidget(self.theLabel) l.insertSpacing(-1, 10) if check_text is None: if style == StyleQuestion: check_text = "Remember this answer" else: check_text = "Don't show this message again" self.theCheckBox = QCheckBox(check_text) self.theCheckBox.setCheckState(check_state) l.addWidget(self.theCheckBox) btnbox_style = QDialogButtonBox.Ok if style == StyleQuestion: btnbox_style = QDialogButtonBox.Yes | QDialogButtonBox.No self.theButtonBox = QDialogButtonBox( btnbox_style, parent=self, accepted=self.accept, 
rejected=self.reject) l.addWidget(self.theButtonBox) def accept(self): self.was_accepted = True QDialog.accept(self) def reject(self): self.was_accepted = False QDialog.reject(self) @staticmethod def msg(parent, title, msg, check_state=Qt.Unchecked, check_text=None): d = OptionalMessageDialog( parent, StyleMessage, title, msg, check_state=check_state, check_text=check_text) d.exec_() return d.theCheckBox.isChecked() @staticmethod def question( parent, title, msg, check_state=Qt.Unchecked, check_text=None): d = OptionalMessageDialog( parent, StyleQuestion, title, msg, check_state=check_state, check_text=check_text) d.exec_() return d.was_accepted, d.theCheckBox.isChecked()
3,372
Python
.py
84
30.809524
96
0.640553
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,978
pagebrowser.py
evilhero_mylar/lib/comictaggerlib/pagebrowser.py
"""A PyQT4 dialog to show pages of a comic archive""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform #import sys #import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings from coverimagewidget import CoverImageWidget class PageBrowserWindow(QtGui.QDialog): def __init__(self, parent, metadata): super(PageBrowserWindow, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('pagebrowser.ui'), self) self.pageWidget = CoverImageWidget( self.pageContainer, CoverImageWidget.ArchiveMode) gridlayout = QtGui.QGridLayout(self.pageContainer) gridlayout.addWidget(self.pageWidget) gridlayout.setContentsMargins(0, 0, 0, 0) self.pageWidget.showControls = False self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint) self.comic_archive = None self.page_count = 0 self.current_page_num = 0 self.metadata = metadata self.buttonBox.button(QtGui.QDialogButtonBox.Close).setDefault(True) if platform.system() == "Darwin": self.btnPrev.setText("<<") self.btnNext.setText(">>") else: self.btnPrev.setIcon( QtGui.QIcon(ComicTaggerSettings.getGraphic('left.png'))) self.btnNext.setIcon( QtGui.QIcon(ComicTaggerSettings.getGraphic('right.png'))) self.btnNext.clicked.connect(self.nextPage) self.btnPrev.clicked.connect(self.prevPage) self.show() self.btnNext.setEnabled(False) self.btnPrev.setEnabled(False) def reset(self): self.comic_archive = None self.page_count = 0 
self.current_page_num = 0 self.metadata = None self.btnNext.setEnabled(False) self.btnPrev.setEnabled(False) self.pageWidget.clear() def setComicArchive(self, ca): self.comic_archive = ca self.page_count = ca.getNumberOfPages() self.current_page_num = 0 self.pageWidget.setArchive(self.comic_archive) self.setPage() if self.page_count > 1: self.btnNext.setEnabled(True) self.btnPrev.setEnabled(True) def nextPage(self): if self.current_page_num + 1 < self.page_count: self.current_page_num += 1 else: self.current_page_num = 0 self.setPage() def prevPage(self): if self.current_page_num - 1 >= 0: self.current_page_num -= 1 else: self.current_page_num = self.page_count - 1 self.setPage() def setPage(self): if self.metadata is not None: archive_page_index = self.metadata.getArchivePageIndex( self.current_page_num) else: archive_page_index = self.current_page_num self.pageWidget.setPage(archive_page_index) self.setWindowTitle( "Page Browser - Page {0} (of {1}) ".format(self.current_page_num + 1, self.page_count))
3,682
Python
.py
86
34.046512
99
0.659753
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,979
renamewindow.py
evilhero_mylar/lib/comictaggerlib/renamewindow.py
"""A PyQT4 dialog to confirm rename""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings from settingswindow import SettingsWindow from filerenamer import FileRenamer from comicarchive import MetaDataStyle import utils class RenameWindow(QtGui.QDialog): def __init__(self, parent, comic_archive_list, data_style, settings): super(RenameWindow, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('renamewindow.ui'), self) self.label.setText( "Preview (based on {0} tags):".format( MetaDataStyle.name[data_style])) self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint) self.settings = settings self.comic_archive_list = comic_archive_list self.data_style = data_style self.btnSettings.clicked.connect(self.modifySettings) self.configRenamer() self.doPreview() def configRenamer(self): self.renamer = FileRenamer(None) self.renamer.setTemplate(self.settings.rename_template) self.renamer.setIssueZeroPadding( self.settings.rename_issue_number_padding) self.renamer.setSmartCleanup( self.settings.rename_use_smart_string_cleanup) def doPreview(self): self.rename_list = [] while self.twList.rowCount() > 0: self.twList.removeRow(0) self.twList.setSortingEnabled(False) for ca in self.comic_archive_list: new_ext = None # default if self.settings.rename_extension_based_on_archive: if ca.isZip(): new_ext = ".cbz" elif 
ca.isRar(): new_ext = ".cbr" md = ca.readMetadata(self.data_style) if md.isEmpty: md = ca.metadataFromFilename(self.settings.parse_scan_info) self.renamer.setMetadata(md) new_name = self.renamer.determineName(ca.path, ext=new_ext) row = self.twList.rowCount() self.twList.insertRow(row) folder_item = QtGui.QTableWidgetItem() old_name_item = QtGui.QTableWidgetItem() new_name_item = QtGui.QTableWidgetItem() item_text = os.path.split(ca.path)[0] folder_item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 0, folder_item) folder_item.setText(item_text) folder_item.setData(QtCore.Qt.ToolTipRole, item_text) item_text = os.path.split(ca.path)[1] old_name_item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 1, old_name_item) old_name_item.setText(item_text) old_name_item.setData(QtCore.Qt.ToolTipRole, item_text) new_name_item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 2, new_name_item) new_name_item.setText(new_name) new_name_item.setData(QtCore.Qt.ToolTipRole, new_name) dict_item = dict() dict_item['archive'] = ca dict_item['new_name'] = new_name self.rename_list.append(dict_item) # Adjust column sizes self.twList.setVisible(False) self.twList.resizeColumnsToContents() self.twList.setVisible(True) if self.twList.columnWidth(0) > 200: self.twList.setColumnWidth(0, 200) self.twList.setSortingEnabled(True) def modifySettings(self): settingswin = SettingsWindow(self, self.settings) settingswin.setModal(True) settingswin.showRenameTab() settingswin.exec_() if settingswin.result(): self.configRenamer() self.doPreview() def accept(self): progdialog = QtGui.QProgressDialog( "", "Cancel", 0, len(self.rename_list), self) progdialog.setWindowTitle("Renaming Archives") progdialog.setWindowModality(QtCore.Qt.WindowModal) progdialog.show() for idx, item in enumerate(self.rename_list): QtCore.QCoreApplication.processEvents() if progdialog.wasCanceled(): break 
progdialog.setValue(idx) idx += 1 progdialog.setLabelText(item['new_name']) if item['new_name'] == os.path.basename(item['archive'].path): print item['new_name'], "Filename is already good!" continue if not item['archive'].isWritable(check_rar_status=False): continue folder = os.path.dirname(os.path.abspath(item['archive'].path)) new_abs_path = utils.unique_file( os.path.join(folder, item['new_name'])) os.rename(item['archive'].path, new_abs_path) item['archive'].rename(new_abs_path) progdialog.close() QtGui.QDialog.accept(self)
5,742
Python
.py
124
35.540323
75
0.635239
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,980
comicvinecacher.py
evilhero_mylar/lib/comictaggerlib/comicvinecacher.py
"""A python class to manage caching of data from Comic Vine""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sqlite3 as lite import os import datetime #import sys #from pprint import pprint import ctversion from settings import ComicTaggerSettings import utils class ComicVineCacher: def __init__(self): self.settings_folder = ComicTaggerSettings.getSettingsFolder() self.db_file = os.path.join(self.settings_folder, "cv_cache.db") self.version_file = os.path.join( self.settings_folder, "cache_version.txt") # verify that cache is from same version as this one data = "" try: with open(self.version_file, 'rb') as f: data = f.read() f.close() except: pass if data != ctversion.version: self.clearCache() if not os.path.exists(self.db_file): self.create_cache_db() def clearCache(self): try: os.unlink(self.db_file) except: pass try: os.unlink(self.version_file) except: pass def create_cache_db(self): # create the version file with open(self.version_file, 'w') as f: f.write(ctversion.version) # this will wipe out any existing version open(self.db_file, 'w').close() con = lite.connect(self.db_file) # create tables with con: cur = con.cursor() # name,id,start_year,publisher,image,description,count_of_issues cur.execute( "CREATE TABLE VolumeSearchCache(" + "search_term TEXT," + "id INT," + "name TEXT," + "start_year INT," + "publisher TEXT," + "count_of_issues INT," + "image_url TEXT," + "description TEXT," + "timestamp DATE DEFAULT 
(datetime('now','localtime'))) ") cur.execute( "CREATE TABLE Volumes(" + "id INT," + "name TEXT," + "publisher TEXT," + "count_of_issues INT," + "start_year INT," + "timestamp DATE DEFAULT (datetime('now','localtime')), " + "PRIMARY KEY (id))") cur.execute( "CREATE TABLE AltCovers(" + "issue_id INT," + "url_list TEXT," + "timestamp DATE DEFAULT (datetime('now','localtime')), " + "PRIMARY KEY (issue_id))") cur.execute( "CREATE TABLE Issues(" + "id INT," + "volume_id INT," + "name TEXT," + "issue_number TEXT," + "super_url TEXT," + "thumb_url TEXT," + "cover_date TEXT," + "site_detail_url TEXT," + "description TEXT," + "timestamp DATE DEFAULT (datetime('now','localtime')), " + "PRIMARY KEY (id))") def add_search_results(self, search_term, cv_search_results): con = lite.connect(self.db_file) with con: con.text_factory = unicode cur = con.cursor() # remove all previous entries with this search term cur.execute( "DELETE FROM VolumeSearchCache WHERE search_term = ?", [ search_term.lower()]) # now add in new results for record in cv_search_results: timestamp = datetime.datetime.now() if record['publisher'] is None: pub_name = "" else: pub_name = record['publisher']['name'] if record['image'] is None: url = "" else: url = record['image']['super_url'] cur.execute( "INSERT INTO VolumeSearchCache " + "(search_term, id, name, start_year, publisher, count_of_issues, image_url, description) " + "VALUES(?, ?, ?, ?, ?, ?, ?, ?)", (search_term.lower(), record['id'], record['name'], record['start_year'], pub_name, record['count_of_issues'], url, record['description'])) def get_search_results(self, search_term): results = list() con = lite.connect(self.db_file) with con: con.text_factory = unicode cur = con.cursor() # purge stale search results a_day_ago = datetime.datetime.today() - datetime.timedelta(days=1) cur.execute( "DELETE FROM VolumeSearchCache WHERE timestamp < ?", [ str(a_day_ago)]) # fetch cur.execute( "SELECT * FROM VolumeSearchCache WHERE search_term=?", 
[search_term.lower()]) rows = cur.fetchall() # now process the results for record in rows: result = dict() result['id'] = record[1] result['name'] = record[2] result['start_year'] = record[3] result['publisher'] = dict() result['publisher']['name'] = record[4] result['count_of_issues'] = record[5] result['image'] = dict() result['image']['super_url'] = record[6] result['description'] = record[7] results.append(result) return results def add_alt_covers(self, issue_id, url_list): con = lite.connect(self.db_file) with con: con.text_factory = unicode cur = con.cursor() # remove all previous entries with this search term cur.execute("DELETE FROM AltCovers WHERE issue_id = ?", [issue_id]) url_list_str = utils.listToString(url_list) # now add in new record cur.execute("INSERT INTO AltCovers " + "(issue_id, url_list) " + "VALUES(?, ?)", (issue_id, url_list_str) ) def get_alt_covers(self, issue_id): con = lite.connect(self.db_file) with con: cur = con.cursor() con.text_factory = unicode # purge stale issue info - probably issue data won't change # much.... 
a_month_ago = datetime.datetime.today() - \ datetime.timedelta(days=30) cur.execute( "DELETE FROM AltCovers WHERE timestamp < ?", [ str(a_month_ago)]) cur.execute( "SELECT url_list FROM AltCovers WHERE issue_id=?", [issue_id]) row = cur.fetchone() if row is None: return None else: url_list_str = row[0] if len(url_list_str) == 0: return [] raw_list = url_list_str.split(",") url_list = [] for item in raw_list: url_list.append(str(item).strip()) return url_list def add_volume_info(self, cv_volume_record): con = lite.connect(self.db_file) with con: cur = con.cursor() timestamp = datetime.datetime.now() if cv_volume_record['publisher'] is None: pub_name = "" else: pub_name = cv_volume_record['publisher']['name'] data = { "name": cv_volume_record['name'], "publisher": pub_name, "count_of_issues": cv_volume_record['count_of_issues'], "start_year": cv_volume_record['start_year'], "timestamp": timestamp } self.upsert(cur, "volumes", "id", cv_volume_record['id'], data) def add_volume_issues_info(self, volume_id, cv_volume_issues): con = lite.connect(self.db_file) with con: cur = con.cursor() timestamp = datetime.datetime.now() # add in issues for issue in cv_volume_issues: data = { "volume_id": volume_id, "name": issue['name'], "issue_number": issue['issue_number'], "site_detail_url": issue['site_detail_url'], "cover_date": issue['cover_date'], "super_url": issue['image']['super_url'], "thumb_url": issue['image']['thumb_url'], "description": issue['description'], "timestamp": timestamp } self.upsert(cur, "issues", "id", issue['id'], data) def get_volume_info(self, volume_id): result = None con = lite.connect(self.db_file) with con: cur = con.cursor() con.text_factory = unicode # purge stale volume info a_week_ago = datetime.datetime.today() - datetime.timedelta(days=7) cur.execute( "DELETE FROM Volumes WHERE timestamp < ?", [str(a_week_ago)]) # fetch cur.execute( "SELECT id,name,publisher,count_of_issues,start_year FROM Volumes WHERE id = ?", [volume_id]) row = 
cur.fetchone() if row is None: return result result = dict() # since ID is primary key, there is only one row result['id'] = row[0] result['name'] = row[1] result['publisher'] = dict() result['publisher']['name'] = row[2] result['count_of_issues'] = row[3] result['start_year'] = row[4] result['issues'] = list() return result def get_volume_issues_info(self, volume_id): result = None con = lite.connect(self.db_file) with con: cur = con.cursor() con.text_factory = unicode # purge stale issue info - probably issue data won't change # much.... a_week_ago = datetime.datetime.today() - datetime.timedelta(days=7) cur.execute( "DELETE FROM Issues WHERE timestamp < ?", [str(a_week_ago)]) # fetch results = list() cur.execute( "SELECT id,name,issue_number,site_detail_url,cover_date,super_url,thumb_url,description FROM Issues WHERE volume_id = ?", [volume_id]) rows = cur.fetchall() # now process the results for row in rows: record = dict() record['id'] = row[0] record['name'] = row[1] record['issue_number'] = row[2] record['site_detail_url'] = row[3] record['cover_date'] = row[4] record['image'] = dict() record['image']['super_url'] = row[5] record['image']['thumb_url'] = row[6] record['description'] = row[7] results.append(record) if len(results) == 0: return None return results def add_issue_select_details( self, issue_id, image_url, thumb_image_url, cover_date, site_detail_url): con = lite.connect(self.db_file) with con: cur = con.cursor() con.text_factory = unicode timestamp = datetime.datetime.now() data = { "super_url": image_url, "thumb_url": thumb_image_url, "cover_date": cover_date, "site_detail_url": site_detail_url, "timestamp": timestamp } self.upsert(cur, "issues", "id", issue_id, data) def get_issue_select_details(self, issue_id): con = lite.connect(self.db_file) with con: cur = con.cursor() con.text_factory = unicode cur.execute( "SELECT super_url,thumb_url,cover_date,site_detail_url FROM Issues WHERE id=?", [issue_id]) row = cur.fetchone() details = dict() if 
row is None or row[0] is None: details['image_url'] = None details['thumb_image_url'] = None details['cover_date'] = None details['site_detail_url'] = None else: details['image_url'] = row[0] details['thumb_image_url'] = row[1] details['cover_date'] = row[2] details['site_detail_url'] = row[3] return details def upsert(self, cur, tablename, pkname, pkval, data): """This does an insert if the given PK doesn't exist, and an update it if does TODO: look into checking if UPDATE is needed TODO: should the cursor be created here, and not up the stack? """ ins_count = len(data) + 1 keys = "" vals = list() ins_slots = "" set_slots = "" for key in data: if keys != "": keys += ", " if ins_slots != "": ins_slots += ", " if set_slots != "": set_slots += ", " keys += key vals.append(data[key]) ins_slots += "?" set_slots += key + " = ?" keys += ", " + pkname vals.append(pkval) ins_slots += ", ?" condition = pkname + " = ?" sql_ins = ("INSERT OR IGNORE INTO " + tablename + " (" + keys + ") " + " VALUES (" + ins_slots + ")") cur.execute(sql_ins, vals) sql_upd = ("UPDATE " + tablename + " SET " + set_slots + " WHERE " + condition) cur.execute(sql_upd, vals)
14,868
Python
.py
368
26.652174
137
0.492187
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,981
exportwindow.py
evilhero_mylar/lib/comictaggerlib/exportwindow.py
"""A PyQT4 dialog to confirm and set options for export to zip""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings #from settingswindow import SettingsWindow #from filerenamer import FileRenamer #import utils class ExportConflictOpts: dontCreate = 1 overwrite = 2 createUnique = 3 class ExportWindow(QtGui.QDialog): def __init__(self, parent, settings, msg): super(ExportWindow, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('exportwindow.ui'), self) self.label.setText(msg) self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint) self.settings = settings self.cbxDeleteOriginal.setCheckState(QtCore.Qt.Unchecked) self.cbxAddToList.setCheckState(QtCore.Qt.Checked) self.radioDontCreate.setChecked(True) self.deleteOriginal = False self.addToList = True self.fileConflictBehavior = ExportConflictOpts.dontCreate def accept(self): QtGui.QDialog.accept(self) self.deleteOriginal = self.cbxDeleteOriginal.isChecked() self.addToList = self.cbxAddToList.isChecked() if self.radioDontCreate.isChecked(): self.fileConflictBehavior = ExportConflictOpts.dontCreate elif self.radioCreateNew.isChecked(): self.fileConflictBehavior = ExportConflictOpts.createUnique # else: # self.fileConflictBehavior = ExportConflictOpts.overwrite
2,119
Python
.py
45
40.866667
74
0.739659
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,982
fileselectionlist.py
evilhero_mylar/lib/comictaggerlib/fileselectionlist.py
# coding=utf-8
"""A PyQt4 widget for managing list of comic archive files"""

# Copyright 2012-2014 Anthony Beville
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import platform
import os
#import os
#import sys

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic
from PyQt4.QtCore import pyqtSignal

from settings import ComicTaggerSettings
from comicarchive import ComicArchive
from optionalmsgdialog import OptionalMessageDialog
from comictaggerlib.ui.qtutils import reduceWidgetFontSize, centerWindowOnParent
import utils
#from comicarchive import MetaDataStyle
#from genericmetadata import GenericMetadata, PageType


class FileTableWidgetItem(QTableWidgetItem):
    """Table item whose sort order follows its boolean UserRole data.

    Used for the CR/CBL/read-only checkmark columns so sorting groups
    checked and unchecked rows together.
    """

    def __lt__(self, other):
        return (self.data(Qt.UserRole).toBool() <
                other.data(Qt.UserRole).toBool())


class FileInfo():
    """Thin wrapper holding one ComicArchive; stored as row UserRole data."""

    def __init__(self, ca):
        self.ca = ca


class FileSelectionList(QWidget):
    """Table widget listing the comic archives currently open in the app.

    Each row carries a FileInfo in its first cell's UserRole.  Emits
    selectionChanged(QVariant wrapping a FileInfo) when the current row
    changes, and listCleared when the last row is removed.
    """

    # Emitted with the newly-current row's FileInfo wrapped in a QVariant.
    selectionChanged = pyqtSignal(QVariant)
    # Emitted when the list becomes empty.
    listCleared = pyqtSignal()

    # Column indices in twList.
    fileColNum = 0
    CRFlagColNum = 1
    CBLFlagColNum = 2
    typeColNum = 3
    readonlyColNum = 4
    folderColNum = 5
    # The column whose item carries the FileInfo payload.
    dataColNum = fileColNum

    def __init__(self, parent, settings):
        super(FileSelectionList, self).__init__(parent)

        # The .ui file provides the twList table widget.
        uic.loadUi(ComicTaggerSettings.getUIFile('fileselectionlist.ui'), self)

        self.settings = settings

        reduceWidgetFontSize(self.twList)

        self.twList.setColumnCount(6)
        #self.twlist.setHorizontalHeaderLabels (["File", "Folder", "CR", "CBL", ""])
        # self.twList.horizontalHeader().setStretchLastSection(True)
        self.twList.currentItemChanged.connect(self.currentItemChangedCB)

        self.currentItem = None
        self.setContextMenuPolicy(Qt.ActionsContextMenu)
        # Set by the main window when the tag form has unsaved edits;
        # guards row changes/removals via modifiedFlagVerification().
        self.modifiedFlag = False

        # Context-menu actions for the list itself.
        selectAllAction = QAction("Select All", self)
        removeAction = QAction("Remove Selected Items", self)
        self.separator = QAction("", self)
        self.separator.setSeparator(True)

        selectAllAction.setShortcut('Ctrl+A')
        removeAction.setShortcut('Ctrl+X')

        selectAllAction.triggered.connect(self.selectAll)
        removeAction.triggered.connect(self.removeSelection)

        self.addAction(selectAllAction)
        self.addAction(removeAction)
        self.addAction(self.separator)

    def getSorting(self):
        """Return the current (column, order) sort indicator."""
        col = self.twList.horizontalHeader().sortIndicatorSection()
        order = self.twList.horizontalHeader().sortIndicatorOrder()
        return col, order

    def setSorting(self, col, order):
        """Apply a (column, order) sort indicator to the table."""
        col = self.twList.horizontalHeader().setSortIndicator(col, order)

    def addAppAction(self, action):
        """Append an application-level action to the context menu."""
        self.insertAction(None, action)

    def setModifiedFlag(self, modified):
        """Record whether the tag form holds unsaved changes."""
        self.modifiedFlag = modified

    def selectAll(self):
        """Select every cell in the table."""
        self.twList.setRangeSelected(
            QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, 5),
            True)

    def deselectAll(self):
        """Clear the selection across the whole table."""
        self.twList.setRangeSelected(
            QTableWidgetSelectionRange(0, 0, self.twList.rowCount() - 1, 5),
            False)

    def removeArchiveList(self, ca_list):
        """Remove the rows corresponding to the given ComicArchives."""
        self.twList.setSortingEnabled(False)
        for ca in ca_list:
            for row in range(self.twList.rowCount()):
                row_ca = self.getArchiveByRow(row)
                if row_ca == ca:
                    self.twList.removeRow(row)
                    break
        self.twList.setSortingEnabled(True)

    def getArchiveByRow(self, row):
        """Return the ComicArchive stored in the given row."""
        fi = self.twList.item(row, FileSelectionList.dataColNum).data(
            Qt.UserRole).toPyObject()
        return fi.ca

    def getCurrentArchive(self):
        """Return the ComicArchive of the currently-selected row."""
        return self.getArchiveByRow(self.twList.currentRow())

    def removeSelection(self):
        """Remove all selected rows, confirming first if the form is dirty."""
        row_list = []
        for item in self.twList.selectedItems():
            if item.column() == 0:
                row_list.append(item.row())

        if len(row_list) == 0:
            return

        # Only prompt if the row being edited is among those removed.
        if self.twList.currentRow() in row_list:
            if not self.modifiedFlagVerification(
                    "Remove Archive",
                    "If you close this archive, data in the form will be lost. Are you sure?"):
                return

        # Remove from the bottom up so earlier indices stay valid.
        row_list.sort()
        row_list.reverse()

        # Suppress the current-item callback while rows shift around.
        self.twList.currentItemChanged.disconnect(self.currentItemChangedCB)
        self.twList.setSortingEnabled(False)

        for i in row_list:
            self.twList.removeRow(i)

        self.twList.setSortingEnabled(True)
        self.twList.currentItemChanged.connect(self.currentItemChangedCB)

        if self.twList.rowCount() > 0:
            # since on a removal, we select row 0, make sure callback occurs if
            # we're already there
            if self.twList.currentRow() == 0:
                self.currentItemChangedCB(self.twList.currentItem(), None)
            self.twList.selectRow(0)
        else:
            self.listCleared.emit()

    def addPathList(self, pathlist):
        """Add files (recursing into folders) with a progress dialog.

        Warns once about missing unrar support when CBR/RAR files are
        seen, and selects the first successfully-added row.
        """
        filelist = utils.get_recursive_filelist(pathlist)
        # we now have a list of files to add

        progdialog = QProgressDialog("", "Cancel", 0, len(filelist), self)
        progdialog.setWindowTitle("Adding Files")
        # progdialog.setWindowModality(Qt.WindowModal)
        progdialog.setWindowModality(Qt.ApplicationModal)
        progdialog.show()

        firstAdded = None
        self.twList.setSortingEnabled(False)
        for idx, f in enumerate(filelist):
            QCoreApplication.processEvents()
            if progdialog.wasCanceled():
                break
            progdialog.setValue(idx)
            progdialog.setLabelText(f)
            centerWindowOnParent(progdialog)
            QCoreApplication.processEvents()
            row = self.addPathItem(f)
            if firstAdded is None and row is not None:
                firstAdded = row

        progdialog.close()
        # One-time nag if RAR files were opened but no unrar tool is
        # configured (not applicable on Windows).
        if (self.settings.show_no_unrar_warning and
                self.settings.unrar_exe_path == "" and
                self.settings.rar_exe_path == "" and
                platform.system() != "Windows"):
            for f in filelist:
                ext = os.path.splitext(f)[1].lower()
                if ext == ".rar" or ext == ".cbr":
                    checked = OptionalMessageDialog.msg(
                        self, "No unrar tool",
                        """
It looks like you've tried to open at least one CBR or RAR file.<br><br>
In order for ComicTagger to read this kind of file, you will have to configure the location of the unrar tool in the settings. Until then, ComicTagger will not be able recognize these kind of files.
                         """
                        )
                    self.settings.show_no_unrar_warning = not checked
                    break

        if firstAdded is not None:
            self.twList.selectRow(firstAdded)
        else:
            if len(pathlist) == 1 and os.path.isfile(pathlist[0]):
                QMessageBox.information(self, self.tr("File Open"), self.tr(
                    "Selected file doesn't seem to be a comic archive."))
            else:
                QMessageBox.information(
                    self,
                    self.tr("File/Folder Open"),
                    self.tr("No comic archives were found."))

        self.twList.setSortingEnabled(True)

        # Adjust column size
        self.twList.resizeColumnsToContents()
        self.twList.setColumnWidth(FileSelectionList.CRFlagColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.CBLFlagColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.readonlyColNum, 35)
        self.twList.setColumnWidth(FileSelectionList.typeColNum, 45)
        if self.twList.columnWidth(FileSelectionList.fileColNum) > 250:
            self.twList.setColumnWidth(FileSelectionList.fileColNum, 250)
        if self.twList.columnWidth(FileSelectionList.folderColNum) > 200:
            self.twList.setColumnWidth(FileSelectionList.folderColNum, 200)

    def isListDupe(self, path):
        """Return True if `path` is already present in the list."""
        r = 0
        while r < self.twList.rowCount():
            ca = self.getArchiveByRow(r)
            if ca.path == path:
                return True
            r = r + 1

        return False

    def getCurrentListRow(self, path):
        """Return the row index holding `path`, or -1 if absent."""
        r = 0
        while r < self.twList.rowCount():
            ca = self.getArchiveByRow(r)
            if ca.path == path:
                return r
            r = r + 1

        return -1

    def addPathItem(self, path):
        """Add one file as a row; return its row index.

        Returns the existing row for duplicates, and None (implicitly)
        when the file doesn't look like a comic archive.
        """
        path = unicode(path)
        path = os.path.abspath(path)
        # print "processing", path

        if self.isListDupe(path):
            return self.getCurrentListRow(path)

        ca = ComicArchive(
            path,
            self.settings.rar_exe_path,
            ComicTaggerSettings.getGraphic('nocover.png'))

        if ca.seemsToBeAComicArchive():
            row = self.twList.rowCount()
            self.twList.insertRow(row)

            fi = FileInfo(ca)

            filename_item = QTableWidgetItem()
            folder_item = QTableWidgetItem()
            # Checkmark columns use the bool-sorting item subclass.
            cix_item = FileTableWidgetItem()
            cbi_item = FileTableWidgetItem()
            readonly_item = FileTableWidgetItem()
            type_item = QTableWidgetItem()

            # The FileInfo payload lives on the filename item (dataColNum).
            filename_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            filename_item.setData(Qt.UserRole, fi)
            self.twList.setItem(row, FileSelectionList.fileColNum,
                                filename_item)

            folder_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.folderColNum,
                                folder_item)

            type_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            self.twList.setItem(row, FileSelectionList.typeColNum, type_item)

            cix_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            cix_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.CRFlagColNum, cix_item)

            cbi_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            cbi_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.CBLFlagColNum, cbi_item)

            readonly_item.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
            readonly_item.setTextAlignment(Qt.AlignHCenter)
            self.twList.setItem(row, FileSelectionList.readonlyColNum,
                                readonly_item)

            self.updateRow(row)

            return row

    def updateRow(self, row):
        """Refresh every cell of `row` from its archive's current state."""
        fi = self.twList.item(row, FileSelectionList.dataColNum).data(
            Qt.UserRole).toPyObject()

        filename_item = self.twList.item(row, FileSelectionList.fileColNum)
        folder_item = self.twList.item(row, FileSelectionList.folderColNum)
        cix_item = self.twList.item(row, FileSelectionList.CRFlagColNum)
        cbi_item = self.twList.item(row, FileSelectionList.CBLFlagColNum)
        type_item = self.twList.item(row, FileSelectionList.typeColNum)
        readonly_item = self.twList.item(row, FileSelectionList.readonlyColNum)

        item_text = os.path.split(fi.ca.path)[0]
        folder_item.setText(item_text)
        folder_item.setData(Qt.ToolTipRole, item_text)

        item_text = os.path.split(fi.ca.path)[1]
        filename_item.setText(item_text)
        filename_item.setData(Qt.ToolTipRole, item_text)

        if fi.ca.isZip():
            item_text = "ZIP"
        elif fi.ca.isRar():
            item_text = "RAR"
        else:
            item_text = ""
        type_item.setText(item_text)
        type_item.setData(Qt.ToolTipRole, item_text)

        # CR (ComicRack/CIX) tag presence checkmark.
        if fi.ca.hasCIX():
            cix_item.setCheckState(Qt.Checked)
            cix_item.setData(Qt.UserRole, True)
        else:
            cix_item.setData(Qt.UserRole, False)
            cix_item.setCheckState(Qt.Unchecked)

        # CBL (ComicBookLover/CBI) tag presence checkmark.
        if fi.ca.hasCBI():
            cbi_item.setCheckState(Qt.Checked)
            cbi_item.setData(Qt.UserRole, True)
        else:
            cbi_item.setData(Qt.UserRole, False)
            cbi_item.setCheckState(Qt.Unchecked)

        if not fi.ca.isWritable():
            readonly_item.setCheckState(Qt.Checked)
            readonly_item.setData(Qt.UserRole, True)
        else:
            readonly_item.setData(Qt.UserRole, False)
            readonly_item.setCheckState(Qt.Unchecked)

        # Reading these will force them into the ComicArchive's cache
        fi.ca.readCIX()
        fi.ca.hasCBI()

    def getSelectedArchiveList(self):
        """Return the ComicArchives of all currently-selected rows."""
        ca_list = []
        for r in range(self.twList.rowCount()):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if self.twList.isItemSelected(item):
                fi = item.data(Qt.UserRole).toPyObject()
                ca_list.append(fi.ca)

        return ca_list

    def updateCurrentRow(self):
        """Refresh the currently-selected row."""
        self.updateRow(self.twList.currentRow())

    def updateSelectedRows(self):
        """Refresh every selected row (sorting disabled during updates)."""
        self.twList.setSortingEnabled(False)
        for r in range(self.twList.rowCount()):
            item = self.twList.item(r, FileSelectionList.dataColNum)
            if self.twList.isItemSelected(item):
                self.updateRow(r)
        self.twList.setSortingEnabled(True)

    def currentItemChangedCB(self, curr, prev):
        """Qt slot: current row changed; may veto if the form is dirty."""
        new_idx = curr.row()
        old_idx = -1
        if prev is not None:
            old_idx = prev.row()
        #print("old {0} new {1}".format(old_idx, new_idx))

        if old_idx == new_idx:
            return

        # don't allow change if modified
        if prev is not None and new_idx != old_idx:
            if not self.modifiedFlagVerification(
                    "Change Archive",
                    "If you change archives now, data in the form will be lost. Are you sure?"):
                # User declined: restore the previous row without
                # re-triggering this callback.
                self.twList.currentItemChanged.disconnect(
                    self.currentItemChangedCB)
                self.twList.setCurrentItem(prev)
                self.twList.currentItemChanged.connect(
                    self.currentItemChangedCB)
                # Need to defer this revert selection, for some reason
                QTimer.singleShot(1, self.revertSelection)
                return

        fi = self.twList.item(new_idx, FileSelectionList.dataColNum).data(
            Qt.UserRole).toPyObject()
        self.selectionChanged.emit(QVariant(fi))

    def revertSelection(self):
        """Re-select the current row (deferred helper for vetoed changes)."""
        self.twList.selectRow(self.twList.currentRow())

    def modifiedFlagVerification(self, title, desc):
        """Ask the user to confirm a destructive action when dirty.

        Returns True when it's OK to proceed (not dirty, or user said Yes).
        """
        if self.modifiedFlag:
            reply = QMessageBox.question(self,
                                         self.tr(title),
                                         self.tr(desc),
                                         QMessageBox.Yes,
                                         QMessageBox.No)

            if reply != QMessageBox.Yes:
                return False
        return True

    # Attempt to use a special checkbox widget in the cell.
    # Couldn't figure out how to disable it with "enabled" colors
    #w = QWidget()
    #cb = QCheckBox(w)
    # cb.setCheckState(Qt.Checked)
    #layout = QHBoxLayout()
    # layout.addWidget(cb)
    # layout.setAlignment(Qt.AlignHCenter)
    # layout.setMargin(2)
    # w.setLayout(layout)
    #self.twList.setCellWidget(row, 2, w)
16,058
Python
.py
363
33.044077
106
0.623702
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,983
cbltransformer.py
evilhero_mylar/lib/comictaggerlib/cbltransformer.py
"""A class to manage modifying metadata specifically for CBL/CBI""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import os #import utils class CBLTransformer: def __init__(self, metadata, settings): self.metadata = metadata self.settings = settings def apply(self): # helper funcs def append_to_tags_if_unique(item): if item.lower() not in (tag.lower() for tag in self.metadata.tags): self.metadata.tags.append(item) def add_string_list_to_tags(str_list): if str_list is not None and str_list != "": items = [s.strip() for s in str_list.split(',')] for item in items: append_to_tags_if_unique(item) if self.settings.assume_lone_credit_is_primary: # helper def setLonePrimary(role_list): lone_credit = None count = 0 for c in self.metadata.credits: if c['role'].lower() in role_list: count += 1 lone_credit = c if count > 1: lone_credit = None break if lone_credit is not None: lone_credit['primary'] = True return lone_credit, count # need to loop three times, once for 'writer', 'artist', and then # 'penciler' if no artist setLonePrimary(['writer']) c, count = setLonePrimary(['artist']) if c is None and count == 0: c, count = setLonePrimary(['penciler', 'penciller']) if c is not None: c['primary'] = False self.metadata.addCredit(c['person'], 'Artist', True) if self.settings.copy_characters_to_tags: add_string_list_to_tags(self.metadata.characters) if self.settings.copy_teams_to_tags: add_string_list_to_tags(self.metadata.teams) if 
self.settings.copy_locations_to_tags: add_string_list_to_tags(self.metadata.locations) if self.settings.copy_storyarcs_to_tags: add_string_list_to_tags(self.metadata.storyArc) if self.settings.copy_notes_to_comments: if self.metadata.notes is not None: if self.metadata.comments is None: self.metadata.comments = "" else: self.metadata.comments += "\n\n" if self.metadata.notes not in self.metadata.comments: self.metadata.comments += self.metadata.notes if self.settings.copy_weblink_to_comments: if self.metadata.webLink is not None: if self.metadata.comments is None: self.metadata.comments = "" else: self.metadata.comments += "\n\n" if self.metadata.webLink not in self.metadata.comments: self.metadata.comments += self.metadata.webLink return self.metadata
3,643
Python
.py
76
35.026316
79
0.58291
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,984
filerenamer.py
evilhero_mylar/lib/comictaggerlib/filerenamer.py
"""Functions for renaming files based on metadata""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import datetime import utils from issuestring import IssueString class FileRenamer: def __init__(self, metadata): self.setMetadata(metadata) self.setTemplate( "%series% v%volume% #%issue% (of %issuecount%) (%year%)") self.smart_cleanup = True self.issue_zero_padding = 3 def setMetadata(self, metadata): self.metdata = metadata def setIssueZeroPadding(self, count): self.issue_zero_padding = count def setSmartCleanup(self, on): self.smart_cleanup = on def setTemplate(self, template): self.template = template def replaceToken(self, text, value, token): # helper func def isToken(word): return (word[0] == "%" and word[-1:] == "%") if value is not None: return text.replace(token, unicode(value)) else: if self.smart_cleanup: # smart cleanup means we want to remove anything appended to token if it's empty # (e.g "#%issue%" or "v%volume%") # (TODO: This could fail if there is more than one token appended together, I guess) text_list = text.split() # special case for issuecount, remove preceding non-token word, # as in "...(of %issuecount%)..." 
if token == '%issuecount%': for idx, word in enumerate(text_list): if token in word and not isToken(text_list[idx - 1]): text_list[idx - 1] = "" text_list = [x for x in text_list if token not in x] return " ".join(text_list) else: return text.replace(token, "") def determineName(self, filename, ext=None): md = self.metdata new_name = self.template preferred_encoding = utils.get_actual_preferred_encoding() # print(u"{0}".format(md)) new_name = self.replaceToken(new_name, md.series, '%series%') new_name = self.replaceToken(new_name, md.volume, '%volume%') if md.issue is not None: issue_str = u"{0}".format( IssueString(md.issue).asString(pad=self.issue_zero_padding)) else: issue_str = None new_name = self.replaceToken(new_name, issue_str, '%issue%') new_name = self.replaceToken(new_name, md.issueCount, '%issuecount%') new_name = self.replaceToken(new_name, md.year, '%year%') new_name = self.replaceToken(new_name, md.publisher, '%publisher%') new_name = self.replaceToken(new_name, md.title, '%title%') new_name = self.replaceToken(new_name, md.month, '%month%') month_name = None if md.month is not None: if (isinstance(md.month, str) and md.month.isdigit()) or isinstance( md.month, int): if int(md.month) in range(1, 13): dt = datetime.datetime(1970, int(md.month), 1, 0, 0) month_name = dt.strftime( u"%B".encode(preferred_encoding)).decode(preferred_encoding) new_name = self.replaceToken(new_name, month_name, '%month_name%') new_name = self.replaceToken(new_name, md.genre, '%genre%') new_name = self.replaceToken(new_name, md.language, '%language_code%') new_name = self.replaceToken( new_name, md.criticalRating, '%criticalrating%') new_name = self.replaceToken( new_name, md.alternateSeries, '%alternateseries%') new_name = self.replaceToken( new_name, md.alternateNumber, '%alternatenumber%') new_name = self.replaceToken( new_name, md.alternateCount, '%alternatecount%') new_name = self.replaceToken(new_name, md.imprint, '%imprint%') new_name = self.replaceToken(new_name, 
md.format, '%format%') new_name = self.replaceToken( new_name, md.maturityRating, '%maturityrating%') new_name = self.replaceToken(new_name, md.storyArc, '%storyarc%') new_name = self.replaceToken(new_name, md.seriesGroup, '%seriesgroup%') new_name = self.replaceToken(new_name, md.scanInfo, '%scaninfo%') if self.smart_cleanup: # remove empty braces,brackets, parentheses new_name = re.sub("\(\s*[-:]*\s*\)", "", new_name) new_name = re.sub("\[\s*[-:]*\s*\]", "", new_name) new_name = re.sub("\{\s*[-:]*\s*\}", "", new_name) # remove duplicate spaces new_name = u" ".join(new_name.split()) # remove remove duplicate -, _, new_name = re.sub("[-_]{2,}\s+", "-- ", new_name) new_name = re.sub("(\s--)+", " --", new_name) new_name = re.sub("(\s-)+", " -", new_name) # remove dash or double dash at end of line new_name = re.sub("[-]{1,2}\s*$", "", new_name) # remove duplicate spaces (again!) new_name = u" ".join(new_name.split()) if ext is None: ext = os.path.splitext(filename)[1] new_name += ext # some tweaks to keep various filesystems happy new_name = new_name.replace("/", "-") new_name = new_name.replace(" :", " -") new_name = new_name.replace(": ", " - ") new_name = new_name.replace(":", "-") new_name = new_name.replace("?", "") return new_name
6,107
Python
.py
122
39.598361
100
0.586456
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,985
issueidentifier.py
evilhero_mylar/lib/comictaggerlib/issueidentifier.py
"""A class to automatically identify a comic archive""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import StringIO #import math #import urllib2 #import urllib try: from PIL import Image from PIL import WebPImagePlugin pil_available = True except ImportError: pil_available = False from genericmetadata import GenericMetadata from comicvinetalker import ComicVineTalker, ComicVineTalkerException from imagehasher import ImageHasher from imagefetcher import ImageFetcher, ImageFetcherException from issuestring import IssueString import utils #from settings import ComicTaggerSettings #from comicvinecacher import ComicVineCacher class IssueIdentifierNetworkError(Exception): pass class IssueIdentifierCancelled(Exception): pass class IssueIdentifier: ResultNoMatches = 0 ResultFoundMatchButBadCoverScore = 1 ResultFoundMatchButNotFirstPage = 2 ResultMultipleMatchesWithBadImageScores = 3 ResultOneGoodMatch = 4 ResultMultipleGoodMatches = 5 def __init__(self, comic_archive, settings): self.comic_archive = comic_archive self.image_hasher = 1 self.onlyUseAdditionalMetaData = False # a decent hamming score, good enough to call it a match self.min_score_thresh = 16 # for alternate covers, be more stringent, since we're a bit more # scattershot in comparisons self.min_alternate_score_thresh = 12 # the min distance a hamming score must be to separate itself from # closest neighbor self.min_score_distance = 4 # a very strong hamming score, almost 
certainly the same image self.strong_score_thresh = 8 # used to eliminate series names that are too long based on our search # string self.length_delta_thresh = settings.id_length_delta_thresh # used to eliminate unlikely publishers self.publisher_blacklist = [ s.strip().lower() for s in settings.id_publisher_blacklist.split(',')] self.additional_metadata = GenericMetadata() self.output_function = IssueIdentifier.defaultWriteOutput self.callback = None self.coverUrlCallback = None self.search_result = self.ResultNoMatches self.cover_page_index = 0 self.cancel = False self.waitAndRetryOnRateLimit = False def setScoreMinThreshold(self, thresh): self.min_score_thresh = thresh def setScoreMinDistance(self, distance): self.min_score_distance = distance def setAdditionalMetadata(self, md): self.additional_metadata = md def setNameLengthDeltaThreshold(self, delta): self.length_delta_thresh = delta def setPublisherBlackList(self, blacklist): self.publisher_blacklist = blacklist def setHasherAlgorithm(self, algo): self.image_hasher = algo pass def setOutputFunction(self, func): self.output_function = func pass def calculateHash(self, image_data): if self.image_hasher == '3': return ImageHasher(data=image_data).dct_average_hash() elif self.image_hasher == '2': return ImageHasher(data=image_data).average_hash2() else: return ImageHasher(data=image_data).average_hash() def getAspectRatio(self, image_data): try: im = Image.open(StringIO.StringIO(image_data)) w, h = im.size return float(h) / float(w) except: return 1.5 def cropCover(self, image_data): im = Image.open(StringIO.StringIO(image_data)) w, h = im.size try: cropped_im = im.crop((int(w / 2), 0, w, h)) except Exception as e: sys.exc_clear() print "cropCover() error:", e return None output = StringIO.StringIO() cropped_im.save(output, format="PNG") cropped_image_data = output.getvalue() output.close() return cropped_image_data def setProgressCallback(self, cb_func): self.callback = cb_func def setCoverURLCallback(self, 
cb_func): self.coverUrlCallback = cb_func def getSearchKeys(self): ca = self.comic_archive search_keys = dict() search_keys['series'] = None search_keys['issue_number'] = None search_keys['month'] = None search_keys['year'] = None search_keys['issue_count'] = None if ca is None: return if self.onlyUseAdditionalMetaData: search_keys['series'] = self.additional_metadata.series search_keys['issue_number'] = self.additional_metadata.issue search_keys['year'] = self.additional_metadata.year search_keys['month'] = self.additional_metadata.month search_keys['issue_count'] = self.additional_metadata.issueCount return search_keys # see if the archive has any useful meta data for searching with if ca.hasCIX(): internal_metadata = ca.readCIX() elif ca.hasCBI(): internal_metadata = ca.readCBI() else: internal_metadata = ca.readCBI() # try to get some metadata from filename md_from_filename = ca.metadataFromFilename() # preference order: # 1. Additional metadata # 1. Internal metadata # 1. Filename metadata if self.additional_metadata.series is not None: search_keys['series'] = self.additional_metadata.series elif internal_metadata.series is not None: search_keys['series'] = internal_metadata.series else: search_keys['series'] = md_from_filename.series if self.additional_metadata.issue is not None: search_keys['issue_number'] = self.additional_metadata.issue elif internal_metadata.issue is not None: search_keys['issue_number'] = internal_metadata.issue else: search_keys['issue_number'] = md_from_filename.issue if self.additional_metadata.year is not None: search_keys['year'] = self.additional_metadata.year elif internal_metadata.year is not None: search_keys['year'] = internal_metadata.year else: search_keys['year'] = md_from_filename.year if self.additional_metadata.month is not None: search_keys['month'] = self.additional_metadata.month elif internal_metadata.month is not None: search_keys['month'] = internal_metadata.month else: search_keys['month'] = md_from_filename.month 
if self.additional_metadata.issueCount is not None: search_keys['issue_count'] = self.additional_metadata.issueCount elif internal_metadata.issueCount is not None: search_keys['issue_count'] = internal_metadata.issueCount else: search_keys['issue_count'] = md_from_filename.issueCount return search_keys @staticmethod def defaultWriteOutput(text): sys.stdout.write(text) sys.stdout.flush() def log_msg(self, msg, newline=True): self.output_function(msg) if newline: self.output_function("\n") def getIssueCoverMatchScore( self, comicVine, issue_id, primary_img_url, primary_thumb_url, page_url, localCoverHashList, useRemoteAlternates=False, useLog=True): # localHashes is a list of pre-calculated hashs. # useRemoteAlternates - indicates to use alternate covers from CV try: url_image_data = ImageFetcher().fetch( primary_thumb_url, blocking=True) except ImageFetcherException: self.log_msg( "Network issue while fetching cover image from Comic Vine. Aborting...") raise IssueIdentifierNetworkError if self.cancel: raise IssueIdentifierCancelled # alert the GUI, if needed if self.coverUrlCallback is not None: self.coverUrlCallback(url_image_data) remote_cover_list = [] item = dict() item['url'] = primary_img_url item['hash'] = self.calculateHash(url_image_data) remote_cover_list.append(item) if self.cancel: raise IssueIdentifierCancelled if useRemoteAlternates: alt_img_url_list = comicVine.fetchAlternateCoverURLs( issue_id, page_url) for alt_url in alt_img_url_list: try: alt_url_image_data = ImageFetcher().fetch( alt_url, blocking=True) except ImageFetcherException: self.log_msg( "Network issue while fetching alt. cover image from Comic Vine. 
Aborting...") raise IssueIdentifierNetworkError if self.cancel: raise IssueIdentifierCancelled # alert the GUI, if needed if self.coverUrlCallback is not None: self.coverUrlCallback(alt_url_image_data) item = dict() item['url'] = alt_url item['hash'] = self.calculateHash(alt_url_image_data) remote_cover_list.append(item) if self.cancel: raise IssueIdentifierCancelled if useLog and useRemoteAlternates: self.log_msg( "[{0} alt. covers]".format(len(remote_cover_list) - 1), False) if useLog: self.log_msg("[ ", False) score_list = [] done = False for local_cover_hash in localCoverHashList: for remote_cover_item in remote_cover_list: score = ImageHasher.hamming_distance( local_cover_hash, remote_cover_item['hash']) score_item = dict() score_item['score'] = score score_item['url'] = remote_cover_item['url'] score_item['hash'] = remote_cover_item['hash'] score_list.append(score_item) if useLog: self.log_msg("{0}".format(score), False) if score <= self.strong_score_thresh: # such a good score, we can quit now, since for sure we # have a winner done = True break if done: break if useLog: self.log_msg(" ]", False) best_score_item = min(score_list, key=lambda x: x['score']) return best_score_item # def validate(self, issue_id): # create hash list # score = self.getIssueMatchScore(issue_id, hash_list, useRemoteAlternates = True) # if score < 20: # return True # else: # return False def search(self): ca = self.comic_archive self.match_list = [] self.cancel = False self.search_result = self.ResultNoMatches if not pil_available: self.log_msg( "Python Imaging Library (PIL) is not available and is needed for issue identification.") return self.match_list if not ca.seemsToBeAComicArchive(): self.log_msg( "Sorry, but " + opts.filename + " is not a comic archive!") return self.match_list cover_image_data = ca.getPage(self.cover_page_index) cover_hash = self.calculateHash(cover_image_data) # check the aspect ratio # if it's wider than it is high, it's probably a two page spread # if so, 
crop it and calculate a second hash narrow_cover_hash = None aspect_ratio = self.getAspectRatio(cover_image_data) if aspect_ratio < 1.0: right_side_image_data = self.cropCover(cover_image_data) if right_side_image_data is not None: narrow_cover_hash = self.calculateHash(right_side_image_data) #self.log_msg("Cover hash = {0:016x}".format(cover_hash)) keys = self.getSearchKeys() # normalize the issue number keys['issue_number'] = IssueString(keys['issue_number']).asString() # we need, at minimum, a series and issue number if keys['series'] is None or keys['issue_number'] is None: self.log_msg("Not enough info for a search!") return [] self.log_msg("Going to search for:") self.log_msg("\tSeries: " + keys['series']) self.log_msg("\tIssue: " + keys['issue_number']) if keys['issue_count'] is not None: self.log_msg("\tCount: " + str(keys['issue_count'])) if keys['year'] is not None: self.log_msg("\tYear: " + str(keys['year'])) if keys['month'] is not None: self.log_msg("\tMonth: " + str(keys['month'])) #self.log_msg("Publisher Blacklist: " + str(self.publisher_blacklist)) comicVine = ComicVineTalker() comicVine.wait_for_rate_limit = self.waitAndRetryOnRateLimit comicVine.setLogFunc(self.output_function) # self.log_msg(("Searching for " + keys['series'] + "...") self.log_msg(u"Searching for {0} #{1} ...".format( keys['series'], keys['issue_number'])) try: cv_search_results = comicVine.searchForSeries(keys['series']) except ComicVineTalkerException: self.log_msg( "Network issue while searching for series. 
Aborting...") return [] #self.log_msg("Found " + str(len(cv_search_results)) + " initial results") if self.cancel: return [] if cv_search_results is None: return [] series_second_round_list = [] #self.log_msg("Removing results with too long names, banned publishers, or future start dates") for item in cv_search_results: length_approved = False publisher_approved = True date_approved = True # remove any series that starts after the issue year if keys['year'] is not None and str( keys['year']).isdigit() and item['start_year'] is not None and str( item['start_year']).isdigit(): if int(keys['year']) < int(item['start_year']): date_approved = False # assume that our search name is close to the actual name, say # within ,e.g. 5 chars shortened_key = utils.removearticles(keys['series']) shortened_item_name = utils.removearticles(item['name']) if len(shortened_item_name) < ( len(shortened_key) + self.length_delta_thresh): length_approved = True # remove any series from publishers on the blacklist if item['publisher'] is not None: publisher = item['publisher']['name'] if publisher is not None and publisher.lower( ) in self.publisher_blacklist: publisher_approved = False if length_approved and publisher_approved and date_approved: series_second_round_list.append(item) self.log_msg( "Searching in " + str(len(series_second_round_list)) + " series") if self.callback is not None: self.callback(0, len(series_second_round_list)) # now sort the list by name length series_second_round_list.sort( key=lambda x: len(x['name']), reverse=False) # build a list of volume IDs volume_id_list = list() for series in series_second_round_list: volume_id_list.append(series['id']) try: issue_list = comicVine.fetchIssuesByVolumeIssueNumAndYear( volume_id_list, keys['issue_number'], keys['year']) except ComicVineTalkerException: self.log_msg( "Network issue while searching for series details. 
Aborting...") return [] if issue_list is None: return [] shortlist = list() # now re-associate the issues and volumes for issue in issue_list: for series in series_second_round_list: if series['id'] == issue['volume']['id']: shortlist.append((series, issue)) break if keys['year'] is None: self.log_msg(u"Found {0} series that have an issue #{1}".format( len(shortlist), keys['issue_number'])) else: self.log_msg( u"Found {0} series that have an issue #{1} from {2}".format( len(shortlist), keys['issue_number'], keys['year'])) # now we have a shortlist of volumes with the desired issue number # Do first round of cover matching counter = len(shortlist) for series, issue in shortlist: if self.callback is not None: self.callback(counter, len(shortlist) * 3) counter += 1 self.log_msg(u"Examining covers for ID: {0} {1} ({2}) ...".format( series['id'], series['name'], series['start_year']), newline=False) # parse out the cover date day, month, year = comicVine.parseDateStr(issue['cover_date']) # Now check the cover match against the primary image hash_list = [cover_hash] if narrow_cover_hash is not None: hash_list.append(narrow_cover_hash) try: image_url = issue['image']['super_url'] thumb_url = issue['image']['thumb_url'] page_url = issue['site_detail_url'] score_item = self.getIssueCoverMatchScore( comicVine, issue['id'], image_url, thumb_url, page_url, hash_list, useRemoteAlternates=False) except: self.match_list = [] return self.match_list match = dict() match['series'] = u"{0} ({1})".format( series['name'], series['start_year']) match['distance'] = score_item['score'] match['issue_number'] = keys['issue_number'] match['cv_issue_count'] = series['count_of_issues'] match['url_image_hash'] = score_item['hash'] match['issue_title'] = issue['name'] match['issue_id'] = issue['id'] match['volume_id'] = series['id'] match['month'] = month match['year'] = year match['publisher'] = None if series['publisher'] is not None: match['publisher'] = series['publisher']['name'] 
match['image_url'] = image_url match['thumb_url'] = thumb_url match['page_url'] = page_url match['description'] = issue['description'] self.match_list.append(match) self.log_msg(" --> {0}".format(match['distance']), newline=False) self.log_msg("") if len(self.match_list) == 0: self.log_msg(":-(no matches!") self.search_result = self.ResultNoMatches return self.match_list # sort list by image match scores self.match_list.sort(key=lambda k: k['distance']) l = [] for i in self.match_list: l.append(i['distance']) self.log_msg("Compared to covers in {0} issue(s):".format( len(self.match_list)), newline=False) self.log_msg(str(l)) def print_match(item): self.log_msg(u"-----> {0} #{1} {2} ({3}/{4}) -- score: {5}".format( item['series'], item['issue_number'], item['issue_title'], item['month'], item['year'], item['distance'])) best_score = self.match_list[0]['distance'] if best_score >= self.min_score_thresh: # we have 1 or more low-confidence matches (all bad cover scores) # look at a few more pages in the archive, and also alternate # covers online self.log_msg( "Very weak scores for the cover. 
Analyzing alternate pages and covers...") hash_list = [cover_hash] if narrow_cover_hash is not None: hash_list.append(narrow_cover_hash) for i in range(1, min(3, ca.getNumberOfPages())): image_data = ca.getPage(i) page_hash = self.calculateHash(image_data) hash_list.append(page_hash) second_match_list = [] counter = 2 * len(self.match_list) for m in self.match_list: if self.callback is not None: self.callback(counter, len(self.match_list) * 3) counter += 1 self.log_msg( u"Examining alternate covers for ID: {0} {1} ...".format( m['volume_id'], m['series']), newline=False) try: score_item = self.getIssueCoverMatchScore( comicVine, m['issue_id'], m['image_url'], m['thumb_url'], m['page_url'], hash_list, useRemoteAlternates=True) except: self.match_list = [] return self.match_list self.log_msg("--->{0}".format(score_item['score'])) self.log_msg("") if score_item['score'] < self.min_alternate_score_thresh: second_match_list.append(m) m['distance'] = score_item['score'] if len(second_match_list) == 0: if len(self.match_list) == 1: self.log_msg("No matching pages in the issue.") self.log_msg( u"--------------------------------------------------------------------------") print_match(self.match_list[0]) self.log_msg( u"--------------------------------------------------------------------------") self.search_result = self.ResultFoundMatchButBadCoverScore else: self.log_msg( u"--------------------------------------------------------------------------") self.log_msg( u"Multiple bad cover matches! Need to use other info...") self.log_msg( u"--------------------------------------------------------------------------") self.search_result = self.ResultMultipleMatchesWithBadImageScores return self.match_list else: # We did good, found something! 
self.log_msg("Success in secondary/alternate cover matching!") self.match_list = second_match_list # sort new list by image match scores self.match_list.sort(key=lambda k: k['distance']) best_score = self.match_list[0]['distance'] self.log_msg( "[Second round cover matching: best score = {0}]".format(best_score)) # now drop down into the rest of the processing if self.callback is not None: self.callback(99, 100) # now pare down list, remove any item more than specified distant from # the top scores for item in reversed(self.match_list): if item['distance'] > best_score + self.min_score_distance: self.match_list.remove(item) # One more test for the case choosing limited series first issue vs a trade with the same cover: # if we have a given issue count > 1 and the volume from CV has # count==1, remove it from match list if len(self.match_list) >= 2 and keys[ 'issue_count'] is not None and keys['issue_count'] != 1: new_list = list() for match in self.match_list: if match['cv_issue_count'] != 1: new_list.append(match) else: self.log_msg( "Removing volume {0} [{1}] from consideration (only 1 issue)".format( match['series'], match['volume_id'])) if len(new_list) > 0: self.match_list = new_list if len(self.match_list) == 1: self.log_msg( u"--------------------------------------------------------------------------") print_match(self.match_list[0]) self.log_msg( u"--------------------------------------------------------------------------") self.search_result = self.ResultOneGoodMatch elif len(self.match_list) == 0: self.log_msg( u"--------------------------------------------------------------------------") self.log_msg("No matches found :(") self.log_msg( u"--------------------------------------------------------------------------") self.search_result = self.ResultNoMatches else: # we've got multiple good matches: self.log_msg("More than one likely candidate.") self.search_result = self.ResultMultipleGoodMatches self.log_msg( 
u"--------------------------------------------------------------------------") for item in self.match_list: print_match(item) self.log_msg( u"--------------------------------------------------------------------------") return self.match_list
26,956
Python
.py
591
33.072758
104
0.551803
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,986
issueselectionwindow.py
evilhero_mylar/lib/comictaggerlib/issueselectionwindow.py
"""A PyQT4 dialog to select specific issue from list""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import sys #import os #import re from PyQt4 import QtCore, QtGui, uic #from PyQt4.QtCore import QUrl, pyqtSignal, QByteArray #from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest from comicvinetalker import ComicVineTalker, ComicVineTalkerException from settings import ComicTaggerSettings from issuestring import IssueString from coverimagewidget import CoverImageWidget from comictaggerlib.ui.qtutils import reduceWidgetFontSize #from imagefetcher import ImageFetcher #import utils class IssueNumberTableWidgetItem(QtGui.QTableWidgetItem): def __lt__(self, other): selfStr = self.data(QtCore.Qt.DisplayRole).toString() otherStr = other.data(QtCore.Qt.DisplayRole).toString() return (IssueString(selfStr).asFloat() < IssueString(otherStr).asFloat()) class IssueSelectionWindow(QtGui.QDialog): volume_id = 0 def __init__(self, parent, settings, series_id, issue_number): super(IssueSelectionWindow, self).__init__(parent) uic.loadUi( ComicTaggerSettings.getUIFile('issueselectionwindow.ui'), self) self.coverWidget = CoverImageWidget( self.coverImageContainer, CoverImageWidget.AltCoverMode) gridlayout = QtGui.QGridLayout(self.coverImageContainer) gridlayout.addWidget(self.coverWidget) gridlayout.setContentsMargins(0, 0, 0, 0) reduceWidgetFontSize(self.twList) reduceWidgetFontSize(self.teDescription, 1) self.setWindowFlags(self.windowFlags() | 
QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint) self.series_id = series_id self.settings = settings self.url_fetch_thread = None if issue_number is None or issue_number == "": self.issue_number = 1 else: self.issue_number = issue_number self.initial_id = None self.performQuery() self.twList.resizeColumnsToContents() self.twList.currentItemChanged.connect(self.currentItemChanged) self.twList.cellDoubleClicked.connect(self.cellDoubleClicked) # now that the list has been sorted, find the initial record, and # select it if self.initial_id is None: self.twList.selectRow(0) else: for r in range(0, self.twList.rowCount()): issue_id, b = self.twList.item( r, 0).data(QtCore.Qt.UserRole).toInt() if (issue_id == self.initial_id): self.twList.selectRow(r) break def performQuery(self): QtGui.QApplication.setOverrideCursor( QtGui.QCursor(QtCore.Qt.WaitCursor)) try: comicVine = ComicVineTalker() volume_data = comicVine.fetchVolumeData(self.series_id) self.issue_list = comicVine.fetchIssuesByVolume(self.series_id) except ComicVineTalkerException as e: QtGui.QApplication.restoreOverrideCursor() if e.code == ComicVineTalkerException.RateLimit: QtGui.QMessageBox.critical( self, self.tr("Comic Vine Error"), ComicVineTalker.getRateLimitMessage()) else: QtGui.QMessageBox.critical( self, self.tr("Network Issue"), self.tr("Could not connect to Comic Vine to list issues!")) return while self.twList.rowCount() > 0: self.twList.removeRow(0) self.twList.setSortingEnabled(False) row = 0 for record in self.issue_list: self.twList.insertRow(row) item_text = record['issue_number'] item = IssueNumberTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setData(QtCore.Qt.UserRole, record['id']) item.setData(QtCore.Qt.DisplayRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 0, item) item_text = record['cover_date'] if item_text is None: item_text = "" # remove the day of "YYYY-MM-DD" parts = 
item_text.split("-") if len(parts) > 1: item_text = parts[0] + "-" + parts[1] item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 1, item) item_text = record['name'] if item_text is None: item_text = "" item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 2, item) if IssueString( record['issue_number']).asString().lower() == IssueString( self.issue_number).asString().lower(): self.initial_id = record['id'] row += 1 self.twList.setSortingEnabled(True) self.twList.sortItems(0, QtCore.Qt.AscendingOrder) QtGui.QApplication.restoreOverrideCursor() def cellDoubleClicked(self, r, c): self.accept() def currentItemChanged(self, curr, prev): if curr is None: return if prev is not None and prev.row() == curr.row(): return self.issue_id, b = self.twList.item( curr.row(), 0).data(QtCore.Qt.UserRole).toInt() # list selection was changed, update the the issue cover for record in self.issue_list: if record['id'] == self.issue_id: self.issue_number = record['issue_number'] self.coverWidget.setIssueID(int(self.issue_id)) if record['description'] is None: self.teDescription.setText("") else: self.teDescription.setText(record['description']) break
6,868
Python
.py
147
35.714286
79
0.634621
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,987
settings.py
evilhero_mylar/lib/comictaggerlib/settings.py
"""Settings class for ComicTagger app""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import platform import codecs import uuid import utils try: config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..') if config_path not in sys.path: sys.path.append(config_path) from configobj import ConfigObj except ImportError: print "Unable to use configobj module. This is a CRITICAL error and ComicTagger cannot proceed. Exiting." class ComicTaggerSettings: @staticmethod def getSettingsFolder(): filename_encoding = sys.getfilesystemencoding() folder = os.path.join(ComicTaggerSettings.baseDir(), 'ct_settings') if folder is not None: folder = folder.decode(filename_encoding) return folder frozen_win_exe_path = None @staticmethod def baseDir(): if getattr(sys, 'frozen', None): if platform.system() == "Darwin": return sys._MEIPASS else: # Windows # Preserve this value, in case sys.argv gets changed importing # a plugin script if ComicTaggerSettings.frozen_win_exe_path is None: ComicTaggerSettings.frozen_win_exe_path = os.path.dirname( os.path.abspath(sys.argv[0])) return ComicTaggerSettings.frozen_win_exe_path else: return os.path.dirname(os.path.abspath(__file__)) @staticmethod def getGraphic(filename): graphic_folder = os.path.join( ComicTaggerSettings.baseDir(), 'graphics') return os.path.join(graphic_folder, filename) @staticmethod def getUIFile(filename): ui_folder = os.path.join(ComicTaggerSettings.baseDir(), 'ui') 
return os.path.join(ui_folder, filename) def setDefaultValues(self): # General Settings self.rar_exe_path = "" self.unrar_exe_path = "" self.allow_cbi_in_rar = True self.check_for_new_version = True self.send_usage_stats = False # automatic settings self.install_id = uuid.uuid4().hex self.last_selected_save_data_style = 0 self.last_selected_load_data_style = 0 self.last_opened_folder = "" self.last_main_window_width = 0 self.last_main_window_height = 0 self.last_main_window_x = 0 self.last_main_window_y = 0 self.last_form_side_width = -1 self.last_list_side_width = -1 self.last_filelist_sorted_column = -1 self.last_filelist_sorted_order = 0 # identifier settings self.id_length_delta_thresh = 5 self.id_publisher_blacklist = "Panini Comics, Abril, Planeta DeAgostini, Editorial Televisa" # Show/ask dialog flags self.ask_about_cbi_in_rar = True self.show_disclaimer = True self.dont_notify_about_this_version = "" self.ask_about_usage_stats = True self.show_no_unrar_warning = True # filename parsing settings self.parse_scan_info = True # Comic Vine settings self.use_series_start_as_volume = False self.clear_form_before_populating_from_cv = False self.remove_html_tables = False self.cv_api_key = "" # CBL Tranform settings self.assume_lone_credit_is_primary = False self.copy_characters_to_tags = False self.copy_teams_to_tags = False self.copy_locations_to_tags = False self.copy_storyarcs_to_tags = False self.copy_notes_to_comments = False self.copy_weblink_to_comments = False self.apply_cbl_transform_on_cv_import = False self.apply_cbl_transform_on_bulk_operation = False # Rename settings self.rename_template = "%series% #%issue% (%year%)" self.rename_issue_number_padding = 3 self.rename_use_smart_string_cleanup = True self.rename_extension_based_on_archive = True # Auto-tag stickies self.save_on_low_confidence = False self.dont_use_year_when_identifying = False self.assume_1_if_no_issue_num = False self.ignore_leading_numbers_in_filename = False 
self.remove_archive_after_successful_match = False self.wait_and_retry_on_rate_limit = False def __init__(self): self.settings_file = "" self.folder = "" self.setDefaultValues() self.folder = ComicTaggerSettings.getSettingsFolder() if not os.path.exists(self.folder): os.makedirs(self.folder) self.settings_file = os.path.join(self.folder, "settings.ini") self.CFG = ConfigObj(self.settings_file, encoding='utf-8') # if config file doesn't exist, write one out if not os.path.exists(self.settings_file): self.save() else: self.load() # take a crack at finding rar exes, if not set already if self.rar_exe_path == "": if platform.system() == "Windows": # look in some likely places for Windows machines if os.path.exists("C:\Program Files\WinRAR\Rar.exe"): self.rar_exe_path = "C:\Program Files\WinRAR\Rar.exe" elif os.path.exists("C:\Program Files (x86)\WinRAR\Rar.exe"): self.rar_exe_path = "C:\Program Files (x86)\WinRAR\Rar.exe" else: # see if it's in the path of unix user if utils.which("rar") is not None: self.rar_exe_path = utils.which("rar") #if self.rar_exe_path != "": # self.save() if self.unrar_exe_path == "": if platform.system() != "Windows": # see if it's in the path of unix user if utils.which("unrar") is not None: self.unrar_exe_path = utils.which("unrar") #if self.unrar_exe_path != "": # self.save() # make sure unrar/rar programs are now in the path for the UnRAR class to # use utils.addtopath(os.path.dirname(self.unrar_exe_path)) utils.addtopath(os.path.dirname(self.rar_exe_path)) def reset(self): os.unlink(self.settings_file) self.__init__() def CheckSection(self, sec): """ Check if INI section exists, if not create it """ try: self.CFG[sec] return True except: self.CFG[sec] = {} return False ################################################################################ # Check_setting_int # ################################################################################ def check_setting_int(self, config, cfg_name, item_name, def_val): try: my_val = 
int(config[cfg_name][item_name]) except: my_val = def_val try: config[cfg_name][item_name] = my_val except: config[cfg_name] = {} config[cfg_name][item_name] = my_val return my_val ################################################################################ # Check_setting_str # ################################################################################ def check_setting_str(self, config, cfg_name, item_name, def_val, log=True): try: my_val = config[cfg_name][item_name] except: my_val = def_val try: config[cfg_name][item_name] = my_val except: config[cfg_name] = {} config[cfg_name][item_name] = my_val return my_val def load(self): self.rar_exe_path = self.check_setting_str(self.CFG, 'settings', 'rar_exe_path', '') self.unrar_exe_path = self.check_setting_str(self.CFG, 'settings', 'unurar_exe_path', '') self.check_for_new_version = bool(self.check_setting_int(self.CFG, 'settings', 'check_for_new_version', 0)) self.send_usage_stats = bool(self.check_setting_int(self.CFG, 'settings', 'send_usage_stats', 0)) self.install_id = self.check_setting_str(self.CFG, 'auto', 'install_id', '') self.last_selected_load_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_load_data_style', '') self.last_selected_save_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_save_data_style', '') self.last_selected_save_data_style = self.check_setting_str(self.CFG, 'auto', 'last_selected_save_data_style', '') self.last_opened_folder = self.check_setting_str(self.CFG, 'auto', 'last_opened_folder', '') self.last_main_window_width = self.check_setting_str(self.CFG, 'auto', 'last_main_window_width', '') self.last_main_window_height = self.check_setting_str(self.CFG, 'auto', 'last_main_window_height', '') self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width', '') self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width', '') self.last_filelist_sorted_column = 
self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_column', '') self.last_filelist_sorted_order = self.check_setting_str(self.CFG, 'auto', 'last_filelist_sorted_order', '') self.last_main_window_x = self.check_setting_str(self.CFG, 'auto', 'last_main_window_x', '') self.last_main_window_y = self.check_setting_str(self.CFG, 'auto', 'last_main_window_y','') self.last_form_side_width = self.check_setting_str(self.CFG, 'auto', 'last_form_side_width','') self.last_list_side_width = self.check_setting_str(self.CFG, 'auto', 'last_list_side_width','') self.id_length_delta_thresh = self.check_setting_str(self.CFG, 'identifier', 'id_length_delta_thresh', '') self.id_publisher_blacklist = self.check_setting_str(self.CFG, 'identifier', 'id_publisher_blacklist', '') self.parse_scan_info = bool(self.check_setting_int(self.CFG, 'filenameparser', 'parse_scan_info', 0)) self.ask_about_cbi_in_rar = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_cbi_in_rar', 0)) self.show_disclaimer = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_disclaimer', 0)) self.dont_notify_about_this_version = self.check_setting_str(self.CFG, 'dialogflags', 'dont_notify_about_this_version', '') self.ask_about_usage_stats = bool(self.check_setting_int(self.CFG, 'dialogflags', 'ask_about_usage_stats', 0)) self.show_no_unrar_warning = bool(self.check_setting_int(self.CFG, 'dialogflags', 'show_no_unrar_warning', 0)) self.use_series_start_as_volume = bool(self.check_setting_int(self.CFG, 'comicvine', 'use_series_start_as_volume', 0)) self.clear_form_before_populating_from_cv = bool(self.check_setting_int(self.CFG, 'comicvine', 'clear_form_before_populating_from_cv', 0)) self.remove_html_tables = bool(self.check_setting_int(self.CFG, 'comicvine', 'remove_html_tables', 0)) self.cv_api_key = self.check_setting_str(self.CFG, 'comicvine', 'cv_api_key', '') self.assume_lone_credit_is_primary = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'assume_lone_credit_is_primary', 
0)) self.copy_characters_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_characters_to_tags', 0)) self.copy_teams_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_teams_to_tags', 0)) self.copy_locations_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_locations_to_tags', 0)) self.copy_notes_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_notes_to_comments', 0)) self.copy_storyarcs_to_tags = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_storyarcs_to_tags', 0)) self.copy_weblink_to_comments = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'copy_weblink_to_comments', 0)) self.apply_cbl_transform_on_cv_import = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_cv_import', 0)) self.apply_cbl_transform_on_bulk_operation = bool(self.check_setting_int(self.CFG, 'cbl_transform', 'apply_cbl_transform_on_bulk_operation', 0)) self.rename_template = bool(self.check_setting_int(self.CFG, 'rename', 'rename_template', 0)) self.rename_issue_number_padding = self.check_setting_str(self.CFG, 'rename', 'rename_issue_number_padding', '') self.rename_use_smart_string_cleanup = bool(self.check_setting_int(self.CFG, 'rename', 'rename_use_smart_string_cleanup', 0)) self.rename_extension_based_on_archive = bool(self.check_setting_int(self.CFG, 'rename', 'rename_extension_based_on_archive', 0)) self.save_on_low_confidence = bool(self.check_setting_int(self.CFG, 'autotag', 'save_on_low_confidence', 0)) self.dont_use_year_when_identifying = bool(self.check_setting_int(self.CFG, 'autotag', 'dont_use_year_when_identifying', 0)) self.assume_1_if_no_issue_num = bool(self.check_setting_int(self.CFG, 'autotag', 'assume_1_if_no_issue_num', 0)) self.ignore_leading_numbers_in_filename = bool(self.check_setting_int(self.CFG, 'autotag', 'ignore_leading_numbers_in_filename', 0)) self.remove_archive_after_successful_match = bool(self.check_setting_int(self.CFG, 
'autotag', 'remove_archive_after_successful_match', 0)) self.wait_and_retry_on_rate_limit = bool(self.check_setting_int(self.CFG, 'autotag', 'wait_and_retry_on_rate_limit', 0)) def save(self): new_config = ConfigObj() new_config.filename = self.settings_file new_config.encoding = 'UTF8' new_config['settings'] = {} new_config['settings']['check_for_new_version'] = self.check_for_new_version new_config['settings']['rar_exe_path'] = self.rar_exe_path new_config['settings']['unrar_exe_path'] = self.unrar_exe_path new_config['settings']['send_usage_stats'] = self.send_usage_stats new_config.write() new_config['auto'] = {} new_config['auto']['install_id'] = self.install_id new_config['auto']['last_selected_load_data_style'] = self.last_selected_load_data_style new_config['auto']['last_selected_save_data_style'] = self.last_selected_save_data_style new_config['auto']['last_opened_folder'] = self.last_opened_folder new_config['auto']['last_main_window_width'] = self.last_main_window_width new_config['auto']['last_main_window_height'] = self.last_main_window_height new_config['auto']['last_main_window_x'] = self.last_main_window_x new_config['auto']['last_main_window_y'] = self.last_main_window_y new_config['auto']['last_form_side_width'] = self.last_form_side_width new_config['auto']['last_list_side_width'] = self.last_list_side_width new_config['auto']['last_filelist_sorted_column'] = self.last_filelist_sorted_column new_config['auto']['last_filelist_sorted_order'] = self.last_filelist_sorted_order new_config['identifier'] = {} new_config['identifier']['id_length_delta_thresh'] = self.id_length_delta_thresh new_config['identifier']['id_publisher_blacklist'] = self.id_publisher_blacklist new_config['dialogflags'] = {} new_config['dialogflags']['ask_about_cbi_in_rar'] = self.ask_about_cbi_in_rar new_config['dialogflags']['show_disclaimer'] = self.show_disclaimer new_config['dialogflags']['dont_notify_about_this_version'] = self.dont_notify_about_this_version 
new_config['dialogflags']['ask_about_usage_stats'] = self.ask_about_usage_stats new_config['dialogflags']['show_no_unrar_warning'] = self.show_no_unrar_warning new_config['filenameparser'] = {} new_config['filenameparser']['parse_scan_info'] = self.parse_scan_info new_config['comicvine'] = {} new_config['comicvine']['use_series_start_as_volume'] = self.use_series_start_as_volume new_config['comicvine']['clear_form_before_populating_from_cv'] = self.clear_form_before_populating_from_cv new_config['comicvine']['remove_html_tables'] = self.remove_html_tables new_config['comicvine']['cv_api_key'] = self.cv_api_key new_config['cbl_transform'] = {} new_config['cbl_transform']['assume_lone_credit_is_primary'] = self.assume_lone_credit_is_primary new_config['cbl_transform']['copy_characters_to_tags'] = self.copy_characters_to_tags new_config['cbl_transform']['copy_teams_to_tags'] = self.copy_teams_to_tags new_config['cbl_transform']['copy_locations_to_tags'] = self.copy_locations_to_tags new_config['cbl_transform']['copy_storyarcs_to_tags'] = self.copy_storyarcs_to_tags new_config['cbl_transform']['copy_notes_to_comments'] = self.copy_notes_to_comments new_config['cbl_transform']['copy_weblink_to_comments'] = self.copy_weblink_to_comments new_config['cbl_transform']['apply_cbl_transform_on_cv_import'] = self.apply_cbl_transform_on_cv_import new_config['cbl_transform']['apply_cbl_transform_on_bulk_operation'] = self.apply_cbl_transform_on_bulk_operation new_config['rename'] = {} new_config['rename']['rename_template'] = self.rename_template new_config['rename']['rename_issue_number_padding'] = self.rename_issue_number_padding new_config['rename']['rename_use_smart_string_cleanup'] = self.rename_use_smart_string_cleanup new_config['rename']['rename_extension_based_on_archive'] = self.rename_extension_based_on_archive new_config['autotag'] = {} new_config['autotag']['save_on_low_confidence'] = self.save_on_low_confidence new_config['autotag']['dont_use_year_when_identifying'] 
= self.dont_use_year_when_identifying new_config['autotag']['assume_1_if_no_issue_num'] = self.assume_1_if_no_issue_num new_config['autotag']['ignore_leading_numbers_in_filename'] = self.ignore_leading_numbers_in_filename new_config['autotag']['remove_archive_after_successful_match'] = self.remove_archive_after_successful_match new_config['autotag']['wait_and_retry_on_rate_limit'] = self.wait_and_retry_on_rate_limit # make sure the basedir is cached, in case we're on Windows running a # script from frozen binary ComicTaggerSettings.baseDir()
18,839
Python
.py
307
53.068404
145
0.648119
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,988
crediteditorwindow.py
evilhero_mylar/lib/comictaggerlib/crediteditorwindow.py
"""A PyQT4 dialog to edit credits""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings class CreditEditorWindow(QtGui.QDialog): ModeEdit = 0 ModeNew = 1 def __init__(self, parent, mode, role, name, primary): super(CreditEditorWindow, self).__init__(parent) uic.loadUi( ComicTaggerSettings.getUIFile('crediteditorwindow.ui'), self) self.mode = mode if self.mode == self.ModeEdit: self.setWindowTitle("Edit Credit") else: self.setWindowTitle("New Credit") # Add the entries to the role combobox self.cbRole.addItem("") self.cbRole.addItem("Writer") self.cbRole.addItem("Artist") self.cbRole.addItem("Penciller") self.cbRole.addItem("Inker") self.cbRole.addItem("Colorist") self.cbRole.addItem("Letterer") self.cbRole.addItem("Cover Artist") self.cbRole.addItem("Editor") self.cbRole.addItem("Other") self.cbRole.addItem("Plotter") self.cbRole.addItem("Scripter") self.leName.setText(name) if role is not None and role != "": i = self.cbRole.findText(role) if i == -1: self.cbRole.setEditText(role) else: self.cbRole.setCurrentIndex(i) if primary: self.cbPrimary.setCheckState(QtCore.Qt.Checked) self.cbRole.currentIndexChanged.connect(self.roleChanged) self.cbRole.editTextChanged.connect(self.roleChanged) self.updatePrimaryButton() def updatePrimaryButton(self): enabled = self.currentRoleCanBePrimary() self.cbPrimary.setEnabled(enabled) def currentRoleCanBePrimary(self): role = 
self.cbRole.currentText() if str(role).lower() == "writer" or str(role).lower() == "artist": return True else: return False def roleChanged(self, s): self.updatePrimaryButton() def getCredits(self): primary = self.currentRoleCanBePrimary() and self.cbPrimary.isChecked() return self.cbRole.currentText(), self.leName.text(), primary def accept(self): if self.cbRole.currentText() == "" or self.leName.text() == "": QtGui.QMessageBox.warning(self, self.tr("Whoops"), self.tr( "You need to enter both role and name for a credit.")) else: QtGui.QDialog.accept(self)
3,070
Python
.py
71
35.070423
79
0.66308
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,989
imagehasher.py
evilhero_mylar/lib/comictaggerlib/imagehasher.py
"""A class to manage creating image content hashes, and calculate hamming distances""" # Copyright 2013 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import StringIO import sys from functools import reduce try: from PIL import Image from PIL import WebPImagePlugin pil_available = True except ImportError: pil_available = False class ImageHasher(object): def __init__(self, path=None, data=None, width=8, height=8): #self.hash_size = size self.width = width self.height = height if path is None and data is None: raise IOError else: try: if path is not None: self.image = Image.open(path) else: self.image = Image.open(StringIO.StringIO(data)) except: print("Image data seems corrupted!") # just generate a bogus image self.image = Image.new("L", (1, 1)) def average_hash(self): try: image = self.image.resize( (self.width, self.height), Image.ANTIALIAS).convert("L") except Exception as e: sys.exc_clear() print "average_hash error:", e return long(0) pixels = list(image.getdata()) avg = sum(pixels) / len(pixels) def compare_value_to_avg(i): return (1 if i > avg else 0) bitlist = map(compare_value_to_avg, pixels) # build up an int value from the bit list, one bit at a time def set_bit(x, idx_val): (idx, val) = idx_val return (x | (val << idx)) result = reduce(set_bit, enumerate(bitlist), 0) # print("{0:016x}".format(result)) return result def average_hash2(self): pass """ # Got this one from somewhere on the net. Not a clue how the 'convolve2d' # works! 
from numpy import array from scipy.signal import convolve2d im = self.image.resize((self.width, self.height), Image.ANTIALIAS).convert('L') in_data = array((im.getdata())).reshape(self.width, self.height) filt = array([[0,1,0],[1,-4,1],[0,1,0]]) filt_data = convolve2d(in_data,filt,mode='same',boundary='symm').flatten() result = reduce(lambda x, (y, z): x | (z << y), enumerate(map(lambda i: 0 if i < 0 else 1, filt_data)), 0) #print("{0:016x}".format(result)) return result """ def dct_average_hash(self): pass """ # Algorithm source: http://syntaxcandy.blogspot.com/2012/08/perceptual-hash.html 1. Reduce size. Like Average Hash, pHash starts with a small image. However, the image is larger than 8x8; 32x32 is a good size. This is really done to simplify the DCT computation and not because it is needed to reduce the high frequencies. 2. Reduce color. The image is reduced to a grayscale just to further simplify the number of computations. 3. Compute the DCT. The DCT separates the image into a collection of frequencies and scalars. While JPEG uses an 8x8 DCT, this algorithm uses a 32x32 DCT. 4. Reduce the DCT. This is the magic step. While the DCT is 32x32, just keep the top-left 8x8. Those represent the lowest frequencies in the picture. 5. Compute the average value. Like the Average Hash, compute the mean DCT value (using only the 8x8 DCT low-frequency values and excluding the first term since the DC coefficient can be significantly different from the other values and will throw off the average). Thanks to David Starkweather for the added information about pHash. He wrote: "the dct hash is based on the low 2D DCT coefficients starting at the second from lowest, leaving out the first DC term. This excludes completely flat image information (i.e. solid colors) from being included in the hash description." 6. Further reduce the DCT. This is the magic step. Set the 64 hash bits to 0 or 1 depending on whether each of the 64 DCT values is above or below the average value. 
The result doesn't tell us the actual low frequencies; it just tells us the very-rough relative scale of the frequencies to the mean. The result will not vary as long as the overall structure of the image remains the same; this can survive gamma and color histogram adjustments without a problem. 7. Construct the hash. Set the 64 bits into a 64-bit integer. The order does not matter, just as long as you are consistent. """ """ import numpy import scipy.fftpack numpy.set_printoptions(threshold=10000, linewidth=200, precision=2, suppress=True) # Step 1,2 im = self.image.resize((32, 32), Image.ANTIALIAS).convert("L") in_data = numpy.asarray(im) # Step 3 dct = scipy.fftpack.dct(in_data.astype(float)) # Step 4 # Just skip the top and left rows when slicing, as suggested somewhere else... lofreq_dct = dct[1:9, 1:9].flatten() # Step 5 avg = (lofreq_dct.sum()) / (lofreq_dct.size) median = numpy.median(lofreq_dct) thresh = avg # Step 6 def compare_value_to_thresh(i): return (1 if i > thresh else 0) bitlist = map(compare_value_to_thresh, lofreq_dct) #Step 7 def set_bit(x, (idx, val)): return (x | (val << idx)) result = reduce(set_bit, enumerate(bitlist), long(0)) #print("{0:016x}".format(result)) return result """ # accepts 2 hashes (longs or hex strings) and returns the hamming distance @staticmethod def hamming_distance(h1, h2): if isinstance(h1, long) or isinstance(h1, int): n1 = h1 n2 = h2 else: # convert hex strings to ints n1 = long(h1, 16) n2 = long(h2, 16) # xor the two numbers n = n1 ^ n2 # count up the 1's in the binary string return sum(b == '1' for b in bin(n)[2:])
6,803
Python
.py
148
36.702703
90
0.62466
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,990
volumeselectionwindow.py
evilhero_mylar/lib/comictaggerlib/volumeselectionwindow.py
"""A PyQT4 dialog to select specific series/volume from list""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import sys #import time #import os from PyQt4 import QtCore, QtGui, uic from PyQt4.QtCore import QUrl, pyqtSignal #from PyQt4.QtCore import QObject #from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest from comicvinetalker import ComicVineTalker, ComicVineTalkerException from issueselectionwindow import IssueSelectionWindow from issueidentifier import IssueIdentifier from genericmetadata import GenericMetadata from progresswindow import IDProgressWindow from settings import ComicTaggerSettings from matchselectionwindow import MatchSelectionWindow from coverimagewidget import CoverImageWidget from comictaggerlib.ui.qtutils import reduceWidgetFontSize #from imagefetcher import ImageFetcher #import utils class SearchThread(QtCore.QThread): searchComplete = pyqtSignal() progressUpdate = pyqtSignal(int, int) def __init__(self, series_name, refresh): QtCore.QThread.__init__(self) self.series_name = series_name self.refresh = refresh self.error_code = None def run(self): comicVine = ComicVineTalker() try: self.cv_error = False self.cv_search_results = comicVine.searchForSeries( self.series_name, callback=self.prog_callback, refresh_cache=self.refresh) except ComicVineTalkerException as e: self.cv_search_results = [] self.cv_error = True self.error_code = e.code finally: self.searchComplete.emit() def prog_callback(self, current, 
total): self.progressUpdate.emit(current, total) class IdentifyThread(QtCore.QThread): identifyComplete = pyqtSignal() identifyLogMsg = pyqtSignal(str) identifyProgress = pyqtSignal(int, int) def __init__(self, identifier): QtCore.QThread.__init__(self) self.identifier = identifier self.identifier.setOutputFunction(self.logOutput) self.identifier.setProgressCallback(self.progressCallback) def logOutput(self, text): self.identifyLogMsg.emit(text) def progressCallback(self, cur, total): self.identifyProgress.emit(cur, total) def run(self): matches = self.identifier.search() self.identifyComplete.emit() class VolumeSelectionWindow(QtGui.QDialog): def __init__(self, parent, series_name, issue_number, year, issue_count, cover_index_list, comic_archive, settings, autoselect=False): super(VolumeSelectionWindow, self).__init__(parent) uic.loadUi( ComicTaggerSettings.getUIFile('volumeselectionwindow.ui'), self) self.imageWidget = CoverImageWidget( self.imageContainer, CoverImageWidget.URLMode) gridlayout = QtGui.QGridLayout(self.imageContainer) gridlayout.addWidget(self.imageWidget) gridlayout.setContentsMargins(0, 0, 0, 0) reduceWidgetFontSize(self.teDetails, 1) reduceWidgetFontSize(self.twList) self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint) self.settings = settings self.series_name = series_name self.issue_number = issue_number self.year = year self.issue_count = issue_count self.volume_id = 0 self.comic_archive = comic_archive self.immediate_autoselect = autoselect self.cover_index_list = cover_index_list self.cv_search_results = None self.twList.resizeColumnsToContents() self.twList.currentItemChanged.connect(self.currentItemChanged) self.twList.cellDoubleClicked.connect(self.cellDoubleClicked) self.btnRequery.clicked.connect(self.requery) self.btnIssues.clicked.connect(self.showIssues) self.btnAutoSelect.clicked.connect(self.autoSelect) self.updateButtons() self.performQuery() self.twList.selectRow(0) def 
updateButtons(self): if self.cv_search_results is not None and len( self.cv_search_results) > 0: enabled = True else: enabled = False self.btnRequery.setEnabled(enabled) self.btnIssues.setEnabled(enabled) self.btnAutoSelect.setEnabled(enabled) self.buttonBox.button(QtGui.QDialogButtonBox.Ok).setEnabled(enabled) def requery(self,): self.performQuery(refresh=True) self.twList.selectRow(0) def autoSelect(self): if self.comic_archive is None: QtGui.QMessageBox.information( self, "Auto-Select", "You need to load a comic first!") return if self.issue_number is None or self.issue_number == "": QtGui.QMessageBox.information( self, "Auto-Select", "Can't auto-select without an issue number (yet!)") return self.iddialog = IDProgressWindow(self) self.iddialog.setModal(True) self.iddialog.rejected.connect(self.identifyCancel) self.iddialog.show() self.ii = IssueIdentifier(self.comic_archive, self.settings) md = GenericMetadata() md.series = self.series_name md.issue = self.issue_number md.year = self.year md.issueCount = self.issue_count self.ii.setAdditionalMetadata(md) self.ii.onlyUseAdditionalMetaData = True self.ii.cover_page_index = int(self.cover_index_list[0]) self.id_thread = IdentifyThread(self.ii) self.id_thread.identifyComplete.connect(self.identifyComplete) self.id_thread.identifyLogMsg.connect(self.logIDOutput) self.id_thread.identifyProgress.connect(self.identifyProgress) self.id_thread.start() self.iddialog.exec_() def logIDOutput(self, text): print unicode(text), self.iddialog.textEdit.ensureCursorVisible() self.iddialog.textEdit.insertPlainText(text) def identifyProgress(self, cur, total): self.iddialog.progressBar.setMaximum(total) self.iddialog.progressBar.setValue(cur) def identifyCancel(self): self.ii.cancel = True def identifyComplete(self): matches = self.ii.match_list result = self.ii.search_result match_index = 0 found_match = None choices = False if result == self.ii.ResultNoMatches: QtGui.QMessageBox.information( self, "Auto-Select Result", " No 
matches found :-(") elif result == self.ii.ResultFoundMatchButBadCoverScore: QtGui.QMessageBox.information( self, "Auto-Select Result", " Found a match, but cover doesn't seem the same. Verify before commiting!") found_match = matches[0] elif result == self.ii.ResultFoundMatchButNotFirstPage: QtGui.QMessageBox.information( self, "Auto-Select Result", " Found a match, but not with the first page of the archive.") found_match = matches[0] elif result == self.ii.ResultMultipleMatchesWithBadImageScores: QtGui.QMessageBox.information( self, "Auto-Select Result", " Found some possibilities, but no confidence. Proceed manually.") choices = True elif result == self.ii.ResultOneGoodMatch: found_match = matches[0] elif result == self.ii.ResultMultipleGoodMatches: QtGui.QMessageBox.information( self, "Auto-Select Result", " Found multiple likely matches. Please select.") choices = True if choices: selector = MatchSelectionWindow(self, matches, self.comic_archive) selector.setModal(True) selector.exec_() if selector.result(): # we should now have a list index found_match = selector.currentMatch() if found_match is not None: self.iddialog.accept() self.volume_id = found_match['volume_id'] self.issue_number = found_match['issue_number'] self.selectByID() self.showIssues() def showIssues(self): selector = IssueSelectionWindow( self, self.settings, self.volume_id, self.issue_number) title = "" for record in self.cv_search_results: if record['id'] == self.volume_id: title = record['name'] title += " (" + unicode(record['start_year']) + ")" title += " - " break selector.setWindowTitle(title + "Select Issue") selector.setModal(True) selector.exec_() if selector.result(): # we should now have a volume ID self.issue_number = selector.issue_number self.accept() return def selectByID(self): for r in range(0, self.twList.rowCount()): volume_id, b = self.twList.item( r, 0).data(QtCore.Qt.UserRole).toInt() if (volume_id == self.volume_id): self.twList.selectRow(r) break def 
performQuery(self, refresh=False): self.progdialog = QtGui.QProgressDialog( "Searching Online", "Cancel", 0, 100, self) self.progdialog.setWindowTitle("Online Search") self.progdialog.canceled.connect(self.searchCanceled) self.progdialog.setModal(True) self.search_thread = SearchThread(self.series_name, refresh) self.search_thread.searchComplete.connect(self.searchComplete) self.search_thread.progressUpdate.connect(self.searchProgressUpdate) self.search_thread.start() # QtCore.QCoreApplication.processEvents() self.progdialog.exec_() def searchCanceled(self): print("query cancelled") self.search_thread.searchComplete.disconnect(self.searchComplete) self.search_thread.progressUpdate.disconnect(self.searchProgressUpdate) self.progdialog.canceled.disconnect(self.searchCanceled) self.progdialog.reject() QtCore.QTimer.singleShot(200, self.closeMe) def closeMe(self): print("closeme") self.reject() def searchProgressUpdate(self, current, total): self.progdialog.setMaximum(total) self.progdialog.setValue(current) def searchComplete(self): self.progdialog.accept() if self.search_thread.cv_error: if self.search_thread.error_code == ComicVineTalkerException.RateLimit: QtGui.QMessageBox.critical( self, self.tr("Comic Vine Error"), ComicVineTalker.getRateLimitMessage()) else: QtGui.QMessageBox.critical( self, self.tr("Network Issue"), self.tr("Could not connect to Comic Vine to search for series!")) return self.cv_search_results = self.search_thread.cv_search_results self.updateButtons() self.twList.setSortingEnabled(False) while self.twList.rowCount() > 0: self.twList.removeRow(0) row = 0 for record in self.cv_search_results: self.twList.insertRow(row) item_text = record['name'] item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setData(QtCore.Qt.UserRole, record['id']) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 0, item) item_text = str(record['start_year']) item = 
QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 1, item) item_text = record['count_of_issues'] item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setData(QtCore.Qt.DisplayRole, record['count_of_issues']) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 2, item) if record['publisher'] is not None: item_text = record['publisher']['name'] item.setData(QtCore.Qt.ToolTipRole, item_text) item = QtGui.QTableWidgetItem(item_text) item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 3, item) row += 1 self.twList.resizeColumnsToContents() self.twList.setSortingEnabled(True) self.twList.sortItems(2, QtCore.Qt.DescendingOrder) self.twList.selectRow(0) self.twList.resizeColumnsToContents() if len(self.cv_search_results) == 0: QtCore.QCoreApplication.processEvents() QtGui.QMessageBox.information( self, "Search Result", "No matches found!") if self.immediate_autoselect and len(self.cv_search_results) > 0: # defer the immediate autoselect so this dialog has time to pop up QtCore.QCoreApplication.processEvents() QtCore.QTimer.singleShot(10, self.doImmediateAutoselect) def doImmediateAutoselect(self): self.immediate_autoselect = False self.autoSelect() def cellDoubleClicked(self, r, c): self.showIssues() def currentItemChanged(self, curr, prev): if curr is None: return if prev is not None and prev.row() == curr.row(): return self.volume_id, b = self.twList.item( curr.row(), 0).data(QtCore.Qt.UserRole).toInt() # list selection was changed, update the info on the volume for record in self.cv_search_results: if record['id'] == self.volume_id: if record['description'] is None: self.teDetails.setText("") else: self.teDetails.setText(record['description']) self.imageWidget.setURL(record['image']['super_url']) break
15,105
Python
.py
335
34.608955
93
0.64506
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,991
settingswindow.py
evilhero_mylar/lib/comictaggerlib/settingswindow.py
"""A PyQT4 dialog to enter app settings""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import platform import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings from comicvinecacher import ComicVineCacher from comicvinetalker import ComicVineTalker from imagefetcher import ImageFetcher import utils windowsRarHelp = """ <html><head/><body><p>In order to write to CBR/RAR archives, you will need to have the tools from <a href="http://www.win-rar.com/download.html"> <span style=" text-decoration: underline; color:#0000ff;">WinRAR</span> </a> installed. </p></body></html> """ linuxRarHelp = """ <html><head/><body><p>In order to read/write to CBR/RAR archives, you will need to have the shareware tools from WinRar installed. Your package manager should have unrar, and probably rar. If not, download them <a href="http://www.win-rar.com/download.html"> <span style=" text-decoration: underline; color:#0000ff;">here</span> </a>, and install in your path. </p></body></html> """ macRarHelp = """ <html><head/><body><p>In order to read/write to CBR/RAR archives, you will need the shareware tools from <a href="http://www.win-rar.com/download.html"> <span style=" text-decoration: underline; color:#0000ff;">WinRAR</span> </a>. 
</p></body></html> """ class SettingsWindow(QtGui.QDialog): def __init__(self, parent, settings): super(SettingsWindow, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('settingswindow.ui'), self) self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint) self.settings = settings self.name = "Settings" if platform.system() == "Windows": self.lblUnrar.hide() self.leUnrarExePath.hide() self.btnBrowseUnrar.hide() self.lblRarHelp.setText(windowsRarHelp) elif platform.system() == "Linux": self.lblRarHelp.setText(linuxRarHelp) elif platform.system() == "Darwin": self.lblRarHelp.setText(macRarHelp) self.name = "Preferences" self.setWindowTitle("ComicTagger " + self.name) self.lblDefaultSettings.setText( "Revert to default " + self.name.lower()) self.btnResetSettings.setText("Default " + self.name) nldtTip = ( """<html>The <b>Default Name Length Match Tolerance</b> is for eliminating automatic search matches that are too long compared to your series name search. The higher it is, the more likely to have a good match, but each search will take longer and use more bandwidth. Too low, and only the very closest lexical matches will be explored.</html>""") self.leNameLengthDeltaThresh.setToolTip(nldtTip) pblTip = ( """<html> The <b>Publisher Blacklist</b> is for eliminating automatic matches to certain publishers that you know are incorrect. Useful for avoiding international re-prints with same covers or series names. Enter publisher names separated by commas. 
</html>""" ) self.tePublisherBlacklist.setToolTip(pblTip) validator = QtGui.QIntValidator(1, 4, self) self.leIssueNumPadding.setValidator(validator) validator = QtGui.QIntValidator(0, 99, self) self.leNameLengthDeltaThresh.setValidator(validator) self.settingsToForm() self.btnBrowseRar.clicked.connect(self.selectRar) self.btnBrowseUnrar.clicked.connect(self.selectUnrar) self.btnClearCache.clicked.connect(self.clearCache) self.btnResetSettings.clicked.connect(self.resetSettings) self.btnTestKey.clicked.connect(self.testAPIKey) def settingsToForm(self): # Copy values from settings to form self.leRarExePath.setText(self.settings.rar_exe_path) self.leUnrarExePath.setText(self.settings.unrar_exe_path) self.leNameLengthDeltaThresh.setText( str(self.settings.id_length_delta_thresh)) self.tePublisherBlacklist.setPlainText( self.settings.id_publisher_blacklist) if self.settings.check_for_new_version: self.cbxCheckForNewVersion.setCheckState(QtCore.Qt.Checked) if self.settings.parse_scan_info: self.cbxParseScanInfo.setCheckState(QtCore.Qt.Checked) if self.settings.use_series_start_as_volume: self.cbxUseSeriesStartAsVolume.setCheckState(QtCore.Qt.Checked) if self.settings.clear_form_before_populating_from_cv: self.cbxClearFormBeforePopulating.setCheckState(QtCore.Qt.Checked) if self.settings.remove_html_tables: self.cbxRemoveHtmlTables.setCheckState(QtCore.Qt.Checked) self.leKey.setText(str(self.settings.cv_api_key)) if self.settings.assume_lone_credit_is_primary: self.cbxAssumeLoneCreditIsPrimary.setCheckState(QtCore.Qt.Checked) if self.settings.copy_characters_to_tags: self.cbxCopyCharactersToTags.setCheckState(QtCore.Qt.Checked) if self.settings.copy_teams_to_tags: self.cbxCopyTeamsToTags.setCheckState(QtCore.Qt.Checked) if self.settings.copy_locations_to_tags: self.cbxCopyLocationsToTags.setCheckState(QtCore.Qt.Checked) if self.settings.copy_storyarcs_to_tags: self.cbxCopyStoryArcsToTags.setCheckState(QtCore.Qt.Checked) if self.settings.copy_notes_to_comments: 
self.cbxCopyNotesToComments.setCheckState(QtCore.Qt.Checked) if self.settings.copy_weblink_to_comments: self.cbxCopyWebLinkToComments.setCheckState(QtCore.Qt.Checked) if self.settings.apply_cbl_transform_on_cv_import: self.cbxApplyCBLTransformOnCVIMport.setCheckState( QtCore.Qt.Checked) if self.settings.apply_cbl_transform_on_bulk_operation: self.cbxApplyCBLTransformOnBatchOperation.setCheckState( QtCore.Qt.Checked) self.leRenameTemplate.setText(self.settings.rename_template) self.leIssueNumPadding.setText( str(self.settings.rename_issue_number_padding)) if self.settings.rename_use_smart_string_cleanup: self.cbxSmartCleanup.setCheckState(QtCore.Qt.Checked) if self.settings.rename_extension_based_on_archive: self.cbxChangeExtension.setCheckState(QtCore.Qt.Checked) def accept(self): # Copy values from form to settings and save self.settings.rar_exe_path = str(self.leRarExePath.text()) self.settings.unrar_exe_path = str(self.leUnrarExePath.text()) # make sure unrar/rar program is now in the path for the UnRAR class utils.addtopath(os.path.dirname(self.settings.unrar_exe_path)) utils.addtopath(os.path.dirname(self.settings.rar_exe_path)) if not str(self.leNameLengthDeltaThresh.text()).isdigit(): self.leNameLengthDeltaThresh.setText("0") if not str(self.leIssueNumPadding.text()).isdigit(): self.leIssueNumPadding.setText("0") self.settings.check_for_new_version = self.cbxCheckForNewVersion.isChecked() self.settings.id_length_delta_thresh = int( self.leNameLengthDeltaThresh.text()) self.settings.id_publisher_blacklist = str( self.tePublisherBlacklist.toPlainText()) self.settings.parse_scan_info = self.cbxParseScanInfo.isChecked() self.settings.use_series_start_as_volume = self.cbxUseSeriesStartAsVolume.isChecked() self.settings.clear_form_before_populating_from_cv = self.cbxClearFormBeforePopulating.isChecked() self.settings.remove_html_tables = self.cbxRemoveHtmlTables.isChecked() self.settings.cv_api_key = unicode(self.leKey.text()) ComicVineTalker.api_key = 
self.settings.cv_api_key self.settings.assume_lone_credit_is_primary = self.cbxAssumeLoneCreditIsPrimary.isChecked() self.settings.copy_characters_to_tags = self.cbxCopyCharactersToTags.isChecked() self.settings.copy_teams_to_tags = self.cbxCopyTeamsToTags.isChecked() self.settings.copy_locations_to_tags = self.cbxCopyLocationsToTags.isChecked() self.settings.copy_storyarcs_to_tags = self.cbxCopyStoryArcsToTags.isChecked() self.settings.copy_notes_to_comments = self.cbxCopyNotesToComments.isChecked() self.settings.copy_weblink_to_comments = self.cbxCopyWebLinkToComments.isChecked() self.settings.apply_cbl_transform_on_cv_import = self.cbxApplyCBLTransformOnCVIMport.isChecked() self.settings.apply_cbl_transform_on_bulk_operation = self.cbxApplyCBLTransformOnBatchOperation.isChecked() self.settings.rename_template = str(self.leRenameTemplate.text()) self.settings.rename_issue_number_padding = int( self.leIssueNumPadding.text()) self.settings.rename_use_smart_string_cleanup = self.cbxSmartCleanup.isChecked() self.settings.rename_extension_based_on_archive = self.cbxChangeExtension.isChecked() self.settings.save() QtGui.QDialog.accept(self) def selectRar(self): self.selectFile(self.leRarExePath, "RAR") def selectUnrar(self): self.selectFile(self.leUnrarExePath, "UnRAR") def clearCache(self): ImageFetcher().clearCache() ComicVineCacher().clearCache() QtGui.QMessageBox.information( self, self.name, "Cache has been cleared.") def testAPIKey(self): if ComicVineTalker().testKey(unicode(self.leKey.text())): QtGui.QMessageBox.information( self, "API Key Test", "Key is valid!") else: QtGui.QMessageBox.warning( self, "API Key Test", "Key is NOT valid.") def resetSettings(self): self.settings.reset() self.settingsToForm() QtGui.QMessageBox.information( self, self.name, self.name + " have been returned to default values.") def selectFile(self, control, name): dialog = QtGui.QFileDialog(self) dialog.setFileMode(QtGui.QFileDialog.ExistingFile) if platform.system() == "Windows": if 
name == "RAR": filter = self.tr("Rar Program (Rar.exe)") else: filter = self.tr("Programs (*.exe)") dialog.setNameFilter(filter) else: # QtCore.QDir.Executable | QtCore.QDir.Files) dialog.setFilter(QtCore.QDir.Files) pass dialog.setDirectory(os.path.dirname(str(control.text()))) dialog.setWindowTitle("Find " + name + " program") if (dialog.exec_()): fileList = dialog.selectedFiles() control.setText(str(fileList[0])) def showRenameTab(self): self.tabWidget.setCurrentIndex(5)
11,783
Python
.py
216
44.166667
115
0.677439
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,992
cli.py
evilhero_mylar/lib/comictaggerlib/cli.py
#!/usr/bin/python """ComicTagger CLI functions""" # Copyright 2013 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import os from pprint import pprint import json #import signal #import traceback #import time #import platform #import locale #import codecs filename_encoding = sys.getfilesystemencoding() from settings import ComicTaggerSettings from options import Options from comicarchive import ComicArchive, MetaDataStyle from issueidentifier import IssueIdentifier from genericmetadata import GenericMetadata from comicvinetalker import ComicVineTalker, ComicVineTalkerException from filerenamer import FileRenamer from cbltransformer import CBLTransformer import utils class MultipleMatch(): def __init__(self, filename, match_list): self.filename = filename self.matches = match_list class OnlineMatchResults(): def __init__(self): self.goodMatches = [] self.noMatches = [] self.multipleMatches = [] self.lowConfidenceMatches = [] self.writeFailures = [] self.fetchDataFailures = [] #----------------------------- def actual_issue_data_fetch(match, settings, opts): # now get the particular issue data try: comicVine = ComicVineTalker() comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit cv_md = comicVine.fetchIssueData( match['volume_id'], match['issue_number'], settings) except ComicVineTalkerException: print >> sys.stderr, "Network error while getting issue details. 
Save aborted" return None if settings.apply_cbl_transform_on_cv_import: cv_md = CBLTransformer(cv_md, settings).apply() return cv_md def actual_metadata_save(ca, opts, md): if not opts.dryrun: # write out the new data if not ca.writeMetadata(md, opts.data_style): print >> sys.stderr, "The tag save seemed to fail!" return False else: print >> sys.stderr, "Save complete." else: if opts.terse: print >> sys.stderr, "dry-run option was set, so nothing was written" else: print >> sys.stderr, "dry-run option was set, so nothing was written, but here is the final set of tags:" print(u"{0}".format(md)) return True def display_match_set_for_choice(label, match_set, opts, settings): print(u"{0} -- {1}:".format(match_set.filename, label)) # sort match list by year match_set.matches.sort(key=lambda k: k['year']) for (counter, m) in enumerate(match_set.matches): counter += 1 print( u" {0}. {1} #{2} [{3}] ({4}/{5}) - {6}".format( counter, m['series'], m['issue_number'], m['publisher'], m['month'], m['year'], m['issue_title'])) if opts.interactive: while True: i = raw_input("Choose a match #, or 's' to skip: ") if (i.isdigit() and int(i) in range( 1, len(match_set.matches) + 1)) or i == 's': break if i != 's': i = int(i) - 1 # save the data! 
# we know at this point, that the file is all good to go ca = ComicArchive( match_set.filename, settings.rar_exe_path, ComicTaggerSettings.getGraphic('nocover.png')) md = create_local_metadata( opts, ca, ca.hasMetadata(opts.data_style)) cv_md = actual_issue_data_fetch( match_set.matches[int(i)], settings, opts) md.overlay(cv_md) actual_metadata_save(ca, opts, md) def post_process_matches(match_results, opts, settings): # now go through the match results if opts.show_save_summary: if len(match_results.goodMatches) > 0: print("\nSuccessful matches:\n------------------") for f in match_results.goodMatches: print(f) if len(match_results.noMatches) > 0: print("\nNo matches:\n------------------") for f in match_results.noMatches: print(f) if len(match_results.writeFailures) > 0: print("\nFile Write Failures:\n------------------") for f in match_results.writeFailures: print(f) if len(match_results.fetchDataFailures) > 0: print("\nNetwork Data Fetch Failures:\n------------------") for f in match_results.fetchDataFailures: print(f) if not opts.show_save_summary and not opts.interactive: # just quit if we're not interactive or showing the summary return if len(match_results.multipleMatches) > 0: print( "\nArchives with multiple high-confidence matches:\n------------------") for match_set in match_results.multipleMatches: display_match_set_for_choice( "Multiple high-confidence matches", match_set, opts, settings) if len(match_results.lowConfidenceMatches) > 0: print("\nArchives with low-confidence matches:\n------------------") for match_set in match_results.lowConfidenceMatches: if len(match_set.matches) == 1: label = "Single low-confidence match" else: label = "Multiple low-confidence matches" display_match_set_for_choice(label, match_set, opts, settings) def cli_mode(opts, settings): if len(opts.file_list) < 1: print >> sys.stderr, "You must specify at least one filename. 
Use the -h option for more info" return match_results = OnlineMatchResults() for f in opts.file_list: if isinstance(f, str): f = f.decode(filename_encoding, 'replace') process_file_cli(f, opts, settings, match_results) sys.stdout.flush() post_process_matches(match_results, opts, settings) def create_local_metadata(opts, ca, has_desired_tags): md = GenericMetadata() md.setDefaultPageList(ca.getNumberOfPages()) if has_desired_tags: md = ca.readMetadata(opts.data_style) # now, overlay the parsed filename info if opts.parse_filename: md.overlay(ca.metadataFromFilename()) # finally, use explicit stuff if opts.metadata is not None: md.overlay(opts.metadata) return md def process_file_cli(filename, opts, settings, match_results): batch_mode = len(opts.file_list) > 1 ca = ComicArchive( filename, settings.rar_exe_path, ComicTaggerSettings.getGraphic('nocover.png')) if not os.path.lexists(filename): print >> sys.stderr, "Cannot find " + filename return if not ca.seemsToBeAComicArchive(): print >> sys.stderr, "Sorry, but " + \ filename + " is not a comic archive!" 
return # if not ca.isWritableForStyle(opts.data_style) and (opts.delete_tags or # opts.save_tags or opts.rename_file): if not ca.isWritable() and ( opts.delete_tags or opts.copy_tags or opts.save_tags or opts.rename_file): print >> sys.stderr, "This archive is not writable for that tag type" return has = [False, False, False] if ca.hasCIX(): has[MetaDataStyle.CIX] = True if ca.hasCBI(): has[MetaDataStyle.CBI] = True if ca.hasCoMet(): has[MetaDataStyle.COMET] = True if opts.print_tags: if opts.data_style is None: page_count = ca.getNumberOfPages() brief = "" if batch_mode: brief = u"{0}: ".format(filename) if ca.isZip(): brief += "ZIP archive " elif ca.isRar(): brief += "RAR archive " elif ca.isFolder(): brief += "Folder archive " brief += "({0: >3} pages)".format(page_count) brief += " tags:[ " if not (has[MetaDataStyle.CBI] or has[ MetaDataStyle.CIX] or has[MetaDataStyle.COMET]): brief += "none " else: if has[MetaDataStyle.CBI]: brief += "CBL " if has[MetaDataStyle.CIX]: brief += "CR " if has[MetaDataStyle.COMET]: brief += "CoMet " brief += "]" print brief if opts.terse: return print if opts.data_style is None or opts.data_style == MetaDataStyle.CIX: if has[MetaDataStyle.CIX]: print("--------- ComicRack tags ---------") if opts.raw: print( u"{0}".format( unicode( ca.readRawCIX(), errors='ignore'))) else: print(u"{0}".format(ca.readCIX())) if opts.data_style is None or opts.data_style == MetaDataStyle.CBI: if has[MetaDataStyle.CBI]: print("------- ComicBookLover tags -------") if opts.raw: pprint(json.loads(ca.readRawCBI())) else: print(u"{0}".format(ca.readCBI())) if opts.data_style is None or opts.data_style == MetaDataStyle.COMET: if has[MetaDataStyle.COMET]: print("----------- CoMet tags -----------") if opts.raw: print(u"{0}".format(ca.readRawCoMet())) else: print(u"{0}".format(ca.readCoMet())) elif opts.delete_tags: style_name = MetaDataStyle.name[opts.data_style] if has[opts.data_style]: if not opts.dryrun: if not ca.removeMetadata(opts.data_style): 
print(u"{0}: Tag removal seemed to fail!".format(filename)) else: print( u"{0}: Removed {1} tags.".format(filename, style_name)) else: print( u"{0}: dry-run. {1} tags not removed".format(filename, style_name)) else: print(u"{0}: This archive doesn't have {1} tags to remove.".format( filename, style_name)) elif opts.copy_tags: dst_style_name = MetaDataStyle.name[opts.data_style] if opts.no_overwrite and has[opts.data_style]: print(u"{0}: Already has {1} tags. Not overwriting.".format( filename, dst_style_name)) return if opts.copy_source == opts.data_style: print( u"{0}: Destination and source are same: {1}. Nothing to do.".format( filename, dst_style_name)) return src_style_name = MetaDataStyle.name[opts.copy_source] if has[opts.copy_source]: if not opts.dryrun: md = ca.readMetadata(opts.copy_source) if settings.apply_cbl_transform_on_bulk_operation and opts.data_style == MetaDataStyle.CBI: md = CBLTransformer(md, settings).apply() if not ca.writeMetadata(md, opts.data_style): print(u"{0}: Tag copy seemed to fail!".format(filename)) else: print(u"{0}: Copied {1} tags to {2} .".format( filename, src_style_name, dst_style_name)) else: print( u"{0}: dry-run. {1} tags not copied".format(filename, src_style_name)) else: print(u"{0}: This archive doesn't have {1} tags to copy.".format( filename, src_style_name)) elif opts.save_tags: if opts.no_overwrite and has[opts.data_style]: print(u"{0}: Already has {1} tags. 
Not overwriting.".format( filename, MetaDataStyle.name[opts.data_style])) return if batch_mode: print(u"Processing {0}...".format(filename)) md = create_local_metadata(opts, ca, has[opts.data_style]) if md.issue is None or md.issue == "": if opts.assume_issue_is_one_if_not_set: md.issue = "1" # now, search online if opts.search_online: if opts.issue_id is not None: # we were given the actual ID to search with try: comicVine = ComicVineTalker() comicVine.wait_for_rate_limit = opts.wait_and_retry_on_rate_limit cv_md = comicVine.fetchIssueDataByIssueID( opts.issue_id, settings) except ComicVineTalkerException: print >> sys.stderr, "Network error while getting issue details. Save aborted" match_results.fetchDataFailures.append(filename) return if cv_md is None: print >> sys.stderr, "No match for ID {0} was found.".format( opts.issue_id) match_results.noMatches.append(filename) return if settings.apply_cbl_transform_on_cv_import: cv_md = CBLTransformer(cv_md, settings).apply() else: ii = IssueIdentifier(ca, settings) if md is None or md.isEmpty: print >> sys.stderr, "No metadata given to search online with!" 
match_results.noMatches.append(filename) return def myoutput(text): if opts.verbose: IssueIdentifier.defaultWriteOutput(text) # use our overlayed MD struct to search ii.setAdditionalMetadata(md) ii.onlyUseAdditionalMetaData = True ii.waitAndRetryOnRateLimit = opts.wait_and_retry_on_rate_limit ii.setOutputFunction(myoutput) ii.cover_page_index = md.getCoverPageIndexList()[0] matches = ii.search() result = ii.search_result found_match = False choices = False low_confidence = False if result == ii.ResultNoMatches: pass elif result == ii.ResultFoundMatchButBadCoverScore: low_confidence = True found_match = True elif result == ii.ResultFoundMatchButNotFirstPage: found_match = True elif result == ii.ResultMultipleMatchesWithBadImageScores: low_confidence = True choices = True elif result == ii.ResultOneGoodMatch: found_match = True elif result == ii.ResultMultipleGoodMatches: choices = True if choices: if low_confidence: print >> sys.stderr, "Online search: Multiple low confidence matches. Save aborted" match_results.lowConfidenceMatches.append( MultipleMatch(filename, matches)) return else: print >> sys.stderr, "Online search: Multiple good matches. Save aborted" match_results.multipleMatches.append( MultipleMatch(filename, matches)) return if low_confidence and opts.abortOnLowConfidence: print >> sys.stderr, "Online search: Low confidence match. Save aborted" match_results.lowConfidenceMatches.append( MultipleMatch(filename, matches)) return if not found_match: print >> sys.stderr, "Online search: No match found. Save aborted" match_results.noMatches.append(filename) return # we got here, so we have a single match # now get the particular issue data cv_md = actual_issue_data_fetch(matches[0], settings, opts) if cv_md is None: match_results.fetchDataFailures.append(filename) return md.overlay(cv_md) # ok, done building our metadata. 
time to save if not actual_metadata_save(ca, opts, md): match_results.writeFailures.append(filename) else: match_results.goodMatches.append(filename) elif opts.rename_file: msg_hdr = "" if batch_mode: msg_hdr = u"{0}: ".format(filename) if opts.data_style is not None: use_tags = has[opts.data_style] else: use_tags = False md = create_local_metadata(opts, ca, use_tags) if md.series is None: print >> sys.stderr, msg_hdr + "Can't rename without series name" return new_ext = None # default if settings.rename_extension_based_on_archive: if ca.isZip(): new_ext = ".cbz" elif ca.isRar(): new_ext = ".cbr" renamer = FileRenamer(md) renamer.setTemplate(settings.rename_template) renamer.setIssueZeroPadding(settings.rename_issue_number_padding) renamer.setSmartCleanup(settings.rename_use_smart_string_cleanup) new_name = renamer.determineName(filename, ext=new_ext) if new_name == os.path.basename(filename): print >> sys.stderr, msg_hdr + "Filename is already good!" return folder = os.path.dirname(os.path.abspath(filename)) new_abs_path = utils.unique_file(os.path.join(folder, new_name)) suffix = "" if not opts.dryrun: # rename the file os.rename(filename, new_abs_path) else: suffix = " (dry-run, no change)" print( u"renamed '{0}' -> '{1}' {2}".format(os.path.basename(filename), new_name, suffix)) elif opts.export_to_zip: msg_hdr = "" if batch_mode: msg_hdr = u"{0}: ".format(filename) if not ca.isRar(): print >> sys.stderr, msg_hdr + "Archive is not a RAR." 
return rar_file = os.path.abspath(os.path.abspath(filename)) new_file = os.path.splitext(rar_file)[0] + ".cbz" if opts.abort_export_on_conflict and os.path.lexists(new_file): print msg_hdr + "{0} already exists in the that folder.".format(os.path.split(new_file)[1]) return new_file = utils.unique_file(os.path.join(new_file)) delete_success = False export_success = False if not opts.dryrun: if ca.exportAsZip(new_file): export_success = True if opts.delete_rar_after_export: try: os.unlink(rar_file) except: print >> sys.stderr, msg_hdr + \ "Error deleting original RAR after export" delete_success = False else: delete_success = True else: # last export failed, so remove the zip, if it exists if os.path.lexists(new_file): os.remove(new_file) else: msg = msg_hdr + \ u"Dry-run: Would try to create {0}".format( os.path.split(new_file)[1]) if opts.delete_rar_after_export: msg += u" and delete orginal." print(msg) return msg = msg_hdr if export_success: msg += u"Archive exported successfully to: {0}".format( os.path.split(new_file)[1]) if opts.delete_rar_after_export and delete_success: msg += u" (Original deleted) " else: msg += u"Archive failed to export!" print(msg)
20,764
Python
.py
472
31.194915
117
0.554509
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,993
imagepopup.py
evilhero_mylar/lib/comictaggerlib/imagepopup.py
"""A PyQT4 widget to display a popup image""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import sys #import os from PyQt4 import QtCore, QtGui, uic from settings import ComicTaggerSettings class ImagePopup(QtGui.QDialog): def __init__(self, parent, image_pixmap): super(ImagePopup, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('imagepopup.ui'), self) QtGui.QApplication.setOverrideCursor( QtGui.QCursor(QtCore.Qt.WaitCursor)) # self.setWindowModality(QtCore.Qt.WindowModal) self.setWindowFlags(QtCore.Qt.Popup) self.setWindowState(QtCore.Qt.WindowFullScreen) self.imagePixmap = image_pixmap screen_size = QtGui.QDesktopWidget().screenGeometry() self.resize(screen_size.width(), screen_size.height()) self.move(0, 0) # This is a total hack. Uses a snapshot of the desktop, and overlays a # translucent screen over it. 
Probably can do it better by setting opacity of a # widget self.desktopBg = QtGui.QPixmap.grabWindow( QtGui.QApplication.desktop().winId(), 0, 0, screen_size.width(), screen_size.height()) bg = QtGui.QPixmap(ComicTaggerSettings.getGraphic('popup_bg.png')) self.clientBgPixmap = bg.scaled( screen_size.width(), screen_size.height()) self.setMask(self.clientBgPixmap.mask()) self.applyImagePixmap() self.showFullScreen() self.raise_() QtGui.QApplication.restoreOverrideCursor() def paintEvent(self, event): self.painter = QtGui.QPainter(self) self.painter.setRenderHint(QtGui.QPainter.Antialiasing) self.painter.drawPixmap(0, 0, self.desktopBg) self.painter.drawPixmap(0, 0, self.clientBgPixmap) self.painter.end() def applyImagePixmap(self): win_h = self.height() win_w = self.width() if self.imagePixmap.width( ) > win_w or self.imagePixmap.height() > win_h: # scale the pixmap to fit in the frame display_pixmap = self.imagePixmap.scaled( win_w, win_h, QtCore.Qt.KeepAspectRatio) self.lblImage.setPixmap(display_pixmap) else: display_pixmap = self.imagePixmap self.lblImage.setPixmap(display_pixmap) # move and resize the label to be centered in the fame img_w = display_pixmap.width() img_h = display_pixmap.height() self.lblImage.resize(img_w, img_h) self.lblImage.move((win_w - img_w) / 2, (win_h - img_h) / 2) def mousePressEvent(self, event): self.close()
3,226
Python
.py
70
38.085714
88
0.674218
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,994
imagefetcher.py
evilhero_mylar/lib/comictaggerlib/imagefetcher.py
"""A class to manage fetching and caching of images by URL""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sqlite3 as lite import os import datetime import shutil import tempfile import urllib #import urllib2 try: from PyQt4.QtNetwork import QNetworkAccessManager, QNetworkRequest from PyQt4.QtCore import QUrl, pyqtSignal, QObject, QByteArray from PyQt4 import QtGui except ImportError: # No Qt, so define a few dummy QObjects to help us compile class QObject(): def __init__(self, *args): pass class QByteArray(): pass class pyqtSignal(): def __init__(self, *args): pass def emit(a, b, c): pass from settings import ComicTaggerSettings class ImageFetcherException(Exception): pass class ImageFetcher(QObject): fetchComplete = pyqtSignal(QByteArray, int) def __init__(self): QObject.__init__(self) self.settings_folder = ComicTaggerSettings.getSettingsFolder() self.db_file = os.path.join(self.settings_folder, "image_url_cache.db") self.cache_folder = os.path.join(self.settings_folder, "image_cache") if not os.path.exists(self.db_file): self.create_image_db() def clearCache(self): os.unlink(self.db_file) if os.path.isdir(self.cache_folder): shutil.rmtree(self.cache_folder) def fetch(self, url, user_data=None, blocking=False): """ If called with blocking=True, this will block until the image is fetched. 
If called with blocking=False, this will run the fetch in the background, and emit a signal when done """ self.user_data = user_data self.fetched_url = url # first look in the DB image_data = self.get_image_from_cache(url) if blocking: if image_data is None: try: image_data = urllib.urlopen(url).read() except Exception as e: print(e) raise ImageFetcherException("Network Error!") # save the image to the cache self.add_image_to_cache(self.fetched_url, image_data) return image_data else: # if we found it, just emit the signal asap if image_data is not None: self.fetchComplete.emit(QByteArray(image_data), self.user_data) return # didn't find it. look online self.nam = QNetworkAccessManager() self.nam.finished.connect(self.finishRequest) self.nam.get(QNetworkRequest(QUrl(url))) # we'll get called back when done... def finishRequest(self, reply): # read in the image data image_data = reply.readAll() # save the image to the cache self.add_image_to_cache(self.fetched_url, image_data) self.fetchComplete.emit(QByteArray(image_data), self.user_data) def create_image_db(self): # this will wipe out any existing version open(self.db_file, 'w').close() # wipe any existing image cache folder too if os.path.isdir(self.cache_folder): shutil.rmtree(self.cache_folder) os.makedirs(self.cache_folder) con = lite.connect(self.db_file) # create tables with con: cur = con.cursor() cur.execute("CREATE TABLE Images(" + "url TEXT," + "filename TEXT," + "timestamp TEXT," + "PRIMARY KEY (url))" ) def add_image_to_cache(self, url, image_data): con = lite.connect(self.db_file) with con: cur = con.cursor() timestamp = datetime.datetime.now() tmp_fd, filename = tempfile.mkstemp( dir=self.cache_folder, prefix="img") f = os.fdopen(tmp_fd, 'w+b') f.write(image_data) f.close() cur.execute("INSERT or REPLACE INTO Images VALUES(?, ?, ?)", (url, filename, timestamp) ) def get_image_from_cache(self, url): con = lite.connect(self.db_file) with con: cur = con.cursor() cur.execute("SELECT filename FROM Images 
WHERE url=?", [url]) row = cur.fetchone() if row is None: return None else: filename = row[0] image_data = None try: with open(filename, 'rb') as f: image_data = f.read() f.close() except IOError as e: pass return image_data
5,429
Python
.py
137
28.759124
79
0.585911
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,995
options.py
evilhero_mylar/lib/comictaggerlib/options.py
"""CLI options class for ComicTagger app""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import getopt import platform import os import traceback try: import argparse except ImportError: pass from genericmetadata import GenericMetadata from comicarchive import MetaDataStyle from versionchecker import VersionChecker import ctversion import utils class Options: help_text = """Usage: {0} [option] ... [file [files ...]] A utility for reading and writing metadata to comic archives. If no options are given, {0} will run in windowed mode. -p, --print Print out tag info from file. Specify type (via -t) to get only info of that tag type. --raw With -p, will print out the raw tag block(s) from the file. -d, --delete Deletes the tag block of specified type (via -t). -c, --copy=SOURCE Copy the specified source tag block to destination style specified via -t (potentially lossy operation). -s, --save Save out tags as specified type (via -t). Must specify also at least -o, -p, or -m. --nooverwrite Don't modify tag block if it already exists (relevant for -s or -c). -1, --assume-issue-one Assume issue number is 1 if not found (relevant for -s). -n, --dryrun Don't actually modify file (only relevant for -d, -s, or -r). -t, --type=TYPE Specify TYPE as either "CR", "CBL", or "COMET" (as either ComicRack, ComicBookLover, or CoMet style tags, respectively). 
-f, --parsefilename Parse the filename to get some info, specifically series name, issue number, volume, and publication year. -i, --interactive Interactively query the user when there are multiple matches for an online search. --nosummary Suppress the default summary after a save operation. -o, --online Search online and attempt to identify file using existing metadata and images in archive. May be used in conjunction with -f and -m. --id=ID Use the issue ID when searching online. Overrides all other metadata. -m, --metadata=LIST Explicitly define, as a list, some tags to be used. e.g.: "series=Plastic Man, publisher=Quality Comics" "series=Kickers^, Inc., issue=1, year=1986" Name-Value pairs are comma separated. Use a "^" to escape an "=" or a ",", as shown in the example above. Some names that can be used: series, issue, issueCount, year, publisher, title -r, --rename Rename the file based on specified tag style. --noabort Don't abort save operation when online match is of low confidence. -e, --export-to-zip Export RAR archive to Zip format. --delete-rar Delete original RAR archive after successful export to Zip. --abort-on-conflict Don't export to zip if intended new filename exists (otherwise, creates a new unique filename). -S, --script=FILE Run an "add-on" python script that uses the ComicTagger library for custom processing. Script arguments can follow the script name. -R, --recursive Recursively include files in sub-folders. --cv-api-key=KEY Use the given Comic Vine API Key (persisted in settings). --only-set-cv-key Only set the Comic Vine API key and quit. -w, --wait-on-cv-rate-limit When encountering a Comic Vine rate limit error, wait and retry query. -v, --verbose Be noisy when doing what it does. --terse Don't say much (for print mode). --version Display version. -h, --help Display this message. 
For more help visit the wiki at: http://code.google.com/p/comictagger/ """ def __init__(self): self.data_style = None self.no_gui = False self.filename = None self.verbose = False self.terse = False self.metadata = None self.print_tags = False self.copy_tags = False self.delete_tags = False self.export_to_zip = False self.abort_export_on_conflict = False self.delete_rar_after_export = False self.search_online = False self.dryrun = False self.abortOnLowConfidence = True self.save_tags = False self.parse_filename = False self.show_save_summary = True self.raw = False self.cv_api_key = None self.only_set_key = False self.rename_file = False self.no_overwrite = False self.interactive = False self.issue_id = None self.recursive = False self.run_script = False self.script = None self.wait_and_retry_on_rate_limit = False self.assume_issue_is_one_if_not_set = False self.file_list = [] def display_msg_and_quit(self, msg, code, show_help=False): appname = os.path.basename(sys.argv[0]) if msg is not None: print(msg) if show_help: print(self.help_text.format(appname)) else: print("For more help, run with '--help'") sys.exit(code) def parseMetadataFromString(self, mdstr): """The metadata string is a comma separated list of name-value pairs The names match the attributes of the internal metadata struct (for now) The caret is the special "escape character", since it's not common in natural language text example = "series=Kickers^, Inc. 
,issue=1, year=1986" """ escaped_comma = "^," escaped_equals = "^=" replacement_token = "<_~_>" md = GenericMetadata() # First, replace escaped commas with with a unique token (to be changed # back later) mdstr = mdstr.replace(escaped_comma, replacement_token) tmp_list = mdstr.split(",") md_list = [] for item in tmp_list: item = item.replace(replacement_token, ",") md_list.append(item) # Now build a nice dict from the list md_dict = dict() for item in md_list: # Make sure to fix any escaped equal signs i = item.replace(escaped_equals, replacement_token) key, value = i.split("=") value = value.replace(replacement_token, "=").strip() key = key.strip() if key.lower() == "credit": cred_attribs = value.split(":") role = cred_attribs[0] person = (cred_attribs[1] if len(cred_attribs) > 1 else "") primary = (cred_attribs[2] if len(cred_attribs) > 2 else None) md.addCredit( person.strip(), role.strip(), True if primary is not None else False) else: md_dict[key] = value # Map the dict to the metadata object for key in md_dict: if not hasattr(md, key): print("Warning: '{0}' is not a valid tag name".format(key)) else: md.isEmpty = False setattr(md, key, md_dict[key]) # print(md) return md def launch_script(self, scriptfile): # we were given a script. special case for the args: # 1. ignore everything before the -S, # 2. pass all the ones that follow (including script name) to the # script script_args = list() for idx, arg in enumerate(sys.argv): if arg in ['-S', '--script']: # found script! 
script_args = sys.argv[idx + 1:] break sys.argv = script_args if not os.path.exists(scriptfile): print("Can't find {0}".format(scriptfile)) else: # I *think* this makes sense: # assume the base name of the file is the module name # add the folder of the given file to the python path # import module dirname = os.path.dirname(scriptfile) module_name = os.path.splitext(os.path.basename(scriptfile))[0] sys.path = [dirname] + sys.path try: script = __import__(module_name) # Determine if the entry point exists before trying to run it if "main" in dir(script): script.main() else: print( "Can't find entry point \"main()\" in module \"{0}\"".format(module_name)) except Exception as e: print "Script raised an unhandled exception: ", e print(traceback.format_exc()) sys.exit(0) def parseCmdLineArgs(self): if platform.system() == "Darwin" and hasattr( sys, "frozen") and sys.frozen == 1: # remove the PSN ("process serial number") argument from OS/X input_args = [a for a in sys.argv[1:] if "-psn_0_" not in a] else: input_args = sys.argv[1:] # first check if we're launching a script: for n in range(len(input_args)): if (input_args[n] in ["-S", "--script"] and n + 1 < len(input_args)): # insert a "--" which will cause getopt to ignore the remaining args # so they will be passed to the script input_args.insert(n + 2, "--") break # parse command line options try: opts, args = getopt.getopt(input_args, "hpdt:fm:vownsrc:ieRS:1", ["help", "print", "delete", "type=", "copy=", "parsefilename", "metadata=", "verbose", "online", "dryrun", "save", "rename", "raw", "noabort", "terse", "nooverwrite", "interactive", "nosummary", "version", "id=", "recursive", "script=", "export-to-zip", "delete-rar", "abort-on-conflict", "assume-issue-one", "cv-api-key=", "only-set-cv-key", "wait-on-cv-rate-limit"]) except getopt.GetoptError as err: self.display_msg_and_quit(str(err), 2) # process options for o, a in opts: if o in ("-h", "--help"): self.display_msg_and_quit(None, 0, show_help=True) if o in 
("-v", "--verbose"): self.verbose = True if o in ("-S", "--script"): self.run_script = True self.script = a if o in ("-R", "--recursive"): self.recursive = True if o in ("-p", "--print"): self.print_tags = True if o in ("-d", "--delete"): self.delete_tags = True if o in ("-i", "--interactive"): self.interactive = True if o in ("-c", "--copy"): self.copy_tags = True if a.lower() == "cr": self.copy_source = MetaDataStyle.CIX elif a.lower() == "cbl": self.copy_source = MetaDataStyle.CBI elif a.lower() == "comet": self.copy_source = MetaDataStyle.COMET else: self.display_msg_and_quit( "Invalid copy tag source type", 1) if o in ("-o", "--online"): self.search_online = True if o in ("-n", "--dryrun"): self.dryrun = True if o in ("-m", "--metadata"): self.metadata = self.parseMetadataFromString(a) if o in ("-s", "--save"): self.save_tags = True if o in ("-r", "--rename"): self.rename_file = True if o in ("-e", "--export_to_zip"): self.export_to_zip = True if o == "--delete-rar": self.delete_rar_after_export = True if o == "--abort-on-conflict": self.abort_export_on_conflict = True if o in ("-f", "--parsefilename"): self.parse_filename = True if o in ("-w", "--wait-on-cv-rate-limit"): self.wait_and_retry_on_rate_limit = True if o == "--id": self.issue_id = a if o == "--raw": self.raw = True if o == "--noabort": self.abortOnLowConfidence = False if o == "--terse": self.terse = True if o == "--nosummary": self.show_save_summary = False if o in ("-1", "--assume-issue-one"): self.assume_issue_is_one_if_not_set = True if o == "--nooverwrite": self.no_overwrite = True if o == "--cv-api-key": self.cv_api_key = a if o == "--only-set-cv-key": self.only_set_key = True if o == "--version": print( "ComicTagger {0} [{1} / {2}]".format(ctversion.version, ctversion.fork, ctversion.fork_tag)) print( "Modified version of ComicTagger (Copyright (c) 2012-2014 Anthony Beville)") print( "Distributed under Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)") sys.exit(0) if o in 
("-t", "--type"): if a.lower() == "cr": self.data_style = MetaDataStyle.CIX elif a.lower() == "cbl": self.data_style = MetaDataStyle.CBI elif a.lower() == "comet": self.data_style = MetaDataStyle.COMET else: self.display_msg_and_quit("Invalid tag type", 1) if self.print_tags or self.delete_tags or self.save_tags or self.copy_tags or self.rename_file or self.export_to_zip or self.only_set_key: self.no_gui = True count = 0 if self.run_script: count += 1 if self.print_tags: count += 1 if self.delete_tags: count += 1 if self.save_tags: count += 1 if self.copy_tags: count += 1 if self.rename_file: count += 1 if self.export_to_zip: count += 1 if self.only_set_key: count += 1 if count > 1: self.display_msg_and_quit( "Must choose only one action of print, delete, save, copy, rename, export, set key, or run script", 1) if self.script is not None: self.launch_script(self.script) if len(args) > 0: if platform.system() == "Windows": # no globbing on windows shell, so do it for them import glob self.file_list = [] for item in args: self.file_list.extend(glob.glob(item)) if len(self.file_list) > 0: self.filename = self.file_list[0] else: self.filename = args[0] self.file_list = args if self.only_set_key and self.cv_api_key is None: self.display_msg_and_quit("Key not given!", 1) if (self.only_set_key == False) and self.no_gui and ( self.filename is None): self.display_msg_and_quit( "Command requires at least one filename!", 1) if self.delete_tags and self.data_style is None: self.display_msg_and_quit( "Please specify the type to delete with -t", 1) if self.save_tags and self.data_style is None: self.display_msg_and_quit( "Please specify the type to save with -t", 1) if self.copy_tags and self.data_style is None: self.display_msg_and_quit( "Please specify the type to copy to with -t", 1) # if self.rename_file and self.data_style is None: # self.display_msg_and_quit("Please specify the type to use for renaming with -t", 1) if self.recursive: self.file_list = 
utils.get_recursive_filelist(self.file_list)
17,841
Python
.py
383
33.067885
146
0.520643
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,996
coverimagewidget.py
evilhero_mylar/lib/comictaggerlib/coverimagewidget.py
"""A PyQt4 widget to display cover images Display cover images from either a local archive, or from Comic Vine. TODO: This should be re-factored using subclasses! """ # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import os from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4 import uic from settings import ComicTaggerSettings from comicvinetalker import ComicVineTalker, ComicVineTalkerException from imagefetcher import ImageFetcher from pageloader import PageLoader from imagepopup import ImagePopup from comictaggerlib.ui.qtutils import reduceWidgetFontSize, getQImageFromData #from genericmetadata import GenericMetadata, PageType #from comicarchive import MetaDataStyle #import utils def clickable(widget): """# Allow a label to be clickable""" class Filter(QObject): dblclicked = pyqtSignal() def eventFilter(self, obj, event): if obj == widget: if event.type() == QEvent.MouseButtonDblClick: self.dblclicked.emit() return True return False filter = Filter(widget) widget.installEventFilter(filter) return filter.dblclicked class CoverImageWidget(QWidget): ArchiveMode = 0 AltCoverMode = 1 URLMode = 1 DataMode = 3 def __init__(self, parent, mode, expand_on_click=True): super(CoverImageWidget, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('coverimagewidget.ui'), self) reduceWidgetFontSize(self.label) self.mode = mode self.comicVine = ComicVineTalker() self.page_loader = None self.showControls = True 
self.btnLeft.setIcon(QIcon(ComicTaggerSettings.getGraphic('left.png'))) self.btnRight.setIcon( QIcon(ComicTaggerSettings.getGraphic('right.png'))) self.btnLeft.clicked.connect(self.decrementImage) self.btnRight.clicked.connect(self.incrementImage) self.resetWidget() if expand_on_click: clickable(self.lblImage).connect(self.showPopup) else: self.lblImage.setToolTip("") self.updateContent() def resetWidget(self): self.comic_archive = None self.issue_id = None self.comicVine = None self.cover_fetcher = None self.url_list = [] if self.page_loader is not None: self.page_loader.abandoned = True self.page_loader = None self.imageIndex = -1 self.imageCount = 1 self.imageData = None def clear(self): self.resetWidget() self.updateContent() def incrementImage(self): self.imageIndex += 1 if self.imageIndex == self.imageCount: self.imageIndex = 0 self.updateContent() def decrementImage(self): self.imageIndex -= 1 if self.imageIndex == -1: self.imageIndex = self.imageCount - 1 self.updateContent() def setArchive(self, ca, page=0): if self.mode == CoverImageWidget.ArchiveMode: self.resetWidget() self.comic_archive = ca self.imageIndex = page self.imageCount = ca.getNumberOfPages() self.updateContent() def setURL(self, url): if self.mode == CoverImageWidget.URLMode: self.resetWidget() self.updateContent() self.url_list = [url] self.imageIndex = 0 self.imageCount = 1 self.updateContent() def setIssueID(self, issue_id): if self.mode == CoverImageWidget.AltCoverMode: self.resetWidget() self.updateContent() self.issue_id = issue_id self.comicVine = ComicVineTalker() self.comicVine.urlFetchComplete.connect( self.primaryUrlFetchComplete) self.comicVine.asyncFetchIssueCoverURLs(int(self.issue_id)) def setImageData(self, image_data): if self.mode == CoverImageWidget.DataMode: self.resetWidget() if image_data is None: self.imageIndex = -1 else: self.imageIndex = 0 self.imageData = image_data self.updateContent() def primaryUrlFetchComplete(self, primary_url, thumb_url, issue_id): 
self.url_list.append(str(primary_url)) self.imageIndex = 0 self.imageCount = len(self.url_list) self.updateContent() # defer the alt cover search QTimer.singleShot(1, self.startAltCoverSearch) def startAltCoverSearch(self): # now we need to get the list of alt cover URLs self.label.setText("Searching for alt. covers...") # page URL should already be cached, so no need to defer self.comicVine = ComicVineTalker() issue_page_url = self.comicVine.fetchIssuePageURL(self.issue_id) self.comicVine.altUrlListFetchComplete.connect( self.altCoverUrlListFetchComplete) self.comicVine.asyncFetchAlternateCoverURLs( int(self.issue_id), issue_page_url) def altCoverUrlListFetchComplete(self, url_list, issue_id): if len(url_list) > 0: self.url_list.extend(url_list) self.imageCount = len(self.url_list) self.updateControls() def setPage(self, pagenum): if self.mode == CoverImageWidget.ArchiveMode: self.imageIndex = pagenum self.updateContent() def updateContent(self): self.updateImage() self.updateControls() def updateImage(self): if self.imageIndex == -1: self.loadDefault() elif self.mode in [CoverImageWidget.AltCoverMode, CoverImageWidget.URLMode]: self.loadURL() elif self.mode == CoverImageWidget.DataMode: self.coverRemoteFetchComplete(self.imageData, 0) else: self.loadPage() def updateControls(self): if not self.showControls or self.mode == CoverImageWidget.DataMode: self.btnLeft.hide() self.btnRight.hide() self.label.hide() return if self.imageIndex == -1 or self.imageCount == 1: self.btnLeft.setEnabled(False) self.btnRight.setEnabled(False) self.btnLeft.hide() self.btnRight.hide() else: self.btnLeft.setEnabled(True) self.btnRight.setEnabled(True) self.btnLeft.show() self.btnRight.show() if self.imageIndex == -1 or self.imageCount == 1: self.label.setText("") elif self.mode == CoverImageWidget.AltCoverMode: self.label.setText( "Cover {0} (of {1})".format( self.imageIndex + 1, self.imageCount)) else: self.label.setText( "Page {0} (of {1})".format( self.imageIndex + 1, 
self.imageCount)) def loadURL(self): self.loadDefault() self.cover_fetcher = ImageFetcher() self.cover_fetcher.fetchComplete.connect(self.coverRemoteFetchComplete) self.cover_fetcher.fetch(self.url_list[self.imageIndex]) #print("ATB cover fetch started...") # called when the image is done loading from internet def coverRemoteFetchComplete(self, image_data, issue_id): img = getQImageFromData(image_data) self.current_pixmap = QPixmap(img) self.setDisplayPixmap(0, 0) #print("ATB cover fetch complete!") def loadPage(self): if self.comic_archive is not None: if self.page_loader is not None: self.page_loader.abandoned = True self.page_loader = PageLoader(self.comic_archive, self.imageIndex) self.page_loader.loadComplete.connect(self.pageLoadComplete) self.page_loader.start() def pageLoadComplete(self, img): self.current_pixmap = QPixmap(img) self.setDisplayPixmap(0, 0) self.page_loader = None def loadDefault(self): self.current_pixmap = QPixmap( ComicTaggerSettings.getGraphic('nocover.png')) #print("loadDefault called") self.setDisplayPixmap(0, 0) def resizeEvent(self, resize_event): if self.current_pixmap is not None: delta_w = resize_event.size().width() - \ resize_event.oldSize().width() delta_h = resize_event.size().height() - \ resize_event.oldSize().height() # print "ATB resizeEvent deltas", resize_event.size().width(), # resize_event.size().height() self.setDisplayPixmap(delta_w, delta_h) def setDisplayPixmap(self, delta_w, delta_h): """The deltas let us know what the new width and height of the label will be""" #new_h = self.frame.height() + delta_h #new_w = self.frame.width() + delta_w # print "ATB setDisplayPixmap deltas", delta_w , delta_h # print "ATB self.frame", self.frame.width(), self.frame.height() # print "ATB self.", self.width(), self.height() #frame_w = new_w #frame_h = new_h new_h = self.frame.height() new_w = self.frame.width() frame_w = self.frame.width() frame_h = self.frame.height() new_h -= 4 new_w -= 4 if new_h < 0: new_h = 0 if new_w < 0: 
new_w = 0 # print "ATB setDisplayPixmap deltas", delta_w , delta_h # print "ATB self.frame", frame_w, frame_h # print "ATB new size", new_w, new_h # scale the pixmap to fit in the frame scaled_pixmap = self.current_pixmap.scaled( new_w, new_h, Qt.KeepAspectRatio) self.lblImage.setPixmap(scaled_pixmap) # move and resize the label to be centered in the fame img_w = scaled_pixmap.width() img_h = scaled_pixmap.height() self.lblImage.resize(img_w, img_h) self.lblImage.move((frame_w - img_w) / 2, (frame_h - img_h) / 2) def showPopup(self): self.popup = ImagePopup(self, self.current_pixmap)
10,721
Python
.py
258
32.093023
87
0.633417
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,997
pageloader.py
evilhero_mylar/lib/comictaggerlib/pageloader.py
"""A PyQT4 class to load a page image from a ComicArchive in a background thread""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from PyQt4 import QtCore, QtGui, uic from PyQt4.QtCore import pyqtSignal from comictaggerlib.ui.qtutils import getQImageFromData #from comicarchive import ComicArchive #import utils class PageLoader(QtCore.QThread): """ This class holds onto a reference of each instance in a list since problems occur if the ref count goes to zero and the GC tries to reap the object while the thread is going. If the client class wants to stop the thread, they should mark it as "abandoned", and no signals will be issued. """ loadComplete = pyqtSignal(QtGui.QImage) instanceList = [] mutex = QtCore.QMutex() # Remove all finished threads from the list @staticmethod def reapInstances(): for obj in reversed(PageLoader.instanceList): if obj.isFinished(): PageLoader.instanceList.remove(obj) def __init__(self, ca, page_num): QtCore.QThread.__init__(self) self.ca = ca self.page_num = page_num self.abandoned = False # remove any old instances, and then add ourself PageLoader.mutex.lock() PageLoader.reapInstances() PageLoader.instanceList.append(self) PageLoader.mutex.unlock() def run(self): image_data = self.ca.getPage(self.page_num) if self.abandoned: return if image_data is not None: img = getQImageFromData(image_data) if self.abandoned: return self.loadComplete.emit(img)
2,202
Python
.py
52
36.076923
83
0.704972
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,998
pagelisteditor.py
evilhero_mylar/lib/comictaggerlib/pagelisteditor.py
"""A PyQt4 widget for editing the page list info""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #import os from PyQt4.QtCore import * from PyQt4.QtGui import * from PyQt4 import uic from settings import ComicTaggerSettings from genericmetadata import GenericMetadata, PageType from comicarchive import MetaDataStyle from coverimagewidget import CoverImageWidget #from pageloader import PageLoader def itemMoveEvents(widget): class Filter(QObject): mysignal = pyqtSignal(str) def eventFilter(self, obj, event): if obj == widget: # print(event.type()) if event.type() == QEvent.ChildRemoved: # print("ChildRemoved") self.mysignal.emit("finish") if event.type() == QEvent.ChildAdded: # print("ChildAdded") self.mysignal.emit("start") return True return False filter = Filter(widget) widget.installEventFilter(filter) return filter.mysignal class PageListEditor(QWidget): firstFrontCoverChanged = pyqtSignal(int) listOrderChanged = pyqtSignal() modified = pyqtSignal() pageTypeNames = { PageType.FrontCover: "Front Cover", PageType.InnerCover: "Inner Cover", PageType.Advertisement: "Advertisement", PageType.Roundup: "Roundup", PageType.Story: "Story", PageType.Editorial: "Editorial", PageType.Letters: "Letters", PageType.Preview: "Preview", PageType.BackCover: "Back Cover", PageType.Other: "Other", PageType.Deleted: "Deleted", } def __init__(self, parent): super(PageListEditor, self).__init__(parent) uic.loadUi(ComicTaggerSettings.getUIFile('pagelisteditor.ui'), 
self) self.pageWidget = CoverImageWidget( self.pageContainer, CoverImageWidget.ArchiveMode) gridlayout = QGridLayout(self.pageContainer) gridlayout.addWidget(self.pageWidget) gridlayout.setContentsMargins(0, 0, 0, 0) self.pageWidget.showControls = False self.resetPage() # Add the entries to the manga combobox self.comboBox.addItem("", "") self.comboBox.addItem( self.pageTypeNames[PageType.FrontCover], PageType.FrontCover) self.comboBox.addItem( self.pageTypeNames[PageType.InnerCover], PageType.InnerCover) self.comboBox.addItem( self.pageTypeNames[PageType.Advertisement], PageType.Advertisement) self.comboBox.addItem( self.pageTypeNames[PageType.Roundup], PageType.Roundup) self.comboBox.addItem( self.pageTypeNames[PageType.Story], PageType.Story) self.comboBox.addItem( self.pageTypeNames[PageType.Editorial], PageType.Editorial) self.comboBox.addItem( self.pageTypeNames[PageType.Letters], PageType.Letters) self.comboBox.addItem( self.pageTypeNames[PageType.Preview], PageType.Preview) self.comboBox.addItem( self.pageTypeNames[PageType.BackCover], PageType.BackCover) self.comboBox.addItem( self.pageTypeNames[PageType.Other], PageType.Other) self.comboBox.addItem( self.pageTypeNames[PageType.Deleted], PageType.Deleted) self.listWidget.itemSelectionChanged.connect(self.changePage) itemMoveEvents(self.listWidget).connect(self.itemMoveEvent) self.comboBox.activated.connect(self.changePageType) self.btnUp.clicked.connect(self.moveCurrentUp) self.btnDown.clicked.connect(self.moveCurrentDown) self.pre_move_row = -1 self.first_front_page = None def resetPage(self): self.pageWidget.clear() self.comboBox.setDisabled(True) self.comic_archive = None self.pages_list = None def moveCurrentUp(self): row = self.listWidget.currentRow() if row > 0: item = self.listWidget.takeItem(row) self.listWidget.insertItem(row - 1, item) self.listWidget.setCurrentRow(row - 1) self.listOrderChanged.emit() self.emitFrontCoverChange() self.modified.emit() def moveCurrentDown(self): row = 
self.listWidget.currentRow() if row < self.listWidget.count() - 1: item = self.listWidget.takeItem(row) self.listWidget.insertItem(row + 1, item) self.listWidget.setCurrentRow(row + 1) self.listOrderChanged.emit() self.emitFrontCoverChange() self.modified.emit() def itemMoveEvent(self, s): # print "move event: ", s, self.listWidget.currentRow() if s == "start": self.pre_move_row = self.listWidget.currentRow() if s == "finish": if self.pre_move_row != self.listWidget.currentRow(): self.listOrderChanged.emit() self.emitFrontCoverChange() self.modified.emit() def changePageType(self, i): new_type = self.comboBox.itemData(i).toString() if self.getCurrentPageType() != new_type: self.setCurrentPageType(new_type) self.emitFrontCoverChange() self.modified.emit() def changePage(self): row = self.listWidget.currentRow() pagetype = self.getCurrentPageType() i = self.comboBox.findData(pagetype) self.comboBox.setCurrentIndex(i) #idx = int(str (self.listWidget.item(row).text())) idx = int(self.listWidget.item(row).data( Qt.UserRole).toPyObject()[0]['Image']) if self.comic_archive is not None: self.pageWidget.setArchive(self.comic_archive, idx) def getFirstFrontCover(self): frontCover = 0 for i in range(self.listWidget.count()): item = self.listWidget.item(i) page_dict = item.data(Qt.UserRole).toPyObject()[0] if 'Type' in page_dict and page_dict[ 'Type'] == PageType.FrontCover: frontCover = int(page_dict['Image']) break return frontCover def getCurrentPageType(self): row = self.listWidget.currentRow() page_dict = self.listWidget.item(row).data(Qt.UserRole).toPyObject()[0] if 'Type' in page_dict: return page_dict['Type'] else: return "" def setCurrentPageType(self, t): row = self.listWidget.currentRow() page_dict = self.listWidget.item(row).data(Qt.UserRole).toPyObject()[0] if t == "": if 'Type' in page_dict: del(page_dict['Type']) else: page_dict['Type'] = str(t) item = self.listWidget.item(row) # wrap the dict in a tuple to keep from being converted to QStrings 
item.setData(Qt.UserRole, (page_dict,)) item.setText(self.listEntryText(page_dict)) def setData(self, comic_archive, pages_list): self.comic_archive = comic_archive self.pages_list = pages_list if pages_list is not None and len(pages_list) > 0: self.comboBox.setDisabled(False) self.listWidget.itemSelectionChanged.disconnect(self.changePage) self.listWidget.clear() for p in pages_list: item = QListWidgetItem(self.listEntryText(p)) # wrap the dict in a tuple to keep from being converted to QStrings item.setData(Qt.UserRole, (p,)) self.listWidget.addItem(item) self.first_front_page = self.getFirstFrontCover() self.listWidget.itemSelectionChanged.connect(self.changePage) self.listWidget.setCurrentRow(0) def listEntryText(self, page_dict): text = str(int(page_dict['Image']) + 1) if 'Type' in page_dict: text += " (" + self.pageTypeNames[page_dict['Type']] + ")" return text def getPageList(self): page_list = [] for i in range(self.listWidget.count()): item = self.listWidget.item(i) page_list.append(item.data(Qt.UserRole).toPyObject()[0]) return page_list def emitFrontCoverChange(self): if self.first_front_page != self.getFirstFrontCover(): self.first_front_page = self.getFirstFrontCover() self.firstFrontCoverChanged.emit(self.first_front_page) def setMetadataStyle(self, data_style): # depending on the current data style, certain fields are disabled inactive_color = QColor(255, 170, 150) active_palette = self.comboBox.palette() inactive_palette3 = self.comboBox.palette() inactive_palette3.setColor(QPalette.Base, inactive_color) if data_style == MetaDataStyle.CIX: self.btnUp.setEnabled(True) self.btnDown.setEnabled(True) self.comboBox.setEnabled(True) self.listWidget.setEnabled(True) self.listWidget.setPalette(active_palette) elif data_style == MetaDataStyle.CBI: self.btnUp.setEnabled(False) self.btnDown.setEnabled(False) self.comboBox.setEnabled(False) self.listWidget.setEnabled(False) self.listWidget.setPalette(inactive_palette3) elif data_style == MetaDataStyle.CoMet: 
pass # make sure combo is disabled when no list if self.comic_archive is None: self.comboBox.setEnabled(False)
10,053
Python
.py
225
34.897778
79
0.648792
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)
20,999
matchselectionwindow.py
evilhero_mylar/lib/comictaggerlib/matchselectionwindow.py
"""A PyQT4 dialog to select from automated issue matches""" # Copyright 2012-2014 Anthony Beville # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os #import sys from PyQt4 import QtCore, QtGui, uic #from PyQt4.QtCore import QUrl, pyqtSignal, QByteArray from settings import ComicTaggerSettings from coverimagewidget import CoverImageWidget from comictaggerlib.ui.qtutils import reduceWidgetFontSize #from imagefetcher import ImageFetcher #from comicarchive import MetaDataStyle #from comicvinetalker import ComicVineTalker #import utils class MatchSelectionWindow(QtGui.QDialog): volume_id = 0 def __init__(self, parent, matches, comic_archive): super(MatchSelectionWindow, self).__init__(parent) uic.loadUi( ComicTaggerSettings.getUIFile('matchselectionwindow.ui'), self) self.altCoverWidget = CoverImageWidget( self.altCoverContainer, CoverImageWidget.AltCoverMode) gridlayout = QtGui.QGridLayout(self.altCoverContainer) gridlayout.addWidget(self.altCoverWidget) gridlayout.setContentsMargins(0, 0, 0, 0) self.archiveCoverWidget = CoverImageWidget( self.archiveCoverContainer, CoverImageWidget.ArchiveMode) gridlayout = QtGui.QGridLayout(self.archiveCoverContainer) gridlayout.addWidget(self.archiveCoverWidget) gridlayout.setContentsMargins(0, 0, 0, 0) reduceWidgetFontSize(self.twList) reduceWidgetFontSize(self.teDescription, 1) self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMaximizeButtonHint) self.matches = matches self.comic_archive = comic_archive 
self.twList.currentItemChanged.connect(self.currentItemChanged) self.twList.cellDoubleClicked.connect(self.cellDoubleClicked) self.updateData() def updateData(self): self.setCoverImage() self.populateTable() self.twList.resizeColumnsToContents() self.twList.selectRow(0) path = self.comic_archive.path self.setWindowTitle(u"Select correct match: {0}".format( os.path.split(path)[1])) def populateTable(self): while self.twList.rowCount() > 0: self.twList.removeRow(0) self.twList.setSortingEnabled(False) row = 0 for match in self.matches: self.twList.insertRow(row) item_text = match['series'] item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setData(QtCore.Qt.UserRole, (match,)) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 0, item) if match['publisher'] is not None: item_text = u"{0}".format(match['publisher']) else: item_text = u"Unknown" item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 1, item) month_str = u"" year_str = u"????" 
if match['month'] is not None: month_str = u"-{0:02d}".format(int(match['month'])) if match['year'] is not None: year_str = u"{0}".format(match['year']) item_text = year_str + month_str item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 2, item) item_text = match['issue_title'] if item_text is None: item_text = "" item = QtGui.QTableWidgetItem(item_text) item.setData(QtCore.Qt.ToolTipRole, item_text) item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled) self.twList.setItem(row, 3, item) row += 1 self.twList.resizeColumnsToContents() self.twList.setSortingEnabled(True) self.twList.sortItems(2, QtCore.Qt.AscendingOrder) self.twList.selectRow(0) self.twList.resizeColumnsToContents() self.twList.horizontalHeader().setStretchLastSection(True) def cellDoubleClicked(self, r, c): self.accept() def currentItemChanged(self, curr, prev): if curr is None: return if prev is not None and prev.row() == curr.row(): return self.altCoverWidget.setIssueID(self.currentMatch()['issue_id']) if self.currentMatch()['description'] is None: self.teDescription.setText("") else: self.teDescription.setText(self.currentMatch()['description']) def setCoverImage(self): self.archiveCoverWidget.setArchive(self.comic_archive) def currentMatch(self): row = self.twList.currentRow() match = self.twList.item(row, 0).data( QtCore.Qt.UserRole).toPyObject()[0] return match
5,689
Python
.py
121
37.528926
79
0.668656
evilhero/mylar
977
173
0
GPL-3.0
9/5/2024, 5:12:46 PM (Europe/Amsterdam)