id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22,200 | __init__.py | wummel_linkchecker/tests/configuration/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2009 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test package.
"""
| 802 | Python | .py | 19 | 41.210526 | 73 | 0.770115 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,201 | test_config.py | wummel_linkchecker/tests/configuration/test_config.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test config parsing.
"""
import unittest
import os
import linkcheck.configuration
def get_file (filename=None):
    """Return the path of a file inside the tests/configuration/data
    directory, or the directory itself if no filename is given.

    The result is returned as a unicode string (Python 2 ``unicode``).
    """
    base = os.path.join("tests", "configuration", "data")
    if not filename:
        return unicode(base)
    return unicode(os.path.join(base, filename))
class TestConfig (unittest.TestCase):
    """Test configuration parsing."""

    def test_confparse (self):
        """Parse config0.ini and verify every configured value."""
        config = linkcheck.configuration.Configuration()
        config.read([get_file("config0.ini")])
        config.sanitize()
        # checking section
        for scheme in ("http", "https", "ftp"):
            self.assertTrue(scheme in config["allowedschemes"])
        checking_values = {
            "threads": 5,
            "timeout": 42,
            "aborttimeout": 99,
            "recursionlevel": 1,
            "nntpserver": "example.org",
            "cookiefile": "blablabla",
            "useragent": "Example/0.0",
            "debugmemory": 1,
            "localwebroot": "foo",
            "sslverify": "/path/to/cacerts.crt",
            "maxnumurls": 1000,
            "maxrunseconds": 1,
            "maxfilesizeparse": 100,
            "maxfilesizedownload": 100,
        }
        for key, expected in checking_values.items():
            self.assertEqual(config[key], expected)
        # filtering section
        patterns = [entry["pattern"].pattern for entry in config["externlinks"]]
        for prefix in ("ignore_", "nofollow_"):
            for suffix in ("1", "2"):
                self.assertTrue("%simadoofus%s" % (prefix, suffix) in patterns)
        self.assertTrue("url-unicode-domain" in config["ignorewarnings"])
        self.assertTrue(config["checkextern"])
        # authentication section
        patterns = [entry["pattern"].pattern for entry in config["authentication"]]
        for suffix in ("1", "2"):
            self.assertTrue("imadoofus%s" % suffix in patterns)
        self.assertTrue("http://www.example.com/" in patterns)
        self.assertTrue("http://www.example.com/nopass" in patterns)
        self.assertEqual(config["loginurl"], "http://www.example.com/")
        self.assertEqual(config["loginuserfield"], "mylogin")
        self.assertEqual(config["loginpasswordfield"], "mypassword")
        extra = config["loginextrafields"]
        self.assertEqual(len(extra), 2)
        self.assertEqual(extra["name1"], "value1")
        self.assertEqual(extra["name 2"], "value 2")
        # output section
        self.assertTrue(linkcheck.log.is_debug(linkcheck.LOG_THREAD))
        self.assertFalse(config["status"])
        self.assertTrue(isinstance(config["logger"],
                        linkcheck.logger.customxml.CustomXMLLogger))
        self.assertTrue(config["verbose"])
        self.assertTrue(config["warnings"])
        self.assertFalse(config["quiet"])
        self.assertEqual(len(config["fileoutput"]), 8)
        # plugins
        for plugin in ("AnchorCheck", "CssSyntaxCheck", "HtmlSyntaxCheck",
                       "LocationInfo", "RegexCheck", "SslCertificateCheck",
                       "VirusCheck", "HttpHeaderInfo"):
            self.assertTrue(plugin in config["enabledplugins"])
        # all logger sections share the filename/parts/encoding pattern
        logger_files = {
            "text": "imadoofus.txt",
            "gml": "imadoofus.gml",
            "dot": "imadoofus.dot",
            "csv": "imadoofus.csv",
            "sql": "imadoofus.sql",
            "html": "imadoofus.html",
            "xml": "imadoofus.xml",
            "gxml": "imadoofus.gxml",
        }
        for section, filename in logger_files.items():
            self.assertEqual(config[section]["filename"], filename)
            self.assertEqual(config[section]["parts"], ["realurl"])
            self.assertEqual(config[section]["encoding"], "utf-8")
        # text logger colors
        for key in ("colorparent", "colorurl", "colorname", "colorreal",
                    "colorbase", "colorvalid", "colorinvalid", "colorinfo",
                    "colorwarning", "colordltime", "colorreset"):
            self.assertEqual(config["text"][key], "blink;red")
        # csv logger extras
        self.assertEqual(config["csv"]["separator"], ";")
        self.assertEqual(config["csv"]["quotechar"], "'")
        # sql logger extras
        self.assertEqual(config["sql"]["separator"], ";")
        self.assertEqual(config["sql"]["dbname"], "linksdb")
        # html logger colors
        for key in ("colorbackground", "colorurl", "colorborder", "colorlink",
                    "colorwarning", "colorerror", "colorok"):
            self.assertEqual(config["html"][key], "#ff0000")
        # blacklist logger section (no "parts" entry)
        self.assertEqual(config["blacklist"]["filename"], "blacklist")
        self.assertEqual(config["blacklist"]["encoding"], "utf-8")

    def test_confparse_error1 (self):
        """config1.ini must raise a LinkCheckerError on read."""
        config = linkcheck.configuration.Configuration()
        self.assertRaises(linkcheck.LinkCheckerError,
                          config.read, [get_file("config1.ini")])

    def test_confparse_error2 (self):
        """config2.ini must raise a LinkCheckerError on read."""
        config = linkcheck.configuration.Configuration()
        self.assertRaises(linkcheck.LinkCheckerError,
                          config.read, [get_file("config2.ini")])
| 8,017 | Python | .py | 150 | 45.62 | 160 | 0.6553 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,202 | test_https.py | wummel_linkchecker/tests/checker/test_https.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test https checking.
"""
from tests import need_network
from . import LinkCheckTest
class TestHttps (LinkCheckTest):
    """
    Test https: link checking.
    """

    @need_network
    def test_https (self):
        """Check a well-known https URL; robots.txt denies the fetch,
        so only the syntax is checked."""
        url = u"https://www.amazon.com/"
        # The real URL equals the requested URL here (no redirect expected).
        expected = [
            u"url %s" % url,
            u"cache key %s" % url,
            u"real url %s" % url,
            #u"info SSL cipher RC4-SHA, TLSv1/SSLv3.",
            u"info Access denied by robots.txt, checked only syntax.",
            u"valid",
        ]
        # Certificate checking stays disabled; the commented options show
        # how it could be re-enabled.
        #confargs = dict(enabledplugins=['SslCertificateCheck'],
        #                SslCertificateCheck=dict(sslcertwarndays=10))
        confargs = dict()
        self.direct(url, expected, recursionlevel=0, confargs=confargs)
| 1,581 | Python | .py | 42 | 32.47619 | 74 | 0.677734 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,203 | test_internpat.py | wummel_linkchecker/tests/checker/test_internpat.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2009 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test internal pattern construction
"""
import linkcheck.director
import linkcheck.configuration
# BUGFIX: was "from __init__ import ...", which imports the package
# __init__ module under a duplicate top-level name and breaks under
# package-relative execution; sibling test modules use the explicit
# relative import.
from . import LinkCheckTest, get_url_from
class TestInternpat (LinkCheckTest):
    """Test internal pattern construction."""

    def test_trailing_slash (self):
        """A trailing slash in the start URL must survive in the pattern."""
        config = linkcheck.configuration.Configuration()
        aggregate = linkcheck.director.get_aggregate(config)
        url_data = get_url_from("http://example.org/foo/", 0, aggregate)
        pattern = url_data.get_intern_pattern()
        self.assertTrue(pattern.endswith('/'))
| 1,395 | Python | .py | 32 | 40.5 | 73 | 0.745588 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,204 | test_error.py | wummel_linkchecker/tests/checker/test_error.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test error checking.
"""
from . import LinkCheckTest
class TestError (LinkCheckTest):
    """
    Test unrecognized or syntactically wrong links.
    """

    def _check_error (self, url, with_name):
        """Check url directly and expect an error result.

        with_name controls whether a "name" line is expected in the output
        (it is present for the invalid-scheme cases, absent for the
        unrecognized-scheme case).
        """
        attrs = self.get_attrs(url=url)
        attrs['nurl'] = self.norm("file://%(curdir)s/%(url)s" % attrs)
        resultlines = [
            u"url file://%(curdir)s/%(url)s" % attrs,
            u"cache key %(nurl)s" % attrs,
            u"real url %(nurl)s" % attrs,
        ]
        if with_name:
            resultlines.append(u"name %(url)s" % attrs)
        resultlines.append(u"error")
        self.direct(url, resultlines)

    def test_unrecognized (self):
        # Unrecognized scheme
        self._check_error(u"hutzli:", with_name=False)

    def test_invalid1 (self):
        # invalid scheme chars
        self._check_error(u"äöü:", with_name=True)

    def test_invalid2 (self):
        # missing scheme altogether
        self._check_error(u"äöü", with_name=True)

    def test_invalid3 (self):
        # really garbled input
        self._check_error(u"@³²¼][½ ³@] ¬½", with_name=True)
| 2,780 | Python | .py | 75 | 29.586667 | 73 | 0.584661 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,205 | test_ftp.py | wummel_linkchecker/tests/checker/test_ftp.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
FTP checking.
"""
from .. import need_pyftpdlib
from .ftpserver import FtpServerTest
class TestFtp (FtpServerTest):
    """Test ftp: link checking."""

    @need_pyftpdlib
    def test_ftp (self):
        """Check several ftp URL spellings against the local test server."""
        # ftp two slashes
        url = u"ftp://%s:%d/" % (self.host, self.port)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % url,
            u"real url %s" % url,
            u"valid",
        ]
        # BUGFIX: this first variant was built but never checked -- the
        # self.direct() call was missing and resultlines was immediately
        # overwritten below.
        self.direct(url, resultlines)
        # ftp user/password
        user = "anonymous"
        passwd = "Ftp"
        url = u"ftp://%s:%s@%s:%d/" % (user, passwd, self.host, self.port)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % url,
            u"real url %s" % url,
            u"valid",
        ]
        self.direct(url, resultlines)
        # ftp one slash: not a valid ftp URL
        url = u"ftp:/%s:%d/" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key None",
            u"real url %s" % nurl,
            u"error",
        ]
        self.direct(url, resultlines)
        # missing path
        url = u"ftp://%s:%d" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % nurl,
            u"real url %s" % nurl,
            u"valid",
        ]
        self.direct(url, resultlines)
        # missing trailing dir slash triggers a warning
        url = u"ftp://%s:%d/base" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % nurl,
            u"real url %s/" % nurl,
            u"warning Missing trailing directory slash in ftp url.",
            u"valid",
        ]
        self.direct(url, resultlines)
        # ftp two dir slashes
        url = u"ftp://%s:%d//base/" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % nurl,
            u"real url %s" % nurl,
            u"valid",
        ]
        self.direct(url, resultlines)
        # ftp many dir slashes
        url = u"ftp://%s:%d////////base/" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % nurl,
            u"real url %s" % nurl,
            u"valid",
        ]
        self.direct(url, resultlines)
        # ftp three slashes: not a valid ftp URL
        url = u"ftp:///%s:%d/" % (self.host, self.port)
        nurl = self.norm(url)
        resultlines = [
            u"url %s" % url,
            u"cache key None",
            u"real url %s" % nurl,
            u"error",
        ]
        self.direct(url, resultlines)
| 3,470 | Python | .py | 105 | 24.742857 | 74 | 0.537775 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,206 | ftpserver.py | wummel_linkchecker/tests/checker/ftpserver.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Define ftp test support classes for LinkChecker tests.
"""
import os
import time
import threading
import pytest
from ftplib import FTP
from . import LinkCheckTest
# Timeout in seconds, used both as FTPHandler.timeout and for FTP.connect().
TIMEOUT = 5
class FtpServerTest (LinkCheckTest):
    """Start/stop an FTP server that can be used for testing."""

    def __init__ (self, methodName='runTest'):
        """Initialize the test class and the default server address."""
        super(FtpServerTest, self).__init__(methodName=methodName)
        self.host = 'localhost'
        self.port = None

    def setUp (self):
        """Start a new FTP server in a new thread and remember its port."""
        self.port = start_server(self.host, 0)
        self.assertTrue(self.port is not None)

    def tearDown (self):
        """Request server shutdown, ignoring any shutdown failure."""
        try:
            stop_server(self.host, self.port)
        except Exception:
            pass
def start_server (host, port):
    """Start a pyftpdlib FTP server thread serving the test data directory.

    Returns the actual port the server listens on (pass port=0 to let the
    OS choose). Skips the current test if pyftpdlib is not installed.
    """
    def line_logger(msg):
        # stop_server() sends a "kill" command; shut down when it is logged.
        if "kill" in msg:
            raise KeyboardInterrupt()
    try:
        from pyftpdlib import ftpserver
    except ImportError:
        pytest.skip("pyftpdlib is not available")
        return
    authorizer = ftpserver.DummyAuthorizer()
    datadir = os.path.join(os.path.dirname(__file__), 'data')
    authorizer.add_anonymous(datadir)
    # Instantiate FTP handler class
    ftp_handler = ftpserver.FTPHandler
    ftp_handler.authorizer = authorizer
    ftp_handler.timeout = TIMEOUT
    ftpserver.logline = line_logger
    # Define a customized banner (string returned when client connects)
    ftp_handler.banner = "pyftpdlib %s based ftpd ready." % ftpserver.__ver__
    # Instantiate FTP server class and listen to host:port
    server = ftpserver.FTPServer((host, port), ftp_handler)
    port = server.address[1]
    t = threading.Thread(None, server.serve_forever)
    t.start()
    # Wait for the server to start up, retrying a few times.
    for _ in range(5):
        try:
            ftp = FTP()
            ftp.connect(host, port, TIMEOUT)
            ftp.login()
            ftp.close()
            break
        except Exception:
            # BUGFIX: was a bare "except:" which would also swallow
            # KeyboardInterrupt/SystemExit while polling.
            time.sleep(0.5)
    return port
def stop_server (host, port):
    """Stop a running FTP server by sending the magic "kill" command."""
    client = FTP()
    client.connect(host, port, TIMEOUT)
    client.login()
    try:
        client.sendcmd("kill")
    except EOFError:
        # The server drops the connection while dying; that is expected.
        pass
    client.close()
| 3,144 | Python | .py | 91 | 29.043956 | 77 | 0.674893 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,207 | httpserver.py | wummel_linkchecker/tests/checker/httpserver.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Define http test support classes for LinkChecker tests.
"""
import SimpleHTTPServer
import BaseHTTPServer
import httplib
import time
import threading
import cgi
import urllib
from cStringIO import StringIO
from . import LinkCheckTest
class StoppableHttpRequestHandler (SimpleHTTPServer.SimpleHTTPRequestHandler, object):
    """
    HTTP request handler supporting a QUIT method that stops the server.
    """

    def do_QUIT (self):
        """
        Answer 200 OK and flag the owning server to stop serving.
        """
        self.send_response(200)
        self.end_headers()
        self.server.stop = True

    def log_message (self, format, *args):
        """
        Suppress all request logging.
        """
        pass
# Register .xhtml so the handler serves such files as application/xhtml+xml
# instead of the default MIME type guessed from the extension.
StoppableHttpRequestHandler.extensions_map.update({
    '.xhtml': 'application/xhtml+xml',
})
class StoppableHttpServer (BaseHTTPServer.HTTPServer, object):
    """
    HTTP server that stops serving once self.stop becomes true.
    """

    def serve_forever (self):
        """
        Process one request at a time until the stop flag is set
        (by StoppableHttpRequestHandler.do_QUIT).
        """
        self.stop = False
        while True:
            if self.stop:
                break
            self.handle_request()
class NoQueryHttpRequestHandler (StoppableHttpRequestHandler):
    """
    Handler ignoring the query part of requests and sending dummy directory
    listings.
    """

    def remove_path_query (self):
        """
        Strip everything after the first question mark from self.path.
        """
        path, sep, dummy = self.path.partition('?')
        if sep:
            self.path = path

    def get_status(self):
        """Return the HTTP status encoded in the last path component,
        or 500 if it is not a known status code."""
        status = int(self.path.rsplit('/', 1)[1])
        if status in self.responses:
            return status
        return 500

    def do_GET (self):
        """
        Serve a GET request with the query part removed; "status/N" paths
        answer with status N and a small dummy body.
        """
        self.remove_path_query()
        if "status/" not in self.path:
            super(NoQueryHttpRequestHandler, self).do_GET()
            return
        status = self.get_status()
        self.send_response(status)
        self.end_headers()
        if status >= 200 and status not in (204, 304):
            self.wfile.write("testcontent")

    def do_HEAD (self):
        """
        Serve a HEAD request with the query part removed; "status/N" paths
        answer with status N and no body.
        """
        self.remove_path_query()
        if "status/" in self.path:
            self.send_response(self.get_status())
            self.end_headers()
        else:
            super(NoQueryHttpRequestHandler, self).do_HEAD()

    def list_directory(self, path):
        """Produce a dummy directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error). In either case, the headers are sent, making the
        interface the same as for send_head().
        """
        listing = StringIO()
        listing.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        listing.write("<html>\n<title>Dummy directory listing</title>\n")
        listing.write("<body>\n<h2>Dummy test directory listing</h2>\n")
        listing.write("<hr>\n<ul>\n")
        for name in ("example1.txt", "example2.html", "example3"):
            listing.write('<li><a href="%s">%s</a>\n'
                          % (urllib.quote(name), cgi.escape(name)))
        listing.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = listing.tell()
        listing.seek(0)
        self.send_response(200)
        encoding = "utf-8"
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return listing
class HttpServerTest (LinkCheckTest):
    """
    Start/stop an HTTP server that can be used for testing.
    """

    def __init__ (self, methodName='runTest'):
        """
        Initialize the test class and the default server state.
        """
        super(HttpServerTest, self).__init__(methodName=methodName)
        self.port = None
        self.handler = NoQueryHttpRequestHandler

    def setUp(self):
        """Spin up an HTTP server thread and remember its port."""
        self.port = start_server(self.handler)
        assert self.port is not None

    def tearDown(self):
        """Send QUIT request to the http server."""
        stop_server(self.port)

    def get_url(self, filename):
        """Return the test server URL for the given data filename."""
        return u"http://localhost:%d/tests/checker/data/%s" % (self.port, filename)
def start_server (handler):
    """Start an HTTP server thread and return its port number."""
    handler.protocol_version = "HTTP/1.0"
    httpd = StoppableHttpServer(('localhost', 0), handler)
    port = httpd.server_port
    t = threading.Thread(None, httpd.serve_forever)
    t.start()
    # Poll until the server answers a GET request.
    while True:
        conn = httplib.HTTPConnection("localhost:%d" % port)
        try:
            conn.request("GET", "/")
            conn.getresponse()
            break
        except Exception:
            # BUGFIX: was a bare "except:" which would also swallow
            # KeyboardInterrupt/SystemExit while polling.
            time.sleep(0.5)
        finally:
            # BUGFIX: connections were never closed, leaking one socket
            # per poll attempt.
            conn.close()
    return port
def stop_server (port):
    """Stop an HTTP server thread by sending it a QUIT request."""
    connection = httplib.HTTPConnection("localhost:%d" % port)
    connection.request("QUIT", "/")
    connection.getresponse()
def get_cookie (maxage=2000):
    """Build a test cookie header value with the given Max-Age."""
    attributes = [
        ("Comment", "justatest"),
        ("Max-Age", "%d" % maxage),
        ("Path", "/"),
        ("Version", "1"),
        ("Foo", "Bar"),
    ]
    return "; ".join('%s="%s"' % attr for attr in attributes)
class CookieRedirectHttpRequestHandler (NoQueryHttpRequestHandler):
    """Handler redirecting certain requests, and setting cookies."""

    def end_headers (self):
        """Attach two cookies (one fresh, one already expired) before
        finishing the headers."""
        self.send_header("Set-Cookie", get_cookie())
        self.send_header("Set-Cookie", get_cookie(maxage=0))
        super(CookieRedirectHttpRequestHandler, self).end_headers()

    def redirect (self):
        """Redirect request to the same path with "redirect" replaced
        by "newurl"."""
        self.send_response(302)
        self.send_header("Location", self.path.replace("redirect", "newurl"))
        self.end_headers()

    def redirect_newhost (self):
        """Redirect request to a new host."""
        self.send_response(302)
        self.send_header("Location", "http://www.example.com/")
        self.end_headers()

    def redirect_newscheme (self):
        """Redirect request to a new scheme (file: or ftp:)."""
        if "file" in self.path:
            target = "file:README.md"
        else:
            target = "ftp://example.com/"
        self.send_response(302)
        self.send_header("Location", target)
        self.end_headers()

    def _handle (self, fallback):
        """Dispatch redirect paths to the matching helper, else fall back."""
        if "redirect_newscheme" in self.path:
            self.redirect_newscheme()
        elif "redirect_newhost" in self.path:
            self.redirect_newhost()
        elif "redirect" in self.path:
            self.redirect()
        else:
            fallback()

    def do_GET (self):
        """Handle redirections for GET."""
        self._handle(super(CookieRedirectHttpRequestHandler, self).do_GET)

    def do_HEAD (self):
        """Handle redirections for HEAD."""
        self._handle(super(CookieRedirectHttpRequestHandler, self).do_HEAD)
| 8,194 | Python | .py | 226 | 28.743363 | 86 | 0.619246 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,208 | test_misc.py | wummel_linkchecker/tests/checker/test_misc.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2009 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test miscellaneous html tag parsing and URL types
"""
from . import LinkCheckTest
class TestMisc (LinkCheckTest):
    """
    Test misc link types.
    """

    def test_misc (self):
        """Check links in the misc.html test page."""
        self.file_test("misc.html")

    def test_html5 (self):
        """Check links in the html5.html test page."""
        self.file_test("html5.html")

    def test_archive (self):
        """Check links in the archive.html test page."""
        self.file_test("archive.html")

    def test_itms_services(self):
        """An itms-services URL is valid and yields its embedded URL."""
        url = u"itms-services:?action=download-manifest&url=http://www.example.com/"
        target = u"http://www.example.com/"
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % url,
            u"real url %s" % url,
            u"valid",
            u"url %s" % target,
            u"cache key %s" % target,
            u"real url %s" % target,
            u"valid",
        ]
        self.direct(url, resultlines, recursionlevel=1)
| 1,634 | Python | .py | 43 | 32.674419 | 84 | 0.669401 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,209 | test_http_misc.py | wummel_linkchecker/tests/checker/test_http_misc.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http checking.
"""
import os
import sys
from .httpserver import HttpServerTest
from linkcheck.network import iputil
class TestHttpMisc (HttpServerTest):
    """Test http:// misc link checking."""

    def test_html (self):
        self.swf_test()
        self.obfuscate_test()

    def swf_test (self):
        """The test.swf file must yield the URL it embeds."""
        url = self.get_url(u"test.swf")
        embedded = u"http://www.example.org/"
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % url,
            u"real url %s" % url,
            u"valid",
            u"url %s" % embedded,
            u"cache key %s" % embedded,
            u"real url %s" % embedded,
            u"valid",
        ]
        self.direct(url, resultlines, recursionlevel=1)

    def obfuscate_test (self):
        """An obfuscated IP address must produce a warning.

        Only runs on POSIX/Linux; other platforms return immediately.
        """
        if os.name != "posix" or sys.platform != 'linux2':
            return
        host = "www.heise.de"
        ip = iputil.resolve_host(host)[0]
        url = u"http://%s/" % iputil.obfuscate_ip(ip)
        rurl = u"http://%s/" % ip
        resultlines = [
            u"url %s" % url,
            u"cache key %s" % rurl,
            u"real url %s" % rurl,
            u"info Access denied by robots.txt, checked only syntax.",
            u"warning URL %s has obfuscated IP address %s" % (url, ip),
            u"valid",
        ]
        self.direct(url, resultlines, recursionlevel=0)
| 2,133 | Python | .py | 57 | 30.947368 | 73 | 0.628861 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,210 | test_frames.py | wummel_linkchecker/tests/checker/test_frames.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2009 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test html <frame> tag parsing.
"""
from . import LinkCheckTest
class TestFrames (LinkCheckTest):
"""
Test link checking of HTML framesets.
"""
def test_frames (self):
self.file_test("frames.html")
| 1,008 | Python | .py | 26 | 36.730769 | 73 | 0.751788 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,211 | test_telnet.py | wummel_linkchecker/tests/checker/test_telnet.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test telnet checking.
"""
from .telnetserver import TelnetServerTest
class TestTelnet (TelnetServerTest):
"""Test telnet: link checking."""
def test_telnet_error (self):
url = u"telnet:"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key None",
u"real url %s" % nurl,
u"error",
]
self.direct(url, resultlines)
def test_telnet_localhost (self):
url = self.get_url()
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.get_url(user=u"test")
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.get_url(user=u"test", password=u"test")
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
| 1,953 | Python | .py | 57 | 27.22807 | 73 | 0.603066 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,212 | test_whitespace.py | wummel_linkchecker/tests/checker/test_whitespace.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test whitespace handling.
"""
from . import LinkCheckTest
class TestWhitespace (LinkCheckTest):
"""
Test whitespace in URLs.
"""
def test_leading_whitespace (self):
# Leading whitespace
url = u" http://www.example.org/"
attrs = self.get_attrs(url=url)
attrs['surl'] = url.strip()
resultlines = [
u"url %(surl)s" % attrs,
u"cache key %(surl)s" % attrs,
u"real url %(surl)s" % attrs,
u"warning Leading or trailing whitespace in URL `%(url)s'." % attrs,
u"valid",
]
self.direct(url, resultlines)
url = u"\nhttp://www.example.org/"
attrs = self.get_attrs(url=url)
attrs['surl'] = url.strip()
resultlines = [
u"url %(surl)s" % attrs,
u"cache key %(surl)s" % attrs,
u"real url %(surl)s" % attrs,
u"warning Leading or trailing whitespace in URL `%(url)s'." % attrs,
u"valid",
]
self.direct(url, resultlines)
def test_trailing_whitespace (self):
# Trailing whitespace
url = u"http://www.example.org/ "
resultlines = [
u"url %s" % url.strip(),
u"cache key %s" % url.strip(),
u"real url %s" % url.strip(),
u"warning Leading or trailing whitespace in URL `%s'." % url,
u"valid",
]
self.direct(url, resultlines)
url = u"http://www.example.org/\n"
resultlines = [
u"url %s" % url.strip(),
u"cache key %s" % url.strip(),
u"real url %s" % url.strip(),
u"warning Leading or trailing whitespace in URL `%s'." % url,
u"valid",
]
self.direct(url, resultlines)
| 2,565 | Python | .py | 68 | 30.25 | 80 | 0.599679 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,213 | test_https_redirect.py | wummel_linkchecker/tests/checker/test_https_redirect.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http checking.
"""
from .httpserver import HttpServerTest, CookieRedirectHttpRequestHandler
class TestHttpsRedirect (HttpServerTest):
"""Test https:// link redirection checking."""
def __init__(self, methodName='runTest'):
super(TestHttpsRedirect, self).__init__(methodName=methodName)
self.handler = RedirectHttpsRequestHandler
def test_redirect (self):
url = u"http://localhost:%d/redirect1" % self.port
nurl = url
#rurl = u"https://localhost:%d/newurl1" % self.port
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % url,
# XXX the redirect fails because this is not an SSL server
#u"info Redirected to `%s'." % rurl.replace('http:', 'https:'),
#u"valid",
#u"url %s" % rurl,
#u"cache key %s" % rurl,
#u"real url %s" % rurl,
u"error",
]
self.direct(url, resultlines, recursionlevel=0)
class RedirectHttpsRequestHandler (CookieRedirectHttpRequestHandler):
def redirect (self):
"""Redirect request."""
path = self.path.replace("redirect", "newurl")
port = self.server.server_address[1]
url = "https://localhost:%d%s" % (port, path)
self.send_response(302)
self.send_header("Location", url)
self.end_headers()
| 2,184 | Python | .py | 51 | 36.666667 | 75 | 0.663217 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,214 | test_file.py | wummel_linkchecker/tests/checker/test_file.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test file parsing.
"""
import os
import sys
import zipfile
from tests import need_word, need_pdflib
from . import LinkCheckTest, get_file
def unzip (filename, targetdir):
"""Unzip given zipfile into targetdir."""
if isinstance(targetdir, unicode):
targetdir = str(targetdir)
zf = zipfile.ZipFile(filename)
for name in zf.namelist():
if name.endswith('/'):
os.mkdir(os.path.join(targetdir, name), 0700)
else:
outfile = open(os.path.join(targetdir, name), 'wb')
try:
outfile.write(zf.read(name))
finally:
outfile.close()
class TestFile (LinkCheckTest):
"""
Test file:// link checking (and file content parsing).
"""
def test_html (self):
self.file_test("file.html")
def test_wml (self):
self.file_test("file.wml")
def test_text (self):
self.file_test("file.txt")
def test_asc (self):
self.file_test("file.asc")
def test_css (self):
self.file_test("file.css")
def test_php (self):
self.file_test("file.php")
@need_word
def test_word (self):
confargs = dict(enabledplugins=["WordParser"])
self.file_test("file.doc", confargs=confargs)
@need_pdflib
def test_pdf(self):
confargs = dict(enabledplugins=["PdfParser"])
self.file_test("file.pdf", confargs=confargs)
def test_markdown(self):
confargs = dict(enabledplugins=["MarkdownCheck"])
self.file_test("file.markdown", confargs=confargs)
def test_urllist (self):
self.file_test("urllist.txt")
def test_directory_listing (self):
# unpack non-unicode filename which cannot be stored
# in the SF subversion repository
if os.name != 'posix' or sys.platform != 'linux2':
return
dirname = get_file("dir")
if not os.path.isdir(dirname):
unzip(dirname + ".zip", os.path.dirname(dirname))
self.file_test("dir")
def test_unicode_filename (self):
# a unicode filename
self.file_test(u"Мошкова.bin")
def test_good_file (self):
url = u"file://%(curdir)s/%(datadir)s/file.txt" % self.get_attrs()
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
]
self.direct(url, resultlines)
def test_bad_file (self):
if os.name == 'nt':
# Fails on NT platforms and I am too lazy to fix
# Cause: url get quoted %7C which gets lowercased to
# %7c and this fails.
return
url = u"file:/%(curdir)s/%(datadir)s/file.txt" % self.get_attrs()
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"error",
]
self.direct(url, resultlines)
def test_good_file_missing_dslash (self):
# good file (missing double slash)
attrs = self.get_attrs()
url = u"file:%(curdir)s/%(datadir)s/file.txt" % attrs
resultlines = [
u"url %s" % url,
u"cache key file://%(curdir)s/%(datadir)s/file.txt" % attrs,
u"real url file://%(curdir)s/%(datadir)s/file.txt" % attrs,
u"valid",
]
self.direct(url, resultlines)
def test_good_dir (self):
url = u"file://%(curdir)s/%(datadir)s/" % self.get_attrs()
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
def test_good_dir_space (self):
url = u"file://%(curdir)s/%(datadir)s/a b/" % self.get_attrs()
nurl = self.norm(url)
url2 = u"file://%(curdir)s/%(datadir)s/a b/el.html" % self.get_attrs()
nurl2 = self.norm(url2)
url3 = u"file://%(curdir)s/%(datadir)s/a b/t.txt" % self.get_attrs()
nurl3 = self.norm(url3)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
u"url el.html",
u"cache key %s" % nurl2,
u"real url %s" % nurl2,
u"name el.html",
u"valid",
u"url t.txt",
u"cache key %s" % nurl3,
u"real url %s" % nurl3,
u"name t.txt",
u"valid",
u"url t.txt",
u"cache key %s" % nurl3,
u"real url %s" % nurl3,
u"name External link",
u"valid",
]
self.direct(url, resultlines, recursionlevel=2)
| 5,550 | Python | .py | 153 | 27.849673 | 78 | 0.574967 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,215 | test_base.py | wummel_linkchecker/tests/checker/test_base.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test html <base> tag parsing.
"""
from . import LinkCheckTest
class TestBase (LinkCheckTest):
"""
Test links of base*.html files.
"""
def test_base1 (self):
self.file_test("base1.html")
def test_base2 (self):
self.file_test("base2.html")
def test_base3 (self):
self.file_test("base3.html")
def test_base4 (self):
self.file_test("base4.html")
| 1,192 | Python | .py | 32 | 34.1875 | 73 | 0.72357 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,216 | __init__.py | wummel_linkchecker/tests/checker/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Define standard test support classes funtional for LinkChecker tests.
"""
import os
import re
import codecs
import difflib
import unittest
import linkcheck.checker
import linkcheck.configuration
import linkcheck.director
import linkcheck.logger
from .. import get_file
# helper alias
get_url_from = linkcheck.checker.get_url_from
class TestLogger (linkcheck.logger._Logger):
"""
Output logger for automatic regression tests.
"""
LoggerName = 'test'
def __init__ (self, **kwargs):
"""
The kwargs must have "expected" keyword with the expected logger
output lines.
"""
args = self.get_args(kwargs)
super(TestLogger, self).__init__(**args)
# list of expected output lines
self.expected = args['expected']
# list of real output lines
self.result = []
# diff between expected and real output
self.diff = []
def start_output (self):
"""
Nothing to do here.
"""
pass
def log_url (self, url_data):
"""
Append logger output to self.result.
"""
if self.has_part('url'):
url = u"url %s" % url_data.base_url
self.result.append(url)
if self.has_part('cachekey'):
cache_key = url_data.cache_url if url_data.cache_url else None
self.result.append(u"cache key %s" % cache_key)
if self.has_part('realurl'):
self.result.append(u"real url %s" % url_data.url)
if self.has_part('name') and url_data.name:
self.result.append(u"name %s" % url_data.name)
if self.has_part('base') and url_data.base_ref:
self.result.append(u"baseurl %s" % url_data.base_ref)
if self.has_part('info'):
for info in url_data.info:
if "Last modified" not in info and \
"is located in" not in info and \
"Using proxy" not in info:
self.result.append(u"info %s" % info)
if self.has_part('warning'):
for tag, warning in url_data.warnings:
self.result.append(u"warning %s" % warning)
if self.has_part('result'):
self.result.append(u"valid" if url_data.valid else u"error")
# note: do not append url_data.result since this is
# platform dependent
def end_output (self, linknumber=-1, **kwargs):
"""
Stores differences between expected and result in self.diff.
"""
for line in difflib.unified_diff(self.expected, self.result):
if not isinstance(line, unicode):
# The ---, +++ and @@ lines from diff format are ascii encoded.
# Make them unicode.
line = unicode(line, "ascii", "replace")
self.diff.append(line)
def get_file_url (filename):
return re.sub("^([a-zA-Z]):", r"/\1|", filename.replace("\\", "/"))
def add_fileoutput_config (config):
if os.name == 'posix':
devnull = '/dev/null'
elif os.name == 'nt':
devnull = 'NUL'
else:
return
for ftype in linkcheck.logger.LoggerNames:
if ftype in ('test', 'blacklist'):
continue
logger = config.logger_new(ftype, fileoutput=1, filename=devnull)
config['fileoutput'].append(logger)
def get_test_aggregate (confargs, logargs):
"""Initialize a test configuration object."""
config = linkcheck.configuration.Configuration()
config.logger_add(TestLogger)
config['recursionlevel'] = 1
config['logger'] = config.logger_new(TestLogger.LoggerName, **logargs)
add_fileoutput_config(config)
# uncomment for debugging
#config.init_logging(None, debug=["all"])
config["verbose"] = True
config['threads'] = 0
config['status'] = False
config["checkextern"] = True
config.update(confargs)
config.sanitize()
return linkcheck.director.get_aggregate(config)
class LinkCheckTest (unittest.TestCase):
"""
Functional test class with ability to test local files.
"""
def setUp (self):
"""Ensure the current locale setting is the default.
Otherwise, warnings will get translated and will break tests."""
super(LinkCheckTest, self).setUp()
linkcheck.init_i18n(loc='C')
def norm (self, url, encoding=None):
"""Helper function to norm a url."""
return linkcheck.url.url_norm(url, encoding=encoding)[0]
def get_attrs (self, **kwargs):
"""Return current and data directory as dictionary.
You can augment the dict with keyword attributes."""
d = {
'curdir': get_file_url(os.getcwd()),
'datadir': "tests/checker/data",
}
d.update(kwargs)
return d
def get_resultlines (self, filename):
"""
Return contents of file, as list of lines without line endings,
ignoring empty lines and lines starting with a hash sign (#).
"""
resultfile = get_file(u"%s.result" % filename)
d = {'curdir': get_file_url(os.getcwd()),
'datadir': get_file_url(get_file()),
}
# the webserver uses the first free port number
if hasattr(self, 'port'):
d['port'] = self.port
# all result files are encoded in utf-8
with codecs.open(resultfile, "r", "utf-8") as f:
return [line.rstrip(u'\r\n') % d for line in f
if line.strip() and not line.startswith(u'#')]
def get_url(self, filename):
"""Get URL for given filename."""
return get_file(filename)
def file_test (self, filename, confargs=None):
"""Check <filename> with expected result in <filename>.result."""
url = self.get_url(filename)
if confargs is None:
confargs = {}
logargs = {'expected': self.get_resultlines(filename)}
aggregate = get_test_aggregate(confargs, logargs)
url_data = get_url_from(url, 0, aggregate, extern=(0, 0))
aggregate.urlqueue.put(url_data)
linkcheck.director.check_urls(aggregate)
diff = aggregate.config['logger'].diff
if diff:
msg = unicode(os.linesep).join([url] + diff)
self.fail_unicode(msg)
def fail_unicode (self, msg):
"""Print encoded fail message."""
# XXX self.fail() only supports ascii
msg = msg.encode("ascii", "replace")
self.fail(msg)
def direct (self, url, resultlines, parts=None, recursionlevel=0,
confargs=None):
"""Check url with expected result."""
assert isinstance(url, unicode), repr(url)
if confargs is None:
confargs = {'recursionlevel': recursionlevel}
else:
confargs['recursionlevel'] = recursionlevel
logargs = {'expected': resultlines}
if parts is not None:
logargs['parts'] = parts
aggregate = get_test_aggregate(confargs, logargs)
# initial URL has recursion level zero
url_reclevel = 0
url_data = get_url_from(url, url_reclevel, aggregate)
aggregate.urlqueue.put(url_data)
linkcheck.director.check_urls(aggregate)
diff = aggregate.config['logger'].diff
if diff:
l = [u"Differences found testing %s" % url]
l.extend(x.rstrip() for x in diff[2:])
self.fail_unicode(unicode(os.linesep).join(l))
class MailTest (LinkCheckTest):
"""Test mailto: link checking."""
def mail_valid (self, addr, **kwargs):
"""Test valid mail address."""
return self.mail_test(addr, u"valid", **kwargs)
def mail_error (self, addr, **kwargs):
"""Test error mail address."""
return self.mail_test(addr, u"error", **kwargs)
def mail_test (self, addr, result, cache_key=None, warning=None):
"""Test mail address."""
url = self.norm(addr)
if cache_key is None:
cache_key = url
resultlines = [
u"url %s" % url,
u"cache key %s" % cache_key,
u"real url %s" % url,
]
if warning:
resultlines.append(u"warning %s" % warning)
resultlines.append(result)
self.direct(url, resultlines)
| 9,070 | Python | .py | 226 | 32.070796 | 79 | 0.617584 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,217 | test_bookmarks.py | wummel_linkchecker/tests/checker/test_bookmarks.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test bookmark file parsing.
"""
from . import LinkCheckTest
from .. import need_network, need_biplist
import os
class TestBookmarks (LinkCheckTest):
"""
Test bookmark link checking and content parsing.
"""
@need_network
def _test_firefox_bookmarks (self):
# firefox 3 bookmark file parsing
self.file_test("places.sqlite")
@need_network
def _test_opera_bookmarks (self):
# Opera bookmark file parsing
self.file_test("opera6.adr")
@need_network
def _test_chromium_bookmarks (self):
# Chromium and Google Chrome bookmark file parsing
self.file_test("Bookmarks")
@need_network
def test_safari_bookmarks_xml (self):
# Safari bookmark file parsing (for plaintext plist files)
self.file_test(os.path.join("plist_xml", "Bookmarks.plist"))
@need_network
@need_biplist
def test_safari_bookmarks_binary (self):
# Safari bookmark file parsing (for binary plist files)
self.file_test(os.path.join("plist_binary", "Bookmarks.plist"))
| 1,842 | Python | .py | 47 | 35.148936 | 73 | 0.723714 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,218 | test_http_robots.py | wummel_linkchecker/tests/checker/test_http_robots.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http checking.
"""
from .httpserver import HttpServerTest
class TestHttpRobots (HttpServerTest):
"""Test robots.txt link checking behaviour."""
def test_html (self):
self.robots_txt_test()
self.robots_txt2_test()
def robots_txt_test (self):
url = u"http://localhost:%d/robots.txt" % self.port
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines, recursionlevel=5)
def robots_txt2_test (self):
url = u"http://localhost:%d/secret" % self.port
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"info Access denied by robots.txt, checked only syntax.",
u"valid",
]
self.direct(url, resultlines, recursionlevel=5)
| 1,702 | Python | .py | 44 | 32.954545 | 73 | 0.659008 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,219 | test_httpbin.py | wummel_linkchecker/tests/checker/test_httpbin.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http stuff with httpbin.org.
"""
import re
from . import LinkCheckTest
def get_httpbin_url(path):
"""Get httpbin URL. Note that this also could be a local
httpbin installation, but right now this uses the official site."""
return u"http://httpbin.org%s" % path
class TestHttpbin(LinkCheckTest):
"""Test http:// link redirection checking."""
def test_http_link(self):
linkurl = u"http://www.example.com"
nlinkurl = self.norm(linkurl)
url = get_httpbin_url(u"/response-headers?Link=<%s>;rel=previous" % linkurl)
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
u"url %s" % linkurl,
u"cache key %s" % nlinkurl,
u"real url %s" % nlinkurl,
u"name Link: header previous",
u"valid",
]
self.direct(url, resultlines, recursionlevel=1)
def test_basic_auth(self):
user = u"testuser"
password = u"testpassword"
url = get_httpbin_url(u"/basic-auth/%s/%s" % (user, password))
nurl = self.norm(url)
entry = dict(
user=user,
password=password,
pattern=re.compile(r'.*'),
)
confargs = dict(authentication=[entry])
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
]
self.direct(url, resultlines, confargs=confargs)
def test_http_refresh_header(self):
linkurl = u"http://www.example.com"
nlinkurl = self.norm(linkurl)
url = get_httpbin_url(u"/response-headers?Refresh=5;url=%s" % linkurl)
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
u"url %s" % linkurl,
u"cache key %s" % nlinkurl,
u"real url %s" % nlinkurl,
u"name Refresh: header",
u"valid",
]
self.direct(url, resultlines, recursionlevel=1)
def test_http_content_location_header(self):
linkurl = u"http://www.example.com"
nlinkurl = self.norm(linkurl)
url = get_httpbin_url(u"/response-headers?Content-Location=%s" % linkurl)
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"valid",
u"url %s" % linkurl,
u"cache key %s" % nlinkurl,
u"real url %s" % nlinkurl,
u"name Content-Location: header",
u"valid",
]
self.direct(url, resultlines, recursionlevel=1)
| 3,594 | Python | .py | 96 | 29.1875 | 84 | 0.590544 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,220 | test_news.py | wummel_linkchecker/tests/checker/test_news.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2010,2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test news checking.
"""
import pytest
from tests import need_newsserver, limit_time
from . import LinkCheckTest
# Changes often, as servers tend to get invalid. Thus it is necessary
# to enable the has_newsserver() resource manually.
NNTP_SERVER = "news.uni-stuttgart.de"
# info string returned by news server
NNTP_INFO = u"200 news.uni-stuttgart.de InterNetNews NNRP server " \
u"INN 2.5.2 ready (no posting)"
# Most free NNTP servers are slow, so don't waist a lot of time running those.
NNTP_TIMEOUT_SECS = 30
# disabled for now until some stable news server comes up
@pytest.mark.skipif("True")
class TestNews (LinkCheckTest):
"""Test nntp: and news: link checking."""
def newstest (self, url, resultlines):
self.direct(url, resultlines)
def test_news_without_host (self):
# news testing
url = u"news:comp.os.linux.misc"
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"warning No NNTP server was specified, skipping this URL.",
u"valid",
]
self.newstest(url, resultlines)
# no group
url = u"news:"
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"warning No NNTP server was specified, skipping this URL.",
u"valid",
]
self.newstest(url, resultlines)
def test_snews_with_group (self):
url = u"snews:de.comp.os.unix.linux.misc"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"warning No NNTP server was specified, skipping this URL.",
u"valid",
]
self.newstest(url, resultlines)
def test_illegal_syntax (self):
# illegal syntax
url = u"news:§$%&/´`(§%"
qurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % qurl,
u"real url %s" % qurl,
u"warning No NNTP server was specified, skipping this URL.",
u"valid",
]
self.newstest(url, resultlines)
@need_newsserver(NNTP_SERVER)
@limit_time(NNTP_TIMEOUT_SECS, skip=True)
def test_nntp_with_host (self):
url = u"nntp://%s/comp.lang.python" % NNTP_SERVER
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"info %s" % NNTP_INFO,
u"info News group comp.lang.python found.",
u"valid",
]
self.newstest(url, resultlines)
@need_newsserver(NNTP_SERVER)
@limit_time(NNTP_TIMEOUT_SECS, skip=True)
def test_article_span (self):
url = u"nntp://%s/comp.lang.python/1-5" % NNTP_SERVER
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"info %s" % NNTP_INFO,
u"info News group comp.lang.python found.",
u"valid",
]
self.newstest(url, resultlines)
def test_article_span_no_host (self):
url = u"news:comp.lang.python/1-5"
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"warning No NNTP server was specified, skipping this URL.",
u"valid",
]
self.newstest(url, resultlines)
@need_newsserver(NNTP_SERVER)
@limit_time(NNTP_TIMEOUT_SECS, skip=True)
def test_host_no_group (self):
url = u"nntp://%s/" % NNTP_SERVER
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
u"info %s" % NNTP_INFO,
u"warning No newsgroup specified in NNTP URL.",
u"valid",
]
self.newstest(url, resultlines)
| 4,749 | Python | .py | 129 | 28.821705 | 78 | 0.591974 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,221 | test_anchor.py | wummel_linkchecker/tests/checker/test_anchor.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test html anchor parsing and checking.
"""
from . import LinkCheckTest
class TestAnchor (LinkCheckTest):
"""
Test anchor checking of HTML pages.
"""
def test_anchor (self):
confargs = {"enabledplugins": ["AnchorCheck"]}
url = u"file://%(curdir)s/%(datadir)s/anchor.html" % self.get_attrs()
nurl = self.norm(url)
anchor = "broken"
urlanchor = url + "#" + anchor
resultlines = [
u"url %s" % urlanchor,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"warning Anchor `%s' not found. Available anchors: `myid:'." % anchor,
u"valid",
]
self.direct(urlanchor, resultlines, confargs=confargs)
| 1,514 | Python | .py | 38 | 35.052632 | 83 | 0.67731 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,222 | telnetserver.py | wummel_linkchecker/tests/checker/telnetserver.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Define http test support classes for LinkChecker tests.
"""
import sys
import os
import time
import threading
import telnetlib
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "third_party", "miniboa-r42"))
import miniboa
from . import LinkCheckTest
TIMEOUT = 5
class TelnetServerTest (LinkCheckTest):
"""Start/stop a Telnet server that can be used for testing."""
def __init__ (self, methodName='runTest'):
"""Init test class and store default ftp server port."""
super(TelnetServerTest, self).__init__(methodName=methodName)
self.host = 'localhost'
self.port = None
def get_url(self, user=None, password=None):
if user is not None:
if password is not None:
netloc = u"%s:%s@%s" % (user, password, self.host)
else:
netloc = u"%s@%s" % (user, self.host)
else:
netloc = self.host
return u"telnet://%s:%d" % (netloc, self.port)
def setUp (self):
"""Start a new Telnet server in a new thread."""
self.port = start_server(self.host, 0)
self.assertFalse(self.port is None)
def tearDown(self):
"""Send QUIT request to telnet server."""
try:
stop_server(self.host, self.port)
except Exception:
pass
def start_server (host, port):
# Instantiate Telnet server class and listen to host:port
clients = []
def on_connect(client):
clients.append(client)
client.send("Telnet test server\n")
server = miniboa.TelnetServer(port=port, host=host, on_connect=on_connect)
port = server.server_socket.getsockname()[1]
t = threading.Thread(None, serve_forever, args=(server, clients))
t.start()
# wait for server to start up
tries = 0
while tries < 5:
tries += 1
try:
client = telnetlib.Telnet(timeout=TIMEOUT)
client.open(host, port)
client.write("exit\n")
break
except:
time.sleep(0.5)
return port
def stop_server (host, port):
"""Stop a running FTP server."""
client = telnetlib.Telnet(timeout=TIMEOUT)
client.open(host, port)
client.write("stop\n")
def serve_forever(server, clients):
"""Run poll loop for server."""
while True:
server.poll()
for client in clients:
if client.active and client.cmd_ready:
if not handle_cmd(client):
return
def handle_cmd(client):
"""Handle telnet clients."""
msg = client.get_command().lower()
if msg == 'exit':
client.active = False
elif msg == 'stop':
return False
return True
| 3,490 | Python | .py | 97 | 29.845361 | 98 | 0.649305 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,223 | test_noproxy.py | wummel_linkchecker/tests/checker/test_noproxy.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2012 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test proxy handling.
"""
import httpserver
from test.test_support import EnvironmentVarGuard
class TestProxy (httpserver.HttpServerTest):
"""Test no_proxy env var handling."""
def test_noproxy (self):
# set env vars
with EnvironmentVarGuard() as env:
env.set("http_proxy", "http://example.org:8877")
env.set("no_proxy", "localhost:%d" % self.port)
self.noproxy_test()
def noproxy_test(self):
# Test setting proxy and no_proxy env variable.
url = self.get_url(u"favicon.ico")
nurl = url
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Ignoring proxy setting `http://example.org:8877'.",
u"valid",
]
self.direct(url, resultlines, recursionlevel=0)
| 1,644 | Python | .py | 41 | 34.829268 | 73 | 0.679375 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,224 | test_http_redirect.py | wummel_linkchecker/tests/checker/test_http_redirect.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http checking.
"""
from .httpserver import HttpServerTest, CookieRedirectHttpRequestHandler
class TestHttpRedirect (HttpServerTest):
"""Test http:// link redirection checking."""
def __init__(self, methodName='runTest'):
super(TestHttpRedirect, self).__init__(methodName=methodName)
self.handler = CookieRedirectHttpRequestHandler
def test_redirect (self):
self.redirect1()
self.redirect2()
self.redirect3()
self.redirect4()
self.redirect5()
def redirect1 (self):
url = u"http://localhost:%d/redirect1" % self.port
nurl = url
rurl = url.replace("redirect", "newurl")
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % rurl,
u"info Redirected to `%s'." % rurl,
u"error",
]
self.direct(url, resultlines, recursionlevel=0)
def redirect2 (self):
url = u"http://localhost:%d/tests/checker/data/redirect.html" % \
self.port
nurl = url
rurl = url.replace("redirect", "newurl")
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % rurl,
u"info Redirected to `%s'." % rurl,
u"valid",
u"url newurl.html",
u"cache key %s" % nurl,
u"real url %s" % rurl,
u"name Recursive Redirect",
# XXX the info is copied from the cached result
u"info Redirected to `%s'." % rurl,
u"valid",
]
self.direct(url, resultlines, recursionlevel=99)
def redirect3 (self):
url = u"http://localhost:%d/tests/checker/data/redir.html" % self.port
resultlines = self.get_resultlines("redir.html")
self.direct(url, resultlines, recursionlevel=1)
def redirect4 (self):
url = u"http://localhost:%d/redirect_newscheme_ftp" % self.port
nurl = url
#rurl = u"ftp://example.com/"
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
# don't allow ftp redirects
#u"info Redirected to `%s'." % rurl,
#u"valid",
#u"url %s" % rurl,
#u"cache key %s" % rurl,
#u"real url %s" % rurl,
u"error",
]
self.direct(url, resultlines, recursionlevel=99)
def redirect5 (self):
url = u"http://localhost:%d/redirect_newscheme_file" % self.port
nurl = url
#rurl = u"file:README"
#rnurl = u"file:///README"
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
# don't allow file redirects
#u"info Redirected to `%s'." % rurl,
#u"warning Redirection to url `%s' is not allowed." % rnurl,
u"error",
]
self.direct(url, resultlines, recursionlevel=99)
def redirect6(self):
#max_redirect = 10
# url = "http://httpbin.org/redirect/" + max_redirect --> valid
# url = "http://httpbin.org/redirect/" + (max_redirect+1) --> error
pass # XXX
| 4,028 | Python | .py | 104 | 30.240385 | 78 | 0.586462 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,225 | test_mail_bad.py | wummel_linkchecker/tests/checker/test_mail_bad.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test mail checking of bad mail addresses.
"""
from . import MailTest
class TestMailBad (MailTest):
"""Test mailto: link checking."""
def test_error_mail (self):
# too long or too short
self.mail_error(u"mailto:@")
self.mail_error(u"mailto:@example.org")
self.mail_error(u"mailto:a@")
self.mail_error(u"mailto:%s@example.org" % (u"a"*65))
self.mail_error(u'mailto:a@%s.com' % (u"a"*64))
# local part quoted
self.mail_error(u'mailto:"a""@example.com', cache_key=u'mailto:a')
self.mail_error(u'mailto:""a"@example.com', cache_key=u'mailto:""a"@example.com')
self.mail_error(u'mailto:"a\\"@example.com', cache_key=u'mailto:a"@example.com')
# local part unqouted
self.mail_error(u'mailto:.a@example.com')
self.mail_error(u'mailto:a.@example.com')
self.mail_error(u'mailto:a..b@example.com')
# domain part
self.mail_error(u'mailto:a@a_b.com')
self.mail_error(u'mailto:a@example.com.')
self.mail_error(u'mailto:a@example.com.111')
self.mail_error(u'mailto:a@example..com')
# other
# ? extension forbidden in <> construct
self.mail_error(u"mailto:Bastian Kleineidam <calvin@users.sourceforge.net?foo=bar>",
cache_key=u"mailto:calvin@users.sourceforge.net?foo=bar")
| 2,140 | Python | .py | 46 | 41.195652 | 92 | 0.683405 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,226 | test_unknown.py | wummel_linkchecker/tests/checker/test_unknown.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test checking of unknown URLs.
"""
from . import LinkCheckTest
class TestUnknown (LinkCheckTest):
"""Test unknown URL scheme checking."""
def test_skype (self):
url = u"skype:"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Skype URL ignored.",
u"valid",
]
self.direct(url, resultlines)
def test_irc (self):
url = u"irc://example.org"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Irc URL ignored.",
u"valid",
]
self.direct(url, resultlines)
url = u"ircs://example.org"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Ircs URL ignored.",
u"valid",
]
self.direct(url, resultlines)
def test_steam (self):
url = u"steam://connect/example.org"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Steam URL ignored.",
u"valid",
]
self.direct(url, resultlines)
def test_feed (self):
url = u"feed:https://example.com/entries.atom"
nurl = u"feed:https%3A/example.com/entries.atom"
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Feed URL ignored.",
u"valid",
]
self.direct(url, resultlines)
url = u"feed://example.com/entries.atom"
nurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key %s" % nurl,
u"real url %s" % nurl,
u"info Feed URL ignored.",
u"valid",
]
self.direct(url, resultlines)
| 2,887 | Python | .py | 86 | 25.290698 | 73 | 0.563148 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,227 | test_urllen.py | wummel_linkchecker/tests/checker/test_urllen.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2012-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test URL length checks.
"""
from . import LinkCheckTest
from linkcheck.checker.const import URL_MAX_LENGTH
class TestURLLength(LinkCheckTest):
"""
Test URL lengths.
"""
def test_url_warn(self):
url = u"http://www.example.org/" + (u"a" * URL_MAX_LENGTH)
attrs = self.get_attrs(url=url)
attrs['nurl'] = self.norm(url)
resultlines = [
u"url %(nurl)s" % attrs,
u"cache key %(nurl)s" % attrs,
u"real url %(nurl)s" % attrs,
u"warning URL length %d is longer than %d." % (len(url), URL_MAX_LENGTH),
u"error",
]
self.direct(url, resultlines)
| 1,445 | Python | .py | 37 | 34.621622 | 85 | 0.682562 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,228 | test_http.py | wummel_linkchecker/tests/checker/test_http.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test http checking.
"""
from .httpserver import HttpServerTest, CookieRedirectHttpRequestHandler
class TestHttp (HttpServerTest):
"""Test http:// link checking."""
def __init__(self, methodName='runTest'):
super(TestHttp, self).__init__(methodName=methodName)
self.handler = CookieRedirectHttpRequestHandler
def test_html (self):
confargs = dict(recursionlevel=1)
self.file_test("http.html", confargs=confargs)
self.file_test("http_lowercase.html", confargs=confargs)
self.file_test("http_quotes.html", confargs=confargs)
self.file_test("http_slash.html", confargs=confargs)
self.file_test("http.xhtml", confargs=confargs)
self.file_test("http_file.html", confargs=confargs)
def test_status(self):
for status in sorted(self.handler.responses.keys()):
self._test_status(status)
def _test_status(self, status):
url = u"http://localhost:%d/status/%d" % (self.port, status)
resultlines = [
u"url %s" % url,
u"cache key %s" % url,
u"real url %s" % url,
]
if status in (204,):
resultlines.append(u"warning No Content")
if (status != 101 and status < 200) or status >= 400:
result = u"error"
else:
result = u"valid"
resultlines.append(result)
self.direct(url, resultlines, recursionlevel=0)
| 2,221 | Python | .py | 51 | 37.588235 | 73 | 0.679446 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,229 | test_mail_good.py | wummel_linkchecker/tests/checker/test_mail_good.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test mail checking.
"""
from tests import need_network
from . import MailTest
class TestMailGood (MailTest):
"""
Test mailto: link checking.
"""
@need_network
def test_good_mail (self):
# some good mailto addrs
url = self.norm(u"mailto:Dude <calvin@users.sourceforge.net> , "\
"Killer <calvin@users.sourceforge.net>?subject=bla")
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.norm(u"mailto:Bastian Kleineidam <calvin@users.sourceforge.net>?"\
"bcc=calvin%40users.sourceforge.net")
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.norm(u"mailto:Bastian Kleineidam <calvin@users.sourceforge.net>")
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.norm(u"mailto:o'hara@users.sourceforge.net")
resultlines = [
u"url %s" % url,
u"cache key mailto:o'hara@users.sourceforge.net",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.norm(u"mailto:?to=calvin@users.sourceforge.net&subject=blubb&"
u"cc=calvin_cc@users.sourceforge.net&CC=calvin_CC@users.sourceforge.net")
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net,"
u"calvin_CC@users.sourceforge.net,calvin_cc@users.sourceforge.net",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
url = self.norm(u"mailto:news-admins@freshcode.club?subject="
"Re:%20[fm%20#11093]%20(news-admins)%20Submission%20"
"report%20-%20Pretty%20CoLoRs")
resultlines = [
u"url %s" % url,
u"cache key mailto:news-admins@freshcode.club",
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
@need_network
def test_warn_mail (self):
# some mailto addrs with warnings
# contains non-quoted characters
url = u"mailto:calvin@users.sourceforge.net?subject=äöü"
qurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net",
u"real url %s" % qurl,
u"valid",
]
self.direct(url, resultlines)
url = u"mailto:calvin@users.sourceforge.net?subject=Halli hallo"
qurl = self.norm(url)
resultlines = [
u"url %s" % url,
u"cache key mailto:calvin@users.sourceforge.net",
u"real url %s" % qurl,
u"valid",
]
self.direct(url, resultlines)
url = u"mailto:"
resultlines = [
u"url %s" % url,
u"cache key mailto:",
u"real url %s" % url,
u"warning No mail addresses or email subject found in `%s'." % url,
u"valid",
]
self.direct(url, resultlines)
def _mail_valid_unverified(self, char):
# valid mail addresses
addr = u'abc%sdef@sourceforge.net' % char
url = u"mailto:%s" % addr
self.mail_valid(url,
cache_key=url)
@need_network
def test_valid_mail1 (self):
for char in u"!#$&'":
self._mail_valid_unverified(char)
@need_network
def test_valid_mail2 (self):
for char in u"*+-/=":
self._mail_valid_unverified(char)
@need_network
def test_valid_mail3 (self):
for char in u"^_`.":
self._mail_valid_unverified(char)
@need_network
def test_valid_mail4 (self):
for char in u"{|}~":
self._mail_valid_unverified(char)
@need_network
def test_unicode_mail (self):
mailto = u"mailto:ölvin@users.sourceforge.net"
url = self.norm(mailto, encoding="iso-8859-1")
resultlines = [
u"url %s" % url,
u"cache key %s" % mailto,
u"real url %s" % url,
u"valid",
]
self.direct(url, resultlines)
@need_network
def test_mail_subject(self):
url = u"mailto:?subject=Halli hallo"
nurl = self.norm(url)
curl = u"mailto:"
resultlines = [
u"url %s" % url,
u"cache key %s" % curl,
u"real url %s" % nurl,
u"valid",
]
self.direct(url, resultlines)
| 5,698 | Python | .py | 158 | 27.132911 | 96 | 0.578767 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,230 | __init__.py | wummel_linkchecker/tests/logger/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2009 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
| 775 | Python | .py | 16 | 47.4375 | 73 | 0.774704 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,231 | test_csvlog.py | wummel_linkchecker/tests/logger/test_csvlog.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2009-2010 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import unittest
import os
from linkcheck.logger.csvlog import CSVLogger
class TestCsvLogger (unittest.TestCase):
def test_parts (self):
args = dict(
filename=os.path.join(os.path.dirname(__file__), "testlog.csv"),
parts=["realurl"],
fileoutput=1,
separator=";",
quotechar='"',
)
logger = CSVLogger(**args)
try:
logger.start_output()
finally:
logger.end_output()
os.remove(args["filename"])
| 1,314 | Python | .py | 34 | 33.411765 | 76 | 0.688871 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,232 | cookies.py | wummel_linkchecker/linkcheck/cookies.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Parsing of cookies.
"""
import cookielib
import httplib
import requests
from cStringIO import StringIO
def from_file (filename):
"""Parse cookie data from a text file in HTTP header format.
@return: list of tuples (headers, scheme, host, path)
"""
entries = []
with open(filename) as fd:
lines = []
for line in fd.readlines():
line = line.rstrip()
if not line:
if lines:
entries.append(from_headers("\r\n".join(lines)))
lines = []
else:
lines.append(line)
if lines:
entries.append(from_headers("\r\n".join(lines)))
return entries
def from_headers (strheader):
"""Parse cookie data from a string in HTTP header (RFC 2616) format.
@return: list of cookies
@raises: ValueError for incomplete or invalid data
"""
res = []
fp = StringIO(strheader)
headers = httplib.HTTPMessage(fp, seekable=True)
if "Host" not in headers:
raise ValueError("Required header 'Host:' missing")
host = headers["Host"]
path= headers.get("Path", "/")
for header in headers.getallmatchingheaders("Set-Cookie"):
headervalue = header.split(':', 1)[1]
for pairs in cookielib.split_header_words([headervalue]):
for name, value in pairs:
cookie = requests.cookies.create_cookie(name, value,
domain=host, path=path)
res.append(cookie)
return res
| 2,307 | Python | .py | 61 | 31.721311 | 73 | 0.663243 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,233 | winutil.py | wummel_linkchecker/linkcheck/winutil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2010-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Windows utility functions."""
def get_shell_folder (name):
"""Get Windows Shell Folder locations from the registry."""
try:
import _winreg as winreg
except ImportError:
import winreg
lm = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
try:
key = winreg.OpenKey(lm, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
try:
return winreg.QueryValueEx(key, name)[0]
finally:
key.Close()
finally:
lm.Close()
| 1,309 | Python | .py | 32 | 36.875 | 101 | 0.724922 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,234 | lock.py | wummel_linkchecker/linkcheck/lock.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Locking utility class.
"""
import threading
from . import log, LOG_THREAD
def get_lock (name, debug=False):
"""Get a new lock.
@param debug: if True, acquire() and release() will have debug messages
@ptype debug: boolean, default is False
@return: a lock object
@rtype: threading.Lock or DebugLock
"""
lock = threading.Lock()
# for thread debugging, use the DebugLock wrapper
if debug:
lock = DebugLock(lock, name)
return lock
class DebugLock (object):
"""Debugging lock class."""
def __init__ (self, lock, name):
"""Store lock and name parameters."""
self.lock = lock
self.name = name
def acquire (self, blocking=1):
"""Acquire lock."""
threadname = threading.currentThread().getName()
log.debug(LOG_THREAD, "Acquire %s for %s", self.name, threadname)
self.lock.acquire(blocking)
log.debug(LOG_THREAD, "...acquired %s for %s", self.name, threadname)
def release (self):
"""Release lock."""
threadname = threading.currentThread().getName()
log.debug(LOG_THREAD, "Release %s for %s", self.name, threadname)
self.lock.release()
def get_semaphore(name, value=None, debug=False):
"""Get a new semaphore.
@param value: if not None, a BoundedSemaphore will be used
@ptype debug: int or None
@param debug: if True, acquire() and release() will have debug messages
@ptype debug: boolean, default is False
@return: a semaphore object
@rtype: threading.Semaphore or threading.BoundedSemaphore or DebugLock
"""
if value is None:
lock = threading.Semaphore()
else:
lock = threading.BoundedSemaphore(value)
if debug:
lock = DebugLock(lock, name)
return lock
| 2,569 | Python | .py | 66 | 34.287879 | 77 | 0.693788 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,235 | colorama.py | wummel_linkchecker/linkcheck/colorama.py | # These functions are part of the python-colorama module
# They have been adjusted slightly for LinkChecker
#
# Copyright: (C) 2010 Jonathan Hartley <tartley@tartley.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name(s) of the copyright holders nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Standard device ids from winbase.h (GetStdHandle arguments).
STDOUT = -11
STDERR = -12
from ctypes import (windll, byref, Structure, c_char, c_short, c_uint32,
                    c_ushort, ArgumentError, WinError)
# Console handles for stdout/stderr, resolved once at import time.
handles = {
    STDOUT: windll.kernel32.GetStdHandle(STDOUT),
    STDERR: windll.kernel32.GetStdHandle(STDERR),
}
# Aliases matching the Windows API type names used by the structs below.
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
    """COORD struct from wincon.h: a console cell position (X column, Y row)."""
    _fields_ = [
        ('X', SHORT),
        ('Y', SHORT),
    ]
class SMALL_RECT(Structure):
    """SMALL_RECT struct from wincon.h: a console window rectangle."""
    _fields_ = [
        ("Left", SHORT),
        ("Top", SHORT),
        ("Right", SHORT),
        ("Bottom", SHORT),
    ]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
    """CONSOLE_SCREEN_BUFFER_INFO struct from wincon.h: buffer size,
    cursor position, current attributes and window extents."""
    _fields_ = [
        ("dwSize", COORD),
        ("dwCursorPosition", COORD),
        ("wAttributes", WORD),
        ("srWindow", SMALL_RECT),
        ("dwMaximumWindowSize", COORD),
    ]
    def __str__(self):
        """Get string representation of console screen buffer info."""
        # order: size, cursor, attributes, window rect, max window size
        return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
            self.dwSize.Y, self.dwSize.X
            , self.dwCursorPosition.Y, self.dwCursorPosition.X
            , self.wAttributes
            , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
            , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
        )
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
    """Get console screen buffer info object for the given stream.
    @param stream_id: STDOUT or STDERR
    @return: CONSOLE_SCREEN_BUFFER_INFO instance
    @raise WindowsError: if the kernel32 call reports failure
    """
    handle = handles[stream_id]
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    success = windll.kernel32.GetConsoleScreenBufferInfo(
        handle, byref(csbi))
    if not success:
        raise WinError()
    return csbi
def SetConsoleTextAttribute(stream_id, attrs):
    """Set a console text attribute word (color + style) on the stream.
    Returns the raw kernel32 result (nonzero on success)."""
    handle = handles[stream_id]
    return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
# Color values from wincon.h; used for both foreground (bits 0-2)
# and background (bits 4-6) of the console attribute word.
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
# Defaults captured from the real console by init(); None until then.
_default_foreground = None
_default_background = None
_default_style = None
def init():
    """Initialize foreground and background attributes.
    Reads the current console attributes; if no console is attached
    (the kernel32 call fails), fall back to grey-on-black, normal style."""
    global _default_foreground, _default_background, _default_style
    try:
        attrs = GetConsoleScreenBufferInfo().wAttributes
    except (ArgumentError, WindowsError):
        _default_foreground = GREY
        _default_background = BLACK
        _default_style = NORMAL
    else:
        # attribute word layout: foreground in bits 0-2, background in
        # bits 4-6, BRIGHT flag in bit 3
        _default_foreground = attrs & 7
        _default_background = (attrs >> 4) & 7
        _default_style = attrs & BRIGHT
def get_attrs(foreground, background, style):
    """Combine color and style values into one Windows console
    attribute word: foreground in the low bits, background shifted
    into bits 4-6, plus the style (BRIGHT) flag."""
    attrs = background << 4
    attrs += foreground
    attrs += style
    return attrs
def set_console(stream=STDOUT, foreground=None, background=None, style=None):
    """Set console foreground and background attributes.
    @param stream: STDOUT or STDERR
    @param foreground: color constant or None for the default captured
        by init()
    @param background: color constant or None for the default
    @param style: NORMAL/BRIGHT or None for the default
    """
    if foreground is None:
        foreground = _default_foreground
    if background is None:
        background = _default_background
    if style is None:
        style = _default_style
    attrs = get_attrs(foreground, background, style)
    SetConsoleTextAttribute(stream, attrs)
def reset_console(stream=STDOUT):
    """Restore the console's default attributes on the given stream."""
    # Passing None for all attributes makes set_console fall back to
    # the defaults captured by init().
    set_console(stream, None, None, None)
def get_console_size():
    """Get the console buffer size as a COORD (X columns, Y rows)."""
    return GetConsoleScreenBufferInfo().dwSize
| 5,098 | Python | .py | 132 | 34.234848 | 94 | 0.70593 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,236 | socketutil.py | wummel_linkchecker/linkcheck/socketutil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2008-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import socket
# test for IPv6, both in Python build and in kernel build
has_ipv6 = False
if socket.has_ipv6:
    # python has ipv6 compiled in, but the operating system also
    # has to support it - probe by creating (and closing) a socket
    try:
        socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
        has_ipv6 = True
    except socket.error as msg:
        # only catch these errno values:
        # socket.error: (97, 'Address family not supported by protocol')
        # socket.error: (10047, 'Address family not supported by protocol')
        # socket.error: (43, 'Protocol not supported')
        if msg.args[0] not in (97, 10047, 43):
            raise
def create_socket (family, socktype, proto=0, timeout=60):
    """
    Create a socket with the given family, type and protocol, set the
    given timeout on it and, for Internet stream sockets, disable
    Nagle's algorithm.
    @return: the configured socket object
    """
    sock = socket.socket(family, socktype, proto=proto)
    sock.settimeout(timeout)
    socktypes_inet = [socket.AF_INET]
    if has_ipv6:
        socktypes_inet.append(socket.AF_INET6)
    if family in socktypes_inet and socktype == socket.SOCK_STREAM:
        # disable NAGLE algorithm, which means sending pending data
        # immediately, possibly wasting bandwidth but improving
        # responsiveness for fast networks
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    return sock
| 2,107 | Python | .py | 48 | 39.375 | 75 | 0.717624 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,237 | lc_cgi.py | wummel_linkchecker/linkcheck/lc_cgi.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Functions used by the WSGI script.
"""
import cgi
import os
import threading
import locale
import re
import time
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
from . import configuration, strformat, checker, director, get_link_pat, \
init_i18n, url as urlutil
from .decorators import synchronized
# 5 minutes timeout for requests
MAX_REQUEST_SECONDS = 300
# character set encoding for HTML output
HTML_ENCODING = 'utf-8'
def application(environ, start_response):
    """WSGI interface: start an URL check.
    Parses the POSTed form data, sends non-caching response headers and
    streams the check output back as it becomes available.
    @param environ: WSGI environment dict
    @param start_response: WSGI response starter callable
    """
    # the environment variable CONTENT_LENGTH may be empty or missing
    try:
        request_body_size = int(environ.get('CONTENT_LENGTH', 0))
    except ValueError:
        request_body_size = 0
    # When the method is POST the query string will be sent
    # in the HTTP request body which is passed by the WSGI server
    # in the file like wsgi.input environment variable.
    if request_body_size > 0:
        request_body = environ['wsgi.input'].read(request_body_size)
    else:
        request_body = environ['wsgi.input'].read()
    form = cgi.parse_qs(request_body)
    status = '200 OK'
    start_response(status, get_response_headers())
    # checklink is a generator; stream its output chunk by chunk
    for output in checklink(form=form, env=environ):
        yield output
# languages the web form may request via its "language" field
# NOTE(review): 'en' is mapped in lang_locale below but is not listed
# here - confirm whether English requests are intentionally rejected.
_supported_langs = ('de', 'C')
# map language -> locale name
lang_locale = {
    'de': 'de_DE',
    'C': 'C',
    'en': 'en_EN',
}
# matcher accepting exactly the allowed recursion level values
_is_level = re.compile(r'^(0|1|2|3|-1)$').match
class LCFormError(Exception):
    """Raised when submitted CGI form data is invalid; the message is
    rendered to the user as an HTML error page."""
    pass
def get_response_headers():
    """Get list of HTTP response headers in (name, value) form.

    Caching is disabled so that repeated checks always produce fresh
    output.
    """
    return [("Content-type", "text/html"),
            ("Cache-Control", "no-cache"),
            # bug fix: the header name must not contain the ':' separator
            # (was "Pragma:", which produced an invalid "Pragma:: no-cache")
            ("Pragma", "no-cache")
           ]
def formvalue (form, key):
    """Return the value stored under key in the WSGI form.
    When the form stores a list of values, only the first one is
    returned; a missing key yields None."""
    value = form.get(key)
    if not isinstance(value, list):
        return value
    return value[0]
# single lock shared by all ThreadsafeIO instances
_lock = threading.Lock()
class ThreadsafeIO (object):
    """Thread-safe unicode I/O buffer: the checker threads write() into
    it while the WSGI generator drains it with get_data()."""
    def __init__(self):
        """Initialize an empty, open buffer."""
        self.buf = []
        self.closed = False
    @synchronized(_lock)
    def write (self, data):
        """Write given unicode data to buffer.
        @raise IOError: if the buffer has been closed
        """
        assert isinstance(data, unicode)
        if self.closed:
            raise IOError("Write on closed I/O object")
        if data:
            self.buf.append(data)
    @synchronized(_lock)
    def get_data (self):
        """Return all buffered unicode data and empty the buffer."""
        data = u"".join(self.buf)
        self.buf = []
        return data
    @synchronized(_lock)
    def close (self):
        """Reset buffer and close this I/O object."""
        self.buf = []
        self.closed = True
def encode(s):
    """Encode the given string in the HTML output encoding, dropping
    unencodable characters."""
    return s.encode(HTML_ENCODING, 'ignore')
def checklink (form=None, env=os.environ):
    """Validates the CGI form and checks the given links.
    Generator yielding encoded HTML output chunks; on invalid form data
    a single error page is yielded instead.
    """
    if form is None:
        form = {}
    try:
        checkform(form, env)
    except LCFormError as errmsg:
        log(env, errmsg)
        yield encode(format_error(errmsg))
        return
    out = ThreadsafeIO()
    config = get_configuration(form, out)
    url = strformat.stripurl(formvalue(form, "url"))
    aggregate = director.get_aggregate(config)
    # seed the queue with the root URL at recursion depth 0
    url_data = checker.get_url_from(url, 0, aggregate, extern=(0, 0))
    aggregate.urlqueue.put(url_data)
    for html_str in start_check(aggregate, out):
        yield encode(html_str)
    out.close()
def start_check (aggregate, out):
    """Start checking in background and write encoded output to out.
    Yields whatever data the checker has produced so far, polling every
    few seconds, and aborts the check after MAX_REQUEST_SECONDS.
    """
    # check in background
    t = threading.Thread(target=director.check_urls, args=(aggregate,))
    t.start()
    # time to wait for new data
    sleep_seconds = 2
    # current running time
    run_seconds = 0
    while not aggregate.is_finished():
        yield out.get_data()
        time.sleep(sleep_seconds)
        run_seconds += sleep_seconds
        if run_seconds > MAX_REQUEST_SECONDS:
            director.abort(aggregate)
            break
    # flush any output produced after the last poll
    yield out.get_data()
def get_configuration(form, out):
    """Initialize a CGI configuration from the validated form data.
    @param form: validated WSGI form dict
    @param out: file-like object receiving HTML logger output
    @return: sanitized Configuration instance
    """
    config = configuration.Configuration()
    config["recursionlevel"] = int(formvalue(form, "level"))
    config["logger"] = config.logger_new('html', fd=out, encoding=HTML_ENCODING)
    config["threads"] = 2
    if "anchors" in form:
        config["enabledplugins"].append("AnchorCheck")
    if "errors" not in form:
        config["verbose"] = True
    # avoid checking of local files or other nasty stuff
    pat = "!^%s$" % urlutil.safe_url_pattern
    config["externlinks"].append(get_link_pat(pat, strict=True))
    config.sanitize()
    return config
def get_host_name (form):
    """Return host name (the netloc component) of the form's URL."""
    return urlparse.urlparse(formvalue(form, "url"))[1]
def checkform (form, env):
    """Check form data. throw exception on error
    Be sure to NOT print out any user-given data as HTML code, so use
    only plain strings as exception text.
    @raise LCFormError: on any invalid form field
    """
    # check lang support
    if "language" in form:
        lang = formvalue(form, 'language')
        if lang in _supported_langs:
            localestr = lang_locale[lang]
            try:
                # XXX this is not thread-safe, so think of something else
                locale.setlocale(locale.LC_ALL, localestr)
                init_i18n()
            except locale.Error as errmsg:
                # locale failure is only logged, not fatal
                log(env, "could not set locale %r: %s" % (localestr, errmsg))
        else:
            raise LCFormError(_("unsupported language %r") % lang)
    # check url syntax
    if "url" in form:
        url = formvalue(form, "url")
        if not url or url == "http://":
            raise LCFormError(_("empty url was given"))
        if not urlutil.is_safe_url(url):
            raise LCFormError(_("disallowed url %r was given") % url)
    else:
        raise LCFormError(_("no url was given"))
    # check recursion level
    if "level" in form:
        level = formvalue(form, "level")
        if not _is_level(level):
            raise LCFormError(_("invalid recursion level %r") % level)
    # check options: only the literal value "on" is accepted
    for option in ("anchors", "errors", "intern"):
        if option in form:
            value = formvalue(form, option)
            if value != "on":
                raise LCFormError(_("invalid %s option %r") % (option, value))
def log (env, msg):
    """Write one message line to the request's WSGI error stream."""
    env['wsgi.errors'].write("%s\n" % msg)
def dump (env, form):
    """Log the whole environment and all form values (debug helper)."""
    for var, value in env.items():
        log(env, var+"="+value)
    for key in form:
        log(env, str(formvalue(form, key)))
def format_error (why):
    """Format standard error page.
    The message is HTML-escaped before insertion, so user-supplied text
    cannot inject markup.
    @param why: error message
    @ptype why: unicode
    @return: HTML page content
    @rtype: unicode
    """
    return _("""<!DOCTYPE HTML>
<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>LinkChecker Online Error</title></head>
<body text=#192c83 bgcolor=#fff7e5 link=#191c83 vlink=#191c83 alink=#191c83>
<blockquote>
<b>Error: %s</b><br/>
The LinkChecker Online script has encountered an error. Please ensure
that your provided URL link begins with <code>http://</code> and
contains only these characters: <code>A-Za-z0-9./_~-</code><br/><br/>
Errors are logged.
</blockquote>
</body>
</html>""") % cgi.escape(why)
| 8,260 | Python | .py | 230 | 30.321739 | 80 | 0.652946 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,238 | trace.py | wummel_linkchecker/linkcheck/trace.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import re
import linecache
import time
import sys
import threading
# tracing
_trace_ignore = set()
_trace_filter = set()
def trace_ignore (names):
"""Add given names to trace ignore set, or clear set if names is None."""
if names is None:
_trace_ignore.clear()
else:
_trace_ignore.update(names)
def trace_filter (patterns):
"""Add given patterns to trace filter set or clear set if patterns is
None."""
if patterns is None:
_trace_filter.clear()
else:
_trace_filter.update(re.compile(pat) for pat in patterns)
def _trace (frame, event, arg):
    """Trace function calls and returns (settrace callback).
    Returning itself keeps tracing enabled for nested scopes."""
    if event in ('call', 'c_call'):
        _trace_line(frame, event, arg)
    elif event in ('return', 'c_return'):
        _trace_line(frame, event, arg)
        print(" return:", arg)
    #elif event in ('exception', 'c_exception'):
    #    _trace_line(frame, event, arg)
    return _trace
def _trace_full (frame, event, arg):
    """Trace every executed line (settrace callback); other events are
    delegated to _trace."""
    if event == "line":
        _trace_line(frame, event, arg)
    else:
        _trace(frame, event, arg)
    return _trace_full
def _trace_line (frame, event, arg):
    """Print current executed line, unless the module is ignored or
    fails to match every filter pattern."""
    name = frame.f_globals["__name__"]
    if name in _trace_ignore:
        return _trace_line
    for pat in _trace_filter:
        if not pat.match(name):
            return _trace_line
    lineno = frame.f_lineno
    filename = frame.f_globals["__file__"]
    if filename.endswith((".pyc", ".pyo")):
        # map compiled file back to the .py source for linecache
        filename = filename[:-1]
    line = linecache.getline(filename, lineno)
    currentThread = threading.currentThread()
    tid = currentThread.ident
    tname = currentThread.getName()
    args = (tid, tname, time.time(), line.rstrip(), name, lineno)
    print("THREAD(%d) %r %.2f %s # %s:%d" % args)
def trace_on (full=False):
    """Start tracing of the current thread (and the current thread only).
    @param full: if True also trace every executed line, not just calls
    """
    if full:
        sys.settrace(_trace_full)
    else:
        sys.settrace(_trace)
def trace_off ():
    """Stop tracing of the current thread (and the current thread only)."""
    sys.settrace(None)
| 2,961 | Python | .py | 82 | 31.707317 | 77 | 0.674232 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,239 | better_exchook2.py | wummel_linkchecker/linkcheck/better_exchook2.py | # -*- coding: iso-8859-1 -*-
#
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# file created 2011-04-15
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a simple replacement for the standard Python exception handler (sys.excepthook).
# In addition to what the standard handler does, it also prints all referenced variables
# (no matter if local, global or builtin) of the code line of each stack frame.
# See below for some examples and some example output.
# https://github.com/albertz/py_better_exchook
from __future__ import print_function
import sys
import os
def parse_py_statement(line):
    """Tokenize one line of Python source.

    Yields (kind, text) pairs where kind is one of "op", "str", "id" or
    "comment".  Whitespace is skipped and an unterminated string literal
    at end of line is silently dropped.
    """
    # lexer modes
    M_CODE, M_DQUOTE, M_SQUOTE, M_IDENT, M_DQUOTE_ESC, M_SQUOTE_ESC, M_COMMENT = range(7)
    whitespace = " \t\n"
    operators = ".,;:+-*/%&=|(){}[]^<>"
    def unescape(ch):
        """Translate a backslash-escaped character inside a string."""
        if ch == "n":
            return "\n"
        if ch == "t":
            return "\t"
        return ch
    mode = M_CODE
    buf = ""
    pos = 0
    while pos < len(line):
        ch = line[pos]
        pos += 1
        if mode == M_CODE:
            if ch in whitespace:
                pass
            elif ch in operators:
                yield ("op", ch)
            elif ch == "#":
                mode = M_COMMENT
            elif ch == "\"":
                mode = M_DQUOTE
            elif ch == "'":
                mode = M_SQUOTE
            else:
                buf = ch
                mode = M_IDENT
        elif mode == M_DQUOTE:
            if ch == "\\":
                mode = M_DQUOTE_ESC
            elif ch == "\"":
                yield ("str", buf)
                buf = ""
                mode = M_CODE
            else:
                buf += ch
        elif mode == M_SQUOTE:
            if ch == "\\":
                mode = M_SQUOTE_ESC
            elif ch == "'":
                yield ("str", buf)
                buf = ""
                mode = M_CODE
            else:
                buf += ch
        elif mode == M_IDENT:
            if ch in whitespace + operators + "#\"'":
                yield ("id", buf)
                buf = ""
                mode = M_CODE
                # reprocess this character in code mode
                pos -= 1
            else:
                buf += ch
        elif mode == M_DQUOTE_ESC:
            buf += unescape(ch)
            mode = M_DQUOTE
        elif mode == M_SQUOTE_ESC:
            buf += unescape(ch)
            mode = M_SQUOTE
        elif mode == M_COMMENT:
            buf += ch
    # flush a trailing identifier or comment
    if mode == M_IDENT:
        yield ("id", buf)
    elif mode == M_COMMENT:
        yield ("comment", buf)
import keyword
# reserved words; these are never reported as identifiers
pykeywords = set(keyword.kwlist)
def grep_full_py_identifiers(tokens):
    """Yield dotted identifier chains like "a.b.c" from a token stream
    produced by parse_py_statement, skipping keywords and numbers."""
    toks = list(tokens)
    idx = 0
    total = len(toks)
    while idx < total:
        kind, name = toks[idx]
        idx += 1
        if kind != "id":
            continue
        # greedily absorb trailing ".attr" sequences
        while idx + 1 < total and toks[idx] == ("op", ".") and toks[idx+1][0] == "id":
            name += "." + toks[idx+1][1]
            idx += 2
        if not name or name in pykeywords or name[0] in ".0123456789":
            continue
        yield name
def set_linecache(filename, source):
    """Register source under filename in the linecache, so tracebacks
    can show lines for code that has no on-disk file."""
    import linecache
    lines = []
    for line in source.splitlines():
        lines.append(line + '\n')
    linecache.cache[filename] = (None, None, lines, filename)
def output(s, out=sys.stdout):
    """Print one line to the given stream."""
    print(s, file=out)
def output_limit():
    """Maximum number of characters shown for a single repr()."""
    return 300
def pp_extra_info(obj, depthlimit = 3):
    """Return a short descriptive string for obj: its length and,
    recursively, info about its first item.  Used by pretty_print.
    Introspection errors are deliberately swallowed.
    """
    # bug fix: support both Python 2 (str + unicode) and Python 3 (str
    # only); previously the bare `unicode` reference raised NameError on
    # Python 3, which the bare excepts silently hid.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    simple_types = string_types + (list, tuple, dict)
    s = []
    if hasattr(obj, "__len__"):
        try:
            if type(obj) in simple_types and len(obj) <= 5:
                pass # don't print len in this case
            else:
                s += ["len = " + str(obj.__len__())]
        # bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit
        except Exception:
            pass
    if depthlimit > 0 and hasattr(obj, "__getitem__"):
        try:
            if type(obj) in string_types:
                pass # doesn't make sense to get subitems here
            else:
                subobj = obj.__getitem__(0)
                extra_info = pp_extra_info(subobj, depthlimit - 1)
                if extra_info != "":
                    s += ["_[0]: {" + extra_info + "}"]
        except Exception:
            pass
    return ", ".join(s)
def pretty_print(obj):
    """Return repr(obj) truncated to output_limit() characters, plus
    any extra info supplied by pp_extra_info."""
    text = repr(obj)
    limit = output_limit()
    if len(text) > limit:
        text = text[:limit - 3] + "..."
    extra = pp_extra_info(obj)
    if extra != "":
        text = text + ", " + extra
    return text
def fallback_findfile(filename):
    """Search sys.modules for a loaded module whose __file__ contains
    filename and return that path, with the trailing byte of a
    .pyc/.pyo name stripped; return None when nothing matches.
    """
    # bug fix: also require m.__file__ to be truthy; namespace packages
    # can have __file__ = None, which made `filename in m.__file__`
    # raise TypeError
    mods = [m for m in sys.modules.values()
            if m and getattr(m, "__file__", None) and filename in m.__file__]
    if not mods:
        return None
    altfn = mods[0].__file__
    if altfn[-4:-1] == ".py":
        altfn = altfn[:-1] # *.pyc or whatever
    return altfn
def better_exchook(etype, value, tb, out=sys.stdout):
    """Print the traceback (etype, value, tb) to out like the default
    excepthook, additionally showing the values of all variables
    referenced on each traceback line (locals, globals, builtins)."""
    output('Traceback (most recent call last):', out=out)
    allLocals,allGlobals = {},{}
    try:
        import linecache
        limit = None
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
        n = 0
        _tb = tb
        # resolve a dotted identifier tuple like ("a", "b") to a value
        def _resolveIdentifier(namespace, id):
            obj = namespace[id[0]]
            for part in id[1:]:
                obj = getattr(obj, part)
            return obj
        # run func() only if no earlier lookup succeeded; KeyError means
        # "not in this namespace", other exceptions are rendered inline
        def _trySet(old, prefix, func):
            if old is not None: return old
            try: return prefix + func()
            except KeyError: return old
            except Exception as e:
                return prefix + "!" + e.__class__.__name__ + ": " + str(e)
        while _tb is not None and (limit is None or n < limit):
            f = _tb.tb_frame
            allLocals.update(f.f_locals)
            allGlobals.update(f.f_globals)
            lineno = _tb.tb_lineno
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            output('  File "%s", line %d, in %s' % (filename,lineno,name), out=out)
            if not os.path.isfile(filename):
                altfn = fallback_findfile(filename)
                if altfn:
                    output("  -- couldn't find file, trying this instead: " + altfn, out=out)
                    filename = altfn
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            if line:
                line = line.strip()
                output('  line: ' + line, out=out)
                output('  locals:', out=out)
                alreadyPrintedLocals = set()
                # print each identifier (and every dotted prefix of it)
                # referenced on the source line, at most once
                for tokenstr in grep_full_py_identifiers(parse_py_statement(line)):
                    splittedtoken = tuple(tokenstr.split("."))
                    for token in map(lambda i: splittedtoken[0:i], range(1, len(splittedtoken) + 1)):
                        if token in alreadyPrintedLocals: continue
                        tokenvalue = None
                        tokenvalue = _trySet(tokenvalue, "<local> ", lambda: pretty_print(_resolveIdentifier(f.f_locals, token)))
                        tokenvalue = _trySet(tokenvalue, "<global> ", lambda: pretty_print(_resolveIdentifier(f.f_globals, token)))
                        tokenvalue = _trySet(tokenvalue, "<builtin> ", lambda: pretty_print(_resolveIdentifier(f.f_builtins, token)))
                        tokenvalue = tokenvalue or "<not found>"
                        output('      ' + ".".join(token) + " = " + tokenvalue, out=out)
                        alreadyPrintedLocals.add(token)
                if len(alreadyPrintedLocals) == 0: output("   no locals", out=out)
            else:
                output('  -- code not available --', out=out)
            _tb = _tb.tb_next
            n += 1
    except Exception:
        # never let the exception hook itself crash; fall back to the
        # plain stdlib traceback output
        output("ERROR: cannot get more detailed exception info because:", out=out)
        import traceback
        for l in traceback.format_exc().split("\n"): output("   " + l, out=out)
        output("simple traceback:", out=out)
        traceback.print_tb(tb, None, out)
    import types
    def _some_str(value):
        try: return str(value)
        except: return '<unprintable %s object>' % type(value).__name__
    def _format_final_exc_line(etype, value):
        valuestr = _some_str(value)
        if value is None or not valuestr:
            line = "%s" % etype
        else:
            line = "%s: %s" % (etype, valuestr)
        return line
    # print the final "ExcType: message" line, mirroring traceback.py
    if (isinstance(etype, BaseException) or
        (hasattr(types, "InstanceType") and isinstance(etype, types.InstanceType)) or
        etype is None or type(etype) is str):
        output(_format_final_exc_line(etype, value), out=out)
    else:
        output(_format_final_exc_line(etype.__name__, value), out=out)
def install():
    """Replace sys.excepthook with better_exchook for this process."""
    sys.excepthook = better_exchook
| 8,082 | Python | .py | 225 | 32.688889 | 115 | 0.66756 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,240 | loader.py | wummel_linkchecker/linkcheck/loader.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2012-2014 Bastian Kleineidam
"""
Functions to load plugin modules.
Example usage:
modules = loader.get_package_modules('plugins')
plugins = loader.get_plugins(modules, PluginClass)
"""
from __future__ import print_function
import os
import sys
import zipfile
import importlib
import imp
from .fileutil import is_writable_by_others
def is_frozen ():
    """Return True if running inside a py2exe- or py2app-generated
    executable (detected via the sys.frozen attribute)."""
    try:
        sys.frozen
    except AttributeError:
        return False
    return True
def check_writable_by_others(filename):
    """On POSIX, return whether filename is world writable; on other
    platforms return None and skip the check."""
    if os.name == 'posix':
        return is_writable_by_others(filename)
    # XXX on non-posix systems other bits are relevant
    return None
def get_package_modules(packagename):
    """Find all valid modules in the given package which must be a folder
    in the same directory as this loader.py module. A valid module has
    a .py extension, and is importable.
    @return: all loaded valid modules
    @rtype: iterator of module
    """
    if is_frozen():
        # find modules in library.zip filename
        zipname = os.path.dirname(os.path.dirname(__file__))
        parentmodule = os.path.basename(os.path.dirname(__file__))
        with zipfile.ZipFile(zipname, 'r') as f:
            prefix = "%s/%s/" % (parentmodule, packagename)
            modnames = [os.path.splitext(n[len(prefix):])[0]
                        for n in f.namelist()
                        if n.startswith(prefix) and "__init__" not in n]
    else:
        dirname = os.path.join(os.path.dirname(__file__), packagename)
        # strip the ".py" extension from each importable file
        modnames = [x[:-3] for x in get_importable_files(dirname)]
    for modname in modnames:
        try:
            # relative import: up one level, then into the package
            name ="..%s.%s" % (packagename, modname)
            yield importlib.import_module(name, __name__)
        except ImportError as msg:
            print("WARN: could not load module %s: %s" % (modname, msg))
def get_folder_modules(folder, parentpackage):
    """Load all importable files in folder as submodules of parentpackage.
    Refuses to load anything from a world-writable folder for security.
    @return: loaded modules
    @rtype: iterator of module
    """
    if check_writable_by_others(folder):
        print("ERROR: refuse to load modules from world writable folder %r" % folder)
        return
    for filename in get_importable_files(folder):
        fullname = os.path.join(folder, filename)
        modname = parentpackage+"."+filename[:-3]
        try:
            yield imp.load_source(modname, fullname)
        except ImportError as msg:
            print("WARN: could not load file %s: %s" % (fullname, msg))
def get_importable_files(folder):
    """Find all module files in the given folder that end with '.py' and
    don't start with an underscore, skipping world-writable files.
    @return module names
    @rtype: iterator of string
    """
    for entry in os.listdir(folder):
        if not entry.endswith('.py') or entry.startswith('_'):
            continue
        path = os.path.join(folder, entry)
        if check_writable_by_others(path):
            print("ERROR: refuse to load module from world writable file %r" % path)
        else:
            yield entry
def get_plugins(modules, classes):
    """Find all given (sub-)classes in all modules.
    @param modules: the modules to search
    @ptype modules: iterator of modules
    @return: found classes
    @rytpe: iterator of class objects
    """
    for mod in modules:
        found = get_module_plugins(mod, classes)
        for plugin_class in found:
            yield plugin_class
def get_module_plugins(module, classes):
    """Return all subclasses of a class in the module.
    If the module defines __all__, only those entries will be searched,
    otherwise all objects not starting with '_' will be searched.
    """
    try:
        candidates = module.__all__
    except AttributeError:
        candidates = [n for n in vars(module) if not n.startswith('_')]
    for candidate in candidates:
        try:
            value = getattr(module, candidate)
        except AttributeError:
            # listed in __all__ but not actually defined
            continue
        try:
            for baseclass in classes:
                if issubclass(value, baseclass):
                    yield value
        except TypeError:
            # value is not a class at all
            continue
| 4,108 | Python | .py | 106 | 31.650943 | 92 | 0.643341 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,241 | fileutil.py | wummel_linkchecker/linkcheck/fileutil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
File and path utilities.
"""
import os
import locale
import stat
import fnmatch
import tempfile
import importlib
from distutils.spawn import find_executable
from .decorators import memoized
def write_file (filename, content, backup=False, callback=None):
    """Overwrite a possibly existing file with new content. Do this
    in a manner that does not leave truncated or broken files behind.
    @param filename: name of file to write
    @type filename: string
    @param content: file content to write
    @type content: string
    @param backup: if backup file should be left
    @type backup: bool
    @param callback: non-default storage function
    @type callback: None or function taking two parameters (fileobj, content)
    """
    # first write in a temp file; a context manager guarantees the data
    # is flushed and the handle closed before any rename happens
    # (bug fix: replaced the Python-2-only file() builtin with open())
    with open(filename+".tmp", 'wb') as f:
        if callback is None:
            f.write(content)
        else:
            callback(f, content)
    # move orig file to backup
    if os.path.exists(filename):
        os.rename(filename, filename+".bak")
    # move temp file to orig
    os.rename(filename+".tmp", filename)
    # remove backup unless the caller wants to keep it
    if not backup and os.path.exists(filename+".bak"):
        os.remove(filename+".bak")
def has_module (name, without_error=True):
    """Test if given module can be imported.
    @param without_error: True if module must not throw any errors when importing
    @return: flag if import is successful
    @rtype: bool
    """
    try:
        importlib.import_module(name)
    except ImportError:
        return False
    except Exception:
        # some modules raise arbitrary errors while initializing
        return not without_error
    return True
class GlobDirectoryWalker (object):
    """A forward iterator that traverses a directory tree.
    Uses the legacy __getitem__ iteration protocol: for-loops call
    __getitem__ with increasing indexes until IndexError is raised
    (here indirectly by stack.pop() on an empty list)."""
    def __init__ (self, directory, pattern="*"):
        """Set start directory and pattern matcher."""
        self.stack = [directory]
        self.pattern = pattern
        self.files = []
        self.index = 0
    def __getitem__ (self, index):
        """Search for next filename matching the pattern."""
        while True:
            try:
                filename = self.files[self.index]
                self.index += 1
            except IndexError:
                # Pop next directory from stack. This effectively
                # stops the iteration if stack is empty.
                self.directory = self.stack.pop()
                self.files = os.listdir(self.directory)
                self.index = 0
            else:
                # got a filename
                fullname = os.path.join(self.directory, filename)
                # descend into subdirectories, but never follow symlinks
                if os.path.isdir(fullname) and not os.path.islink(fullname):
                    self.stack.append(fullname)
                if fnmatch.fnmatch(filename, self.pattern):
                    return fullname
# alias
rglob = GlobDirectoryWalker
class Buffer (object):
    """Accumulate written string data and hand it out via flush().

    Note that len(buffer) reports the total amount of data ever
    written, not the amount currently buffered.
    """
    def __init__ (self, empty=''):
        """Initialize an empty buffer; empty is the zero element used
        for joining (pass b'' to buffer bytes)."""
        self.empty = self.buf = empty
        self.tmpbuf = []
        self.pos = 0
    def __len__ (self):
        """Total length of all data written so far."""
        return self.pos
    def write (self, data):
        """Append data to the buffer."""
        self.pos += len(data)
        self.tmpbuf.append(data)
    def flush (self, overlap=0):
        """Return buffered data.  With a nonzero overlap smaller than
        the total written length, the last overlap characters stay
        buffered for the next flush."""
        self.buf += self.empty.join(self.tmpbuf)
        self.tmpbuf = []
        if overlap and overlap < self.pos:
            data = self.buf[:-overlap]
            self.buf = self.buf[-overlap:]
        else:
            data, self.buf = self.buf, self.empty
        return data
def get_mtime (filename):
    """Return the modification timestamp of filename, or zero when the
    file cannot be stat'ed."""
    try:
        mtime = os.path.getmtime(filename)
    except os.error:
        mtime = 0
    return mtime
def get_size (filename):
    """Return the file size in bytes, or -1 when the file cannot be
    stat'ed."""
    try:
        nbytes = os.path.getsize(filename)
    except os.error:
        nbytes = -1
    return nbytes
# Determine the filename encoding (FSCODING) from the GLib environment
# variables, falling back to utf-8.
# http://developer.gnome.org/doc/API/2.0/glib/glib-running.html
if "G_FILENAME_ENCODING" in os.environ:
    # first entry of a comma-separated list; "@locale" means the
    # current locale's preferred encoding
    FSCODING = os.environ["G_FILENAME_ENCODING"].split(",")[0]
    if FSCODING == "@locale":
        FSCODING = locale.getpreferredencoding()
elif "G_BROKEN_FILENAMES" in os.environ:
    FSCODING = locale.getpreferredencoding()
else:
    FSCODING = "utf-8"
def pathencode (path):
    """Encode a path string with the platform file system encoding.
    NOTE: relies on the Python 2 ``unicode`` builtin; the path is
    returned unchanged on platforms with unicode filename support."""
    if isinstance(path, unicode) and not os.path.supports_unicode_filenames:
        path = path.encode(FSCODING, "replace")
    return path
# cache for modified check {absolute filename -> mtime}
_mtime_cache = {}
def has_changed (filename):
    """Check if filename has changed since the last check. If this
    is the first check, assume the file is changed."""
    path = os.path.abspath(filename)
    current = get_mtime(path)
    if path in _mtime_cache:
        # NOTE: the cached mtime is never refreshed after the first call
        return current > _mtime_cache[path]
    _mtime_cache[path] = current
    return True
def get_temp_file (mode='r', **kwargs):
"""Return tuple (open file object, filename) pointing to a temporary
file."""
fd, filename = tempfile.mkstemp(**kwargs)
return os.fdopen(fd, mode), filename
def is_tty (fp):
"""Check if is a file object pointing to a TTY."""
return (hasattr(fp, "isatty") and fp.isatty())
@memoized
def is_readable(filename):
"""Check if file is a regular file and is readable."""
return os.path.isfile(filename) and os.access(filename, os.R_OK)
def is_accessable_by_others(filename):
"""Check if file is group or world accessable."""
mode = os.stat(filename)[stat.ST_MODE]
return mode & (stat.S_IRWXG | stat.S_IRWXO)
def is_writable_by_others(filename):
"""Check if file or directory is world writable."""
mode = os.stat(filename)[stat.ST_MODE]
return mode & stat.S_IWOTH
@memoized
def is_writable(filename):
"""Check if
- the file is a regular file and is writable, or
- the file does not exist and its parent directory exists and is
writable
"""
if not os.path.exists(filename):
parentdir = os.path.dirname(filename)
return os.path.isdir(parentdir) and os.access(parentdir, os.W_OK)
return os.path.isfile(filename) and os.access(filename, os.W_OK)
| 7,068 | Python | .py | 190 | 31.047368 | 81 | 0.660284 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,242 | mem.py | wummel_linkchecker/linkcheck/mem.py | # -*- coding: iso-8859-1 -*-
# Copyright: Jean Brouwers
# License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Copied from the Python Cookbook recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/286222
To find the memory usage in a particular section of code these
functions are typically used as follows::
m0 = memory()
...
m1 = memory(m0)
"""
import os
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB (VmKey):
"""Parse /proc/<pid>/status file for given key.
@return: requested number value of status entry
@rtype: float
"""
if os.name != 'posix':
# not supported
return 0.0
global _proc_status, _scale
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except IOError:
# unsupported platform (non-Linux?)
return 0.0
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory (since=0.0):
"""Get memory usage.
@return: memory usage in bytes
@rtype: float
"""
return _VmB('VmSize:') - since
def resident (since=0.0):
"""Get resident memory usage.
@return: resident memory usage in bytes
@rtype: float
"""
return _VmB('VmRSS:') - since
def stacksize (since=0.0):
"""Get stack size.
@return: stack size in bytes
@rtype: float
"""
return _VmB('VmStk:') - since
| 2,697 | Python | .py | 75 | 32.013333 | 79 | 0.685079 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,243 | url.py | wummel_linkchecker/linkcheck/url.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Functions for parsing and matching URL strings.
"""
import re
import os
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import urllib
import requests
from . import log, LOG_CHECK
for scheme in ('ldap', 'irc'):
if scheme not in urlparse.uses_netloc:
urlparse.uses_netloc.append(scheme)
# The character set to encode non-ASCII characters in a URL. See also
# http://tools.ietf.org/html/rfc2396#section-2.1
# Note that the encoding is not really specified, but most browsers
# encode in UTF-8 when no encoding is specified by the HTTP headers,
# else they use the page encoding for followed link. See als
# http://code.google.com/p/browsersec/wiki/Part1#Unicode_in_URLs
url_encoding = "utf-8"
# constants defining url part indexes
SCHEME = 0
HOSTNAME = DOMAIN = 1
PORT = 2
DOCUMENT = 3
default_ports = {
'http': 80,
'https': 443,
'nntps': 563,
'ftp': 21,
}
# adapted from David Wheelers "Secure Programming for Linux and Unix HOWTO"
# http://www.dwheeler.com/secure-programs/Secure-Programs-HOWTO/\
# filter-html.html#VALIDATING-URIS
_basic = {
"_path": r"\-\_\.\!\~\*\'\(\),",
"_hex_safe": r"2-9a-f",
"_hex_full": r"0-9a-f",
"_part": r"([a-z0-9][-a-z0-9]{0,61}|[a-z])",
}
_safe_char = r"([a-z0-9%(_path)s\+]|"\
r"(%%[%(_hex_safe)s][%(_hex_full)s]))" % _basic
_safe_scheme_pattern = r"(https?|ftp)"
_safe_domain_pattern = r"(%(_part)s(\.%(_part)s)*\.?)" % _basic
_safe_host_pattern = _safe_domain_pattern+r"(:(80|8080|8000|443))?" % _basic
_safe_path_pattern = r"((/([a-z0-9%(_path)s]|"\
r"(%%[%(_hex_safe)s][%(_hex_full)s]))+)*/?)" % _basic
_safe_fragment_pattern = r"%s*" % _safe_char
_safe_cgi = r"%s+(=(%s|/)+)?" % (_safe_char, _safe_char)
_safe_query_pattern = r"(%s(&%s)*)?" % (_safe_cgi, _safe_cgi)
_safe_param_pattern = r"(%s(;%s)*)?" % (_safe_cgi, _safe_cgi)
safe_url_pattern = r"%s://%s%s(#%s)?" % \
(_safe_scheme_pattern, _safe_host_pattern,
_safe_path_pattern, _safe_fragment_pattern)
is_safe_char = re.compile("(?i)^%s$" % _safe_char).match
is_safe_url = re.compile("(?i)^%s$" % safe_url_pattern).match
is_safe_domain = re.compile("(?i)^%s$" % _safe_domain_pattern).match
is_safe_host = re.compile("(?i)^%s$" % _safe_host_pattern).match
is_safe_path = re.compile("(?i)^%s$" % _safe_path_pattern).match
is_safe_parameter = re.compile("(?i)^%s$" % _safe_param_pattern).match
is_safe_query = re.compile("(?i)^%s$" % _safe_query_pattern).match
is_safe_fragment = re.compile("(?i)^%s$" % _safe_fragment_pattern).match
# snatched form urlparse.py
def splitparams (path):
"""Split off parameter part from path.
Returns tuple (path-without-param, param)
"""
if '/' in path:
i = path.find(';', path.rfind('/'))
else:
i = path.find(';')
if i < 0:
return path, ''
return path[:i], path[i+1:]
def is_numeric_port (portstr):
"""return: integer port (== True) iff portstr is a valid port number,
False otherwise
"""
if portstr.isdigit():
port = int(portstr)
# 65536 == 2**16
if 0 < port < 65536:
return port
return False
def safe_host_pattern (host):
"""Return regular expression pattern with given host for URL testing."""
return "(?i)%s://%s%s(#%s)?" % \
(_safe_scheme_pattern, host, _safe_path_pattern, _safe_fragment_pattern)
def parse_qsl (qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument.
@param qs: URL-encoded query string to be parsed
@type qs: string
@param keep_blank_values: flag indicating whether blank values in
URL encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
@type keep_blank_values: bool
@param strict_parsing: flag indicating what to do with parsing errors. If
false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
@type strict_parsing: bool
@returns: list of triples (key, value, separator) where key and value
are the splitted CGI parameter and separator the used separator
for this CGI parameter which is either a semicolon or an ampersand
@rtype: list of triples
"""
pairs = []
name_value_amp = qs.split('&')
for name_value in name_value_amp:
if ';' in name_value:
pairs.extend([x, ';'] for x in name_value.split(';'))
pairs[-1][1] = '&'
else:
pairs.append([name_value, '&'])
pairs[-1][1] = ''
r = []
for name_value, sep in pairs:
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % name_value)
elif len(nv) == 1:
# None value indicates missing equal sign
nv = (nv[0], None)
else:
continue
if nv[1] or keep_blank_values:
name = urllib.unquote(nv[0].replace('+', ' '))
if nv[1]:
value = urllib.unquote(nv[1].replace('+', ' '))
else:
value = nv[1]
r.append((name, value, sep))
return r
def idna_encode (host):
"""Encode hostname as internationalized domain name (IDN) according
to RFC 3490.
@raise: UnicodeError if hostname is not properly IDN encoded.
"""
if host and isinstance(host, unicode):
try:
host.encode('ascii')
return host, False
except UnicodeError:
uhost = host.encode('idna').decode('ascii')
return uhost, uhost != host
return host, False
def url_fix_host (urlparts):
"""Unquote and fix hostname. Returns is_idn."""
if not urlparts[1]:
urlparts[2] = urllib.unquote(urlparts[2])
return False
userpass, netloc = urllib.splituser(urlparts[1])
if userpass:
userpass = urllib.unquote(userpass)
netloc, is_idn = idna_encode(urllib.unquote(netloc).lower())
# a leading backslash in path causes urlsplit() to add the
# path components up to the first slash to host
# try to find this case...
i = netloc.find("\\")
if i != -1:
# ...and fix it by prepending the misplaced components to the path
comps = netloc[i:] # note: still has leading backslash
if not urlparts[2] or urlparts[2] == '/':
urlparts[2] = comps
else:
urlparts[2] = "%s%s" % (comps, urllib.unquote(urlparts[2]))
netloc = netloc[:i]
else:
# a leading ? in path causes urlsplit() to add the query to the
# host name
i = netloc.find("?")
if i != -1:
netloc, urlparts[3] = netloc.split('?', 1)
# path
urlparts[2] = urllib.unquote(urlparts[2])
if userpass:
# append AT for easy concatenation
userpass += "@"
else:
userpass = ""
if urlparts[0] in default_ports:
dport = default_ports[urlparts[0]]
host, port = splitport(netloc, port=dport)
if host.endswith("."):
host = host[:-1]
if port != dport:
host = "%s:%d" % (host, port)
netloc = host
urlparts[1] = userpass+netloc
return is_idn
def url_fix_common_typos (url):
"""Fix common typos in given URL like forgotten colon."""
if url.startswith("http//"):
url = "http://" + url[6:]
elif url.startswith("https//"):
url = "https://" + url[7:]
return url
def url_fix_mailto_urlsplit (urlparts):
"""Split query part of mailto url if found."""
if "?" in urlparts[2]:
urlparts[2], urlparts[3] = urlparts[2].split('?', 1)
def url_parse_query (query, encoding=None):
"""Parse and re-join the given CGI query."""
if isinstance(query, unicode):
if encoding is None:
encoding = url_encoding
query = query.encode(encoding, 'ignore')
# if ? is in the query, split it off, seen at msdn.microsoft.com
append = ""
while '?' in query:
query, rest = query.rsplit('?', 1)
append = '?'+url_parse_query(rest)+append
l = []
for k, v, sep in parse_qsl(query, keep_blank_values=True):
k = url_quote_part(k, '/-:,;')
if v:
v = url_quote_part(v, '/-:,;')
l.append("%s=%s%s" % (k, v, sep))
elif v is None:
l.append("%s%s" % (k, sep))
else:
# some sites do not work when the equal sign is missing
l.append("%s=%s" % (k, sep))
return ''.join(l) + append
def urlunsplit (urlparts):
"""Same as urlparse.urlunsplit but with extra UNC path handling
for Windows OS."""
res = urlparse.urlunsplit(urlparts)
if os.name == 'nt' and urlparts[0] == 'file' and '|' not in urlparts[2]:
# UNC paths must have 4 slashes: 'file:////server/path'
# Depending on the path in urlparts[2], urlparse.urlunsplit()
# left only two or three slashes. This is fixed below
repl = 'file://' if urlparts[2].startswith('//') else 'file:/'
res = res.replace('file:', repl)
return res
def url_norm (url, encoding=None):
"""Normalize the given URL which must be quoted. Supports unicode
hostnames (IDNA encoding) according to RFC 3490.
@return: (normed url, idna flag)
@rtype: tuple of length two
"""
if isinstance(url, unicode):
# try to decode the URL to ascii since urllib.unquote()
# handles non-unicode strings differently
try:
url = url.encode('ascii')
except UnicodeEncodeError:
pass
encode_unicode = True
else:
encode_unicode = False
urlparts = list(urlparse.urlsplit(url))
# scheme
urlparts[0] = urllib.unquote(urlparts[0]).lower()
# mailto: urlsplit is broken
if urlparts[0] == 'mailto':
url_fix_mailto_urlsplit(urlparts)
# host (with path or query side effects)
is_idn = url_fix_host(urlparts)
# query
urlparts[3] = url_parse_query(urlparts[3], encoding=encoding)
if urlparts[0] in urlparse.uses_relative:
# URL has a hierarchical path we should norm
if not urlparts[2]:
# Empty path is allowed if both query and fragment are also empty.
# Note that in relative links, urlparts[0] might be empty.
# In this case, do not make any assumptions.
if urlparts[0] and (urlparts[3] or urlparts[4]):
urlparts[2] = '/'
else:
# fix redundant path parts
urlparts[2] = collapse_segments(urlparts[2])
# anchor
urlparts[4] = urllib.unquote(urlparts[4])
# quote parts again
urlparts[0] = url_quote_part(urlparts[0], encoding=encoding) # scheme
urlparts[1] = url_quote_part(urlparts[1], safechars='@:', encoding=encoding) # host
urlparts[2] = url_quote_part(urlparts[2], safechars=_nopathquote_chars, encoding=encoding) # path
urlparts[4] = url_quote_part(urlparts[4], encoding=encoding) # anchor
res = urlunsplit(urlparts)
if url.endswith('#') and not urlparts[4]:
# re-append trailing empty fragment
res += '#'
if encode_unicode:
res = unicode(res)
return (res, is_idn)
_slashes_ro = re.compile(r"/+")
_thisdir_ro = re.compile(r"^\./")
_samedir_ro = re.compile(r"/\./|/\.$")
_parentdir_ro = re.compile(r"^/(\.\./)+|/(?!\.\./)[^/]+/\.\.(/|$)")
_relparentdir_ro = re.compile(r"^(?!\.\./)[^/]+/\.\.(/|$)")
def collapse_segments (path):
"""Remove all redundant segments from the given URL path.
Precondition: path is an unquoted url path"""
# replace backslashes
# note: this is _against_ the specification (which would require
# backslashes to be left alone, and finally quoted with '%5C')
# But replacing has several positive effects:
# - Prevents path attacks on Windows systems (using \.. parent refs)
# - Fixes bad URLs where users used backslashes instead of slashes.
# This is a far more probable case than users having an intentional
# backslash in the path name.
path = path.replace('\\', '/')
# shrink multiple slashes to one slash
path = _slashes_ro.sub("/", path)
# collapse redundant path segments
path = _thisdir_ro.sub("", path)
path = _samedir_ro.sub("/", path)
# collapse parent path segments
# note: here we exploit the fact that the replacements happen
# to be from left to right (see also _parentdir_ro above)
newpath = _parentdir_ro.sub("/", path)
while newpath != path:
path = newpath
newpath = _parentdir_ro.sub("/", path)
# collapse parent path segments of relative paths
# (ie. without leading slash)
newpath = _relparentdir_ro.sub("", path)
while newpath != path:
path = newpath
newpath = _relparentdir_ro.sub("", path)
return path
url_is_absolute = re.compile(r"^[-\.a-z]+:", re.I).match
def url_quote (url):
"""Quote given URL."""
if not url_is_absolute(url):
return document_quote(url)
urlparts = list(urlparse.urlsplit(url))
urlparts[0] = url_quote_part(urlparts[0]) # scheme
urlparts[1] = url_quote_part(urlparts[1], ':') # host
urlparts[2] = url_quote_part(urlparts[2], '/=,') # path
urlparts[3] = url_quote_part(urlparts[3], '&=,') # query
l = []
for k, v, sep in parse_qsl(urlparts[3], True): # query
k = url_quote_part(k, '/-:,;')
if v:
v = url_quote_part(v, '/-:,;')
l.append("%s=%s%s" % (k, v, sep))
else:
l.append("%s%s" % (k, sep))
urlparts[3] = ''.join(l)
urlparts[4] = url_quote_part(urlparts[4]) # anchor
return urlunsplit(urlparts)
def url_quote_part (s, safechars='/', encoding=None):
"""Wrap urllib.quote() to support unicode strings. A unicode string
is first converted to UTF-8. After that urllib.quote() is called."""
if isinstance(s, unicode):
if encoding is None:
encoding = url_encoding
s = s.encode(encoding, 'ignore')
return urllib.quote(s, safechars)
def document_quote (document):
"""Quote given document."""
doc, query = urllib.splitquery(document)
doc = url_quote_part(doc, '/=,')
if query:
return "%s?%s" % (doc, query)
return doc
def match_url (url, domainlist):
"""Return True if host part of url matches an entry in given domain list.
"""
if not url:
return False
return match_host(url_split(url)[1], domainlist)
def match_host (host, domainlist):
"""Return True if host matches an entry in given domain list."""
if not host:
return False
for domain in domainlist:
if domain.startswith('.'):
if host.endswith(domain):
return True
elif host == domain:
return True
return False
_nopathquote_chars = "-;/=,~*+()@!"
if os.name == 'nt':
_nopathquote_chars += "|"
_safe_url_chars = re.escape(_nopathquote_chars + "_:.&#%?[]!")+"a-zA-Z0-9"
_safe_url_chars_ro = re.compile(r"^[%s]*$" % _safe_url_chars)
def url_needs_quoting (url):
"""Check if url needs percent quoting. Note that the method does
only check basic character sets, and not any other syntax.
The URL might still be syntactically incorrect even when
it is properly quoted.
"""
if url.rstrip() != url:
# handle trailing whitespace as a special case
# since '$' matches immediately before a end-of-line
return True
return not _safe_url_chars_ro.match(url)
def url_split (url):
"""Split url in a tuple (scheme, hostname, port, document) where
hostname is always lowercased.
Precondition: url is syntactically correct URI (eg has no whitespace)
"""
scheme, netloc = urllib.splittype(url)
host, document = urllib.splithost(netloc)
port = default_ports.get(scheme, 0)
if host:
host = host.lower()
host, port = splitport(host, port=port)
return scheme, host, port, document
def url_unsplit (parts):
"""Rejoin URL parts to a string."""
if parts[2] == default_ports.get(parts[0]):
return "%s://%s%s" % (parts[0], parts[1], parts[3])
return "%s://%s:%d%s" % parts
def splitport (host, port=0):
"""Split optional port number from host. If host has no port number,
the given default port is returned.
@param host: host name
@ptype host: string
@param port: the port number (default 0)
@ptype port: int
@return: tuple of (host, port)
@rtype: tuple of (string, int)
"""
if ":" in host:
shost, sport = host.split(":", 1)
iport = is_numeric_port(sport)
if iport:
host, port = shost, iport
elif not sport:
# empty port, ie. the host was "hostname:"
host = shost
else:
# For an invalid non-empty port leave the host name as is
pass
return host, port
def get_content(url, user=None, password=None, proxy=None, data=None,
addheaders=None):
"""Get URL content and info.
@return: (decoded text content of URL, headers) or
(None, errmsg) on error.
@rtype: tuple (String, dict) or (None, String)
"""
from . import configuration
headers = {
'User-Agent': configuration.UserAgent,
}
if addheaders:
headers.update(addheaders)
method = 'GET'
kwargs = dict(headers=headers)
if user and password:
kwargs['auth'] = (user, password)
if data:
kwargs['data'] = data
method = 'POST'
if proxy:
kwargs['proxy'] = dict(http=proxy)
from .configuration import get_share_file
try:
kwargs["verify"] = get_share_file('cacert.pem')
except ValueError:
pass
try:
response = requests.request(method, url, **kwargs)
return response.text, response.headers
except (requests.exceptions.RequestException,
requests.exceptions.BaseHTTPError) as msg:
log.warn(LOG_CHECK, ("Could not get content of URL %(url)s: %(msg)s.") \
% {"url": url, "msg": str(msg)})
return None, str(msg)
def shorten_duplicate_content_url(url):
"""Remove anchor part and trailing index.html from URL."""
if '#' in url:
url = url.split('#', 1)[0]
if url.endswith('index.html'):
return url[:-10]
if url.endswith('index.htm'):
return url[:-9]
return url
def is_duplicate_content_url(url1, url2):
"""Check if both URLs are allowed to point to the same content."""
if url1 == url2:
return True
if url2 in url1:
url1 = shorten_duplicate_content_url(url1)
if not url2.endswith('/') and url1.endswith('/'):
url2 += '/'
return url1 == url2
if url1 in url2:
url2 = shorten_duplicate_content_url(url2)
if not url1.endswith('/') and url2.endswith('/'):
url1 += '/'
return url1 == url2
return False
| 19,967 | Python | .py | 510 | 32.801961 | 101 | 0.618355 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,244 | httputil.py | wummel_linkchecker/linkcheck/httputil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import base64
from datetime import datetime
DEFAULT_KEEPALIVE = 300
MAX_HEADER_BYTES = 8*1024
def encode_base64 (s):
"""Encode given string in base64, excluding trailing newlines."""
return base64.b64encode(s)
def x509_to_dict(x509):
"""Parse a x509 pyopenssl object to a dictionary with keys
subject, subjectAltName and optional notAfter.
"""
from requests.packages.urllib3.contrib.pyopenssl import get_subj_alt_name
res = {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
notAfter = x509.get_notAfter()
if notAfter is not None:
parsedtime = asn1_generaltime_to_seconds(notAfter)
if parsedtime is not None:
res['notAfter'] = parsedtime.strftime('%b %d %H:%M:%S %Y')
if parsedtime.tzinfo is None:
res['notAfter'] += ' GMT'
else:
# give up parsing, just set the string
res['notAfter'] = notAfter
return res
def asn1_generaltime_to_seconds(timestr):
"""The given string has one of the following formats
YYYYMMDDhhmmssZ
YYYYMMDDhhmmss+hhmm
YYYYMMDDhhmmss-hhmm
@return: a datetime object or None on error
"""
res = None
timeformat = "%Y%m%d%H%M%S"
try:
res = datetime.strptime(timestr, timeformat + 'Z')
except ValueError:
try:
res = datetime.strptime(timestr, timeformat + '%z')
except ValueError:
pass
return res
def has_header_value (headers, name, value):
"""
Look in headers for a specific header name and value.
Both name and value are case insensitive.
@return: True if header name and value are found
@rtype: bool
"""
name = name.lower()
value = value.lower()
for hname, hvalue in headers:
if hname.lower()==name and hvalue.lower()==value:
return True
return False
def http_persistent (response):
"""
See if the HTTP connection can be kept open according the the
header values found in the response object.
@param response: response instance
@type response: httplib.HTTPResponse
@return: True if connection is persistent
@rtype: bool
"""
headers = response.getheaders()
if response.version == 11:
return not has_header_value(headers, 'Connection', 'Close')
return has_header_value(headers, "Connection", "Keep-Alive")
def http_keepalive (headers):
"""
Get HTTP keepalive value, either from the Keep-Alive header or a
default value.
@param headers: HTTP headers
@type headers: dict
@return: keepalive in seconds
@rtype: int
"""
keepalive = headers.get("Keep-Alive")
if keepalive is not None:
try:
keepalive = int(keepalive[8:].strip())
except (ValueError, OverflowError):
keepalive = DEFAULT_KEEPALIVE
else:
keepalive = DEFAULT_KEEPALIVE
return keepalive
def get_content_type (headers):
"""
Get the MIME type from the Content-Type header value, or
'application/octet-stream' if not found.
@return: MIME type
@rtype: string
"""
ptype = headers.get('Content-Type', 'application/octet-stream')
if ";" in ptype:
# split off not needed extension info
ptype = ptype.split(';')[0]
return ptype.strip().lower()
def get_charset(headers):
"""
Get the charset encoding from the Content-Type header value, or
None if not found.
@return: the content charset encoding
@rtype: string or None
"""
from linkcheck.HtmlParser import get_ctype_charset
return get_ctype_charset(headers.get('Content-Type', ''))
def get_content_encoding (headers):
"""
Get the content encoding from the Content-Encoding header value, or
an empty string if not found.
@return: encoding string
@rtype: string
"""
return headers.get("Content-Encoding", "").strip()
| 4,858 | Python | .py | 138 | 29.376812 | 77 | 0.670004 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,245 | cmdline.py | wummel_linkchecker/linkcheck/cmdline.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Utility functions suitable for command line clients.
"""
from __future__ import print_function
import sys
import argparse
from . import checker, fileutil, strformat, plugins
from .director import console
class LCArgumentParser(argparse.ArgumentParser):
"""Custom argument parser to format help text."""
def print_help(self, file=sys.stdout):
"""Print a help message to stdout."""
msg = console.encode(self.format_help())
if fileutil.is_tty(file):
strformat.paginate(msg)
else:
print(msg, file=file)
def print_version(exit_code=0):
"""Print the program version and exit."""
console.print_version()
sys.exit(exit_code)
def print_plugins(folders, exit_code=0):
"""Print available plugins and exit."""
modules = plugins.get_plugin_modules(folders)
pluginclasses = sorted(plugins.get_plugin_classes(modules), key=lambda x: x.__name__)
for pluginclass in pluginclasses:
print(pluginclass.__name__)
doc = strformat.wrap(pluginclass.__doc__, 80)
print(strformat.indent(doc))
print()
sys.exit(exit_code)
def print_usage (msg, exit_code=2):
"""Print a program msg text to stderr and exit."""
program = sys.argv[0]
print(_("Error: %(msg)s") % {"msg": msg}, file=console.stderr)
print(_("Execute '%(program)s -h' for help") % {"program": program}, file=console.stderr)
sys.exit(exit_code)
def aggregate_url (aggregate, url, err_exit_code=2):
"""Append given commandline URL to input queue."""
get_url_from = checker.get_url_from
url = checker.guess_url(url)
url_data = get_url_from(url, 0, aggregate, extern=(0, 0))
aggregate.urlqueue.put(url_data)
| 2,501 | Python | .py | 59 | 38.338983 | 93 | 0.709053 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,246 | strformat.py | wummel_linkchecker/linkcheck/strformat.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Some functions have been taken and adjusted from the quodlibet
# source. Quodlibet is (C) 2004-2005 Joe Wreschnig, Michael Urman
# and licensed under the GNU General Public License version 2.
"""
Various string utility functions. Note that these functions are not
necessarily optimised for large strings, so use with care.
"""
# some handy time constants
SECONDS_PER_MINUTE = 60
SECONDS_PER_HOUR = 60 * SECONDS_PER_MINUTE
SECONDS_PER_DAY = 24 * SECONDS_PER_HOUR
import re
import textwrap
import codecs
import os
import math
import time
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import locale
import pydoc
from . import i18n
def unicode_safe (s, encoding=i18n.default_encoding, errors='replace'):
"""Get unicode string without raising encoding errors. Unknown
characters of the given encoding will be ignored.
@param s: the string to be decoded
@type s: any object except None
@return: if s is already unicode, return s unchanged; else return
decoded unicode string of str(s)
@rtype: unicode
"""
assert s is not None, "argument to unicode_safe was None"
if isinstance(s, unicode):
# s is already unicode, nothing to do
return s
return unicode(str(s), encoding, errors)
def ascii_safe (s):
"""Get ASCII string without raising encoding errors. Unknown
characters of the given encoding will be ignored.
@param s: the Unicode string to be encoded
@type s: unicode or None
@return: encoded ASCII version of s, or None if s was None
@rtype: string
"""
if isinstance(s, unicode):
s = s.encode('ascii', 'ignore')
return s
def is_ascii (s):
"""Test if a string can be encoded in ASCII."""
try:
s.encode('ascii', 'strict')
return True
except (UnicodeEncodeError, UnicodeDecodeError):
return False
def is_encoding (text):
"""Check if string is a valid encoding."""
try:
return codecs.lookup(text)
except (LookupError, ValueError):
return False
def url_unicode_split (url):
"""Like urlparse.urlsplit(), but always returning unicode parts."""
return [unicode_safe(s) for s in urlparse.urlsplit(url)]
def unquote (s, matching=False):
"""Remove leading and ending single and double quotes.
The quotes need to match if matching is True. Only one quote from each
end will be stripped.
@return: if s evaluates to False, return s as is, else return
string with stripped quotes
@rtype: unquoted string, or s unchanged if it is evaluting to False
"""
if not s:
return s
if len(s) < 2:
return s
if matching:
if s[0] in ("\"'") and s[0] == s[-1]:
s = s[1:-1]
else:
if s[0] in ("\"'"):
s = s[1:]
if s[-1] in ("\"'"):
s = s[:-1]
return s
_para_mac = r"(?:%(sep)s)(?:(?:%(sep)s)\s*)+" % {'sep': '\r'}
_para_posix = r"(?:%(sep)s)(?:(?:%(sep)s)\s*)+" % {'sep': '\n'}
_para_win = r"(?:%(sep)s)(?:(?:%(sep)s)\s*)+" % {'sep': '\r\n'}
_para_ro = re.compile("%s|%s|%s" % (_para_mac, _para_posix, _para_win))
def get_paragraphs (text):
"""A new paragraph is considered to start at a line which follows
one or more blank lines (lines containing nothing or just spaces).
The first line of the text also starts a paragraph."""
if not text:
return []
return _para_ro.split(text)
def wrap (text, width, **kwargs):
"""Adjust lines of text to be not longer than width. The text will be
returned unmodified if width <= 0.
See textwrap.wrap() for a list of supported kwargs.
Returns text with lines no longer than given width."""
if width <= 0 or not text:
return text
ret = []
for para in get_paragraphs(text):
text = " ".join(para.strip().split())
ret.extend(textwrap.wrap(text, width, **kwargs))
return os.linesep.join(ret)
def indent (text, indent_string=" "):
"""Indent each line of text with the given indent string."""
lines = str(text).splitlines()
return os.linesep.join("%s%s" % (indent_string, x) for x in lines)
def get_line_number (s, index):
r"""Return the line number of s[index] or zero on errors.
Lines are assumed to be separated by the ASCII character '\n'."""
i = 0
if index < 0:
return 0
line = 1
while i < index:
if s[i] == '\n':
line += 1
i += 1
return line
def paginate (text):
"""Print text in pages of lines."""
pydoc.pager(text)
_markup_re = re.compile("<.*?>", re.DOTALL)
def remove_markup (s):
"""Remove all <*> html markup tags from s."""
mo = _markup_re.search(s)
while mo:
s = s[0:mo.start()] + s[mo.end():]
mo = _markup_re.search(s)
return s
def strsize (b, grouping=True):
"""Return human representation of bytes b. A negative number of bytes
raises a value error."""
if b < 0:
raise ValueError("Invalid negative byte number")
if b < 1024:
return u"%sB" % locale.format("%d", b, grouping)
if b < 1024 * 10:
return u"%sKB" % locale.format("%d", (b // 1024), grouping)
if b < 1024 * 1024:
return u"%sKB" % locale.format("%.2f", (float(b) / 1024), grouping)
if b < 1024 * 1024 * 10:
return u"%sMB" % locale.format("%.2f", (float(b) / (1024*1024)), grouping)
if b < 1024 * 1024 * 1024:
return u"%sMB" % locale.format("%.1f", (float(b) / (1024*1024)), grouping)
if b < 1024 * 1024 * 1024 * 10:
return u"%sGB" % locale.format("%.2f", (float(b) / (1024*1024*1024)), grouping)
return u"%sGB" % locale.format("%.1f", (float(b) / (1024*1024*1024)), grouping)
def strtime (t, func=time.localtime):
"""Return ISO 8601 formatted time."""
return time.strftime("%Y-%m-%d %H:%M:%S", func(t)) + strtimezone()
# from quodlibet
def strduration (duration):
"""Turn a time value in seconds into hh:mm:ss or mm:ss."""
if duration < 0:
duration = abs(duration)
prefix = "-"
else:
prefix = ""
duration = math.ceil(duration)
if duration >= SECONDS_PER_HOUR: # 1 hour
# time, in hours:minutes:seconds
return "%s%02d:%02d:%02d" % (prefix, duration // SECONDS_PER_HOUR,
(duration % SECONDS_PER_HOUR) // SECONDS_PER_MINUTE,
duration % SECONDS_PER_MINUTE)
else:
# time, in minutes:seconds
return "%s%02d:%02d" % (prefix, duration // SECONDS_PER_MINUTE,
duration % SECONDS_PER_MINUTE)
# from quodlibet
def strduration_long (duration, do_translate=True):
"""Turn a time value in seconds into x hours, x minutes, etc."""
if do_translate:
# use global translator functions
global _, _n
else:
# do not translate
_ = lambda x: x
_n = lambda a, b, n: a if n==1 else b
if duration < 0:
duration = abs(duration)
prefix = "-"
else:
prefix = ""
if duration < 1:
return _("%(prefix)s%(duration).02f seconds") % \
{"prefix": prefix, "duration": duration}
# translation dummies
_n("%d second", "%d seconds", 1)
_n("%d minute", "%d minutes", 1)
_n("%d hour", "%d hours", 1)
_n("%d day", "%d days", 1)
_n("%d year", "%d years", 1)
cutoffs = [
(60, "%d second", "%d seconds"),
(60, "%d minute", "%d minutes"),
(24, "%d hour", "%d hours"),
(365, "%d day", "%d days"),
(None, "%d year", "%d years"),
]
time_str = []
for divisor, single, plural in cutoffs:
if duration < 1:
break
if divisor is None:
duration, unit = 0, duration
else:
duration, unit = divmod(duration, divisor)
if unit:
time_str.append(_n(single, plural, unit) % unit)
time_str.reverse()
if len(time_str) > 2:
time_str.pop()
return "%s%s" % (prefix, ", ".join(time_str))
def strtimezone ():
"""Return timezone info, %z on some platforms, but not supported on all.
"""
if time.daylight:
zone = time.altzone
else:
zone = time.timezone
return "%+04d" % (-zone//SECONDS_PER_HOUR)
def stripurl(s):
"""Remove any lines from string after the first line.
Also remove whitespace at start and end from given string."""
if not s:
return s
return s.splitlines()[0].strip()
def limit (s, length=72):
"""If the length of the string exceeds the given limit, it will be cut
off and three dots will be appended.
@param s: the string to limit
@type s: string
@param length: maximum length
@type length: non-negative integer
@return: limited string, at most length+3 characters long
"""
assert length >= 0, "length limit must be a non-negative integer"
if not s or len(s) <= length:
return s
if length == 0:
return ""
return "%s..." % s[:length]
def strline (s):
"""Display string representation on one line."""
return strip_control_chars(u"`%s'" % unicode(s).replace(u"\n", u"\\n"))
def format_feature_warning (**kwargs):
"""Format warning that a module could not be imported and that it should
be installed for a certain URL.
"""
return _("Could not import %(module)s for %(feature)s. Install %(module)s from %(url)s to use this feature.") % kwargs
def strip_control_chars(text):
"""Remove console control characters from text."""
if text:
return re.sub(r"[\x01-\x1F\x7F]", "", text)
return text
| 10,436 | Python | .py | 280 | 31.564286 | 122 | 0.624641 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,247 | ansicolor.py | wummel_linkchecker/linkcheck/ansicolor.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
ANSI Color definitions and functions. For Windows systems, the colorama module
uses ctypes and Windows DLLs to generate colored output.
From Term::ANSIColor, applies also to this module:
The codes output by this module are standard terminal control codes,
complying with ECMA-48 and ISO 6429 (generally referred to as ``ANSI color''
for the color codes). The non-color control codes (bold, dark, italic,
underline, and reverse) are part of the earlier ANSI X3.64 standard for
control sequences for video terminals and peripherals.
Note that not all displays are ISO 6429-compliant, or even X3.64-compliant
(or are even attempting to be so).
Jean Delvare provided the following table of different common terminal
emulators and their support for the various attributes and others have
helped me flesh it out::
clear bold dark under blink reverse conceal
------------------------------------------------------------------------
xterm yes yes no yes bold yes yes
linux yes yes yes bold yes yes no
rxvt yes yes no yes bold/black yes no
dtterm yes yes yes yes reverse yes yes
teraterm yes reverse no yes rev/red yes no
aixterm kinda normal no yes no yes yes
PuTTY yes color no yes no yes no
Windows yes no no no no yes no
Cygwin SSH yes yes no color color color yes
SEE ALSO
ECMA-048 is available on-line (at least at the time of this writing) at
http://www.ecma-international.org/publications/standards/ECMA-048.HTM.
ISO 6429 is available from ISO for a charge; the author of this module does
not own a copy of it. Since the source material for ISO 6429 was ECMA-048
and the latter is available for free, there seems little reason to obtain
the ISO standard.
"""
import os
import logging
import types
from .fileutil import has_module, is_tty
if os.name == 'nt':
from . import colorama
has_curses = has_module("curses")
# Color constants
# Escape for ANSI colors
AnsiEsc = "\x1b[%sm"
# Control constants
bold = 'bold'
light = 'light'
underline = 'underline'
blink = 'blink'
invert = 'invert'
concealed = 'concealed'
# Control numbers
AnsiControl = {
None: '',
bold: '1',
light: '2',
#italic: '3', # unsupported
underline: '4',
blink: '5',
#rapidblink: '6', # unsupported
invert: '7',
concealed: '8',
#strikethrough: '9', # unsupported
}
# Color constants
default = 'default'
black = 'black'
red = 'red'
green = 'green'
yellow = 'yellow'
blue = 'blue'
purple = 'purple'
cyan = 'cyan'
white = 'white'
# inverse colors
Black = 'Black'
Red = 'Red'
Green = 'Green'
Yellow = 'Yellow'
Blue = 'Blue'
Purple = 'Purple'
Cyan = 'Cyna'
White = 'White'
InverseColors = (Black, Red, Green, Yellow, Blue, Purple, Cyan, White)
# Ansi color numbers; capitalized colors are inverse
AnsiColor = {
None: '0',
default: '0',
black: '30',
red: '31',
green: '32',
yellow: '33',
blue: '34',
purple: '35',
cyan: '36',
white: '37',
Black: '40',
Red: '41',
Green: '42',
Yellow: '43',
Blue: '44',
Purple: '45',
Cyan: '46',
White: '47',
}
if os.name == 'nt':
# Windows color numbers; capitalized colors are used as background
WinColor = {
None: None,
default: colorama.GREY,
black: colorama.BLACK,
red: colorama.RED,
green: colorama.GREEN,
yellow: colorama.YELLOW,
blue: colorama.BLUE,
purple: colorama.MAGENTA,
cyan: colorama.CYAN,
white: colorama.GREY,
Black: colorama.BLACK,
Red: colorama.RED,
Green: colorama.GREEN,
Yellow: colorama.YELLOW,
Blue: colorama.BLUE,
Purple: colorama.MAGENTA,
Cyan: colorama.CYAN,
White: colorama.GREY,
}
# pc speaker beep escape code
Beep = "\007"
def esc_ansicolor (color):
"""convert a named color definition to an escaped ANSI color"""
control = ''
if ";" in color:
control, color = color.split(";", 1)
control = AnsiControl.get(control, '')+";"
cnum = AnsiColor.get(color, '0')
return AnsiEsc % (control+cnum)
AnsiReset = esc_ansicolor(default)
def get_win_color(color):
"""Convert a named color definition to Windows console color foreground,
background and style numbers."""
foreground = background = style = None
control = ''
if ";" in color:
control, color = color.split(";", 1)
if control == bold:
style = colorama.BRIGHT
if color in InverseColors:
background = WinColor[color]
else:
foreground = WinColor.get(color)
return foreground, background, style
def has_colors (fp):
"""Test if given file is an ANSI color enabled tty."""
# The is_tty() function ensures that we do not colorize
# redirected streams, as this is almost never what we want
if not is_tty(fp):
return False
if os.name == 'nt':
return True
elif has_curses:
import curses
try:
curses.setupterm(os.environ.get("TERM"), fp.fileno())
# More than 8 colors are good enough.
return curses.tigetnum("colors") >= 8
except curses.error:
return False
return False
def get_columns (fp):
"""Return number of columns for given file."""
if not is_tty(fp):
return 80
if os.name == 'nt':
return colorama.get_console_size().X
if has_curses:
import curses
try:
curses.setupterm(os.environ.get("TERM"), fp.fileno())
return curses.tigetnum("cols")
except curses.error:
pass
return 80
def _write_color_colorama (fp, text, color):
"""Colorize text with given color."""
foreground, background, style = get_win_color(color)
colorama.set_console(foreground=foreground, background=background,
style=style)
fp.write(text)
colorama.reset_console()
def _write_color_ansi (fp, text, color):
"""Colorize text with given color."""
fp.write(esc_ansicolor(color))
fp.write(text)
fp.write(AnsiReset)
if os.name == 'nt':
write_color = _write_color_colorama
colorama.init()
else:
write_color = _write_color_ansi
class Colorizer (object):
"""Prints colored messages to streams."""
def __init__ (self, fp):
"""Initialize with given stream (file-like object)."""
super(Colorizer, self).__init__()
self.fp = fp
if has_colors(fp):
self.write = self._write_color
else:
self.write = self._write
def _write (self, text, color=None):
"""Print text as-is."""
self.fp.write(text)
def _write_color (self, text, color=None):
"""Print text with given color. If color is None, print text as-is."""
if color is None:
self.fp.write(text)
else:
write_color(self.fp, text, color)
def __getattr__ (self, name):
"""Delegate attribute access to the stored stream object."""
return getattr(self.fp, name)
class ColoredStreamHandler (logging.StreamHandler, object):
"""Send colored log messages to streams (file-like objects)."""
def __init__ (self, strm=None):
"""Log to given stream (a file-like object) or to stderr if
strm is None.
"""
super(ColoredStreamHandler, self).__init__(strm)
self.stream = Colorizer(self.stream)
# standard log level colors (used by get_color)
self.colors = {
logging.WARN: 'bold;yellow',
logging.ERROR: 'light;red',
logging.CRITICAL: 'bold;red',
logging.DEBUG: 'white',
}
def get_color (self, record):
"""Get appropriate color according to log level.
"""
return self.colors.get(record.levelno, 'default')
def emit (self, record):
"""Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline
[N.B. this may be removed depending on feedback].
"""
color = self.get_color(record)
msg = self.format(record)
if not hasattr(types, "UnicodeType"):
# no unicode support
self.stream.write("%s" % msg, color=color)
else:
try:
self.stream.write("%s" % msg, color=color)
except UnicodeError:
self.stream.write("%s" % msg.encode("UTF-8"),
color=color)
self.stream.write(os.linesep)
self.flush()
| 9,747 | Python | .py | 275 | 29.941818 | 78 | 0.622599 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,248 | mimeutil.py | wummel_linkchecker/linkcheck/mimeutil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
File and path utilities.
"""
import os
import re
import mimetypes
from . import log
from .logconf import LOG_CHECK
mimedb = None
def init_mimedb():
"""Initialize the local MIME database."""
global mimedb
try:
mimedb = mimetypes.MimeTypes(strict=False)
except Exception as msg:
log.error(LOG_CHECK, "could not initialize MIME database: %s" % msg)
return
# For Opera bookmark files (opera6.adr)
add_mimetype(mimedb, 'text/plain', '.adr')
# To recognize PHP files as HTML with content check.
add_mimetype(mimedb, 'application/x-httpd-php', '.php')
# To recognize WML files
add_mimetype(mimedb, 'text/vnd.wap.wml', '.wml')
def add_mimetype(mimedb, mimetype, extension):
"""Add or replace a mimetype to be used with the given extension."""
# If extension is already a common type, strict=True must be used.
strict = extension in mimedb.types_map[True]
mimedb.add_type(mimetype, extension, strict=strict)
# if file extension lookup was unsuccessful, look at the content
PARSE_CONTENTS = {
"text/html": re.compile(r'^(?i)<(!DOCTYPE html|html|head|title)'),
"text/plain+opera": re.compile(r'^Opera Hotlist'),
"text/plain+chromium": re.compile(r'^{\s*"checksum":'),
"text/plain+linkchecker": re.compile(r'(?i)^# LinkChecker URL list'),
"application/xml+sitemapindex": re.compile(r'(?i)<\?xml[^<]+<sitemapindex\s+'),
"application/xml+sitemap": re.compile(r'(?i)<\?xml[^<]+<urlset\s+'),
}
def guess_mimetype (filename, read=None):
"""Return MIME type of file, or 'application/octet-stream' if it could
not be determined."""
mime, encoding = None, None
if mimedb:
mime, encoding = mimedb.guess_type(filename, strict=False)
basename = os.path.basename(filename)
# Special case for Safari Bookmark files
if not mime and basename == 'Bookmarks.plist':
return 'application/x-plist+safari'
# Special case for Google Chrome Bookmark files.
if not mime and basename == 'Bookmarks':
mime = 'text/plain'
# Some mime types can be differentiated further with content reading.
if mime in ("text/plain", "application/xml", "text/xml") and read is not None:
read_mime = guess_mimetype_read(read)
if read_mime is not None:
mime = read_mime
if not mime:
mime = "application/octet-stream"
elif ";" in mime:
# split off not needed extension info
mime = mime.split(';')[0]
return mime.strip().lower()
def guess_mimetype_read(read):
"""Try to read some content and do a poor man's file(1)."""
mime = None
try:
data = read()[:70]
except Exception:
pass
else:
for cmime, ro in PARSE_CONTENTS.items():
if ro.search(data):
mime = cmime
break
return mime
init_mimedb()
| 3,657 | Python | .py | 91 | 35.43956 | 83 | 0.685899 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,249 | i18n.py | wummel_linkchecker/linkcheck/i18n.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Application internationalization support.
"""
# i18n suppport
import os
import locale
import gettext
import sys
import codecs
# more supported languages are added in init()
supported_languages = set(['en'])
default_language = default_encoding = None
default_directory = None
default_domain = None
def install_builtin (translator, do_unicode):
"""Install _() and _n() gettext methods into default namespace."""
try:
import __builtin__ as builtins
except ImportError:
# Python 3
import builtins
# Python 3 has no ugettext
has_unicode = hasattr(translator, 'ugettext')
if do_unicode and has_unicode:
builtins.__dict__['_'] = translator.ugettext
# also install ngettext
builtins.__dict__['_n'] = translator.ungettext
else:
builtins.__dict__['_'] = translator.gettext
# also install ngettext
builtins.__dict__['_n'] = translator.ngettext
class Translator (gettext.GNUTranslations):
"""A translation class always installing its gettext methods into the
default namespace."""
def install (self, do_unicode):
"""Install gettext methods into the default namespace."""
install_builtin(self, do_unicode)
class NullTranslator (gettext.NullTranslations):
"""A dummy translation class always installing its gettext methods into
the default namespace."""
def install (self, do_unicode):
"""Install gettext methods into the default namespace."""
install_builtin(self, do_unicode)
def init (domain, directory, loc=None):
"""Initialize this gettext i18n module. Searches for supported languages
and installs the gettext translator class."""
global default_language, default_encoding, default_domain, default_directory
default_directory = directory
default_domain = domain
if os.path.isdir(directory):
# get supported languages
for lang in os.listdir(directory):
path = os.path.join(directory, lang, 'LC_MESSAGES')
mo_file = os.path.join(path, '%s.mo' % domain)
if os.path.exists(mo_file):
supported_languages.add(lang)
if loc is None:
loc, encoding = get_locale()
else:
encoding = get_locale()[1]
if loc in supported_languages:
default_language = loc
else:
default_language = "en"
# Even if the default language is not supported, the encoding should
# be installed. Otherwise the Python installation is borked.
default_encoding = encoding
install_language(default_language)
def install_language(language):
"""Install translation service routines into default namespace."""
translator = get_translator(default_domain, default_directory,
languages=[get_lang(language)], fallback=True)
do_unicode = True
translator.install(do_unicode)
def get_translator (domain, directory, languages=None,
translatorklass=Translator, fallback=False,
fallbackklass=NullTranslator):
"""Search the appropriate GNUTranslations class."""
translator = gettext.translation(domain, localedir=directory,
languages=languages, class_=translatorklass, fallback=fallback)
if not isinstance(translator, gettext.GNUTranslations) and fallbackklass:
translator = fallbackklass()
return translator
def get_lang (lang):
"""Return lang if it is supported, or the default language."""
if lang in supported_languages:
return lang
return default_language
def get_headers_lang (headers):
"""Return preferred supported language in given HTTP headers."""
if 'Accept-Language' not in headers:
return default_language
languages = headers['Accept-Language'].split(",")
# sort with preference values
pref_languages = []
for lang in languages:
pref = 1.0
if ";" in lang:
lang, _pref = lang.split(';', 1)
try:
pref = float(_pref)
except ValueError:
pass
pref_languages.append((pref, lang))
pref_languages.sort()
# search for lang
for lang in (x[1] for x in pref_languages):
if lang in supported_languages:
return lang
return default_language
def get_locale ():
"""Search the default platform locale and norm it.
@returns (locale, encoding)
@rtype (string, string)"""
try:
loc, encoding = locale.getdefaultlocale()
except ValueError:
# locale configuration is broken - ignore that
loc, encoding = None, None
if loc is None:
loc = "C"
else:
loc = norm_locale(loc)
if encoding is None:
encoding = "ascii"
return (loc, encoding)
def norm_locale (loc):
"""Normalize a locale."""
loc = locale.normalize(loc)
# split up the locale into its base components
pos = loc.find('@')
if pos >= 0:
loc = loc[:pos]
pos = loc.find('.')
if pos >= 0:
loc = loc[:pos]
pos = loc.find('_')
if pos >= 0:
loc = loc[:pos]
return loc
lang_names = {
'en': u'English',
'de': u'Deutsch',
}
lang_transis = {
'de': {'en': u'German'},
'en': {'de': u'Englisch'},
}
def lang_name (lang):
"""Return full name of given language."""
return lang_names[lang]
def lang_trans (lang, curlang):
"""Return translated full name of given language."""
return lang_transis[lang][curlang]
def get_encoded_writer (out=sys.stdout, encoding=None, errors='replace'):
"""Get wrapped output writer with given encoding and error handling."""
if encoding is None:
encoding = default_encoding
Writer = codecs.getwriter(encoding)
return Writer(out, errors)
| 6,537 | Python | .py | 176 | 31.454545 | 80 | 0.678143 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,250 | threader.py | wummel_linkchecker/linkcheck/threader.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Support for managing threads.
"""
import threading
class StoppableThread (threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__ (self):
"""Store stop event."""
super(StoppableThread, self).__init__()
self._stop = threading.Event()
def stop (self):
"""Set stop event."""
self._stop.set()
def stopped (self, timeout=None):
"""Return True if stop event is set."""
return self._stop.wait(timeout)
| 1,347 | Python | .py | 33 | 37.363636 | 73 | 0.713522 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,251 | __init__.py | wummel_linkchecker/linkcheck/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main function module for link checking.
"""
# version checks
import sys
# Needs Python >= 2.7 because we use dictionary based logging config
# Needs Python >= 2.7.2 which fixed http://bugs.python.org/issue11467
if not (hasattr(sys, 'version_info') or
sys.version_info < (2, 7, 2, 'final', 0)):
import platform
version = platform.python_version()
raise SystemExit("This program requires Python 2.7.2 or later instead of %s." % version)
# require a reasonably recent requests module: 2.4.0 from 2014-08-29
import requests
# PEP 396 has only version strings, bummer! PEP 386 is also not helpful.
requests_version = requests.__version__.split('.')
# Depends on the version scheme of Python requests
if int(requests_version[0]) < 2 or \
(int(requests_version[0]) == 2 and int(requests_version[1]) < 4):
raise SystemExit("This program requires Python requests 2.4.0 or later instead of %s." % requests.__version__)
import os
# add the custom linkcheck_dns directory to sys.path
_dnspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'linkcheck_dns'))
if _dnspath not in sys.path:
sys.path.insert(0, _dnspath)
del _dnspath
import re
import signal
import traceback
from . import i18n, log
from .logconf import (
LOG_ROOT,
LOG_CMDLINE,
LOG_CHECK,
LOG_CACHE,
LOG_THREAD,
LOG_PLUGIN,
)
import _LinkChecker_configdata as configdata
def module_path ():
"""Return absolute directory of system executable."""
return os.path.dirname(os.path.abspath(sys.executable))
def get_install_data ():
"""Return absolute path of LinkChecker data installation directory."""
from .loader import is_frozen
if is_frozen():
return module_path()
return configdata.install_data
class LinkCheckerError(Exception):
"""Exception to be raised on linkchecker-specific check errors."""
pass
class LinkCheckerInterrupt(Exception):
"""Used for testing."""
pass
def get_link_pat (arg, strict=False):
"""Get a link pattern matcher for intern/extern links.
Returns a compiled pattern and a negate and strict option.
@param arg: pattern from config
@type arg: string
@param strict: if pattern is to be handled strict
@type strict: bool
@return: dictionary with keys 'pattern', 'negate' and 'strict'
@rtype: dict
@raises: re.error on invalid regular expressions
"""
log.debug(LOG_CHECK, "Link pattern %r strict=%s", arg, strict)
if arg.startswith('!'):
pattern = arg[1:]
negate = True
else:
pattern = arg
negate = False
try:
regex = re.compile(pattern)
except re.error as msg:
log.warn(LOG_CHECK, "invalid regular expression %r: %s" % (pattern, msg))
raise
return {
"pattern": regex,
"negate": negate,
"strict": strict,
}
def init_i18n (loc=None):
"""Initialize i18n with the configured locale dir. The environment
variable LOCPATH can also specify a locale dir.
@return: None
"""
if 'LOCPATH' in os.environ:
locdir = os.environ['LOCPATH']
else:
locdir = os.path.join(get_install_data(), 'share', 'locale')
i18n.init(configdata.name.lower(), locdir, loc=loc)
# install translated log level names
import logging
logging.addLevelName(logging.CRITICAL, _('CRITICAL'))
logging.addLevelName(logging.ERROR, _('ERROR'))
logging.addLevelName(logging.WARN, _('WARN'))
logging.addLevelName(logging.WARNING, _('WARNING'))
logging.addLevelName(logging.INFO, _('INFO'))
logging.addLevelName(logging.DEBUG, _('DEBUG'))
logging.addLevelName(logging.NOTSET, _('NOTSET'))
# initialize i18n, puts _() and _n() function into global namespace
init_i18n()
def drop_privileges ():
"""Make sure to drop root privileges on POSIX systems."""
if os.name != 'posix':
return
if os.geteuid() == 0:
log.warn(LOG_CHECK, _("Running as root user; "
"dropping privileges by changing user to nobody."))
import pwd
os.seteuid(pwd.getpwnam('nobody')[3])
def find_third_party_modules ():
"""Find third party modules and add them to the python path."""
parent = os.path.dirname(os.path.dirname(__file__))
third_party = os.path.join(parent, "third_party")
if os.path.isdir(third_party):
sys.path.append(os.path.join(third_party, "dnspython"))
find_third_party_modules()
if hasattr(signal, "SIGUSR1"):
# install SIGUSR1 handler
from .decorators import signal_handler
@signal_handler(signal.SIGUSR1)
def print_threadstacks(sig, frame):
"""Print stack traces of all running threads."""
log.warn(LOG_THREAD, "*** STACKTRACE START ***")
for threadId, stack in sys._current_frames().items():
log.warn(LOG_THREAD, "# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
log.warn(LOG_THREAD, 'File: "%s", line %d, in %s' % (filename, lineno, name))
line = line.strip()
if line:
log.warn(LOG_THREAD, " %s" % line)
log.warn(LOG_THREAD, "*** STACKTRACE END ***")
| 5,995 | Python | .py | 150 | 35.06 | 114 | 0.684247 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,252 | robotparser2.py | wummel_linkchecker/linkcheck/robotparser2.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Robots.txt parser.
The robots.txt Exclusion Protocol is implemented as specified in
http://www.robotstxt.org/wc/norobots-rfc.html
"""
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import urllib
import time
import requests
from . import log, LOG_CHECK, configuration
__all__ = ["RobotFileParser"]
ACCEPT_ENCODING = 'x-gzip,gzip,deflate'
class RobotFileParser (object):
"""This class provides a set of methods to read, parse and answer
questions about a single robots.txt file."""
    def __init__ (self, url='', session=None, proxies=None, auth=None):
        """Initialize internal entry lists and store given url and
        credentials.

        @param url: URL of the robots.txt file to handle
        @type url: string
        @param session: HTTP session to reuse; a new requests.Session()
           is created when None
        @param proxies: optional proxy mapping handed to requests
        @param auth: optional authentication object handed to requests
        """
        self.set_url(url)
        if session is None:
            # no shared session given; create a private one
            self.session = requests.Session()
        else:
            self.session = session
        self.proxies = proxies
        self.auth = auth
        self._reset()
def _reset (self):
"""Reset internal flags and entry lists."""
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.last_checked = 0
# list of tuples (sitemap url, line number)
self.sitemap_urls = []
def mtime (self):
"""Returns the time the robots.txt file was last fetched.
This is useful for long-running web spiders that need to
check for new robots.txt files periodically.
@return: last modified in time.time() format
@rtype: number
"""
return self.last_checked
def modified (self):
"""Set the time the robots.txt file was last fetched to the
current time."""
self.last_checked = time.time()
def set_url (self, url):
"""Set the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urlparse.urlparse(url)[1:3]
    def read (self):
        """Read the robots.txt URL and feeds it to the parser.

        On success the fetched rules are parsed; otherwise either
        allow_all or disallow_all is set, depending on the failure.
        """
        self._reset()
        kwargs = dict(
            headers = {
                'User-Agent': configuration.UserAgent,
                'Accept-Encoding': ACCEPT_ENCODING,
            }
        )
        if self.auth:
            kwargs["auth"] = self.auth
        if self.proxies:
            kwargs["proxies"] = self.proxies
        try:
            response = self.session.get(self.url, **kwargs)
            response.raise_for_status()
            content_type = response.headers.get('content-type')
            if content_type and content_type.lower().startswith('text/plain'):
                self.parse(response.iter_lines())
            else:
                # not a plain-text file: treat like an absent robots.txt
                log.debug(LOG_CHECK, "%r allow all (no text content)", self.url)
                self.allow_all = True
        except requests.HTTPError as x:
            if x.response.status_code in (401, 403):
                # access to robots.txt itself is restricted: deny everything
                self.disallow_all = True
                log.debug(LOG_CHECK, "%r disallow all (code %d)", self.url, x.response.status_code)
            else:
                # any other HTTP error means there are no usable restrictions
                self.allow_all = True
                log.debug(LOG_CHECK, "%r allow all (HTTP error)", self.url)
        except requests.exceptions.Timeout:
            # propagate timeouts to the caller
            raise
        except requests.exceptions.RequestException:
            # no network or other failure
            self.allow_all = True
            log.debug(LOG_CHECK, "%r allow all (request error)", self.url)
def _add_entry (self, entry):
"""Add a parsed entry to entry list.
@return: None
"""
if "*" in entry.useragents:
# the default entry is considered last
self.default_entry = entry
else:
self.entries.append(entry)
    def parse (self, lines):
        """Parse the input lines from a robot.txt file.
        We allow that a user-agent: line is not preceded by
        one or more blank lines.

        @return: None
        """
        log.debug(LOG_CHECK, "%r parse lines", self.url)
        # state machine: 0 = expecting a user-agent line,
        # 1 = collecting user-agent lines, 2 = collecting rule lines
        state = 0
        linenumber = 0
        entry = Entry()
        for line in lines:
            line = line.strip()
            linenumber += 1
            if not line:
                # a blank line terminates the current record
                if state == 1:
                    log.debug(LOG_CHECK, "%r line %d: allow or disallow directives without any user-agent line", self.url, linenumber)
                    entry = Entry()
                    state = 0
                elif state == 2:
                    self._add_entry(entry)
                    entry = Entry()
                    state = 0
            # remove optional comment and strip line
            i = line.find('#')
            if i >= 0:
                line = line[:i]
            line = line.strip()
            if not line:
                continue
            # split into "key: value"; Python 2 urllib.unquote decodes the value
            line = line.split(':', 1)
            if len(line) == 2:
                line[0] = line[0].strip().lower()
                line[1] = urllib.unquote(line[1].strip())
                if line[0] == "user-agent":
                    if state == 2:
                        # tolerate a missing blank line between records
                        log.debug(LOG_CHECK, "%r line %d: missing blank line before user-agent directive", self.url, linenumber)
                        self._add_entry(entry)
                        entry = Entry()
                    entry.useragents.append(line[1])
                    state = 1
                elif line[0] == "disallow":
                    if state == 0:
                        log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
                        pass
                    else:
                        entry.rulelines.append(RuleLine(line[1], False))
                        state = 2
                elif line[0] == "allow":
                    if state == 0:
                        log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
                        pass
                    else:
                        entry.rulelines.append(RuleLine(line[1], True))
                        state = 2
                elif line[0] == "crawl-delay":
                    if state == 0:
                        log.debug(LOG_CHECK, "%r line %d: missing user-agent directive before this line", self.url, linenumber)
                        pass
                    else:
                        try:
                            # negative delays are clamped to zero
                            entry.crawldelay = max(0, int(line[1]))
                            state = 2
                        except (ValueError, OverflowError):
                            log.debug(LOG_CHECK, "%r line %d: invalid delay number %r", self.url, linenumber, line[1])
                            pass
                elif line[0] == "sitemap":
                    # Note that sitemap URLs must be absolute according to
                    # http://www.sitemaps.org/protocol.html#submit_robots
                    # But this should be checked by the calling layer.
                    self.sitemap_urls.append((line[1], linenumber))
                else:
                    log.debug(LOG_CHECK, "%r line %d: unknown key %r", self.url, linenumber, line[0])
                    pass
            else:
                log.debug(LOG_CHECK, "%r line %d: malformed line %r", self.url, linenumber, line)
                pass
        # flush the last record when the file does not end with a blank line
        if state in (1, 2):
            self.entries.append(entry)
        self.modified()
        log.debug(LOG_CHECK, "Parsed rules:\n%s", str(self))
    def can_fetch (self, useragent, url):
        """Using the parsed robots.txt decide if useragent can fetch url.

        @return: True if agent can fetch url, else False
        @rtype: bool
        """
        log.debug(LOG_CHECK, "%r check allowance for:\n user agent: %r\n url: %r ...", self.url, useragent, url)
        # coerce unicode arguments to byte strings (Python 2 semantics)
        if not isinstance(useragent, str):
            useragent = useragent.encode("ascii", "ignore")
        if not isinstance(url, str):
            url = url.encode("ascii", "ignore")
        if self.disallow_all:
            log.debug(LOG_CHECK, " ... disallow all.")
            return False
        if self.allow_all:
            log.debug(LOG_CHECK, " ... allow all.")
            return True
        # search for given user agent matches
        # the first match counts
        # normalize: unquote, keep only the path component, re-quote;
        # an empty path defaults to "/"
        url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
        for entry in self.entries:
            if entry.applies_to(useragent):
                return entry.allowance(url)
        # try the default entry last
        if self.default_entry is not None:
            return self.default_entry.allowance(url)
        # agent not found ==> access granted
        log.debug(LOG_CHECK, " ... agent not found, allow.")
        return True
def get_crawldelay (self, useragent):
"""Look for a configured crawl delay.
@return: crawl delay in seconds or zero
@rtype: integer >= 0
"""
for entry in self.entries:
if entry.applies_to(useragent):
return entry.crawldelay
return 0
def __str__ (self):
"""Constructs string representation, usable as contents of a
robots.txt file.
@return: robots.txt format
@rtype: string
"""
lines = [str(entry) for entry in self.entries]
if self.default_entry is not None:
lines.append(str(self.default_entry))
return "\n\n".join(lines)
class RuleLine (object):
    """A rule line is a single "Allow:" (allowance==1) or "Disallow:"
    (allowance==0) followed by a path."""

    def __init__ (self, path, allowance):
        """Initialize with given path and allowance info.

        @param path: URL path this rule covers
        @param allowance: True for "Allow:", False for "Disallow:"
        """
        if path == '' and not allowance:
            # an empty value means allow all
            allowance = True
            path = '/'
        if path == '*':
            # Keep the lone wildcard literal: Python 2 urllib.quote does
            # not treat '*' as safe and would escape it to '%2A', so the
            # wildcard test in applies_to() could never match.
            self.path = path
        else:
            self.path = urllib.quote(path)
        self.allowance = allowance

    def applies_to (self, path):
        """Look if given path applies to this rule.

        @return: True if pathname applies to this rule, else False
        @rtype: bool
        """
        return self.path == "*" or path.startswith(self.path)

    def __str__ (self):
        """Construct string representation in robots.txt format.

        @return: robots.txt format
        @rtype: string
        """
        return ("Allow" if self.allowance else "Disallow")+": "+self.path
class Entry (object):
    """One robots.txt record: one or more user-agents plus zero or more
    rule lines and an optional crawl delay."""

    def __init__ (self):
        """Start with no user agents, no rules and no crawl delay."""
        self.useragents = []
        self.rulelines = []
        self.crawldelay = 0

    def __str__ (self):
        """string representation in robots.txt format.

        @return: robots.txt format
        @rtype: string
        """
        lines = []
        for agent in self.useragents:
            lines.append("User-agent: %s" % agent)
        if self.crawldelay:
            lines.append("Crawl-delay: %d" % self.crawldelay)
        for line in self.rulelines:
            lines.append(str(line))
        return "\n".join(lines)

    def applies_to (self, useragent):
        """Check if this entry applies to the specified agent.

        @return: True if this entry applies to the agent, else False.
        @rtype: bool
        """
        if not useragent:
            # no agent given: every entry matches
            return True
        useragent = useragent.lower()
        for agent in self.useragents:
            # either the catch-all agent or a case-insensitive substring match
            if agent == '*' or agent.lower() in useragent:
                return True
        return False

    def allowance (self, filename):
        """Preconditions:
        - our agent applies to this entry
        - filename is URL decoded

        Check if given filename is allowed to access this entry.

        @return: True if allowed, else False
        @rtype: bool
        """
        for line in self.rulelines:
            log.debug(LOG_CHECK, "%s %s %s", filename, str(line), line.allowance)
            if line.applies_to(filename):
                log.debug(LOG_CHECK, " ... rule line %s", line)
                return line.allowance
        log.debug(LOG_CHECK, " ... no rule lines of %s applied to %s; allowed.", self.useragents, filename)
        return True
| 12,956 | Python | .py | 313 | 30.182109 | 134 | 0.560263 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,253 | log.py | wummel_linkchecker/linkcheck/log.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2003-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Logging and debug functions.
"""
import logging
import os
import inspect
import traceback
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
# memory leak debugging
#import gc
#gc.enable()
#gc.set_debug(gc.DEBUG_LEAK)
PRINT_LOCALVARS = False
def _stack_format (stack):
    """Format a stack trace to a message.

    @param stack: list of frame records as returned by inspect.stack()
    @return: formatted stack message
    @rtype: string
    """
    s = StringIO()
    s.write('Traceback:')
    s.write(os.linesep)
    # innermost frame last, like standard tracebacks
    for frame, fname, lineno, method, lines, dummy in reversed(stack):
        s.write(' File %r, line %d, in %s' % (fname, lineno, method))
        s.write(os.linesep)
        # NOTE(review): assumes each frame record carries at least one
        # source line; inspect can yield None here for frames without
        # available source - confirm before enabling on frozen builds
        s.write(' %s' % lines[0].lstrip())
        if PRINT_LOCALVARS:
            for key, value in frame.f_locals.items():
                s.write(" %s = " % key)
                # be careful not to cause a new error in the error output
                try:
                    s.write(repr(value))
                    s.write(os.linesep)
                except Exception:
                    s.write("error in repr() call%s" % os.linesep)
    return s.getvalue()
def _log (fun, msg, args, **kwargs):
"""Log a message with given function. Optional the following keyword
arguments are supported:
traceback(bool) - if True print traceback of current function
exception(bool) - if True print last exception traceback
@return: None
"""
fun(msg, *args)
if kwargs.get("traceback"):
# note: get rid of last parts of the stack
fun(_stack_format(inspect.stack()[2:]))
if kwargs.get("exception"):
fun(traceback.format_exc())
def debug (logname, msg, *args, **kwargs):
    """Log a message with severity DEBUG on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.DEBUG):
        _log(logger.debug, msg, args, **kwargs)
def info (logname, msg, *args, **kwargs):
    """Log a message with severity INFO on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.INFO):
        _log(logger.info, msg, args, **kwargs)
def warn (logname, msg, *args, **kwargs):
    """Log a message with severity WARN on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.WARN):
        _log(logger.warn, msg, args, **kwargs)
def error (logname, msg, *args, **kwargs):
    """Log a message with severity ERROR on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.ERROR):
        _log(logger.error, msg, args, **kwargs)
def critical (logname, msg, *args, **kwargs):
    """Log a message with severity CRITICAL on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.CRITICAL):
        _log(logger.critical, msg, args, **kwargs)
def exception (logname, msg, *args, **kwargs):
    """Log an exception (message plus traceback) on the named logger.

    return: None
    """
    logger = logging.getLogger(logname)
    if logger.isEnabledFor(logging.ERROR):
        _log(logger.exception, msg, args, **kwargs)
def is_debug (logname):
    """Tell whether the named logger currently emits debug messages."""
    logger = logging.getLogger(logname)
    return logger.isEnabledFor(logging.DEBUG)
def shutdown ():
    """Flush buffered records and close every logging handler."""
    logging.shutdown()
| 3,994 | Python | .py | 116 | 29.215517 | 73 | 0.659392 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,254 | gzip2.py | wummel_linkchecker/linkcheck/gzip2.py | # -*- coding: iso-8859-1 -*-
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# Copied from Python source; License: Python License
# Copyright Guido van Rossum <guido@cwi.nl> and others
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import __builtin__
__all__ = ["GzipFile","open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def write32u(output, value):
    """Write value to output as a little-endian unsigned 32-bit integer."""
    # The L format writes the bit pattern correctly whether signed
    # or unsigned.
    packed = struct.pack("<L", value)
    output.write(packed)
def read32(input):
    """Read four bytes from input and return them as a little-endian
    unsigned 32-bit integer."""
    (value,) = struct.unpack("<I", input.read(4))
    return value
def open(filename, mode="rb", compresslevel=9):
    """Open a gzip file; shorthand for GzipFile(filename, mode, compresslevel).

    The filename argument is required; mode defaults to 'rb'
    and compresslevel defaults to 9.
    """
    # deliberately shadows the builtin open, mirroring the gzip module API
    gz = GzipFile(filename, mode, compresslevel)
    return gz
class GzipFile:
    """The GzipFile class simulates most of the methods of a file object with
    the exception of the readinto() and truncate() methods.
    """
    # NOTE(review): vendored copy of the Python 2 stdlib gzip module (see
    # file header).  It relies on Python-2-only constructs (long literals
    # like 0xffffffffL, ``raise E, msg`` syntax, __builtin__, xrange,
    # sys.maxint) and cannot run unmodified under Python 3.

    myfileobj = None
    max_read_chunk = 10 * 1024 * 1024 # 10Mb

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.

        At least one of fileobj and filename must be given a
        non-trivial value.

        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.

        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file.  It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.

        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written.  The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.

        The compresslevel argument is an integer from 0 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. 0 is no compression
        at all. The default is 9.

        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing.  All gzip compressed streams
        are required to contain a timestamp.  If omitted or None, the
        current time is used.  This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it.  The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().

        """
        # Make sure we don't inadvertently enable universal newlines on the
        # underlying file object - in read mode, this causes data corruption.
        if mode:
            mode = mode.replace('U', '')
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            # Issue #13781: os.fdopen() creates a fileobj with a bogus name
            # attribute. Avoid saving this in the gzip header's filename field.
            if hasattr(fileobj, 'name') and fileobj.name != '<fdopen>':
                filename = fileobj.name
            else:
                filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'

        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            self.extrabuf = ""
            self.extrasize = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100

        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError("Mode %r not supported" % mode)

        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime

        if self.mode == WRITE:
            self._write_gzip_header()

    @property
    def filename(self):
        # deprecated accessor kept for backwards compatibility
        import warnings
        warnings.warn("use the name attribute", DeprecationWarning, 2)
        if self.mode == WRITE and self.name[-3:] != ".gz":
            return self.name + ".gz"
        return self.name

    def __repr__(self):
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'

    def _check_closed(self):
        """Raises a ValueError if the underlying file object has been closed.

        """
        if self.closed:
            raise ValueError('I/O operation on closed file.')

    def _init_write(self, filename):
        # reset CRC and size counters for a new compressed stream
        self.name = filename
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
        self.writebuf = []
        self.bufsize = 0

    def _write_gzip_header(self):
        self.fileobj.write('\037\213')             # magic header
        self.fileobj.write('\010')                 # compression method
        fname = os.path.basename(self.name)
        if fname.endswith(".gz"):
            fname = fname[:-3]
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags))
        mtime = self.mtime
        if mtime is None:
            mtime = time.time()
        write32u(self.fileobj, long(mtime))
        self.fileobj.write('\002')
        self.fileobj.write('\377')
        if fname:
            self.fileobj.write(fname + '\000')

    def _init_read(self):
        # reset CRC and size counters before reading a new member
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0

    def _read_gzip_header(self):
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError('Not a gzipped file')
        method = ord( self.fileobj.read(1) )
        if method != 8:
            raise IOError('Unknown compression method')
        flag = ord( self.fileobj.read(1) )
        self.mtime = read32(self.fileobj)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(2)

        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2)     # Read & discard the 16-bit header CRC

    def write(self,data):
        """Compress data and append it to the stream (write mode only)."""
        self._check_closed()
        if self.mode != WRITE:
            import errno
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")

        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"

        if len(data) > 0:
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
            self.fileobj.write( self.compress.compress(data) )
            self.offset += len(data)

        return len(data)

    def read(self, size=-1):
        """Read up to size uncompressed bytes; all remaining when size < 0."""
        self._check_closed()
        if self.mode != READ:
            import errno
            raise IOError(errno.EBADF, "read() on write-only GzipFile object")

        if self.extrasize <= 0 and self.fileobj is None:
            return ''

        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                while True:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize

        chunk = self.extrabuf[:size]
        self.extrabuf = self.extrabuf[size:]
        self.extrasize = self.extrasize - size

        self.offset += size
        return chunk

    def _unread(self, buf):
        # push bytes back into the internal buffer
        self.extrabuf = buf + self.extrabuf
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)

    def _read(self, size=1024):
        if self.fileobj is None:
            raise EOFError("Reached EOF")

        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError("Reached EOF")
            else:
                self.fileobj.seek( pos ) # Return to original position

            self._init_read()
            self._read_gzip_header()
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False

        # Read a chunk of data from the file
        buf = self.fileobj.read(size)

        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.

        if buf == "":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data( uncompress )
            raise EOFError('Reached EOF')

        uncompress = self.decompress.decompress(buf)
        self._add_read_data( uncompress )

        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            rewind = -len(self.decompress.unused_data)+8
            if rewind > 0:
                # too few unused data encountered, assume EOF
                errmsg = "Unexpected EOF: %r" % self.decompress.unused_data
                raise EOFError(errmsg)
            self.fileobj.seek(rewind, 1)

            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True

    def _add_read_data(self, data):
        # account for freshly decompressed data in CRC, buffer and size
        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
        self.extrabuf = self.extrabuf + data
        self.extrasize = self.extrasize + len(data)
        self.size = self.size + len(data)

    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check the that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)  # may exceed 2GB
        if crc32 != self.crc:
            raise IOError("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self.crc)))
        elif isize != (self.size & 0xffffffffL):
            raise IOError, "Incorrect length of data produced"

        # Gzip files can be padded with zeroes and still have archives.
        # Consume all zero bytes and set the file position to the first
        # non-zero byte. See http://www.gzip.org/#faq8
        c = "\x00"
        while c == "\x00":
            c = self.fileobj.read(1)
        if c:
            self.fileobj.seek(-1, 1)

    @property
    def closed(self):
        return self.fileobj is None

    def close(self):
        """Finish the stream (write mode: emit trailer) and detach files."""
        if self.fileobj is None:
            return
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32u(self.fileobj, self.crc)
            # self.size may exceed 2GB, or even 4GB
            write32u(self.fileobj, self.size & 0xffffffffL)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            self.myfileobj.close()
            self.myfileobj = None

    def __del__(self):
        try:
            if (self.myfileobj is None and
                self.fileobj is None):
                return
        except AttributeError:
            return
        self.close()

    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
        self._check_closed()
        if self.mode == WRITE:
            # Ensure the compressor's buffer is flushed
            self.fileobj.write(self.compress.flush(zlib_mode))
            self.fileobj.flush()

    def fileno(self):
        """Invoke the underlying file object's fileno() method.

        This will raise AttributeError if the underlying file object
        doesn't support fileno().
        """
        return self.fileobj.fileno()

    def isatty(self):
        return False

    def tell(self):
        return self.offset

    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = True
        self.extrabuf = ""
        self.extrasize = 0
        self.offset = 0

    def readable(self):
        return self.mode == READ

    def writable(self):
        return self.mode == WRITE

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        """Seek to offset; only relative-forward or absolute seeks supported."""
        if whence:
            if whence == 1:
                offset = self.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            count = offset - self.offset
            # pad with zero bytes up to the target position
            for i in xrange(count // 1024):
                self.write(1024 * '\0')
            self.write((count % 1024) * '\0')
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            count = offset - self.offset
            for i in xrange(count // 1024):
                self.read(1024)
            self.read(count % 1024)

        return self.offset

    def readline(self, size=-1):
        """Read one line of decompressed data, up to size bytes."""
        if size < 0:
            # whole line, however long
            size = sys.maxint
            readsize = self.min_readsize
        else:
            readsize = size
        bufs = []
        while size != 0:
            c = self.read(readsize)
            i = c.find('\n')

            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if (size <= i) or (i == -1 and len(c) > size):
                i = size - 1

            if i >= 0 or c == '':
                bufs.append(c[:i + 1])    # Add portion of last chunk
                self._unread(c[i + 1:])   # Push back rest of chunk
                break

            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)
        if readsize > self.min_readsize:
            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
        return ''.join(bufs) # Return resulting line

    def readlines(self, sizehint=0):
        # Negative numbers result in reading all the lines
        if sizehint <= 0:
            sizehint = sys.maxint
        L = []
        while sizehint > 0:
            line = self.readline()
            if line == "":
                break
            L.append(line)
            sizehint = sizehint - len(line)

        return L

    def writelines(self, L):
        for line in L:
            self.write(line)

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()
        if line:
            return line
        else:
            raise StopIteration

    def __enter__(self):
        if self.fileobj is None:
            raise ValueError("I/O operation on closed GzipFile object")
        return self

    def __exit__(self, *args):
        self.close()
def _test():
    """Minimal gzip/gunzip command line driver (Python 2 only)."""
    # Act like gzip; with -d, act like gunzip.
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    if decompress:
        args = args[1:]
    if not args:
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != ".gz":
                    print "filename doesn't end in .gz:", repr(arg)
                    continue
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            if arg == "-":
                f = sys.stdin
                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
            else:
                f = __builtin__.open(arg, "rb")
                g = open(arg + ".gz", "wb")
        # copy in 1KB chunks until the source is exhausted
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()

if __name__ == '__main__':
    _test()
| 19,370 | Python | .py | 471 | 30.197452 | 79 | 0.560995 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,255 | updater.py | wummel_linkchecker/linkcheck/updater.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Function to check for updates.
"""
import os
from .configuration import Version as CurrentVersion
from .url import get_content
from distutils.version import StrictVersion
# Use the Freecode submit file as source since that file gets updated
# only when releasing a new version.
UPDATE_URL = "https://raw.github.com/wummel/linkchecker/master/linkchecker.freecode"
VERSION_TAG = 'Version:'
if os.name == 'nt':
URL_TAG = 'Windows-installer-URL:'
else:
URL_TAG = 'Source-Package-URL:'
def check_update ():
    """Compare the current version against the published one.

    Return values:
    (False, errmsg) - online version could not be determined
    (True, None) - user has newest version
    (True, (version, url string)) - update available
    (True, (version, None)) - current version is newer than online version
    """
    online_version, info = get_online_version()
    if online_version is None:
        # info holds the error message
        return False, info
    if online_version == CurrentVersion:
        # already up to date
        return True, None
    if not is_newer_version(online_version):
        # user is running a local or development version
        return True, (online_version, None)
    # info holds the URL of the update package
    return True, (online_version, info)
def get_online_version ():
    """Download update info and parse it.

    @return: tuple (version or None, download URL or None); on download
      failure the second item is the error message from get_content()
    """
    # prevent getting a cached answer
    headers = {'Pragma': 'no-cache', 'Cache-Control': 'no-cache'}
    content, info = get_content(UPDATE_URL, addheaders=headers)
    if content is None:
        # info is the error message
        return content, info
    version, url = None, None
    for line in content.splitlines():
        if line.startswith(VERSION_TAG):
            version = line.split(':', 1)[1].strip()
        elif line.startswith(URL_TAG):
            url = line.split(':', 1)[1].strip()
    if url is not None and version is not None:
        # Substitute after parsing the whole file so the tag order does
        # not matter; the previous in-loop replace() raised TypeError
        # when the URL tag preceded the Version tag (version still None).
        url = url.replace('${version}', version)
    return version, url
def is_newer_version (version):
    """Check if given version is newer than current version."""
    current = StrictVersion(CurrentVersion)
    return StrictVersion(version) > current
| 2,808 | Python | .py | 68 | 37 | 84 | 0.707174 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,256 | ftpparse.py | wummel_linkchecker/linkcheck/ftpparse.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2009-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Python implementation of a part of Dan Bernstein's ftpparse library.
See also http://cr.yp.to/ftpparse.html
"""
# Lowercase three-letter abbreviations of the English month names.
months = ("jan", "feb", "mar", "apr", "may", "jun",
    "jul", "aug", "sep", "oct", "nov", "dec")

def ismonth (txt):
    """Return True if txt is an abbreviated English month name,
    compared case-insensitively."""
    lowered = txt.lower()
    return lowered in months
def ftpparse (line):
    """Parse a FTP list line into a dictionary with attributes:
    name - name of file (string)
    trycwd - False if cwd is definitely pointless, True otherwise
    tryretr - False if retr is definitely pointless, True otherwise
    If the line has no file information, None is returned

    The format checks below are tried in order (EPLF, UNIX-style ls,
    VMS/MultiNet, MSDOS) and the first matching one wins, so the order
    of the branches is significant.
    """
    if len(line) < 2:
        # an empty name in EPLF, with no info, could be 2 chars
        return None
    info = dict(name=None, trycwd=False, tryretr=False)
    # EPLF format
    # http://pobox.com/~djb/proto/eplf.html
    # "+i8388621.29609,m824255902,/,\tdev"
    # "+i8388621.44468,m839956783,r,s10376,\tRFCEPLF"
    if line[0] == '+':
        if '\t' in line:
            # facts before the tab, filename after it
            flags, name = line.split('\t', 1)
            info['name'] = name
            flags = flags.split(',')
            # '/' fact means cwd works, 'r' fact means retr works
            info['trycwd'] = '/' in flags
            info['tryretr'] = 'r' in flags
        return info
    # UNIX-style listing, without inum and without blocks
    # "-rw-r--r-- 1 root other 531 Jan 29 03:26 README"
    # "dr-xr-xr-x 2 root other 512 Apr 8 1994 etc"
    # "dr-xr-xr-x 2 root 512 Apr 8 1994 etc"
    # "lrwxrwxrwx 1 root other 7 Jan 25 00:17 bin -> usr/bin"
    # Also produced by Microsoft's FTP servers for Windows:
    # "---------- 1 owner group 1803128 Jul 10 10:18 ls-lR.Z"
    # "d--------- 1 owner group 0 May 9 19:45 Softlib"
    # Also WFTPD for MSDOS:
    # "-rwxrwxrwx 1 noone nogroup 322 Aug 19 1996 message.ftp"
    # Also NetWare:
    # "d [R----F--] supervisor 512 Jan 16 18:53 login"
    # "- [R----F--] rhesus 214059 Oct 20 15:27 cx.exe"
    # Also NetPresenz for the Mac:
    # "-------r-- 326 1391972 1392298 Nov 22 1995 MegaPhone.sit"
    # "drwxrwxr-x folder 2 May 10 1996 network"
    if line[0] in 'bcdlps-':
        # the first character of the permission field is the entry type
        if line[0] == 'd':
            info['trycwd'] = True
        if line[0] == '-':
            info['tryretr'] = True
        if line[0] == 'l':
            # a symlink may point to a file or a directory; try both
            info['trycwd'] = info['tryretr'] = True
        parts = line.split()
        if len(parts) < 7:
            return None
        # strip leading fields until only the date and name remain;
        # ismonth() probes compensate for listings with missing columns
        del parts[0] # skip permissions
        if parts[0] != 'folder':
            del parts[0] # skip nlink
        del parts[0] # skip uid
        del parts[0] # skip gid or size
        if not ismonth(parts[0]):
            del parts[0] # skip size
        if not ismonth(parts[0]):
            return None
        del parts[0] # skip month
        del parts[0] # skip day
        if not parts:
            return None
        del parts[0] # skip year or time
        # remaining fields form the (possibly space-containing) name
        name = " ".join(parts)
        # resolve links
        if line[0] == 'l' and ' -> ' in name:
            name = name.split(' -> ', 1)[1]
        # eliminate extra NetWare spaces
        if line[1] in ' [' and name.startswith('   '):
            name = name[3:]
        info["name"] = name
        return info
    # MultiNet (some spaces removed from examples)
    # "00README.TXT;1 2 30-DEC-1996 17:44 [SYSTEM] (RWED,RWED,RE,RE)"
    # "CORE.DIR;1 1 8-SEP-1996 16:09 [SYSTEM] (RWE,RWE,RE,RE)"
    # and non-MutliNet VMS:
    # "CII-MANUAL.TEX;1 213/216 29-JAN-1996 03:33:12 [ANONYMOU,ANONYMOUS] (RWED,RWED,,)"
    # note: any remaining line containing a semicolon is treated as VMS
    i = line.find(';')
    if i != -1:
        # the name is everything before the version number separator ';'
        name = line[:i]
        if name.endswith(".DIR"):
            # VMS directories carry a .DIR suffix which is stripped
            name = name[:-4]
            info["trycwd"] = True
        else:
            info["tryretr"] = True
        info["name"] = name
        return info
    # MSDOS format
    # 04-27-00 09:09PM <DIR> licensed
    # 07-18-00 10:16AM <DIR> pub
    # 04-14-00 03:47PM 589 readme.htm
    if line[0].isdigit():
        parts = line.split()
        if len(parts) != 4:
            return None
        info['name'] = parts[3]
        # third field is either "<DIR>" or the file size
        if parts[2][0] == '<':
            info['trycwd'] = True
        else:
            info['tryretr'] = True
        return info
    # Some useless lines, safely ignored:
    # "Total of 11 Files, 10966 Blocks." (VMS)
    # "total 14786" (UNIX)
    # "DISK$ANONFTP:[ANONYMOUS]" (VMS)
    # "Directory DISK$PCSA:[ANONYM]" (VMS)
    return None
| 5,339 | Python | .py | 132 | 33.628788 | 93 | 0.565519 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,257 | memoryutil.py | wummel_linkchecker/linkcheck/memoryutil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2012-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Memory utilities.
"""
import gc
import pprint
from . import strformat, log, LOG_CHECK
from .fileutil import get_temp_file
# Message to display when meliae package is not installed
# (meliae is an optional dependency, only needed for memory debugging)
MemoryDebugMsg = strformat.format_feature_warning(module=u'meliae',
    feature=u'memory debugging',
    url=u'https://launchpad.net/meliae')
def write_memory_dump():
    """Dump memory to a temporary filename with the meliae package.
    @return: JSON filename where memory dump has been written to
    @rtype: string
    """
    # first do a full garbage collection run
    gc.collect()
    if gc.garbage:
        # BUG FIX: log message typo "Unreachabe" corrected
        log.warn(LOG_CHECK, "Unreachable objects: %s", pprint.pformat(gc.garbage))
    # import locally since meliae is an optional dependency
    from meliae import scanner
    fo, filename = get_temp_file(mode='wb', suffix='.json', prefix='lcdump_')
    try:
        scanner.dump_all_objects(fo)
    finally:
        # always close the dump file, even if the scanner fails
        fo.close()
    return filename
| 1,681 | Python | .py | 43 | 35.767442 | 81 | 0.734394 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,258 | decorators.py | wummel_linkchecker/linkcheck/decorators.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Simple decorators (usable in Python >= 2.4).
Example:
@synchronized(thread.allocate_lock())
def f ():
"Synchronized function"
print("i am synchronized:", f, f.__doc__)
@deprecated
def g ():
"this function is deprecated"
pass
@notimplemented
def h ():
"todo"
pass
"""
from __future__ import print_function
import warnings
import signal
import os
import sys
import time
def update_func_meta (fake_func, real_func):
    """Copy meta information (eg. __doc__) from the real function onto
    the fake (wrapper) function.
    @return fake_func
    """
    for meta_attr in ('__module__', '__name__', '__doc__'):
        setattr(fake_func, meta_attr, getattr(real_func, meta_attr))
    # also take over any custom function attributes
    fake_func.__dict__.update(real_func.__dict__)
    return fake_func
def deprecated (func):
    """A decorator which can be used to mark functions as deprecated.
    It emits a warning when the function is called."""
    def newfunc (*args, **kwargs):
        """Emit a deprecation warning, then run the original function."""
        message = "Call to deprecated function %s." % func.__name__
        warnings.warn(message, category=DeprecationWarning)
        return func(*args, **kwargs)
    return update_func_meta(newfunc, func)
def signal_handler (signal_number):
    """From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/410666
    A decorator to set the specified function as handler for a signal.
    This function is the 'outer' decorator, called with only the
    (non-function) arguments.
    If signal_number is not a valid signal (for example signal.SIGN),
    no handler is set.
    """
    # the 'real' decorator takes only a function as an argument
    def register (function):
        """Register function as signal handler if the number is valid."""
        # note: kill(2) treats signal number 0 specially, but signal(2)
        # only accepts positive numbers below NSIG
        if 0 < signal_number < signal.NSIG and os.name == 'posix':
            signal.signal(signal_number, function)
        return function
    return register
def synchronize (lock, func, log_duration_secs=0):
    """Return synchronized function acquiring the given lock."""
    def newfunc (*args, **kwargs):
        """Execute function while holding the lock."""
        start = time.time()
        with lock:
            waited = time.time() - start
            if waited > log_duration_secs > 0:
                print("WARN:", func.__name__,
                      "locking took %0.2f seconds" % waited, file=sys.stderr)
            return func(*args, **kwargs)
    return update_func_meta(newfunc, func)
def synchronized (lock):
    """A decorator calling a function with acquired lock."""
    def decorator (func):
        return synchronize(lock, func)
    return decorator
def notimplemented (func):
    """Decorator that raises a NotImplementedError whenever the decorated
    function is called."""
    def newfunc (*args, **kwargs):
        """Raise NotImplementedError, naming the function's location."""
        # use __code__ instead of the Python-2-only func_code alias;
        # __code__ exists in Python 2.6+ as well as Python 3
        co = func.__code__
        attrs = (co.co_name, co.co_filename, co.co_firstlineno)
        raise NotImplementedError("function %s at %s:%d is not implemented" % attrs)
    return update_func_meta(newfunc, func)
def timeit (func, log, limit):
    """Print execution time of the function. For quick'n'dirty profiling."""
    def newfunc (*args, **kwargs):
        """Execute function and print execution time."""
        start = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - start
        if elapsed > limit:
            # only report calls that exceed the given limit
            print(func.__name__, "took %0.2f seconds" % elapsed, file=log)
            print(args, file=log)
            print(kwargs, file=log)
        return result
    return update_func_meta(newfunc, func)
def timed (log=sys.stderr, limit=2.0):
    """Decorator to run a function with timing info."""
    def decorator (func):
        return timeit(func, log, limit)
    return decorator
class memoized (object):
    """Decorator that caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned, and
    not re-evaluated."""

    def __init__(self, func):
        """Store function and initialize the cache."""
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        """Lookup and return cached result if found. Else call stored
        function with given arguments."""
        try:
            return self.cache[args]
        except KeyError:
            self.cache[args] = value = self.func(*args)
            return value
        except TypeError:
            # uncachable -- for instance, passing a list as an argument.
            # Better to not cache than to blow up entirely.
            return self.func(*args)

    def __repr__(self):
        """Return the function's docstring, or a placeholder for functions
        without one.
        BUG FIX: the original returned __doc__ directly, which made repr()
        raise TypeError for functions without a docstring (__doc__ is None).
        """
        return self.func.__doc__ or ("<memoized %s>" % self.func.__name__)
class curried (object):
    """Decorator that returns a function that keeps returning functions
    until all arguments are supplied; then the original function is
    evaluated."""

    def __init__(self, func, *a):
        """Store function and the arguments collected so far."""
        self.func = func
        self.args = a

    def __call__(self, *a):
        """If all function arguments are supplied, call it.
        Else return another curried object."""
        args = self.args + a
        # use __code__ instead of the Python-2-only func_code alias;
        # __code__ exists in Python 2.6+ as well as Python 3
        if len(args) < self.func.__code__.co_argcount:
            return curried(self.func, *args)
        else:
            return self.func(*args)
| 6,226 | Python | .py | 153 | 34.529412 | 103 | 0.659659 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,259 | containers.py | wummel_linkchecker/linkcheck/containers.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Special container classes.
"""
from collections import namedtuple
class AttrDict (dict):
    """Dictionary allowing attribute access to its elements if they
    are valid attribute names and not already existing methods."""

    def __getattr__ (self, name):
        """Look the attribute name up as a dictionary key."""
        return self.__getitem__(name)
class ListDict (dict):
    """A dictionary whose iterators reflect the order in which elements
    were added.
    """

    def __init__ (self):
        """Initialize the insertion-ordered key list."""
        super(ListDict, self).__init__()
        # list of keys in insertion order
        self._keys = []

    def setdefault (self, key, *args):
        """Remember key order if key not found."""
        if key not in self:
            self._keys.append(key)
        return super(ListDict, self).setdefault(key, *args)

    def __setitem__ (self, key, value):
        """Add key,value to dict, append key to the ordered key list."""
        if key not in self:
            self._keys.append(key)
        super(ListDict, self).__setitem__(key, value)

    def __delitem__ (self, key):
        """Remove key from dict."""
        self._keys.remove(key)
        super(ListDict, self).__delitem__(key)

    def pop (self, key, *args):
        """Remove key from dict and return its value; an optional default
        may be given like with dict.pop().
        BUG FIX: the original was missing the return statement and always
        returned None, contradicting its own docstring.
        """
        if key in self._keys:
            self._keys.remove(key)
        return super(ListDict, self).pop(key, *args)

    def popitem (self):
        """Remove oldest key from dict and return that item."""
        if self._keys:
            k = self._keys[0]
            v = self[k]
            del self[k]
            return (k, v)
        raise KeyError("popitem() on empty dictionary")

    def values (self):
        """Return list of values in insertion order."""
        return [self[k] for k in self._keys]

    def items (self):
        """Return list of items in insertion order."""
        return [(k, self[k]) for k in self._keys]

    def keys (self):
        """Return copy of the key list in insertion order."""
        return self._keys[:]

    def itervalues (self):
        """Return iterator over values in insertion order."""
        for k in self._keys:
            yield self[k]

    def iteritems (self):
        """Return iterator over items in insertion order."""
        for k in self._keys:
            yield (k, self[k])

    def iterkeys (self):
        """Return iterator over keys in insertion order."""
        return iter(self._keys)

    def clear (self):
        """Remove all dict entries."""
        self._keys = []
        super(ListDict, self).clear()

    def get_true (self, key, default):
        """Return default element if key is not in the dict, or if self[key]
        evaluates to False. Useful for example if value is None, but
        default value should be an empty string.
        """
        if key not in self or not self[key]:
            return default
        return self[key]
class CaselessDict (dict):
    """A dictionary ignoring the case of keys (which must be strings)."""

    def __getitem__ (self, key):
        """Return the item stored under the lowercased key."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).__getitem__(key.lower())

    def __delitem__ (self, key):
        """Remove the item stored under the lowercased key."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).__delitem__(key.lower())

    def __setitem__ (self, key, value):
        """Store value under the lowercased key."""
        assert isinstance(key, basestring)
        super(CaselessDict, self).__setitem__(key.lower(), value)

    def __contains__ (self, key):
        """Check membership of the lowercased key."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).__contains__(key.lower())

    def get (self, key, def_val=None):
        """Return value of the lowercased key, or def_val if missing."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).get(key.lower(), def_val)

    def setdefault (self, key, *args):
        """Set lowercased key value if missing, and return the value."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).setdefault(key.lower(), *args)

    def update (self, other):
        """Update this dict with lowercased keys from the other dict."""
        for k, v in other.items():
            super(CaselessDict, self).__setitem__(k.lower(), v)

    @classmethod
    def fromkeys (cls, iterable, value=None):
        """Construct new caseless dict from the given keys."""
        d = cls()
        for k in iterable:
            dict.__setitem__(d, k.lower(), value)
        return d

    def pop (self, key, *args):
        """Remove lowercased key from dict and return its value."""
        assert isinstance(key, basestring)
        return super(CaselessDict, self).pop(key.lower(), *args)
class CaselessSortedDict (CaselessDict):
    """Caseless dictionary with sorted keys."""

    def keys (self):
        """Return list of keys in sorted order."""
        return sorted(super(CaselessSortedDict, self).keys())

    def items (self):
        """Return list of (key, value) pairs sorted by key."""
        return [(key, self[key]) for key in self.keys()]

    def iteritems (self):
        """Return iterator over (key, value) pairs sorted by key."""
        return ((key, self[key]) for key in self.keys())
class LFUCache (dict):
    """Limited cache which purges least frequently used items."""

    def __init__ (self, size=1000):
        """Initialize internal LFU cache.
        @raise: ValueError if size is smaller than one
        """
        super(LFUCache, self).__init__()
        if size < 1:
            raise ValueError("invalid cache size %d" % size)
        self.size = size

    def __setitem__ (self, key, val):
        """Store given key/value."""
        if key in self:
            # store value, do not increase number of uses
            super(LFUCache, self).__getitem__(key)[1] = val
        else:
            # internal entries are [number of uses, value] lists
            super(LFUCache, self).__setitem__(key, [0, val])
            # check for size limit
            if len(self) > self.size:
                self.shrink()

    def shrink (self):
        """Shrink ca. 5% of entries, but at least one.
        BUG FIX: the original trimmed int(0.05*len(self)) entries, which is
        zero for caches with fewer than 20 entries, so small caches grew
        without bound.
        """
        trim = max(1, int(0.05 * len(self)))
        items = super(LFUCache, self).items()
        # sort entries by number of uses, least frequently used first
        keyfunc = lambda x: x[1][0]
        values = sorted(items, key=keyfunc)
        for item in values[0:trim]:
            del self[item[0]]

    def __getitem__ (self, key):
        """Update key usage and return value."""
        entry = super(LFUCache, self).__getitem__(key)
        entry[0] += 1
        return entry[1]

    def uses (self, key):
        """Get number of uses for given key (without increasing the number of
        uses)"""
        return super(LFUCache, self).__getitem__(key)[0]

    def get (self, key, def_val=None):
        """Update key usage if found and return value, else return default."""
        if key in self:
            return self[key]
        return def_val

    def setdefault (self, key, def_val=None):
        """Update key usage if found and return value, else set and return
        default."""
        if key in self:
            return self[key]
        self[key] = def_val
        return def_val

    def items (self):
        """Return list of items, not updating usage count."""
        return [(key, value[1]) for key, value in super(LFUCache, self).items()]

    def iteritems (self):
        """Return iterator of items, not updating usage count."""
        for key, value in super(LFUCache, self).items():
            yield (key, value[1])

    def values (self):
        """Return list of values, not updating usage count."""
        return [value[1] for value in super(LFUCache, self).values()]

    def itervalues (self):
        """Return iterator of values, not updating usage count."""
        for value in super(LFUCache, self).values():
            yield value[1]

    def popitem (self):
        """Remove and return an item."""
        key, value = super(LFUCache, self).popitem()
        return (key, value[1])

    def pop (self, key, *args):
        """Remove given key from the dict and return its value; an optional
        default may be given like with dict.pop().
        BUG FIX: the original pop() accepted no key argument and called
        dict.pop() without arguments, which always raised TypeError.
        """
        try:
            entry = super(LFUCache, self).pop(key)
        except KeyError:
            if args:
                return args[0]
            raise
        return entry[1]
def enum (*names):
    """Return an enum datatype instance from given list of keyword names.
    The enum values are zero-based integers.
    >>> Status = enum('open', 'pending', 'closed')
    >>> Status.open
    0
    >>> Status.pending
    1
    >>> Status.closed
    2
    """
    enum_type = namedtuple('Enum', ' '.join(names))
    return enum_type._make(range(len(names)))
| 9,026 | Python | .py | 229 | 31.598253 | 80 | 0.601441 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,260 | logconf.py | wummel_linkchecker/linkcheck/logconf.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Logging configuration
"""
import logging.config
import sys
from . import ansicolor
# application log areas (logger names under the 'linkcheck' hierarchy)
LOG_ROOT = "linkcheck"
LOG_CMDLINE = "linkcheck.cmdline"
LOG_CHECK = "linkcheck.check"
LOG_CACHE = "linkcheck.cache"
LOG_THREAD = "linkcheck.thread"
LOG_PLUGIN = "linkcheck.plugin"
# map user-visible area names to logger names
lognames = {
    "cmdline": LOG_CMDLINE,
    "checking": LOG_CHECK,
    "cache": LOG_CACHE,
    "thread": LOG_THREAD,
    "plugin": LOG_PLUGIN,
    "all": LOG_ROOT,
    }
# comma-separated list of quoted area names, eg. for help texts
lognamelist = ", ".join(repr(name) for name in lognames)
# logging configuration for logging.config.dictConfig();
# the 'loggers' section is filled in by init_log_config() below
configdict = {
    'version': 1,
    'loggers': {
    },
    'root': {
        'level': 'WARN',
    },
    'incremental': True,
}
def init_log_config(handler=None):
    """Set up the application logging (not to be confused with check loggers).
    """
    for applog in lognames.values():
        # propagate everywhere except for the root app logger 'linkcheck'
        configdict['loggers'][applog] = dict(
            level='INFO', propagate=(applog != LOG_ROOT))
    logging.config.dictConfig(configdict)
    if handler is None:
        handler = ansicolor.ColoredStreamHandler(strm=sys.stderr)
    add_loghandler(handler)
def add_loghandler (handler):
    """Add log handler to root logger and LOG_ROOT and set formatting."""
    fmt = "%(levelname)s %(name)s %(asctime)s %(threadName)s %(message)s"
    handler.setFormatter(logging.Formatter(fmt))
    for logger in (logging.getLogger(LOG_ROOT), logging.getLogger()):
        logger.addHandler(handler)
def remove_loghandler (handler):
    """Remove log handler from root logger and LOG_ROOT."""
    for logger in (logging.getLogger(LOG_ROOT), logging.getLogger()):
        logger.removeHandler(handler)
def reset_loglevel():
    """Reset log level to display only warnings and errors.
    Affects all application log areas (the 'all' entry of lognames).
    """
    set_loglevel(['all'], logging.WARN)
def set_debug(loggers):
    """Set debugging log level for the given list of log area names."""
    set_loglevel(loggers, logging.DEBUG)
    # enable for httplib debugging (used by requests.packages.urllib3)
    #import httplib
    #httplib.HTTPConnection.debuglevel = 1
def set_loglevel(loggers, level):
    """Set logging levels for given loggers."""
    if not loggers:
        return
    # 'all' is a shortcut for every known log area
    areas = lognames.keys() if 'all' in loggers else loggers
    for area in areas:
        logging.getLogger(lognames[area]).setLevel(level)
| 3,102 | Python | .py | 86 | 32.453488 | 79 | 0.718427 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,261 | dummy.py | wummel_linkchecker/linkcheck/dummy.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Dummy objects.
"""
class Dummy (object):
    """A dummy object ignores all access to it. Useful for testing."""
    def __init__ (self, *args, **kwargs):
        """Ignore any constructor arguments."""
        pass
    def __call__ (self, *args, **kwargs):
        """Ignore call arguments and return self."""
        return self
    def __getattr__ (self, name):
        """Return self for any attribute access."""
        return self
    def __setattr__ (self, name, value):
        """Silently ignore attribute assignment."""
        pass
    def __delattr__ (self, name):
        """Silently ignore attribute deletion."""
        pass
    def __str__ (self):
        """Return 'dummy'"""
        return "dummy"
    def __repr__ (self):
        """Return '<dummy>'"""
        return "<dummy>"
    def __unicode__ (self):
        """Return u'dummy' (Python 2 unicode protocol)."""
        return u"dummy"
    def __len__ (self):
        """Return zero"""
        return 0
    def __getitem__ (self, key):
        """Return self for any item lookup."""
        return self
    def __setitem__ (self, key, value):
        """Silently ignore item assignment."""
        pass
    def __delitem__ (self, key):
        """Silently ignore item deletion."""
        pass
    def __contains__ (self, key):
        """Return False for any membership test."""
        return False
def dummy (*args, **kwargs):
    """Ignore any positional or keyword arguments, return None."""
    return None
| 2,039 | Python | .py | 63 | 26.793651 | 73 | 0.609184 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,262 | sitemap.py | wummel_linkchecker/linkcheck/parser/sitemap.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main functions for link parsing
"""
from xml.parsers.expat import ParserCreate
from xml.parsers.expat import ExpatError
from ..checker.const import (WARN_XML_PARSE_ERROR)
class XmlTagUrlParser(object):
    """Parse XML files and find URLs in the text content of a given tag."""

    def __init__(self, tag):
        """Initialize the expat parser and the tag name to search for."""
        self.tag = tag
        self.in_tag = False
        self.url = u""
        self.parser = ParserCreate()
        self.parser.buffer_text = True
        try:
            # Python 2 only; Python 3 expat always returns unicode strings
            # and rejects this attribute
            self.parser.returns_unicode = True
        except AttributeError:
            pass
        self.parser.StartElementHandler = self.start_element
        self.parser.EndElementHandler = self.end_element
        self.parser.CharacterDataHandler = self.char_data

    def parse(self, url_data):
        """Parse XML URL data, adding found URLs to the given url_data."""
        self.url_data = url_data
        self.in_tag = False
        self.url = u""
        data = url_data.get_content()
        isfinal = True
        try:
            self.parser.Parse(data, isfinal)
        except ExpatError as expaterr:
            # use str() instead of the Python-2-only .message attribute
            self.url_data.add_warning(str(expaterr), tag=WARN_XML_PARSE_ERROR)

    def start_element(self, name, attrs):
        """Set tag status for start element."""
        self.in_tag = (name == self.tag)
        self.url = u""

    def end_element(self, name):
        """If end tag is our tag, call add_url()."""
        self.in_tag = False
        if name == self.tag:
            self.add_url()

    def add_url(self):
        """Add non-empty URLs to the queue."""
        if self.url:
            self.url_data.add_url(self.url, line=self.parser.CurrentLineNumber,
                column=self.parser.CurrentColumnNumber)
        self.url = u""

    def char_data(self, data):
        """If inside the wanted tag, append data to URL.
        BUG FIX: the original tested self.loc, which was never set to True
        anywhere, so the text content of the tag was never collected and
        no URLs were ever found.
        """
        if self.in_tag:
            self.url += data
def parse_sitemap(url_data):
    """Parse XML sitemap data, adding each <loc> URL to the queue."""
    XmlTagUrlParser(u"loc").parse(url_data)
def parse_sitemapindex(url_data):
    """Parse XML sitemap index data, adding each <loc> sitemap URL to the
    queue."""
    XmlTagUrlParser(u"loc").parse(url_data)
| 2,781 | Python | .py | 69 | 34.217391 | 80 | 0.667531 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,263 | __init__.py | wummel_linkchecker/linkcheck/parser/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main functions for link parsing
"""
from .. import log, LOG_CHECK, strformat, url as urlutil
from ..htmlutil import linkparse
from ..HtmlParser import htmlsax
from ..bookmarks import firefox
def parse_url(url_data):
    """Parse a URL, dispatching on the detected content category."""
    if url_data.is_directory():
        # both ftp and file links represent directories as HTML data
        key = "html"
    elif url_data.is_file() and firefox.has_sqlite and firefox.extension.search(url_data.url):
        key = "firefox"
    elif url_data.scheme == "itms-services":
        key = "itms_services"
    else:
        # determine parse routine according to content types
        mime = url_data.content_type
        key = url_data.ContentMimetypes[mime]
    # dispatch to a module-level parse_<key> function if one exists,
    # otherwise let parser plugins handle the page type
    parse_func = globals().get("parse_" + key)
    if parse_func is not None:
        parse_func(url_data)
    else:
        url_data.aggregate.plugin_manager.run_parser_plugins(url_data, pagetype=key)
def parse_html (url_data):
    """Parse into HTML content and search for URLs to check.
    Found URLs are added to the URL queue.
    """
    # delegate to the generic SAX link finder with the HTML tag table
    find_links(url_data, url_data.add_url, linkparse.LinkTags)
def parse_opera (url_data):
    """Parse an opera bookmark file."""
    # local import: only needed when an Opera bookmark file is checked
    from ..bookmarks.opera import parse_bookmark_data
    for url, name, lineno in parse_bookmark_data(url_data.get_content()):
        url_data.add_url(url, line=lineno, name=name)
def parse_chromium (url_data):
    """Parse a Chromium or Google Chrome bookmark file."""
    # local import: only needed when a Chromium bookmark file is checked
    from ..bookmarks.chromium import parse_bookmark_data
    for url, name in parse_bookmark_data(url_data.get_content()):
        url_data.add_url(url, name=name)
def parse_safari (url_data):
    """Parse a Safari bookmark file."""
    # local import: only needed when a Safari bookmark file is checked
    from ..bookmarks.safari import parse_bookmark_data
    for url, name in parse_bookmark_data(url_data.get_content()):
        url_data.add_url(url, name=name)
def parse_text (url_data):
    """Parse a text file with one url per line; comment and blank
    lines are ignored."""
    for lineno, raw_line in enumerate(url_data.get_content().splitlines(), 1):
        candidate = raw_line.strip()
        if candidate and not candidate.startswith('#'):
            url_data.add_url(candidate, line=lineno)
def parse_css (url_data):
    """
    Parse a CSS file for url() patterns.
    """
    # remove C-style comments first so their contents are not scanned
    content = linkparse.strip_c_comments(url_data.get_content())
    for lineno, line in enumerate(content.splitlines(), 1):
        for match in linkparse.css_url_re.finditer(line):
            url = strformat.unquote(match.group("url").strip())
            url_data.add_url(url, line=lineno, column=match.start("url"))
def parse_swf (url_data):
    """Parse a SWF file for URLs."""
    content = url_data.get_content()
    for match in linkparse.swf_url_re.finditer(content):
        url_data.add_url(match.group())
def parse_wml (url_data):
    """Parse into WML content and search for URLs to check.
    Found URLs are added to the URL queue.
    """
    # delegate to the generic SAX link finder with the WML tag table
    find_links(url_data, url_data.add_url, linkparse.WmlTags)
def find_links (url_data, callback, tags):
    """Parse into content and search for URLs to check.
    Found URLs are added to the URL queue.

    @param callback: called for each found URL (eg. url_data.add_url)
    @param tags: tag table handed to linkparse.LinkFinder
        (eg. linkparse.LinkTags or linkparse.WmlTags)
    """
    # construct parser object
    handler = linkparse.LinkFinder(callback, tags)
    parser = htmlsax.parser(handler)
    if url_data.charset:
        parser.encoding = url_data.charset
    handler.parser = parser
    # parse
    try:
        parser.feed(url_data.get_content())
        parser.flush()
    except linkparse.StopParse as msg:
        # the handler aborted parsing early; content seen so far has
        # already been processed
        log.debug(LOG_CHECK, "Stopped parsing: %s", msg)
        pass
    # break cyclic dependencies
    handler.parser = None
    parser.handler = None
def parse_firefox (url_data):
    """Parse a Firefox3 bookmark file."""
    # the firefox parser works on a local file, not on downloaded content
    filename = url_data.get_os_filename()
    for url, name in firefox.parse_bookmark_file(filename):
        url_data.add_url(url, name=name)
def parse_itms_services(url_data):
    """Get "url" CGI parameter value as child URL."""
    # urlparts[3]: presumably the query component (urlsplit order)
    # -- TODO confirm against the urlparts producer
    query = url_data.urlparts[3]
    # NOTE: the project's urlutil.parse_qsl yields (key, value, separator)
    # triples, unlike the two-element tuples of the stdlib parse_qsl
    for k, v, sep in urlutil.parse_qsl(query, keep_blank_values=True):
        if k == "url":
            url_data.add_url(v)
            break
from .sitemap import parse_sitemap, parse_sitemapindex
| 5,065 | Python | .py | 128 | 34.273438 | 94 | 0.683771 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,264 | formsearch.py | wummel_linkchecker/linkcheck/htmlutil/formsearch.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
HTML form utils
"""
from ..HtmlParser import htmlsax
from .. import log, LOG_CHECK
class Form(object):
    """Store HTML form URL and form data."""

    def __init__(self, url):
        """Remember the form action URL and start with empty form data."""
        self.url = url
        self.data = {}

    def add_value(self, key, value):
        """Store a single form field value."""
        self.data[key] = value

    def __unicode__(self):
        """Return unicode string displaying URL and form data."""
        return u"<url=%s data=%s>" % (self.url, self.data)

    def __str__(self):
        """Return encoded string displaying URL and form data."""
        return unicode(self).encode('utf-8')

    def __repr__(self):
        """Return unicode representation displaying URL and form data."""
        return unicode(self)
class FormFinder(object):
    """HtmlParser handler that collects HTML forms and their input
    values. After parsing, finished forms are found in self.forms."""

    def __init__(self):
        """Initialize with an empty form list and no current form."""
        super(FormFinder, self).__init__()
        # parser object will be initialized when it is used as
        # a handler object
        self.parser = None
        # list of finished Form objects
        self.forms = []
        # the form currently being parsed; None outside <form> elements
        self.form = None

    def start_element(self, tag, attrs):
        """Open a new form on <form action=...> and collect values of
        <input name=... value=...> elements into the current form."""
        if tag == u'form':
            if u'action' in attrs:
                url = attrs['action']
                self.form = Form(url)
        elif tag == u'input':
            if self.form:
                if 'name' in attrs:
                    key = attrs['name']
                    value = attrs.get('value')
                    self.form.add_value(key, value)
                else:
                    # an input without a name cannot submit a value
                    log.warning(LOG_CHECK, "nameless form input %s", attrs)
            else:
                # input element outside of any <form action=...>
                log.warning(LOG_CHECK, "formless input %s", attrs)

    def start_end_element(self, tag, attrs):
        """Delegate a combined start/end element (eg. <input .../>) to
        the start_element method. Ignore the end element part."""
        self.start_element(tag, attrs)

    def end_element(self, tag):
        """Finish the current form on </form>."""
        if tag == u'form':
            # only store forms that actually had an action attribute;
            # self.form stays None for actionless forms
            if self.form is not None:
                self.forms.append(self.form)
            self.form = None
def search_form(content, cgiuser, cgipassword, encoding='utf-8'):
    """Search for a HTML form in the given HTML content that has the given
    CGI fields. If no form is found return None.
    """
    finder = FormFinder()
    parser = htmlsax.parser(finder)
    finder.parser = parser
    parser.encoding = encoding
    # run the parser over the complete content
    parser.feed(content)
    parser.flush()
    # break cyclic dependencies
    finder.parser = None
    parser.handler = None
    log.debug(LOG_CHECK, "Found forms %s", finder.forms)
    wanted = (cgiuser.lower(), cgipassword.lower())
    for form in finder.forms:
        # a form matches as soon as one field name is a wanted CGI name
        if any(key.lower() in wanted for key in form.data):
            return form
    # no form contained the wanted fields
    return None
| 3,811 | Python | .py | 99 | 31.090909 | 76 | 0.620606 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,265 | linkparse.py | wummel_linkchecker/linkcheck/htmlutil/linkparse.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2001-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Find link tags in HTML text.
"""
import re
from .. import strformat, log, LOG_CHECK, url as urlutil
from . import linkname
MAX_NAMELEN = 256
MAX_TITLELEN = 256
unquote = strformat.unquote
# HTML4/5 link tags
# ripped mainly from HTML::Tagset.pm with HTML5 added
LinkTags = {
'a': [u'href'],
'applet': [u'archive', u'src'],
'area': [u'href'],
'audio': [u'src'], # HTML5
'bgsound': [u'src'],
'blockquote': [u'cite'],
'body': [u'background'],
'button': [u'formaction'], # HTML5
'del': [u'cite'],
'embed': [u'pluginspage', u'src'],
'form': [u'action'],
'frame': [u'src', u'longdesc'],
'head': [u'profile'],
'html': [u'manifest'], # HTML5
'iframe': [u'src', u'longdesc'],
'ilayer': [u'background'],
'img': [u'src', u'lowsrc', u'longdesc', u'usemap', u'srcset'],
'input': [u'src', u'usemap', u'formaction'],
'ins': [u'cite'],
'isindex': [u'action'],
'layer': [u'background', u'src'],
'link': [u'href'],
'meta': [u'content', u'href'],
'object': [u'classid', u'data', u'archive', u'usemap', u'codebase'],
'q': [u'cite'],
'script': [u'src'],
'source': [u'src'], # HTML5
'table': [u'background'],
'td': [u'background'],
'th': [u'background'],
'tr': [u'background'],
'track': [u'src'], # HTML5
'video': [u'src'], # HTML5
'xmp': [u'href'],
None: [u'style', u'itemtype'],
}
# HTML anchor tags
AnchorTags = {
'a': [u'name'],
None: [u'id'],
}
# WML tags
WmlTags = {
'a': [u'href'],
'go': [u'href'],
'img': [u'src'],
}
# matcher for <meta http-equiv=refresh> tags
refresh_re = re.compile(ur"(?i)^\d+;\s*url=(?P<url>.+)$")
_quoted_pat = ur"('[^']+'|\"[^\"]+\"|[^\)\s]+)"
css_url_re = re.compile(ur"url\(\s*(?P<url>%s)\s*\)" % _quoted_pat)
swf_url_re = re.compile("(?i)%s" % urlutil.safe_url_pattern)
c_comment_re = re.compile(ur"/\*.*?\*/", re.DOTALL)
def strip_c_comments (text):
    """Remove C/CSS-style comments from text. Note that this method also
    deliberately removes comments inside of strings.
    @param text: C or CSS source text
    @type text: string
    @return: text with all /* ... */ spans removed
    @rtype: string
    """
    return c_comment_re.sub('', text)
class StopParse(Exception):
    """Signal that HTML parsing should stop early, eg. after the
    information of interest has been found."""
class TitleFinder (object):
    """HtmlParser handler extracting the content of the first <title>
    tag; parsing stops once the title (or the body) is reached."""

    def __init__ (self):
        """Start with no title found."""
        super(TitleFinder, self).__init__()
        log.debug(LOG_CHECK, "HTML title parser")
        self.title = None

    def start_element (self, tag, attrs):
        """Store the title text when a <title> tag appears; stop parsing
        at the latest when the document body starts."""
        if tag == 'title':
            # look ahead at the raw bytes following the start tag
            raw = self.parser.peek(MAX_TITLELEN)
            decoded = raw.decode(self.parser.encoding, "ignore")
            self.title = linkname.title_name(decoded)
            raise StopParse("found <title> tag")
        if tag == 'body':
            raise StopParse("found <body> tag")
class TagFinder (object):
    """Base class handling HTML start elements.
    TagFinder instances are used as HtmlParser handlers."""

    def __init__ (self):
        """Initialize local variables."""
        super(TagFinder, self).__init__()
        # the HtmlParser sets itself here when this object is used
        # as its handler
        self.parser = None

    def start_element (self, tag, attrs):
        """Placeholder for subclasses; ignores the element."""
        pass

    def start_end_element (self, tag, attrs):
        """Treat a combined start/end element (eg. <br/>) like a plain
        start element; the end part is ignored."""
        self.start_element(tag, attrs)
class MetaRobotsFinder (TagFinder):
    """Scan HTML for a <meta name="robots"> tag and remember its
    "nofollow" and "noindex" flags."""

    def __init__ (self):
        """Assume following and indexing are allowed by default."""
        super(MetaRobotsFinder, self).__init__()
        log.debug(LOG_CHECK, "meta robots finder")
        self.follow = self.index = True

    def start_element (self, tag, attrs):
        """Evaluate the robots meta tag; stop parsing once it is found
        or at the latest when the document body starts."""
        if tag == 'meta' and attrs.get('name') == 'robots':
            values = attrs.get_true('content', u'').lower().split(u',')
            self.follow = u'nofollow' not in values
            self.index = u'noindex' not in values
            raise StopParse("found <meta name=robots> tag")
        if tag == 'body':
            raise StopParse("found <body> tag")
def is_meta_url (attr, attrs):
    """Check if the meta attributes contain a URL."""
    if attr == "content":
        # URL-bearing when it is a refresh directive or a DCTERMS.URI value
        equiv = attrs.get_true('http-equiv', u'').lower()
        if equiv == u'refresh':
            return True
        scheme = attrs.get_true('scheme', u'').lower()
        return scheme == u'dcterms.uri'
    if attr == "href":
        # URL-bearing when it points to a favicon
        rel = attrs.get_true('rel', u'').lower()
        return rel in (u'shortcut icon', u'icon')
    return False
def is_form_get(attr, attrs):
    """Check if this is a GET form action URL."""
    if attr != "action":
        return False
    # any method other than an explicit POST is treated as GET
    method = attrs.get_true('method', u'').lower()
    return method != 'post'
class LinkFinder (TagFinder):
    """Find HTML links, and apply them to the callback function with the
    format (url, lineno, column, name, codebase)."""

    def __init__ (self, callback, tags):
        """Store the callback and build the per-tag attribute sets.
        @param callback: called as callback(url, line=, column=, name=, base=)
        @param tags: mapping of tag name -> list of URL-bearing attributes;
            the None key holds attributes valid for all tags
        """
        super(LinkFinder, self).__init__()
        self.callback = callback
        # set universal tag attributes using tagname None
        self.universal_attrs = set(tags.get(None, []))
        self.tags = dict()
        for tag, attrs in tags.items():
            self.tags[tag] = set(attrs)
            # add universal tag attributes
            self.tags[tag].update(self.universal_attrs)
        # URL of a <base href=...> tag, used to resolve relative links
        self.base_ref = u''

    def start_element (self, tag, attrs):
        """Search for links and store found URLs in a list."""
        log.debug(LOG_CHECK, "LinkFinder tag %s attrs %s", tag, attrs)
        log.debug(LOG_CHECK, "line %d col %d old line %d old col %d", self.parser.lineno(), self.parser.column(), self.parser.last_lineno(), self.parser.last_column())
        # remember only the first <base href=...> of the document
        if tag == "base" and not self.base_ref:
            self.base_ref = attrs.get_true("href", u'')
        tagattrs = self.tags.get(tag, self.universal_attrs)
        # parse URLs in tag (possibly multiple URLs in CSS styles)
        for attr in tagattrs.intersection(attrs):
            # meta/form tags carry URLs only under specific conditions
            if tag == "meta" and not is_meta_url(attr, attrs):
                continue
            if tag == "form" and not is_form_get(attr, attrs):
                continue
            # name of this link
            name = self.get_link_name(tag, attrs, attr)
            # possible codebase
            base = u''
            if tag == 'applet':
                base = attrs.get_true('codebase', u'')
            if not base:
                base = self.base_ref
            # note: value can be None
            value = attrs.get(attr)
            # dns-prefetch links name a host, not a document; turn them
            # into a dns: URL with any scheme prefix stripped
            if tag == 'link' and attrs.get('rel') == 'dns-prefetch':
                if ':' in value:
                    value = value.split(':', 1)[1]
                value = 'dns:' + value.rstrip('/')
            # parse tag for URLs
            self.parse_tag(tag, attr, value, name, base)
        log.debug(LOG_CHECK, "LinkFinder finished tag %s", tag)

    def get_link_name (self, tag, attrs, attr):
        """Parse attrs for link name. Return name of link.
        For <a href=...> the name is the anchor text (looked ahead in the
        unparsed data), for <img> the alt text; title is the fallback."""
        if tag == 'a' and attr == 'href':
            # Look for name only up to MAX_NAMELEN characters
            data = self.parser.peek(MAX_NAMELEN)
            data = data.decode(self.parser.encoding, "ignore")
            name = linkname.href_name(data)
            if not name:
                name = attrs.get_true('title', u'')
        elif tag == 'img':
            name = attrs.get_true('alt', u'')
            if not name:
                name = attrs.get_true('title', u'')
        else:
            name = u""
        return name

    def parse_tag (self, tag, attr, value, name, base):
        """Add given url data to url list.
        Handles the multi-URL attributes (style, archive, srcset) and
        the <meta http-equiv=refresh> URL format."""
        assert isinstance(tag, unicode), repr(tag)
        assert isinstance(attr, unicode), repr(attr)
        assert isinstance(name, unicode), repr(name)
        assert isinstance(base, unicode), repr(base)
        assert isinstance(value, unicode) or value is None, repr(value)
        # look for meta refresh
        if tag == u'meta' and value:
            mo = refresh_re.match(value)
            if mo:
                self.found_url(mo.group("url"), name, base)
            elif attr != 'content':
                self.found_url(value, name, base)
        elif attr == u'style' and value:
            # a style value can contain several url(...) references
            for mo in css_url_re.finditer(value):
                url = unquote(mo.group("url"), matching=True)
                self.found_url(url, name, base)
        elif attr == u'archive':
            # archive lists multiple URLs separated by commas
            for url in value.split(u','):
                self.found_url(url, name, base)
        elif attr == u'srcset':
            # each srcset candidate is "URL [descriptor]"
            for img_candidate in value.split(u','):
                url = img_candidate.split()[0]
                self.found_url(url, name, base)
        else:
            self.found_url(value, name, base)

    def found_url(self, url, name, base):
        """Pass one found URL with its position info to the callback."""
        assert isinstance(url, unicode) or url is None, repr(url)
        self.callback(url, line=self.parser.last_lineno(),
                      column=self.parser.last_column(), name=name, base=base)
| 10,270 | Python | .py | 247 | 33.91498 | 167 | 0.577651 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,266 | __init__.py | wummel_linkchecker/linkcheck/htmlutil/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2008-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
HTML utils
"""
| 799 | Python | .py | 19 | 41.052632 | 73 | 0.770513 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,267 | linkname.py | wummel_linkchecker/linkcheck/htmlutil/linkname.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2001-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Parse names of title tags and link types.
"""
import re
from .. import HtmlParser, strformat
imgtag_re = re.compile(r"(?i)\s+alt\s*=\s*"+\
r"""(?P<name>("[^"\n]*"|'[^'\n]*'|[^\s>]+))""")
img_re = re.compile(r"""(?i)<\s*img\s+("[^"\n]*"|'[^'\n]*'|[^>])+>""")
def endtag_re (tag):
    """Return a compiled case-insensitive matcher for the end tag of
    the given tag name, allowing trailing whitespace before '>'."""
    pattern = r"(?i)</%s\s*>" % tag
    return re.compile(pattern)
a_end_search = endtag_re("a").search
title_end_search = endtag_re("title").search
def _unquote (txt):
    """Resolve entities and remove markup from txt.
    @param txt: HTML text fragment
    @type txt: string
    @return: plain text with markup stripped and entities resolved
    @rtype: string
    """
    return HtmlParser.resolve_entities(strformat.remove_markup(txt))
def image_name (txt):
    """Return the alt part of the first <img alt=""> tag in txt."""
    match = imgtag_re.search(txt)
    if match is None:
        return u''
    # strip surrounding quotes, then entities and markup
    alt = strformat.unquote(match.group('name').strip())
    return _unquote(alt)
def href_name (txt):
    """Return the name part of the first <a href="">name</a> link in txt."""
    closing = a_end_search(txt)
    if not closing:
        return u""
    # everything up to the closing </a> is the link content
    content = txt[:closing.start()]
    if img_re.search(content):
        # an image link: use the image's alt text as name
        return image_name(content)
    return _unquote(content)
def title_name (txt):
    """Return the part of the first <title>name</title> in txt."""
    closing = title_end_search(txt)
    if not closing:
        return u""
    # unquote everything up to the closing </title>
    return _unquote(txt[:closing.start()])
| 2,188 | Python | .py | 57 | 34.561404 | 76 | 0.661313 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,268 | iputil.py | wummel_linkchecker/linkcheck/network/iputil.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2003-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Ip number related utility functions.
"""
import re
import socket
import struct
from .. import log, LOG_CHECK
# IP address regular expressions.
# Note that each IPv4 octet can be encoded in decimal, hexadecimal and octal.
_ipv4_num = r"\d{1,3}"
_ipv4_num_4 = r"%s\.%s\.%s\.%s" % ((_ipv4_num,) * 4)
_ipv4_re = re.compile(r"^%s$" % _ipv4_num_4)
# IPv4 octets encoded in hexadecimal, eg. 0x42.0x66.0x0d.0x63
_ipv4_hex = r"0*[\da-f]{1,2}"
_ipv4_hex_4 = r"%s\.%s\.%s\.%s" % ((_ipv4_hex,) * 4)
# a single IPv4 octet encoded in octal, eg. 0101 (currently unused)
_ipv4_oct = r"0*[0-7]{1,3}"
# IPv6; See also rfc2373
_ipv6_num = r"[\da-f]{1,4}"
_ipv6_re = re.compile(r"^%s:%s:%s:%s:%s:%s:%s:%s$" % ((_ipv6_num,) * 8))
_ipv6_ipv4_re = re.compile(r"^%s:%s:%s:%s:%s:%s:" % ((_ipv6_num,) * 6) + \
                           r"%s$" % _ipv4_num_4)
_ipv6_abbr_re = re.compile(r"^((%s:){0,6}%s)?::((%s:){0,6}%s)?$" % \
                            ((_ipv6_num,) * 4))
_ipv6_ipv4_abbr_re = re.compile(r"^((%s:){0,4}%s)?::((%s:){0,5})?" % \
                                ((_ipv6_num,) * 3) + \
                                "%s$" % _ipv4_num_4)
# netmask regexes matching "host/netmask" and "host/cidr" strings
_host_netmask_re = re.compile(r"^%s/%s$" % (_ipv4_num_4, _ipv4_num_4))
_host_cidrmask_re = re.compile(r"^%s/\d{1,2}$" % _ipv4_num_4)
def expand_ipv6 (ip, num):
    """
    Expand an IPv6 address with included :: to num octets.
    @raise: ValueError on invalid IP addresses
    """
    pos = ip.find("::")
    head = ip[:pos]
    tail = ip[pos + 2:]
    # count the address groups already present on both sides of "::"
    groups = head.count(":") + tail.count(":")
    if head:
        groups += 1
        head = head + ":"
    if tail:
        groups += 1
        tail = ":" + tail
    if groups >= num:
        raise ValueError("invalid ipv6 number: %s" % ip)
    # fill the gap left by "::" with the missing number of zero groups
    zeros = (num - groups - 1) * "0:" + "0"
    return head + zeros + tail
def expand_ip (ip):
    """
    ipv6 addresses are expanded to full 8 octets, all other
    addresses are left untouched
    return a tuple (ip, num) where num==1 if ip is a numeric ip, 0
    otherwise.
    """
    if _ipv4_re.match(ip) or _ipv6_re.match(ip) or _ipv6_ipv4_re.match(ip):
        # already in full numeric form
        return (ip, 1)
    if _ipv6_abbr_re.match(ip):
        # abbreviated IPv6: expand to the full 8 groups
        return (expand_ipv6(ip, 8), 1)
    if _ipv6_ipv4_abbr_re.match(ip):
        # abbreviated IPv6 with trailing IPv4 part: expand the IPv6
        # part to 6 groups and keep the dotted quad unchanged
        pos = ip.rfind(":") + 1
        return (expand_ipv6(ip[:pos], 6) + ip[pos:], 1)
    return (ip, 0)
def is_valid_ip (ip):
    """
    Return True if given ip is a valid IPv4 or IPv6 address.
    """
    if is_valid_ipv4(ip):
        return True
    return is_valid_ipv6(ip)
def is_valid_ipv4 (ip):
    """
    Return True if given ip is a valid IPv4 address.
    """
    if not _ipv4_re.match(ip):
        return False
    # the regex only limits octets to 1-3 digits; check the value range
    return all(int(octet) <= 255 for octet in ip.split("."))
def is_valid_ipv6 (ip):
    """
    Return True if given ip is a valid IPv6 address.
    """
    # XXX this is not complete: check ipv6 and ipv4 semantics too here
    matchers = (_ipv6_re, _ipv6_ipv4_re, _ipv6_abbr_re, _ipv6_ipv4_abbr_re)
    return any(pattern.match(ip) for pattern in matchers)
def is_valid_cidrmask (mask):
    """
    Check if given mask is a valid network bitmask in CIDR notation.
    """
    return 0 <= mask and mask <= 32
def dq2num (ip):
    """
    Convert decimal dotted quad string to long integer.
    """
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
def num2dq (n):
    """
    Convert long int to dotted quad string.
    """
    packed = struct.pack('!L', n)
    return socket.inet_ntoa(packed)
def cidr2mask (n):
    """
    Return a mask where the n left-most of 32 bits are set.
    """
    ones = (1 << n) - 1
    return ones << (32 - n)
def netmask2mask (ip):
    """
    Return a mask of bits as a long integer.
    """
    # dotted-quad to big-endian long conversion, same as dq2num
    return struct.unpack('!L', socket.inet_aton(ip))[0]
def mask2netmask (mask):
    """
    Return dotted quad string as netmask.
    """
    # big-endian long to dotted-quad conversion, same as num2dq
    return socket.inet_ntoa(struct.pack('!L', mask))
def dq2net (ip, mask):
    """
    Return the network address of the dotted quad IP under the given
    numeric mask, as a long integer.
    NOTE(review): the historic docstring claimed a tuple
    (network ip, network mask) was returned; only the masked address
    (a single integer) is -- the mask itself is not part of the result.
    """
    return dq2num(ip) & mask
def dq_in_net (n, mask):
    """
    Return True iff numerical ip n is in given network.
    NOTE(review): this tests that all bits of "mask" are set in n,
    i.e. it compares (n & mask) against the mask itself rather than
    against a separate network address. It works for the masked
    network values stored by hosts2map(), but also matches unrelated
    addresses that happen to have all network bits set -- confirm the
    intended semantics.
    """
    return (n & mask) == mask
def host_in_set (ip, hosts, nets):
    """
    Return True if given ip is in host or network list.
    @param ip: host name or IP address string
    @param hosts: set of host names/expanded IPs
    @param nets: list of network values as stored by hosts2map()
    """
    if ip in hosts:
        return True
    # network matching is only implemented for IPv4 addresses
    if is_valid_ipv4(ip):
        n = dq2num(ip)
        for net in nets:
            # see dq_in_net for the exact matching semantics
            if dq_in_net(n, net):
                return True
    return False
def strhosts2map (strhosts):
    """
    Convert a comma-separated string of hosts and networks to
    a tuple (hosts, networks).
    """
    entries = [entry.strip() for entry in strhosts.split(",") if entry]
    return hosts2map(entries)
def hosts2map (hosts):
    """
    Return a set of named hosts, and a list of subnets (host/netmask
    adresses).
    Only IPv4 host/netmasks are supported.
    @param hosts: list of host, "host/cidr" or "host/netmask" strings
    @return: tuple (set of host names/expanded IPs,
        list of masked network addresses as long integers)
    """
    hostset = set()
    nets = []
    for host in hosts:
        if _host_cidrmask_re.match(host):
            # "host/cidr" entry, eg. "192.168.0.0/24"
            host, mask = host.split("/")
            mask = int(mask)
            if not is_valid_cidrmask(mask):
                log.error(LOG_CHECK,
                   "CIDR mask %d is not a valid network mask", mask)
                continue
            if not is_valid_ipv4(host):
                log.error(LOG_CHECK, "host %r is not a valid ip address", host)
                continue
            nets.append(dq2net(host, cidr2mask(mask)))
        elif _host_netmask_re.match(host):
            # "host/netmask" entry, eg. "192.168.0.0/255.255.255.0"
            host, mask = host.split("/")
            if not is_valid_ipv4(host):
                log.error(LOG_CHECK, "host %r is not a valid ip address", host)
                continue
            if not is_valid_ipv4(mask):
                log.error(LOG_CHECK,
                      "mask %r is not a valid ip network mask", mask)
                continue
            nets.append(dq2net(host, netmask2mask(mask)))
        elif is_valid_ip(host):
            # plain IP: store in expanded form
            hostset.add(expand_ip(host)[0])
        else:
            # host name: store all IPs it resolves to
            hostset |= set(resolve_host(host))
    return (hostset, nets)
def map2hosts (hostmap):
    """
    Convert a tuple (hosts, networks) into a host/network list
    suitable for storing in a config file.
    NOTE(review): each network entry is unpacked here as a (net, mask)
    pair and the mask is formatted with %d, but hosts2map() appends
    plain long integers and mask2netmask() returns a dotted-quad
    string -- this branch looks inconsistent with the stored format;
    confirm before relying on the network part.
    """
    ret = hostmap[0].copy()
    for net, mask in hostmap[1]:
        ret.add("%s/%d" % (num2dq(net), mask2netmask(mask)))
    return ret
def lookup_ips (ips):
    """
    Return set of host names that resolve to given ips.
    """
    resolved = set()
    for ip in ips:
        try:
            hostname = socket.gethostbyaddr(ip)[0]
        except socket.error:
            # keep the IP itself when reverse lookup fails
            hostname = ip
        resolved.add(hostname)
    return resolved
def resolve_host (host):
    """
    @host: hostname or IP address
    Return list of ip numbers for given host.
    """
    ips = []
    try:
        addrinfo = socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
    except socket.error:
        log.info(LOG_CHECK, "Ignored invalid host %r", host)
        return ips
    for family, socktype, proto, canonname, sockaddr in addrinfo:
        # the first element of each socket address is the IP
        ips.append(sockaddr[0])
    return ips
def obfuscate_ip(ip):
    """Obfuscate given host in IP form.
    @ip: IPv4 address string
    @return: hexadecimal IP string ('0x1ab...')
    @raise: ValueError on invalid IP addresses
    """
    if is_valid_ipv4(ip):
        # each octet must be zero-padded to two hex digits, else the
        # resulting number denotes a different IP address (eg. 127.0.0.1
        # would become 0x7f001 instead of the correct 0x7f000001)
        res = "0x%s" % "".join("%02x" % int(x) for x in ip.split("."))
    else:
        raise ValueError('Invalid IP value %r' % ip)
    assert is_obfuscated_ip(res), '%r obfuscation error' % res
    return res
is_obfuscated_ip = re.compile(r"^(0x[a-f0-9]+|[0-9]+)$").match
| 8,355 | Python | .py | 247 | 27.647773 | 79 | 0.586476 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,269 | __init__.py | wummel_linkchecker/linkcheck/network/__init__.py | # -*- coding: iso-8859-1 -*-
"""from http://twistedmatrix.com/wiki/python/IfConfig
"""
import socket
import sys
import errno
import array
import struct
import subprocess
from ._network import ifreq_size
from .. import log, LOG_CHECK
def pipecmd (cmd1, cmd2):
    """Return output of "cmd1 | cmd2"."""
    producer = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    consumer = subprocess.Popen(cmd2, stdin=producer.stdout,
                                stdout=subprocess.PIPE)
    # allow the producer to receive a SIGPIPE if the consumer exits early
    producer.stdout.close()
    return consumer.communicate()[0]
def ifconfig_inet (iface):
    """Return parsed IPv4 info from ifconfig(8) binary.
    @param iface: interface name (eg. "eth0")
    @return: dict with "address", "broadcast" and "netmask" keys
        (only those found in the output)
    """
    res = pipecmd(["ifconfig", iface], ["grep", "inet "])
    info = {}
    # previous whitespace-separated token; BSD-style output has
    # "inet <addr> netmask <mask> broadcast <bcast>" keyword/value pairs
    lastpart = None
    for part in res.split():
        # Linux systems have prefixes for each value
        if part.startswith("addr:"):
            info["address"] = part[5:]
        elif part.startswith("Bcast:"):
            info["broadcast"] = part[6:]
        elif part.startswith("Mask:"):
            info["netmask"] = part[5:]
        elif lastpart == "inet":
            info["address"] = part
        elif lastpart in ("netmask", "broadcast"):
            info[lastpart] = part
        lastpart = part
    return info
class IfConfig (object):
    """Access to socket interfaces via SIOCGIF* ioctl() calls
    (on Mac OS X the ifconfig(8) binary is used instead)."""

    SIOCGIFNAME = 0x8910
    SIOCGIFCONF = 0x8912
    SIOCGIFFLAGS = 0x8913
    SIOCGIFADDR = 0x8915
    SIOCGIFBRDADDR = 0x8919
    SIOCGIFNETMASK = 0x891b
    SIOCGIFCOUNT = 0x8938

    IFF_UP = 0x1                # Interface is up.
    IFF_BROADCAST = 0x2         # Broadcast address valid.
    IFF_DEBUG = 0x4             # Turn on debugging.
    IFF_LOOPBACK = 0x8          # Is a loopback net.
    IFF_POINTOPOINT = 0x10      # Interface is point-to-point link.
    IFF_NOTRAILERS = 0x20       # Avoid use of trailers.
    IFF_RUNNING = 0x40          # Resources allocated.
    IFF_NOARP = 0x80            # No address resolution protocol.
    IFF_PROMISC = 0x100         # Receive all packets.
    IFF_ALLMULTI = 0x200        # Receive all multicast packets.
    IFF_MASTER = 0x400          # Master of a load balancer.
    IFF_SLAVE = 0x800           # Slave of a load balancer.
    IFF_MULTICAST = 0x1000      # Supports multicast.
    IFF_PORTSEL = 0x2000        # Can set media type.
    IFF_AUTOMEDIA = 0x4000      # Auto media select active.

    def __init__ (self):
        """Initialize a socket and determine ifreq structure size."""
        # create a socket so we have a handle to query
        self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Note that sizeof(struct ifreq) is not always 32
        # (eg. on *BSD, x86_64 Linux) Thus the function call.
        self.ifr_size = ifreq_size()

    def _ioctl (self, func, args):
        """Call ioctl() with given parameters.
        @param func: ioctl request number
        @param args: packed request buffer
        """
        import fcntl
        return fcntl.ioctl(self.sockfd.fileno(), func, args)

    def _getifreq (self, ifname):
        """Return a packed ifreq buffer holding the interface name."""
        return struct.pack("%ds" % self.ifr_size, ifname)

    def _getaddr (self, ifname, func):
        """Get an interface address via the given ioctl request;
        return None (and log a warning) on failure."""
        try:
            result = self._ioctl(func, self._getifreq(ifname))
        except IOError as msg:
            log.warn(LOG_CHECK,
                     "error getting addr for interface %r: %s", ifname, msg)
            return None
        # the address lives at a fixed offset inside struct ifreq
        return socket.inet_ntoa(result[20:24])

    def getInterfaceList (self, flags=0):
        """Get all interface names in a list.
        @param flags: if nonzero, only return interfaces whose flags
            contain the given bits
        """
        if sys.platform == 'darwin':
            command = ['ifconfig', '-l']
            if flags & self.IFF_UP:
                command.append('-u')
            # replace with subprocess.check_output() for Python 2.7
            res = subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]
            return res.split()
        # initial 8kB buffer to hold interface data
        bufsize = 8192
        # 80kB buffer should be enough for most boxen
        max_bufsize = bufsize * 10
        while True:
            buf = array.array('c', '\0' * bufsize)
            ifreq = struct.pack("iP", buf.buffer_info()[1], buf.buffer_info()[0])
            try:
                result = self._ioctl(self.SIOCGIFCONF, ifreq)
                break
            except IOError as msg:
                # in case of EINVAL the buffer size was too small
                if msg[0] != errno.EINVAL or bufsize == max_bufsize:
                    raise
                # increase buffer
                bufsize += 8192
        # loop over interface names
        data = buf.tostring()
        iflist = []
        size, ptr = struct.unpack("iP", result)
        i = 0
        while i < size:
            ifconf = data[i:i+self.ifr_size]
            # advance the offset up front so every code path below makes
            # progress; a "continue" after filtering previously skipped
            # the increment and caused an endless loop
            i += self.ifr_size
            name = struct.unpack("16s%ds" % (self.ifr_size-16), ifconf)[0]
            name = name.split('\0', 1)[0]
            if not name:
                continue
            if flags and not (self.getFlags(name) & flags):
                # interface does not have the requested flags set
                continue
            iflist.append(name)
        return iflist

    def getFlags (self, ifname):
        """Get the flags for an interface; returns 0 on error."""
        try:
            result = self._ioctl(self.SIOCGIFFLAGS, self._getifreq(ifname))
        except IOError as msg:
            log.warn(LOG_CHECK,
                     "error getting flags for interface %r: %s", ifname, msg)
            return 0
        # extract the interface's flags from the return value
        flags, = struct.unpack('H', result[16:18])
        return flags

    def getAddr (self, ifname):
        """Get the inet addr for an interface.
        @param ifname: interface name
        @type ifname: string
        """
        if sys.platform == 'darwin':
            return ifconfig_inet(ifname).get('address')
        return self._getaddr(ifname, self.SIOCGIFADDR)

    def getMask (self, ifname):
        """Get the netmask for an interface.
        @param ifname: interface name
        @type ifname: string
        """
        if sys.platform == 'darwin':
            return ifconfig_inet(ifname).get('netmask')
        return self._getaddr(ifname, self.SIOCGIFNETMASK)

    def getBroadcast (self, ifname):
        """Get the broadcast addr for an interface.
        @param ifname: interface name
        @type ifname: string
        """
        if sys.platform == 'darwin':
            return ifconfig_inet(ifname).get('broadcast')
        return self._getaddr(ifname, self.SIOCGIFBRDADDR)

    def isUp (self, ifname):
        """Check whether interface is UP.
        @param ifname: interface name
        @type ifname: string
        """
        return (self.getFlags(ifname) & self.IFF_UP) != 0

    def isLoopback (self, ifname):
        """Check whether interface is a loopback device.
        @param ifname: interface name
        @type ifname: string
        """
        # since not all systems have IFF_LOOPBACK as a flag defined,
        # the ifname is tested first
        if ifname.startswith('lo'):
            return True
        return (self.getFlags(ifname) & self.IFF_LOOPBACK) != 0
| 7,073 | Python | .py | 175 | 31.702857 | 84 | 0.590057 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,270 | htmllib.py | wummel_linkchecker/linkcheck/HtmlParser/htmllib.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Default HTML parser handler classes.
"""
import sys
class HtmlPrinter (object):
    """
    Handles all handler functions by printing the function name and
    its attributes.
    """

    def __init__ (self, fd=sys.stdout):
        """
        Write to given file descriptor.
        @param fd: file like object (default=sys.stdout)
        @type fd: file
        """
        self.fd = fd

    def _print (self, *attrs):
        """
        Print the remembered function name and its attributes to the
        stored file descriptor.
        @param attrs: list of values to print
        @type attrs: tuple
        @return: None
        """
        # equivalent to: print >> self.fd, self.mem, attrs
        self.fd.write("%s %s\n" % (self.mem, attrs))

    def __getattr__ (self, name):
        """
        Remember the called method name in self.mem.
        @param name: attribute name
        @type name: string
        @return: method which just prints out its arguments
        @rtype: a bound function object
        """
        self.mem = name
        return self._print
class HtmlPrettyPrinter (object):
    """
    Print out all parsed HTML data in encoded form.
    Also stores error and warnings messages.
    """

    def __init__ (self, fd=sys.stdout, encoding="iso8859-1"):
        """
        Write to given file descriptor in given encoding.
        @param fd: file like object (default=sys.stdout)
        @type fd: file
        @param encoding: encoding (default=iso8859-1)
        @type encoding: string
        """
        self.fd = fd
        self.encoding = encoding

    def comment (self, data):
        """
        Print HTML comment.
        @param data: the comment
        @type data: string
        @return: None
        """
        # unencodable characters are dropped ("ignore")
        data = data.encode(self.encoding, "ignore")
        self.fd.write("<!--%s-->" % data)

    def start_element (self, tag, attrs):
        """
        Print HTML start element.
        @param tag: tag name
        @type tag: string
        @param attrs: tag attributes
        @type attrs: dict
        @return: None
        """
        self._start_element(tag, attrs, ">")

    def start_end_element (self, tag, attrs):
        """
        Print HTML start-end element (eg. <br/>).
        @param tag: tag name
        @type tag: string
        @param attrs: tag attributes
        @type attrs: dict
        @return: None
        """
        self._start_element(tag, attrs, "/>")

    def _start_element (self, tag, attrs, end):
        """
        Print HTML element with end string.
        @param tag: tag name
        @type tag: string
        @param attrs: tag attributes
        @type attrs: dict
        @param end: either > or />
        @type end: string
        @return: None
        """
        tag = tag.encode(self.encoding, "ignore")
        # drop any slash from the tag name; the end string carries it
        self.fd.write("<%s" % tag.replace("/", ""))
        for key, val in attrs.items():
            key = key.encode(self.encoding, "ignore")
            if val is None:
                # value-less attribute, eg. <option selected>
                self.fd.write(" %s" % key)
            else:
                val = val.encode(self.encoding, "ignore")
                self.fd.write(' %s="%s"' % (key, quote_attrval(val)))
        self.fd.write(end)

    def end_element (self, tag):
        """
        Print HTML end element.
        @param tag: tag name
        @type tag: string
        @return: None
        """
        tag = tag.encode(self.encoding, "ignore")
        self.fd.write("</%s>" % tag)

    def doctype (self, data):
        """
        Print HTML document type.
        @param data: the document type
        @type data: string
        @return: None
        """
        data = data.encode(self.encoding, "ignore")
        self.fd.write("<!DOCTYPE%s>" % data)

    def pi (self, data):
        """
        Print HTML pi.
        @param data: the tag data
        @type data: string
        @return: None
        """
        data = data.encode(self.encoding, "ignore")
        self.fd.write("<?%s?>" % data)

    def cdata (self, data):
        """
        Print HTML cdata.
        @param data: the character data
        @type data: string
        @return: None
        """
        data = data.encode(self.encoding, "ignore")
        self.fd.write("<![CDATA[%s]]>" % data)

    def characters (self, data):
        """
        Print characters.
        @param data: the character data
        @type data: string
        @return: None
        """
        data = data.encode(self.encoding, "ignore")
        self.fd.write(data)
def quote_attrval (s):
    """
    Quote a HTML attribute to be able to wrap it in double quotes.
    @param s: the attribute string to quote
    @type s: string
    @return: the quoted HTML attribute
    @rtype: string
    """
    def _quote_char (c):
        """Quote a single character of the attribute value."""
        code = ord(c)
        if code > 127:
            # non-ASCII: emit a numeric character reference
            return u"&#%d;" % code
        if c == u'&':
            return u"&amp;"
        if c == u'"':
            return u"&quot;"
        return c
    return u"".join(_quote_char(c) for c in s)
| 5,723 | Python | .py | 180 | 23.85 | 73 | 0.568706 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,271 | __init__.py | wummel_linkchecker/linkcheck/HtmlParser/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Fast HTML parser module written in C with the following features:
- Reentrant
As soon as any HTML string data is available, we try to feed it
to the HTML parser. This means that the parser has to scan possible
incomplete data, recognizing as much as it can. Incomplete trailing
data is saved for subsequent calls, or it is just flushed into the
output buffer with the flush() function.
A reset() brings the parser back to its initial state, throwing away all
buffered data.
- Coping with HTML syntax errors
The parser recognizes as much as it can and passes the rest
of the data as TEXT tokens.
The scanner only passes complete recognized HTML syntax elements to
the parser. Invalid syntax elements are passed as TEXT. This way we do
not need the bison error recovery.
Incomplete data is rescanned the next time the parser calls yylex() or
when it is being flush()ed.
The following syntax errors will be recognized correctly:
- Unquoted attribute values.
- Missing beginning quote of attribute values.
- Invalid "</...>" end tags in script modus.
- Missing ">" in tags.
- Invalid characters in tag or attribute names.
The following syntax errors will not be recognized:
- Missing end quote of attribute values. On the TODO list.
- Unknown HTML tag or attribute names.
- Invalid nesting of tags.
Additionally the parser has the following features:
- NULL bytes are changed into spaces
- <!-- ... --> inside a <script> or <style> are not treated as
comments but as DATA
- Rewrites all tag and attribute names to lowercase for easier
matching.
- Speed
The FLEX code is configured to generate a large but fast scanner.
The parser ignores forbidden or unnecessary HTML end tags.
The parser converts tag and attribute names to lower case for easier
matching.
The parser quotes all attribute values.
Python memory management interface is used.
- Character encoding aware
The parser itself is not encoding aware, but output strings are
always Python Unicode strings.
- Retain HTML attribute order
The parser keeps the order in which HTML tag attributes are parsed.
The attributes are stored in a custom dictionary class ListDict which
iterates over the dictionary keys in insertion order.
USAGE
First make a HTML SAX handler object. Missing callback functions are
ignored. The object returned from callbacks is also ignored.
Note that a missing attribute value is stored as the value None
in the ListDict (ie. "<a href>" with lead to a {href: None} dict entry).
Used callbacks of a handler are:
- Comments: <!--data-->
def comment (data)
@param data:
@type data: Unicode string
- Start tag: <tag {attr1:value1, attr2:value2, ..}>
def start_element (tag, attrs)
@param tag: tag name
@type tag: Unicode string
@param attrs: tag attributes
@type attrs: ListDict
- Start-end tag: <tag {attr1:value1, attr2:value2, ..}/>
def start_end_element(tag, attrs):
@param tag: tag name
@type tag: Unicode string
@param attrs: tag attributes
@type attrs: ListDict
- End tag: </tag>
def end_element (tag)
@param tag: tag name
@type tag: Unicode string
- Document type: <!DOCTYPE data>
def doctype (data)
@param data: doctype string data
@type data: Unicode string
- Processing instruction (PI): <?name data?>
def pi (name, data=None)
@param name: instruction name
@type name: Unicode string
@param data: instruction data
@type data: Unicode string
- Character data: <![CDATA[data]]>
def cdata (data)
@param data: character data
@type data: Unicode string
- Characters: data
def characters(data): data
@param data: data
@type data: Unicode string
Additionally, there are error and warning callbacks:
- Parser warning.
def warning (msg)
@param msg: warning message
@type msg: Unicode string
- Parser error.
def error (msg)
@param msg: error message
@type msg: Unicode string
- Fatal parser error
def fatal_error (msg)
@param msg: error message
@type msg: Unicode string
EXAMPLE
# This handler prints out the parsed HTML.
handler = HtmlParser.htmllib.HtmlPrettyPrinter()
# Create a new HTML parser object with the handler as parameter.
parser = HtmlParser.htmlsax.parser(handler)
# Feed data.
parser.feed("<html><body>Blubb</body></html>")
# Flush for finishing things up.
parser.flush()
"""
import re
import codecs
try:
from htmlentitydefs import name2codepoint
except ImportError:
from html.entities import name2codepoint
def _resolve_entity (mo):
"""
Resolve a HTML entity.
@param mo: matched _entity_re object with a "entity" match group
@type mo: MatchObject instance
@return: resolved entity char, or empty string on error
@rtype: unicode string
"""
ent = mo.group("entity")
s = mo.group()
if s.startswith('&#'):
if s[2] in 'xX':
radix = 16
else:
radix = 10
try:
num = int(ent, radix)
except (ValueError, OverflowError):
return u''
else:
num = name2codepoint.get(ent)
if num is None or num < 0:
# unknown entity -> ignore
return u''
try:
return unichr(num)
except ValueError:
return u''
_entity_re = re.compile(u'(?i)&(#x?)?(?P<entity>[0-9a-z]+);')
def resolve_entities (s):
"""
Resolve HTML entities in s.
@param s: string with entities
@type s: string
@return: string with resolved entities
@rtype: string
"""
return _entity_re.sub(_resolve_entity, s)
SUPPORTED_CHARSETS = ["utf-8", "iso-8859-1", "iso-8859-15"]
_encoding_ro = re.compile(r"charset=(?P<encoding>[-0-9a-zA-Z]+)")
def set_encoding (parsobj, attrs):
"""
Set document encoding for the HTML parser according to the <meta>
tag attribute information.
@param attrs: attributes of a <meta> HTML tag
@type attrs: dict
@return: None
"""
charset = attrs.get_true('charset', u'')
if charset:
# <meta charset="utf-8">
# eg. in http://cn.dolphin-browser.com/activity/Dolphinjump
charset = charset.encode('ascii', 'ignore').lower()
elif attrs.get_true('http-equiv', u'').lower() == u"content-type":
# <meta http-equiv="content-type" content="text/html;charset="utf-8">
charset = attrs.get_true('content', u'')
charset = charset.encode('ascii', 'ignore').lower()
charset = get_ctype_charset(charset)
if charset and charset in SUPPORTED_CHARSETS:
parsobj.encoding = charset
def get_ctype_charset (text):
"""
Extract charset information from mime content type string, eg.
"text/html; charset=iso8859-1".
"""
for param in text.lower().split(';'):
param = param.strip()
if param.startswith('charset='):
charset = param[8:].strip()
try:
codecs.lookup(charset)
return charset
except (LookupError, ValueError):
pass
return None
def set_doctype (parsobj, doctype):
"""
Set document type of the HTML parser according to the given
document type string.
@param doctype: document type
@type doctype: string
@return: None
"""
if u"XHTML" in doctype:
parsobj.doctype = "XHTML"
| 8,110 | Python | .py | 222 | 32.22973 | 77 | 0.706707 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,272 | confparse.py | wummel_linkchecker/linkcheck/configuration/confparse.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Parse configuration files"""
import ConfigParser
import os
from .. import LinkCheckerError, get_link_pat, LOG_CHECK, log, fileutil, plugins, logconf
def read_multiline (value):
"""Helper function reading multiline values."""
for line in value.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
class LCConfigParser (ConfigParser.RawConfigParser, object):
"""
Parse a LinkChecker configuration file.
"""
def __init__ (self, config):
"""Initialize configuration."""
super(LCConfigParser, self).__init__()
self.config = config
def read (self, files):
"""Read settings from given config files.
@raises: LinkCheckerError on syntax errors in the config file(s)
"""
assert isinstance(files, list), "Invalid file list %r" % files
try:
self.read_ok = super(LCConfigParser, self).read(files)
if len(self.read_ok) < len(files):
failed_files = set(files) - set(self.read_ok)
log.warn(LOG_CHECK, "Could not read configuration files %s.", failed_files)
# Read all the configuration parameters from the given files.
self.read_checking_config()
self.read_authentication_config()
self.read_filtering_config()
self.read_output_config()
self.read_plugin_config()
except Exception as msg:
raise LinkCheckerError(
_("Error parsing configuration: %s") % unicode(msg))
def read_string_option (self, section, option, allowempty=False):
"""Read a string option."""
if self.has_option(section, option):
value = self.get(section, option)
if not allowempty and not value:
raise LinkCheckerError(_("invalid empty value for %s: %s\n") % (option, value))
self.config[option] = value
def read_boolean_option(self, section, option):
"""Read a boolean option."""
if self.has_option(section, option):
self.config[option] = self.getboolean(section, option)
def read_int_option (self, section, option, key=None, min=None, max=None):
"""Read an integer option."""
if self.has_option(section, option):
num = self.getint(section, option)
if min is not None and num < min:
raise LinkCheckerError(
_("invalid value for %s: %d must not be less than %d") % (option, num, min))
if max is not None and num < max:
raise LinkCheckerError(
_("invalid value for %s: %d must not be greater than %d") % (option, num, max))
if key is None:
key = option
self.config[key] = num
def read_output_config (self):
"""Read configuration options in section "output"."""
section = "output"
from ..logger import LoggerClasses
for c in LoggerClasses:
key = c.LoggerName
if self.has_section(key):
for opt in self.options(key):
self.config[key][opt] = self.get(key, opt)
if self.has_option(key, 'parts'):
val = self.get(key, 'parts')
parts = [f.strip().lower() for f in val.split(',')]
self.config[key]['parts'] = parts
self.read_boolean_option(section, "warnings")
if self.has_option(section, "verbose"):
if self.getboolean(section, "verbose"):
self.config["verbose"] = True
self.config["warnings"] = True
if self.has_option(section, "quiet"):
if self.getboolean(section, "quiet"):
self.config['output'] = 'none'
self.config['quiet'] = True
if self.has_option(section, "debug"):
val = self.get(section, "debug")
parts = [f.strip().lower() for f in val.split(',')]
logconf.set_debug(parts)
self.read_boolean_option(section, "status")
if self.has_option(section, "log"):
val = self.get(section, "log").strip().lower()
self.config['output'] = val
if self.has_option(section, "fileoutput"):
loggers = self.get(section, "fileoutput").split(",")
# strip names from whitespace
loggers = (x.strip().lower() for x in loggers)
# no file output for the blacklist and none Logger
from ..logger import LoggerNames
loggers = (x for x in loggers if x in LoggerNames and
x not in ("blacklist", "none"))
for val in loggers:
output = self.config.logger_new(val, fileoutput=1)
self.config['fileoutput'].append(output)
def read_checking_config (self):
"""Read configuration options in section "checking"."""
section = "checking"
self.read_int_option(section, "threads", min=-1)
self.config['threads'] = max(0, self.config['threads'])
self.read_int_option(section, "timeout", min=1)
self.read_int_option(section, "aborttimeout", min=1)
self.read_int_option(section, "recursionlevel", min=-1)
self.read_string_option(section, "nntpserver")
self.read_string_option(section, "useragent")
self.read_int_option(section, "maxrequestspersecond", min=1)
self.read_int_option(section, "maxnumurls", min=0)
self.read_int_option(section, "maxfilesizeparse", min=1)
self.read_int_option(section, "maxfilesizedownload", min=1)
if self.has_option(section, "allowedschemes"):
self.config['allowedschemes'] = [x.strip().lower() for x in \
self.get(section, 'allowedschemes').split(',')]
self.read_boolean_option(section, "debugmemory")
self.read_string_option(section, "cookiefile")
self.read_string_option(section, "localwebroot")
try:
self.read_boolean_option(section, "sslverify")
except ValueError:
self.read_string_option(section, "sslverify")
self.read_int_option(section, "maxrunseconds", min=0)
def read_authentication_config (self):
"""Read configuration options in section "authentication"."""
section = "authentication"
password_fields = []
if self.has_option(section, "entry"):
for val in read_multiline(self.get(section, "entry")):
auth = val.split()
if len(auth) == 3:
self.config.add_auth(pattern=auth[0], user=auth[1],
password=auth[2])
password_fields.append("entry/%s/%s" % (auth[0], auth[1]))
elif len(auth) == 2:
self.config.add_auth(pattern=auth[0], user=auth[1])
else:
raise LinkCheckerError(
_("missing auth part in entry %(val)r") % {"val": val})
# read login URL and field names
if self.has_option(section, "loginurl"):
val = self.get(section, "loginurl").strip()
if not (val.lower().startswith("http:") or
val.lower().startswith("https:")):
raise LinkCheckerError(_("invalid login URL `%s'. Only " \
"HTTP and HTTPS URLs are supported.") % val)
self.config["loginurl"] = val
self.read_string_option(section, "loginuserfield")
self.read_string_option(section, "loginpasswordfield")
# read login extra fields
if self.has_option(section, "loginextrafields"):
for val in read_multiline(self.get(section, "loginextrafields")):
name, value = val.split(":", 1)
self.config["loginextrafields"][name] = value
self.check_password_readable(section, password_fields)
def check_password_readable(self, section, fields):
"""Check if there is a readable configuration file and print a warning."""
if not fields:
return
# The information which of the configuration files
# included which option is not available. To avoid false positives,
# a warning is only printed if exactly one file has been read.
if len(self.read_ok) != 1:
return
fn = self.read_ok[0]
if fileutil.is_accessable_by_others(fn):
log.warn(LOG_CHECK, "The configuration file %s contains password information (in section [%s] and options %s) and the file is readable by others. Please make the file only readable by you.", fn, section, fields)
if os.name == 'posix':
log.warn(LOG_CHECK, _("For example execute 'chmod go-rw %s'.") % fn)
elif os.name == 'nt':
log.warn(LOG_CHECK, _("See http://support.microsoft.com/kb/308419 for more info on setting file permissions."))
def read_filtering_config (self):
"""
Read configuration options in section "filtering".
"""
section = "filtering"
if self.has_option(section, "ignorewarnings"):
self.config['ignorewarnings'] = [f.strip().lower() for f in \
self.get(section, 'ignorewarnings').split(',')]
if self.has_option(section, "ignore"):
for line in read_multiline(self.get(section, "ignore")):
pat = get_link_pat(line, strict=1)
self.config["externlinks"].append(pat)
if self.has_option(section, "nofollow"):
for line in read_multiline(self.get(section, "nofollow")):
pat = get_link_pat(line, strict=0)
self.config["externlinks"].append(pat)
if self.has_option(section, "internlinks"):
pat = get_link_pat(self.get(section, "internlinks"))
self.config["internlinks"].append(pat)
self.read_boolean_option(section, "checkextern")
def read_plugin_config(self):
"""Read plugin-specific configuration values."""
folders = self.config["pluginfolders"]
modules = plugins.get_plugin_modules(folders)
for pluginclass in plugins.get_plugin_classes(modules):
section = pluginclass.__name__
if self.has_section(section):
self.config["enabledplugins"].append(section)
self.config[section] = pluginclass.read_config(self)
| 11,274 | Python | .py | 221 | 39.941176 | 223 | 0.602755 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,273 | __init__.py | wummel_linkchecker/linkcheck/configuration/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Store metadata and options.
"""
import os
import re
import urllib
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import shutil
import socket
import _LinkChecker_configdata as configdata
from .. import (log, LOG_CHECK, get_install_data, fileutil)
from . import confparse
from ..decorators import memoized
Version = configdata.version
ReleaseDate = configdata.release_date
AppName = configdata.name
App = AppName+u" "+Version
Author = configdata.author
HtmlAuthor = Author.replace(u' ', u' ')
Copyright = u"Copyright (C) 2000-2014 "+Author
HtmlCopyright = u"Copyright © 2000-2014 "+HtmlAuthor
AppInfo = App+u" "+Copyright
HtmlAppInfo = App+u", "+HtmlCopyright
Url = configdata.url
SupportUrl = u"https://github.com/wummel/linkchecker/issues"
DonateUrl = u"http://wummel.github.io/linkchecker/donations.html"
Email = configdata.author_email
UserAgent = u"Mozilla/5.0 (compatible; %s/%s; +%s)" % (AppName, Version, Url)
Freeware = AppName+u""" comes with ABSOLUTELY NO WARRANTY!
This is free software, and you are welcome to redistribute it
under certain conditions. Look at the file `LICENSE' within this
distribution."""
Portable = configdata.portable
def normpath (path):
"""Norm given system path with all available norm or expand functions
in os.path."""
expanded = os.path.expanduser(os.path.expandvars(path))
return os.path.normcase(os.path.normpath(expanded))
# List Python modules in the form (module, name, version attribute)
Modules = (
# required modules
("requests", "Requests", "__version__"),
# optional modules
("argcomplete", u"Argcomplete", None),
("GeoIP", u"GeoIP", 'lib_version'), # on Unix systems
("pygeoip", u"GeoIP", 'lib_version'), # on Windows systems
("sqlite3", u"Pysqlite", 'version'),
("sqlite3", u"Sqlite", 'sqlite_version'),
("gconf", u"Gconf", '__version__'),
("meliae", u"Meliae", '__version__'),
)
def get_modules_info():
"""Return unicode string with detected module info."""
module_infos = []
for (mod, name, version_attr) in Modules:
if not fileutil.has_module(mod):
continue
if hasattr(mod, version_attr):
attr = getattr(mod, version_attr)
version = attr() if callable(attr) else attr
module_infos.append("%s %s" % (name, version))
else:
# ignore attribute errors in case library developers
# change the version information attribute
module_infos.append(name)
return u"Modules: %s" % (u", ".join(module_infos))
def get_share_dir ():
"""Return absolute path of LinkChecker example configuration."""
return os.path.join(get_install_data(), "share", "linkchecker")
def get_share_file (filename, devel_dir=None):
"""Return a filename in the share directory.
@param devel_dir: directory to search when developing
@ptype devel_dir: string
@param filename: filename to search for
@ptype filename: string
@return: the found filename or None
@rtype: string
@raises: ValueError if not found
"""
paths = [get_share_dir()]
if devel_dir is not None:
# when developing
paths.insert(0, devel_dir)
for path in paths:
fullpath = os.path.join(path, filename)
if os.path.isfile(fullpath):
return fullpath
# not found
msg = "%s not found in %s; check your installation" % (filename, paths)
raise ValueError(msg)
def get_system_cert_file():
"""Try to find a system-wide SSL certificate file.
@return: the filename to the cert file
@raises: ValueError when no system cert file could be found
"""
if os.name == 'posix':
filename = "/etc/ssl/certs/ca-certificates.crt"
if os.path.isfile(filename):
return filename
msg = "no system certificate file found"
raise ValueError(msg)
def get_certifi_file():
"""Get the SSL certifications installed by the certifi package.
@return: the filename to the cert file
@rtype: string
@raises: ImportError when certifi is not installed or ValueError when
the file is not found
"""
import certifi
filename = certifi.where()
if os.path.isfile(filename):
return filename
msg = "%s not found; check your certifi installation" % filename
raise ValueError(msg)
# dynamic options
class Configuration (dict):
"""
Storage for configuration options. Options can both be given from
the command line as well as from configuration files.
"""
def __init__ (self):
"""
Initialize the default options.
"""
super(Configuration, self).__init__()
## checking options
self["allowedschemes"] = []
self['cookiefile'] = None
self["debugmemory"] = False
self["localwebroot"] = None
self["maxfilesizeparse"] = 1*1024*1024
self["maxfilesizedownload"] = 5*1024*1024
self["maxnumurls"] = None
self["maxrunseconds"] = None
self["maxrequestspersecond"] = 10
self["maxhttpredirects"] = 10
self["nntpserver"] = os.environ.get("NNTP_SERVER", None)
self["proxy"] = urllib.getproxies()
self["sslverify"] = True
self["threads"] = 10
self["timeout"] = 60
self["aborttimeout"] = 300
self["recursionlevel"] = -1
self["useragent"] = UserAgent
## authentication
self["authentication"] = []
self["loginurl"] = None
self["loginuserfield"] = "login"
self["loginpasswordfield"] = "password"
self["loginextrafields"] = {}
## filtering
self["externlinks"] = []
self["ignorewarnings"] = []
self["internlinks"] = []
self["checkextern"] = False
## plugins
self["pluginfolders"] = get_plugin_folders()
self["enabledplugins"] = []
## output
self['trace'] = False
self['quiet'] = False
self["verbose"] = False
self["warnings"] = True
self["fileoutput"] = []
self['output'] = 'text'
self["status"] = False
self["status_wait_seconds"] = 5
self['logger'] = None
self.loggers = {}
from ..logger import LoggerClasses
for c in LoggerClasses:
key = c.LoggerName
self[key] = {}
self.loggers[key] = c
def set_status_logger(self, status_logger):
"""Set the status logger."""
self.status_logger = status_logger
def logger_new (self, loggername, **kwargs):
"""Instantiate new logger and return it."""
args = self[loggername]
args.update(kwargs)
return self.loggers[loggername](**args)
def logger_add (self, loggerclass):
"""Add a new logger type to the known loggers."""
self.loggers[loggerclass.LoggerName] = loggerclass
self[loggerclass.LoggerName] = {}
def read (self, files=None):
"""
Read settings from given config files.
@raises: LinkCheckerError on syntax errors in the config file(s)
"""
if files is None:
cfiles = []
else:
cfiles = files[:]
if not cfiles:
userconf = get_user_config()
if os.path.isfile(userconf):
cfiles.append(userconf)
# filter invalid files
filtered_cfiles = []
for cfile in cfiles:
if not os.path.isfile(cfile):
log.warn(LOG_CHECK, _("Configuration file %r does not exist."), cfile)
elif not fileutil.is_readable(cfile):
log.warn(LOG_CHECK, _("Configuration file %r is not readable."), cfile)
else:
filtered_cfiles.append(cfile)
log.debug(LOG_CHECK, "reading configuration from %s", filtered_cfiles)
confparse.LCConfigParser(self).read(filtered_cfiles)
def add_auth (self, user=None, password=None, pattern=None):
"""Add given authentication data."""
if not user or not pattern:
log.warn(LOG_CHECK,
_("missing user or URL pattern in authentication data."))
return
entry = dict(
user=user,
password=password,
pattern=re.compile(pattern),
)
self["authentication"].append(entry)
def get_user_password (self, url):
"""Get tuple (user, password) from configured authentication
that matches the given URL.
Both user and password can be None if not specified, or no
authentication matches the given URL.
"""
for auth in self["authentication"]:
if auth['pattern'].match(url):
return (auth['user'], auth['password'])
return (None, None)
def get_connectionlimits(self):
"""Get dict with limit per connection type."""
return {key: self['maxconnections%s' % key] for key in ('http', 'https', 'ftp')}
def sanitize (self):
"Make sure the configuration is consistent."
if self['logger'] is None:
self.sanitize_logger()
if self['loginurl']:
self.sanitize_loginurl()
self.sanitize_proxies()
self.sanitize_plugins()
self.sanitize_ssl()
# set default socket timeout
socket.setdefaulttimeout(self['timeout'])
def sanitize_logger (self):
"""Make logger configuration consistent."""
if not self['output']:
log.warn(LOG_CHECK, _("activating text logger output."))
self['output'] = 'text'
self['logger'] = self.logger_new(self['output'])
def sanitize_loginurl (self):
"""Make login configuration consistent."""
url = self["loginurl"]
disable = False
if not self["loginpasswordfield"]:
log.warn(LOG_CHECK,
_("no CGI password fieldname given for login URL."))
disable = True
if not self["loginuserfield"]:
log.warn(LOG_CHECK,
_("no CGI user fieldname given for login URL."))
disable = True
if self.get_user_password(url) == (None, None):
log.warn(LOG_CHECK,
_("no user/password authentication data found for login URL."))
disable = True
if not url.lower().startswith(("http:", "https:")):
log.warn(LOG_CHECK, _("login URL is not a HTTP URL."))
disable = True
urlparts = urlparse.urlsplit(url)
if not urlparts[0] or not urlparts[1] or not urlparts[2]:
log.warn(LOG_CHECK, _("login URL is incomplete."))
disable = True
if disable:
log.warn(LOG_CHECK,
_("disabling login URL %(url)s.") % {"url": url})
self["loginurl"] = None
def sanitize_proxies (self):
"""Try to read additional proxy settings which urllib does not
support."""
if os.name != 'posix':
return
if "http" not in self["proxy"]:
http_proxy = get_gconf_http_proxy() or get_kde_http_proxy()
if http_proxy:
self["proxy"]["http"] = http_proxy
if "ftp" not in self["proxy"]:
ftp_proxy = get_gconf_ftp_proxy() or get_kde_ftp_proxy()
if ftp_proxy:
self["proxy"]["ftp"] = ftp_proxy
def sanitize_plugins(self):
"""Ensure each plugin is configurable."""
for plugin in self["enabledplugins"]:
if plugin not in self:
self[plugin] = {}
def sanitize_ssl(self):
"""Use local installed certificate file if available.
Tries to get system, then certifi, then the own
installed certificate file."""
if self["sslverify"] is True:
try:
self["sslverify"] = get_system_cert_file()
except ValueError:
try:
self["sslverify"] = get_certifi_file()
except (ValueError, ImportError):
try:
self["sslverify"] = get_share_file('cacert.pem')
except ValueError:
pass
def get_plugin_folders():
"""Get linkchecker plugin folders. Default is ~/.linkchecker/plugins/."""
folders = []
defaultfolder = normpath("~/.linkchecker/plugins")
if not os.path.exists(defaultfolder) and not Portable:
try:
make_userdir(defaultfolder)
except Exception as errmsg:
msg = _("could not create plugin directory %(dirname)r: %(errmsg)r")
args = dict(dirname=defaultfolder, errmsg=errmsg)
log.warn(LOG_CHECK, msg % args)
if os.path.exists(defaultfolder):
folders.append(defaultfolder)
return folders
def make_userdir(child):
"""Create a child directory."""
userdir = os.path.dirname(child)
if not os.path.isdir(userdir):
if os.name == 'nt':
# Windows forbids filenames with leading dot unless
# a trailing dot is added.
userdir += "."
os.mkdir(userdir, 0700)
def get_user_config():
"""Get the user configuration filename.
If the user configuration file does not exist, copy it from the initial
configuration file, but only if this is not a portable installation.
Returns path to user config file (which might not exist due to copy
failures or on portable systems).
@return configuration filename
@rtype string
"""
# initial config (with all options explained)
initialconf = normpath(os.path.join(get_share_dir(), "linkcheckerrc"))
# per user config settings
userconf = normpath("~/.linkchecker/linkcheckerrc")
if os.path.isfile(initialconf) and not os.path.exists(userconf) and \
not Portable:
# copy the initial configuration to the user configuration
try:
make_userdir(userconf)
shutil.copy(initialconf, userconf)
except Exception as errmsg:
msg = _("could not copy initial configuration file %(src)r to %(dst)r: %(errmsg)r")
args = dict(src=initialconf, dst=userconf, errmsg=errmsg)
log.warn(LOG_CHECK, msg % args)
return userconf
def get_gconf_http_proxy ():
"""Return host:port for GConf HTTP proxy if found, else None."""
try:
import gconf
except ImportError:
return None
try:
client = gconf.client_get_default()
if client.get_bool("/system/http_proxy/use_http_proxy"):
host = client.get_string("/system/http_proxy/host")
port = client.get_int("/system/http_proxy/port")
if host:
if not port:
port = 8080
return "%s:%d" % (host, port)
except Exception as msg:
log.debug(LOG_CHECK, "error getting HTTP proxy from gconf: %s", msg)
pass
return None
def get_gconf_ftp_proxy ():
"""Return host:port for GConf FTP proxy if found, else None."""
try:
import gconf
except ImportError:
return None
try:
client = gconf.client_get_default()
host = client.get_string("/system/proxy/ftp_host")
port = client.get_int("/system/proxy/ftp_port")
if host:
if not port:
port = 8080
return "%s:%d" % (host, port)
except Exception as msg:
log.debug(LOG_CHECK, "error getting FTP proxy from gconf: %s", msg)
pass
return None
def get_kde_http_proxy ():
"""Return host:port for KDE HTTP proxy if found, else None."""
config_dir = get_kde_config_dir()
if not config_dir:
# could not find any KDE configuration directory
return
try:
data = read_kioslaverc(config_dir)
return data.get("http_proxy")
except Exception as msg:
log.debug(LOG_CHECK, "error getting HTTP proxy from KDE: %s", msg)
pass
def get_kde_ftp_proxy ():
"""Return host:port for KDE HTTP proxy if found, else None."""
config_dir = get_kde_config_dir()
if not config_dir:
# could not find any KDE configuration directory
return
try:
data = read_kioslaverc(config_dir)
return data.get("ftp_proxy")
except Exception as msg:
log.debug(LOG_CHECK, "error getting FTP proxy from KDE: %s", msg)
pass
# The following KDE functions are largely ported and ajusted from
# Google Chromium:
# http://src.chromium.org/viewvc/chrome/trunk/src/net/proxy/proxy_config_service_linux.cc?revision=HEAD&view=markup
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def get_kde_config_dir ():
    """Return KDE configuration directory or None if not found."""
    home = get_kde_home_dir()
    if home:
        return kde_home_to_config(home)
    # no KDE home directory could be determined
    return None
def kde_home_to_config (kde_home):
    """Return the config path below the given KDE home directory."""
    subdirs = ("share", "config")
    return os.path.join(kde_home, *subdirs)
def get_kde_home_dir ():
    """Return KDE home directory or None if not found."""
    env_kdehome = os.environ.get("KDEHOME")
    if env_kdehome:
        candidate = os.path.abspath(env_kdehome)
    else:
        home = os.environ.get("HOME")
        if not home:
            # without $HOME nothing can be guessed
            return None
        kde3_home = os.path.join(home, ".kde")
        kde4_home = os.path.join(home, ".kde4")
        if not fileutil.find_executable("kde4-config"):
            # no KDE4 installation found; assume KDE3
            candidate = kde3_home
        else:
            kde3_config = kde_home_to_config(kde3_home)
            kde4_config = kde_home_to_config(kde4_home)
            candidate = kde4_home
            if os.path.exists(kde4_config) and os.path.exists(kde3_config):
                # both exist: prefer the most recently modified config
                if fileutil.get_mtime(kde3_config) > fileutil.get_mtime(kde4_config):
                    candidate = kde3_home
    return candidate if os.path.exists(candidate) else None
# matches an optional trailing "[...]" localization suffix on a config key
loc_ro = re.compile(r"\[.*\]$")

@memoized
def read_kioslaverc (kde_config_dir):
    """Read kioslaverc into data dictionary.

    @param kde_config_dir: directory containing the kioslaverc file
    @ptype kde_config_dir: string
    @return: dictionary with resolved proxy settings
    @rtype: dict
    """
    data = {}
    filename = os.path.join(kde_config_dir, "kioslaverc")
    # Track whether we are inside the "[Proxy Settings]" section.
    # Initialize to False so that lines occurring before the first
    # section header do not raise NameError (bug fix: the variable
    # was previously unbound until a '[' line was seen).
    in_proxy_settings = False
    with open(filename) as fd:
        # First read all lines into dictionary since they can occur
        # in any order.
        for line in fd:
            line = line.rstrip()
            if line.startswith('['):
                in_proxy_settings = line.startswith("[Proxy Settings]")
            elif in_proxy_settings:
                if '=' not in line:
                    continue
                key, value = line.split('=', 1)
                key = key.strip()
                value = value.strip()
                if not key:
                    continue
                # trim optional localization, e.g. "key[de]"
                key = loc_ro.sub("", key).strip()
                if not key:
                    continue
                add_kde_setting(key, value, data)
    resolve_kde_settings(data)
    return data
def add_kde_proxy (key, value, data):
    """Store a proxy value in the data dictionary after sanity checks."""
    # reject empty values and the "//:" placeholder KDE writes for
    # unconfigured proxies
    if value and not value.startswith("//:"):
        data[key] = value
def add_kde_setting (key, value, data):
    """Translate one KDE kioslaverc setting into the data dictionary."""
    if key == "ProxyType":
        # map KDE proxy type number to a mode name; unknown numbers map
        # to None, which is still stored under "mode"
        type_to_mode = {
            1: "manual",
            2: "pac",       # PAC URL
            3: "wpad",      # WPAD
            4: "indirect",  # manual via environment variables
        }
        data["mode"] = type_to_mode.get(int(value))
    elif key == "Proxy Config Script":
        data["autoconfig_url"] = value
    elif key == "httpProxy":
        add_kde_proxy("http_proxy", value, data)
    elif key == "httpsProxy":
        add_kde_proxy("https_proxy", value, data)
    elif key == "ftpProxy":
        add_kde_proxy("ftp_proxy", value, data)
    elif key == "ReversedException":
        data["reversed_bypass"] = (value == "true") or bool(int(value))
    elif key == "NoProxyFor":
        data["ignore_hosts"] = split_hosts(value)
    elif key == "AuthMode":
        # parsed but not yet used
        mode = int(value)
        # XXX todo
def split_hosts (value):
    """Split a ", "-separated host list, dropping empty entries."""
    return list(filter(None, value.split(", ")))
def resolve_indirect (data, key, splithosts=False):
    """Replace the environment variable name stored at data[key] with the
    variable's value; delete the entry if the variable is unset or empty."""
    varname = data[key]
    env_value = os.environ.get(varname)
    if not env_value:
        # unset or empty environment variable: drop the entry
        del data[key]
        return
    data[key] = split_hosts(env_value) if splithosts else env_value
def resolve_kde_settings (data):
    """Write final proxy configuration values in data dictionary."""
    if "mode" not in data:
        return
    mode = data["mode"]
    proxy_keys = ("http_proxy", "https_proxy", "ftp_proxy")
    if mode == "indirect":
        # entries name environment variables; dereference them
        for proxy_key in proxy_keys:
            if proxy_key in data:
                resolve_indirect(data, proxy_key)
        if "ignore_hosts" in data:
            resolve_indirect(data, "ignore_hosts", splithosts=True)
    elif mode != "manual":
        # unsupported config: discard any proxy entries
        for proxy_key in proxy_keys:
            data.pop(proxy_key, None)
| 23,744 | Python | .py | 600 | 31.716667 | 115 | 0.62292 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,274 | ignoreurl.py | wummel_linkchecker/linkcheck/checker/ignoreurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2012-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle ignored URLs.
"""
from . import unknownurl
class IgnoreUrl (unknownurl.UnknownUrl):
    """URL that is always ignored: is_ignored() returns True regardless
    of the scheme, unlike the base class which consults the scheme list."""
    def is_ignored (self):
        """Return True unconditionally: this URL is always ignored."""
        return True
| 1,012 | Python | .py | 25 | 38.4 | 73 | 0.751016 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,275 | internpaturl.py | wummel_linkchecker/linkcheck/checker/internpaturl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2005-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Intern URL pattern support.
"""
import re
from . import urlbase, absolute_url
from .. import strformat, url as urlutil
def get_intern_pattern (url):
    """Return intern pattern for given URL. Redirections to the same
    domain with or without "www." prepended are allowed."""
    parts = strformat.url_unicode_split(url)
    scheme = parts[0].lower()
    netloc, is_idn = urlutil.idna_encode(parts[1].lower())
    # treat www.example.com and example.com as the same site
    if netloc.startswith('www.'):
        netloc = netloc[4:]
    if not netloc or not scheme:
        return None
    # strip URL parameters, then drop the last path segment (the filename)
    urlpath = urlutil.splitparams(parts[2])[0]
    urlpath = "/".join(urlpath.split('/')[:-1])
    if url.endswith('/'):
        urlpath += '/'
    esc_scheme, esc_netloc, esc_path = [re.escape(part)
        for part in (scheme, netloc, urlpath)]
    if esc_scheme in ('http', 'https'):
        # allow switching between http and https
        esc_scheme = 'https?'
        esc_netloc = r"(www\.|)%s" % esc_netloc
    return "^%s://%s%s" % (esc_scheme, esc_netloc, esc_path)
class InternPatternUrl (urlbase.UrlBase):
    """Class supporting an intern URL pattern."""
    def get_intern_pattern (self, url=None):
        """
        Get pattern for intern URL matching.
        @param url: URL to derive the pattern from; when None, the
            absolute URL of this link object is used
        @type url: string or None
        @return non-empty regex pattern or None
        @rtype String or None
        """
        if url is None:
            # fall back to the absolute form of this link's URL
            url = absolute_url(self.base_url, self.base_ref, self.parent_url)
        if not url:
            return None
        return get_intern_pattern(url)
| 2,233 | Python | .py | 57 | 34.631579 | 77 | 0.67788 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,276 | proxysupport.py | wummel_linkchecker/linkcheck/checker/proxysupport.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Mixin class for URLs that can be fetched over a proxy.
"""
import urllib
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import os
from .. import LinkCheckerError, log, LOG_CHECK, url as urlutil, httputil
class ProxySupport (object):
    """Get support for proxying and for URLs with user:pass@host setting."""

    def set_proxy (self, proxy):
        """Parse given proxy information and store parsed values.
        Note that only http:// proxies are supported, both for ftp://
        and http:// URLs.

        @param proxy: proxy URL (eg. http://user:pass@host:port) or None
        @type proxy: string or None
        @raise LinkCheckerError: if the proxy scheme is not http or https
        """
        self.proxy = proxy
        self.proxytype = "http"
        self.proxyauth = None
        if not self.proxy:
            return
        proxyurl = urlparse.urlparse(self.proxy)
        self.proxytype = proxyurl.scheme
        if self.proxytype not in ('http', 'https'):
            # Note that invalid proxies might raise TypeError in urllib2,
            # so make sure to stop checking at this point, not later.
            msg = _("Proxy value `%(proxy)s' must start with 'http:' or 'https:'.") \
                 % dict(proxy=proxy)
            raise LinkCheckerError(msg)
        if self.ignore_proxy_host():
            # log proxy without auth info
            log.debug(LOG_CHECK, "ignoring proxy %r", self.proxy)
            self.add_info(_("Ignoring proxy setting `%(proxy)s'.") %
                          dict(proxy=proxy))
            self.proxy = None
            return
        log.debug(LOG_CHECK, "using proxy %r", self.proxy)
        self.add_info(_("Using proxy `%(proxy)s'.") % dict(proxy=self.proxy))
        self.proxyhost = proxyurl.hostname
        self.proxyport = proxyurl.port
        if proxyurl.username is not None:
            username = proxyurl.username
            # BUGFIX: read the password from the parsed URL object
            # (proxyurl.password); the previous code read proxy.password,
            # which raised AttributeError because proxy is a string.
            password = proxyurl.password if proxyurl.password is not None else ""
            auth = "%s:%s" % (username, password)
            self.proxyauth = "Basic "+httputil.encode_base64(auth)

    def ignore_proxy_host (self):
        """Check if self.host is in the $no_proxy ignore list.

        @return: True if the proxy should be bypassed for self.host
        @rtype: bool
        """
        if urllib.proxy_bypass(self.host):
            return True
        no_proxy = os.environ.get("no_proxy")
        if no_proxy:
            # $no_proxy is a comma-separated list of host[:port] entries
            entries = [parse_host_port(x) for x in no_proxy.split(",")]
            for host, port in entries:
                if host.lower() == self.host and port == self.port:
                    return True
        return False

    def get_netloc(self):
        """Determine scheme, host and port for this connection taking
        proxy data into account.
        @return: tuple (scheme, host, port)
        @rtype: tuple(string, string, int)
        """
        if self.proxy:
            # connect to the proxy instead of the target host
            scheme = self.proxytype
            host = self.proxyhost
            port = self.proxyport
        else:
            scheme = self.scheme
            host = self.host
            port = self.port
        return (scheme, host, port)
def parse_host_port (host_port):
    """Parse a host:port string into a (host, port) tuple.
    A numeric port is converted to int, otherwise it is kept as-is."""
    host, port = urllib.splitport(host_port.strip())
    if port is not None and urlutil.is_numeric_port(port):
        port = int(port)
    return host, port
| 3,969 | Python | .py | 96 | 33.510417 | 85 | 0.632954 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,277 | unknownurl.py | wummel_linkchecker/linkcheck/checker/unknownurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2001-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle uncheckable URLs.
"""
import re
from . import urlbase
class UnknownUrl (urlbase.UrlBase):
    """Handle unknown or just plain broken URLs."""

    def build_url (self):
        """Log that this URL is either ignored or unrecognized."""
        super(UnknownUrl, self).build_url()
        if not self.is_ignored():
            self.set_result(_("URL is unrecognized or has invalid syntax"),
                            valid=False)
        else:
            self.add_info(_("%(scheme)s URL ignored.") %
                          {"scheme": self.scheme.capitalize()})
            self.set_result(_("ignored"))

    def is_ignored (self):
        """Return True if this URL scheme is ignored."""
        return is_unknown_scheme(self.scheme)

    def can_get_content (self):
        """Unknown URLs have no content.

        @return: False
        @rtype: bool
        """
        return False
# do not edit anything below since these entries are generated from
# scripts/update_iana_uri_schemes.sh
# DO NOT REMOVE
# from https://www.iana.org/assignments/uri-schemes/uri-schemes.xhtml
ignored_schemes_permanent = r"""
|aaa # Diameter Protocol
|aaas # Diameter Protocol with Secure Transport
|about # about
|acap # application configuration access protocol
|acct # acct
|cap # Calendar Access Protocol
|cid # content identifier
|coap # coap
|coaps # coaps
|crid # TV-Anytime Content Reference Identifier
|data # data
|dav # dav
|dict # dictionary service protocol
|dns # Domain Name System
|geo # Geographic Locations
|go # go
|gopher # The Gopher Protocol
|h323 # H.323
|iax # Inter-Asterisk eXchange Version 2
|icap # Internet Content Adaptation Protocol
|im # Instant Messaging
|imap # internet message access protocol
|info # Information Assets with Identifiers in Public Namespaces
|ipp # Internet Printing Protocol
|iris # Internet Registry Information Service
|iris\.beep # iris.beep
|iris\.lwz # iris.lwz
|iris\.xpc # iris.xpc
|iris\.xpcs # iris.xpcs
|jabber # jabber
|ldap # Lightweight Directory Access Protocol
|mid # message identifier
|msrp # Message Session Relay Protocol
|msrps # Message Session Relay Protocol Secure
|mtqp # Message Tracking Query Protocol
|mupdate # Mailbox Update (MUPDATE) Protocol
|nfs # network file system protocol
|ni # ni
|nih # nih
|opaquelocktoken # opaquelocktokent
|pop # Post Office Protocol v3
|pres # Presence
|reload # reload
|rtsp # Real-time Streaming Protocol (RTSP)
|rtsps # Real-time Streaming Protocol (RTSP) over TLS
|rtspu # Real-time Streaming Protocol (RTSP) over unreliable datagram transport
|service # service location
|session # session
|shttp # Secure Hypertext Transfer Protocol
|sieve # ManageSieve Protocol
|sip # session initiation protocol
|sips # secure session initiation protocol
|sms # Short Message Service
|snmp # Simple Network Management Protocol
|soap\.beep # soap.beep
|soap\.beeps # soap.beeps
|stun # stun
|stuns # stuns
|tag # tag
|tel # telephone
|telnet # Reference to interactive sessions
|tftp # Trivial File Transfer Protocol
|thismessage # multipart/related relative reference resolution
|tip # Transaction Internet Protocol
|tn3270 # Interactive 3270 emulation sessions
|turn # turn
|turns # turns
|tv # TV Broadcasts
|urn # Uniform Resource Names
|vemmi # versatile multimedia interface
|ws # WebSocket connections
|wss # Encrypted WebSocket connections
|xcon # xcon
|xcon\-userid # xcon-userid
|xmlrpc\.beep # xmlrpc.beep
|xmlrpc\.beeps # xmlrpc.beeps
|xmpp # Extensible Messaging and Presence Protocol
|z39\.50r # Z39.50 Retrieval
|z39\.50s # Z39.50 Session
"""
ignored_schemes_provisional = r"""
|adiumxtra # adiumxtra
|afp # afp
|afs # Andrew File System global file names
|aim # aim
|apt # apt
|attachment # attachment
|aw # aw
|barion # barion
|beshare # beshare
|bitcoin # bitcoin
|bolo # bolo
|callto # callto
|chrome # chrome
|chrome\-extension # chrome-extension
|com\-eventbrite\-attendee # com-eventbrite-attendee
|content # content
|cvs # cvs
|dlna\-playcontainer # dlna-playcontainer
|dlna\-playsingle # dlna-playsingle
|dtn # DTNRG research and development
|dvb # dvb
|ed2k # ed2k
|facetime # facetime
|feed # feed
|feedready # feedready
|finger # finger
|fish # fish
|gg # gg
|git # git
|gizmoproject # gizmoproject
|gtalk # gtalk
|ham # ham
|hcp # hcp
|icon # icon
|ipn # ipn
|irc # irc
|irc6 # irc6
|ircs # ircs
|itms # itms
|jar # jar
|jms # Java Message Service
|keyparc # keyparc
|lastfm # lastfm
|ldaps # ldaps
|magnet # magnet
|maps # maps
|market # market
|message # message
|mms # mms
|ms\-help # ms-help
|ms\-settings\-power # ms-settings-power
|msnim # msnim
|mumble # mumble
|mvn # mvn
|notes # notes
|oid # oid
|palm # palm
|paparazzi # paparazzi
|pkcs11 # pkcs11
|platform # platform
|proxy # proxy
|psyc # psyc
|query # query
|res # res
|resource # resource
|rmi # rmi
|rsync # rsync
|rtmp # rtmp
|secondlife # query
|sftp # query
|sgn # sgn
|skype # skype
|smb # smb
|smtp # smtp
|soldat # soldat
|spotify # spotify
|ssh # ssh
|steam # steam
|submit # submit
|svn # svn
|teamspeak # teamspeak
|things # things
|udp # udp
|unreal # unreal
|ut2004 # ut2004
|ventrilo # ventrilo
|view\-source # view-source
|webcal # webcal
|wtai # wtai
|wyciwyg # wyciwyg
|xfire # xfire
|xri # xri
|ymsgr # ymsgr
"""
ignored_schemes_historical = r"""
|fax # fax
|mailserver # Access to data available from mail servers
|modem # modem
|pack # pack
|prospero # Prospero Directory Service
|snews # NNTP over SSL/TLS
|videotex # videotex
|wais # Wide Area Information Servers
|z39\.50 # Z39.50 information access
"""
ignored_schemes_other = r"""
|clsid # Microsoft specific
|find # Mozilla specific
|isbn # ISBN (int. book numbers)
|javascript # JavaScript
"""
ignored_schemes = "^(%s%s%s%s)$" % (
ignored_schemes_permanent,
ignored_schemes_provisional,
ignored_schemes_historical,
ignored_schemes_other,
)
ignored_schemes_re = re.compile(ignored_schemes, re.VERBOSE)
is_unknown_scheme = ignored_schemes_re.match
| 7,589 | Python | .py | 247 | 28.825911 | 84 | 0.653017 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,278 | const.py | wummel_linkchecker/linkcheck/checker/const.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Helper constants.
"""
import socket
import select
import nntplib
import ftplib
import requests
from .. import LinkCheckerError
from dns.exception import DNSException
# Catch these exceptions on syntax checks.
ExcSyntaxList = [
    LinkCheckerError,
]
# Catch these exceptions on content and connect checks. All other
# exceptions are internal or system errors
ExcCacheList = [
    IOError,
    OSError, # OSError is thrown on Windows when a file is not found
    LinkCheckerError,
    DNSException,
    socket.error,
    select.error,
    # nntp errors (including EOFError)
    nntplib.NNTPError,
    EOFError,
    # http errors
    requests.exceptions.RequestException,
    requests.packages.urllib3.exceptions.HTTPError,
    # ftp errors
    ftplib.Error,
    # idna.encode(), called from socket.create_connection()
    UnicodeError,
]
# Exceptions that do not put the URL in the cache so that the URL can
# be checked again.
ExcNoCacheList = [
    socket.timeout,
]
# firefox bookmark file needs sqlite3 for parsing
try:
    import sqlite3
    ExcCacheList.append(sqlite3.Error)
except ImportError:
    # sqlite3 is optional; without it Firefox bookmark files are not parsed
    pass
# pyOpenSSL errors
try:
    import OpenSSL
    ExcCacheList.append(OpenSSL.SSL.Error)
except ImportError:
    # pyOpenSSL is optional
    pass
# all caught exceptions, cacheable or not
ExcList = ExcCacheList + ExcNoCacheList
# Maximum URL length
# https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers
URL_MAX_LENGTH = 2047
# the warnings: string tags identifying each warning type
WARN_URL_EFFECTIVE_URL = "url-effective-url"
WARN_URL_ERROR_GETTING_CONTENT = "url-error-getting-content"
WARN_URL_CONTENT_SIZE_TOO_LARGE = "url-content-too-large"
WARN_URL_CONTENT_SIZE_ZERO = "url-content-size-zero"
WARN_URL_OBFUSCATED_IP = "url-obfuscated-ip"
WARN_URL_TOO_LONG = "url-too-long"
WARN_URL_WHITESPACE = "url-whitespace"
WARN_FILE_MISSING_SLASH = "file-missing-slash"
WARN_FILE_SYSTEM_PATH = "file-system-path"
WARN_FTP_MISSING_SLASH = "ftp-missing-slash"
WARN_HTTP_EMPTY_CONTENT = "http-empty-content"
WARN_HTTP_COOKIE_STORE_ERROR = "http-cookie-store-error"
WARN_IGNORE_URL = "ignore-url"
WARN_MAIL_NO_MX_HOST = "mail-no-mx-host"
WARN_NNTP_NO_SERVER = "nntp-no-server"
WARN_NNTP_NO_NEWSGROUP = "nntp-no-newsgroup"
WARN_XML_PARSE_ERROR = "xml-parse-error"
# registered warnings: maps warning tag to its translated description
Warnings = {
    WARN_URL_EFFECTIVE_URL:
        _("The effective URL is different from the original."),
    WARN_URL_ERROR_GETTING_CONTENT:
        _("Could not get the content of the URL."),
    WARN_URL_CONTENT_SIZE_TOO_LARGE: _("The URL content size is too large."),
    WARN_URL_CONTENT_SIZE_ZERO: _("The URL content size is zero."),
    WARN_URL_TOO_LONG: _("The URL is longer than the recommended size."),
    WARN_URL_WHITESPACE: _("The URL contains leading or trailing whitespace."),
    WARN_FILE_MISSING_SLASH: _("The file: URL is missing a trailing slash."),
    WARN_FILE_SYSTEM_PATH:
        _("The file: path is not the same as the system specific path."),
    WARN_FTP_MISSING_SLASH: _("The ftp: URL is missing a trailing slash."),
    WARN_HTTP_EMPTY_CONTENT: _("The URL had no content."),
    WARN_HTTP_COOKIE_STORE_ERROR:
        _("An error occurred while storing a cookie."),
    WARN_IGNORE_URL: _("The URL has been ignored."),
    WARN_MAIL_NO_MX_HOST: _("The mail MX host could not be found."),
    WARN_NNTP_NO_SERVER: _("No NNTP server was found."),
    WARN_NNTP_NO_NEWSGROUP: _("The NNTP newsgroup could not be found."),
    WARN_URL_OBFUSCATED_IP: _("The IP is obfuscated."),
    WARN_XML_PARSE_ERROR: _("XML could not be parsed."),
}
| 4,278 | Python | .py | 113 | 35.035398 | 102 | 0.741276 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,279 | fileurl.py | wummel_linkchecker/linkcheck/checker/fileurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle local file: links.
"""
import re
import os
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import urllib
try:
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
from datetime import datetime
from . import urlbase, get_index_html
from .. import log, LOG_CHECK, fileutil, mimeutil, LinkCheckerError, url as urlutil
from ..bookmarks import firefox
from .const import WARN_FILE_MISSING_SLASH, WARN_FILE_SYSTEM_PATH
def get_files (dirname):
    """Get iterator of entries in directory. Only allows regular files
    and directories, no symlinks."""
    for name in os.listdir(dirname):
        fullname = os.path.join(dirname, name)
        if os.path.islink(fullname):
            # skip symlinks
            continue
        if os.path.isdir(fullname):
            # mark directories with a trailing slash
            yield name + "/"
        elif os.path.isfile(fullname):
            yield name
def prepare_urlpath_for_nt (path):
    """
    URLs like 'file://server/path/' result in a path named '/server/path'.
    However urllib.url2pathname expects '////server/path'.
    """
    if '|' in path:
        # path contains a drive specifier; leave it untouched
        return path
    return "////" + path.lstrip("/")
def get_nt_filename (path):
    """Return case sensitive filename for NT path.

    Walks the path from its root, replacing each component with the
    actual directory entry that matches it case-insensitively.
    NOTE: uses os.path.splitunc, which only exists on Windows."""
    unc, rest = os.path.splitunc(path)
    head, tail = os.path.split(rest)
    if not tail:
        # reached the root of the path
        return path
    # find the directory entry matching tail, ignoring case
    for fname in os.listdir(unc+head):
        if fname.lower() == tail.lower():
            # recurse to fix the case of the parent directories too
            return os.path.join(get_nt_filename(unc+head), fname)
    log.error(LOG_CHECK, "could not find %r in %r", tail, head)
    return path
def get_os_filename (path):
    """Return filesystem path for given URL path."""
    if os.name == 'nt':
        # convert '/server/path' into the '////server/path' form
        # expected by urllib.url2pathname
        path = prepare_urlpath_for_nt(path)
    res = urllib.url2pathname(fileutil.pathencode(path))
    if os.name == 'nt' and res.endswith(':') and len(res) == 2:
        # Work around http://bugs.python.org/issue11474
        res += os.sep
    return res
def is_absolute_path (path):
    """Check if given path is absolute. On Windows absolute paths start
    with a drive letter. On all other systems absolute paths start with
    a slash."""
    if os.name != 'nt':
        return path.startswith("/")
    if re.search(r"^[a-zA-Z]:", path):
        # a leading drive letter counts as absolute on Windows
        return True
    return path.replace("\\", "/").startswith("/")
class FileUrl (urlbase.UrlBase):
    """
    Url link with file scheme.
    """
    def init (self, base_ref, base_url, parent_url, recursion_level,
          aggregate, line, column, page, name, url_encoding, extern):
        """Initialize the scheme to u'file' after base initialization."""
        super(FileUrl, self).init(base_ref, base_url, parent_url,
          recursion_level, aggregate, line, column, page, name, url_encoding, extern)
        self.scheme = u'file'
    def build_base_url(self):
        """The URL is normed according to the platform:
         - the base URL is made an absolute file:// URL
         - under Windows platform the drive specifier is normed
        """
        if self.base_url is None:
            return
        base_url = self.base_url
        # only expand/absolutize raw paths given on the command line
        # (no parent URL, no base ref, not already a file: URL)
        if not (self.parent_url or self.base_ref or base_url.startswith("file:")):
            base_url = os.path.expanduser(base_url)
            if not is_absolute_path(base_url):
                try:
                    base_url = os.getcwd()+"/"+base_url
                except OSError as msg:
                    # occurs on stale remote filesystems (eg. NFS)
                    errmsg = _("Could not get current working directory: %(msg)s") % dict(msg=msg)
                    raise LinkCheckerError(errmsg)
                if os.path.isdir(base_url):
                    base_url += "/"
            base_url = "file://"+base_url
        if os.name == "nt":
            base_url = base_url.replace("\\", "/")
            # transform c:/windows into /c|/windows
            base_url = re.sub("^file://(/?)([a-zA-Z]):", r"file:///\2|", base_url)
            # transform file://path into file:///path
            base_url = re.sub("^file://([^/])", r"file:///\1", base_url)
        self.base_url = unicode(base_url)
    def build_url (self):
        """
        Calls super.build_url() and adds a trailing slash to directories.
        """
        self.build_base_url()
        if self.parent_url is not None:
            # URL joining with the parent URL only works if the query
            # of the base URL are removed first.
            # Otherwise the join function thinks the query is part of
            # the file name.
            from .urlbase import url_norm
            # norm base url - can raise UnicodeError from url.idna_encode()
            base_url, is_idn = url_norm(self.base_url, self.encoding)
            urlparts = list(urlparse.urlsplit(base_url))
            # ignore query part for filesystem urls
            urlparts[3] = ''
            self.base_url = urlutil.urlunsplit(urlparts)
        super(FileUrl, self).build_url()
        # ignore query and fragment url parts for filesystem urls
        self.urlparts[3] = self.urlparts[4] = ''
        if self.is_directory() and not self.urlparts[2].endswith('/'):
            self.add_warning(_("Added trailing slash to directory."),
                tag=WARN_FILE_MISSING_SLASH)
            self.urlparts[2] += '/'
        self.url = urlutil.urlunsplit(self.urlparts)
    def add_size_info (self):
        """Get size of file content and modification time from filename path."""
        if self.is_directory():
            # Directory size always differs from the custom index.html
            # that is generated. So return without calculating any size.
            return
        filename = self.get_os_filename()
        self.size = fileutil.get_size(filename)
        self.modified = datetime.utcfromtimestamp(fileutil.get_mtime(filename))
    def check_connection (self):
        """
        Try to open the local file. Under NT systems the case sensitivity
        is checked.
        """
        if (self.parent_url is not None and
            not self.parent_url.startswith(u"file:")):
            # for security reasons do not allow non-file parents to
            # link into the local filesystem
            msg = _("local files are only checked without parent URL or when the parent URL is also a file")
            raise LinkCheckerError(msg)
        if self.is_directory():
            self.set_result(_("directory"))
        else:
            url = fileutil.pathencode(self.url)
            self.url_connection = urlopen(url)
            self.check_case_sensitivity()
    def check_case_sensitivity (self):
        """
        Check if url and windows path name match cases
        else there might be problems when copying such
        files on web servers that are case sensitive.
        """
        if os.name != 'nt':
            return
        path = self.get_os_filename()
        realpath = get_nt_filename(path)
        if path != realpath:
            self.add_warning(_("The URL path %(path)r is not the same as the "
                            "system path %(realpath)r. You should always use "
                            "the system path in URLs.") % \
                            {"path": path, "realpath": realpath},
                            tag=WARN_FILE_SYSTEM_PATH)
    def read_content (self):
        """Return file content, or in case of directories a dummy HTML file
        with links to the files."""
        if self.is_directory():
            data = get_index_html(get_files(self.get_os_filename()))
            if isinstance(data, unicode):
                data = data.encode("iso8859-1", "ignore")
        else:
            data = super(FileUrl, self).read_content()
        return data
    def get_os_filename (self):
        """
        Construct os specific file path out of the file:// URL.
        @return: file name
        @rtype: string
        """
        return get_os_filename(self.urlparts[2])
    def get_temp_filename (self):
        """Get filename for content to parse."""
        return self.get_os_filename()
    def is_directory (self):
        """
        Check if file is a directory.
        @return: True iff file is a directory (and not a symlink)
        @rtype: bool
        """
        filename = self.get_os_filename()
        return os.path.isdir(filename) and not os.path.islink(filename)
    def is_parseable (self):
        """Check if content is parseable for recursion.
        @return: True if content is parseable
        @rtype: bool
        """
        if self.is_directory():
            return True
        # Firefox bookmark files need the sqlite3 module for parsing
        if firefox.has_sqlite and firefox.extension.search(self.url):
            return True
        if self.content_type in self.ContentMimetypes:
            return True
        log.debug(LOG_CHECK, "File with content type %r is not parseable.", self.content_type)
        return False
    def set_content_type (self):
        """Return URL content type, or an empty string if content
        type could not be found."""
        if self.url:
            self.content_type = mimeutil.guess_mimetype(self.url, read=self.get_content)
        else:
            self.content_type = u""
    def get_intern_pattern (self, url=None):
        """Get pattern for intern URL matching.
        @param url: URL to derive the pattern from; defaults to self.url
        @type url: string or None
        @return non-empty regex pattern or None
        @rtype String or None
        """
        if url is None:
            url = self.url
        if not url:
            return None
        if url.startswith('file://'):
            i = url.rindex('/')
            if i > 6:
                # remove last filename to make directory internal
                url = url[:i+1]
        return re.escape(url)
    def add_url (self, url, line=0, column=0, page=0, name=u"", base=None):
        """If a local webroot directory is configured, replace absolute URLs
        with it. After that queue the URL data for checking."""
        webroot = self.aggregate.config["localwebroot"]
        if webroot and url and url.startswith(u"/"):
            url = webroot + url[1:]
            log.debug(LOG_CHECK, "Applied local webroot `%s' to `%s'.", webroot, url)
        super(FileUrl, self).add_url(url, line=line, column=column, page=page, name=name, base=base)
| 10,873 | Python | .py | 258 | 33.25969 | 108 | 0.608807 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,280 | httpurl.py | wummel_linkchecker/linkcheck/checker/httpurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle http links.
"""
import requests
# The validity of SSL certs is ignored to be able
# the check the URL and recurse into it.
# The warning about invalid SSL certs is given to the
# user instead.
import warnings
warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from .. import (log, LOG_CHECK, strformat, mimeutil,
url as urlutil, LinkCheckerError, httputil)
from . import (internpaturl, proxysupport)
from ..HtmlParser import htmlsax
from ..htmlutil import linkparse
# import warnings
from .const import WARN_HTTP_EMPTY_CONTENT
from requests.sessions import REDIRECT_STATI
# assumed HTTP header encoding
HEADER_ENCODING = "iso-8859-1"
HTTP_SCHEMAS = ('http://', 'https://')
# helper alias
unicode_safe = strformat.unicode_safe
class HttpUrl (internpaturl.InternPatternUrl, proxysupport.ProxySupport):
    """
    Url link with http scheme.
    Uses the shared requests session from the aggregate and follows
    redirects manually so each hop can be logged and re-classified.
    """
    def reset (self):
        """
        Initialize HTTP specific variables.
        """
        super(HttpUrl, self).reset()
        # initialize check data
        # server headers
        self.headers = {}
        # (user, password) tuple for HTTP Basic auth, or None
        self.auth = None
        self.ssl_cipher = None
        # peer certificate as dict, filled for https connections
        self.ssl_cert = None
    def allows_robots (self, url):
        """
        Fetch and parse the robots.txt of given url. Checks if LinkChecker
        can get the requested resource content.
        @param url: the url to be requested
        @type url: string
        @return: True if access is granted, otherwise False
        @rtype: bool
        """
        # NOTE(review): the url parameter is unused here; the shared
        # robots.txt cache decides based on self alone.
        return self.aggregate.robots_txt.allows_url(self)
    def content_allows_robots (self):
        """
        Return False if the content of this URL forbids robots to
        search for recursive links.
        """
        # non-HTML content cannot carry a robots meta tag
        if not self.is_html():
            return True
        # construct parser object
        handler = linkparse.MetaRobotsFinder()
        parser = htmlsax.parser(handler)
        handler.parser = parser
        if self.charset:
            parser.encoding = self.charset
        # parse
        try:
            parser.feed(self.get_content())
            parser.flush()
        except linkparse.StopParse as msg:
            # the handler aborts parsing as soon as it has its answer
            log.debug(LOG_CHECK, "Stopped parsing: %s", msg)
            pass
        # break cyclic dependencies
        handler.parser = None
        parser.handler = None
        return handler.follow
    def add_size_info (self):
        """Get size of URL content from HTTP header."""
        # Transfer-Encoding (e.g. chunked) makes Content-Length meaningless
        if self.headers and "Content-Length" in self.headers and \
           "Transfer-Encoding" not in self.headers:
            # Note that content-encoding causes size differences since
            # the content data is always decoded.
            try:
                self.size = int(self.getheader("Content-Length"))
            except (ValueError, OverflowError):
                pass
        else:
            # unknown size
            self.size = -1
    def check_connection (self):
        """
        Check a URL with HTTP protocol.
        Here is an excerpt from RFC 1945 with common response codes:
        The first digit of the Status-Code defines the class of response. The
        last two digits do not have any categorization role. There are 5
        values for the first digit:
          - 1xx: Informational - Not used, but reserved for future use
          - 2xx: Success - The action was successfully received,
            understood, and accepted.
          - 3xx: Redirection - Further action must be taken in order to
            complete the request
          - 4xx: Client Error - The request contains bad syntax or cannot
            be fulfilled
          - 5xx: Server Error - The server failed to fulfill an apparently
            valid request
        """
        self.session = self.aggregate.get_request_session()
        # set the proxy, so a 407 status after this is an error
        self.set_proxy(self.aggregate.config["proxy"].get(self.scheme))
        self.construct_auth()
        # check robots.txt
        if not self.allows_robots(self.url):
            # only a syntax check was possible; do not download content
            self.add_info(_("Access denied by robots.txt, checked only syntax."))
            self.set_result(_("syntax OK"))
            self.do_check_content = False
            return
        # check the http connection
        request = self.build_request()
        self.send_request(request)
        self._add_response_info()
        self.follow_redirections(request)
        self.check_response()
        if self.allows_simple_recursion():
            self.parse_header_links()
    def build_request(self):
        """Build a prepared request object.
        Adds a Referer header when the parent URL is itself HTTP(S),
        and HTTP Basic auth credentials when available."""
        clientheaders = {}
        if (self.parent_url and
            self.parent_url.lower().startswith(HTTP_SCHEMAS)):
            clientheaders["Referer"] = self.parent_url
        kwargs = dict(
            method='GET',
            url=self.url,
            headers=clientheaders,
        )
        if self.auth:
            kwargs['auth'] = self.auth
        log.debug(LOG_CHECK, "Prepare request with %s", kwargs)
        request = requests.Request(**kwargs)
        return self.session.prepare_request(request)
    def send_request(self, request):
        """Send request and store response in self.url_connection."""
        # throttle the number of requests to each host
        self.aggregate.wait_for_host(self.urlparts[1])
        kwargs = self.get_request_kwargs()
        # redirects are resolved manually in follow_redirections()
        kwargs["allow_redirects"] = False
        self._send_request(request, **kwargs)
    def _send_request(self, request, **kwargs):
        """Send GET request."""
        log.debug(LOG_CHECK, "Send request %s with %s", request, kwargs)
        log.debug(LOG_CHECK, "Request headers %s", request.headers)
        self.url_connection = self.session.send(request, **kwargs)
        self.headers = self.url_connection.headers
        self._add_ssl_info()
    def _add_response_info(self):
        """Set info from established HTTP(S) connection:
        charset, content type and content size."""
        self.charset = httputil.get_charset(self.headers)
        self.set_content_type()
        self.add_size_info()
    def _get_ssl_sock(self):
        """Get raw SSL socket of the current https connection."""
        assert self.scheme == u"https", self
        raw_connection = self.url_connection.raw._connection
        if raw_connection.sock is None:
            # sometimes the socket is not yet connected
            # see https://github.com/kennethreitz/requests/issues/1966
            raw_connection.connect()
        return raw_connection.sock
    def _add_ssl_info(self):
        """Add SSL cipher info.
        Stores the peer certificate as a dict in self.ssl_cert, handling
        both the stdlib ssl socket and pyopenssl-backed sockets."""
        if self.scheme == u'https':
            sock = self._get_ssl_sock()
            if hasattr(sock, 'cipher'):
                # standard library ssl.SSLSocket
                self.ssl_cert = sock.getpeercert()
            else:
                # using pyopenssl
                cert = sock.connection.get_peer_certificate()
                self.ssl_cert = httputil.x509_to_dict(cert)
            log.debug(LOG_CHECK, "Got SSL certificate %s", self.ssl_cert)
        else:
            self.ssl_cert = None
    def construct_auth (self):
        """Construct HTTP Basic authentication credentials if there
        is user/password information available. Does not overwrite if
        credentials have already been constructed."""
        if self.auth:
            return
        _user, _password = self.get_user_password()
        if _user is not None and _password is not None:
            self.auth = (_user, _password)
    def set_content_type (self):
        """Return content MIME type or empty string."""
        self.content_type = httputil.get_content_type(self.headers)
    def is_redirect(self):
        """Check if current response is a redirect."""
        return ('location' in self.headers and
                self.url_connection.status_code in REDIRECT_STATI)
    def get_request_kwargs(self):
        """Construct keyword parameters for Session.request() and
        Session.resolve_redirects()."""
        kwargs = dict(stream=True, timeout=self.aggregate.config["timeout"])
        if self.proxy:
            kwargs["proxies"] = {self.proxytype: self.proxy}
        # SSL verification is disabled unless explicitly configured;
        # invalid certs are reported as warnings elsewhere.
        if self.scheme == u"https" and self.aggregate.config["sslverify"]:
            kwargs['verify'] = self.aggregate.config["sslverify"]
        else:
            kwargs['verify'] = False
        return kwargs
    def get_redirects(self, request):
        """Return iterator of redirects for given request."""
        kwargs = self.get_request_kwargs()
        return self.session.resolve_redirects(self.url_connection,
                                              request, **kwargs)
    def follow_redirections(self, request):
        """Follow all redirections of http response.
        Each hop updates self.url/urlparts/headers and re-evaluates
        whether the new URL is extern."""
        log.debug(LOG_CHECK, "follow all redirections")
        if self.is_redirect():
            # run connection plugins for old connection
            self.aggregate.plugin_manager.run_connection_plugins(self)
        response = None
        for response in self.get_redirects(request):
            newurl = response.url
            log.debug(LOG_CHECK, "Redirected to %r", newurl)
            self.aliases.append(newurl)
            # XXX on redirect errors this is not printed
            self.add_info(_("Redirected to `%(url)s'.") % {'url': newurl})
            # Reset extern and recalculate
            self.extern = None
            self.set_extern(newurl)
            self.urlparts = strformat.url_unicode_split(newurl)
            self.build_url_parts()
            self.url_connection = response
            self.headers = response.headers
            self.url = urlutil.urlunsplit(self.urlparts)
            self.scheme = self.urlparts[0].lower()
            self._add_ssl_info()
            self._add_response_info()
            if self.is_redirect():
                # run connection plugins for old connection
                self.aggregate.plugin_manager.run_connection_plugins(self)
    def getheader (self, name, default=None):
        """Get decoded header value.
        @return: decoded header value or default of not found
        @rtype: unicode or type of default
        """
        value = self.headers.get(name)
        if value is None:
            return default
        return unicode_safe(value, encoding=HEADER_ENCODING)
    def check_response (self):
        """Check final result and log it.
        Status >= 400 marks the URL invalid; 204 adds an empty-content
        warning."""
        if self.url_connection.status_code >= 400:
            self.set_result(u"%d %s" % (self.url_connection.status_code, self.url_connection.reason),
                            valid=False)
        else:
            if self.url_connection.status_code == 204:
                # no content
                self.add_warning(self.url_connection.reason,
                                 tag=WARN_HTTP_EMPTY_CONTENT)
            if self.url_connection.status_code >= 200:
                self.set_result(u"%r %s" % (self.url_connection.status_code, self.url_connection.reason))
            else:
                # 1xx responses
                self.set_result(_("OK"))
    def read_content(self):
        """Return data and data size for this URL.
        Can be overridden in subclasses.
        @raise: LinkCheckerError when the download exceeds the
        configured maxfilesizedownload limit."""
        maxbytes = self.aggregate.config["maxfilesizedownload"]
        buf = StringIO()
        for data in self.url_connection.iter_content(chunk_size=self.ReadChunkBytes):
            if buf.tell() + len(data) > maxbytes:
                raise LinkCheckerError(_("File size too large"))
            buf.write(data)
        return buf.getvalue()
    def parse_header_links(self):
        """Parse URLs in HTTP headers Link:.
        Also queues URLs found in Refresh: and Content-Location: headers."""
        for linktype, linkinfo in self.url_connection.links.items():
            url = linkinfo["url"]
            name = u"Link: header %s" % linktype
            self.add_url(url, name=name)
        if 'Refresh' in self.headers:
            from ..htmlutil.linkparse import refresh_re
            value = self.headers['Refresh'].strip()
            mo = refresh_re.match(value)
            if mo:
                url = unicode_safe(mo.group("url"))
                name = u"Refresh: header"
                self.add_url(url, name=name)
        if 'Content-Location' in self.headers:
            url = self.headers['Content-Location'].strip()
            name = u"Content-Location: header"
            self.add_url(url, name=name)
    def is_parseable (self):
        """
        Check if content is parseable for recursion.
        @return: True if content is parseable
        @rtype: bool
        """
        if not self.valid:
            return False
        # some content types must be validated with the page content
        if self.content_type in ("application/xml", "text/xml"):
            rtype = mimeutil.guess_mimetype_read(self.get_content)
            if rtype is not None:
                # XXX side effect
                self.content_type = rtype
        if self.content_type not in self.ContentMimetypes:
            log.debug(LOG_CHECK, "URL with content type %r is not parseable", self.content_type)
            return False
        return True
    def get_robots_txt_url (self):
        """
        Get the according robots.txt URL for this URL.
        @return: robots.txt URL
        @rtype: string
        """
        return "%s://%s/robots.txt" % tuple(self.urlparts[0:2])
| 14,032 | Python | .py | 332 | 32.906627 | 105 | 0.62179 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,281 | dnsurl.py | wummel_linkchecker/linkcheck/checker/dnsurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle for dns: links.
"""
import socket
from . import urlbase
class DnsUrl (urlbase.UrlBase):
    """
    Url link with dns scheme.
    """
    def can_get_content (self):
        """
        dns: URLs never carry any downloadable content.
        @return: False
        @rtype: bool
        """
        return False
    def check_connection(self):
        """Resolve the hostname via TCP getaddrinfo and record the
        resolved addresses as the check result."""
        host = self.urlparts[1]
        records = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)
        args = {'host': host}
        if not records:
            self.set_result(_('%(host)r could not be resolved') % args, valid=False)
        else:
            args['ips'] = [rec[4][0] for rec in records]
            self.set_result(_('%(host)s resolved to IPs %(ips)s') % args, valid=True)
| 1,554 | Python | .py | 42 | 32.214286 | 85 | 0.671096 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,282 | __init__.py | wummel_linkchecker/linkcheck/checker/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Main functions for link checking.
"""
import os
import cgi
import urllib
from .. import strformat, url as urlutil, log, LOG_CHECK
MAX_FILESIZE = 1024*1024*10 # 10MB
def guess_url(url):
    """Guess if URL is a http or ftp URL.
    @param url: the URL to check
    @ptype url: unicode
    @return: url with http:// or ftp:// prepended if it's detected as
      a http respective ftp URL.
    @rtype: unicode
    """
    lowered = url.lower()
    if lowered.startswith("www."):
        # syntactic sugar
        return "http://%s" % url
    if lowered.startswith("ftp."):
        # syntactic sugar
        return "ftp://%s" % url
    return url
def absolute_url (base_url, base_ref, parent_url):
    """
    Search for the absolute url to detect the link type. This does not
    join any url fragments together!
    The candidates are tried in order: link base url, <base> tag url,
    parent document url.
    @param base_url: base url from a link tag
    @type base_url: string or None
    @param base_ref: base url from <base> tag
    @type base_ref: string or None
    @param parent_url: url of parent document
    @type parent_url: string or None
    """
    for candidate in (base_url, base_ref, parent_url):
        if candidate and urlutil.url_is_absolute(candidate):
            return candidate
    return u""
def get_url_from (base_url, recursion_level, aggregate,
                  parent_url=None, base_ref=None, line=0, column=0, page=0,
                  name=u"", parent_content_type=None, extern=None):
    """
    Get url data from given base data.
    Chooses the URL checker class matching the URL scheme and
    instantiates it.
    @param base_url: base url from a link tag
    @type base_url: string or None
    @param recursion_level: current recursion level
    @type recursion_level: number
    @param aggregate: aggregate object
    @type aggregate: aggregate.Consumer
    @param parent_url: parent url
    @type parent_url: string or None
    @param base_ref: base url from <base> tag
    @type base_ref: string or None
    @param line: line number
    @type line: number
    @param column: column number
    @type column: number
    @param page: page number
    @type page: number
    @param name: link name
    @type name: string
    @param extern: (is_extern, is_strict) or None
    @type extern: tuple(int, int) or None
    """
    if base_url is not None:
        base_url = strformat.unicode_safe(base_url)
        # left strip for detection of URL scheme
        base_url_stripped = base_url.lstrip()
    else:
        base_url_stripped = base_url
    if parent_url is not None:
        parent_url = strformat.unicode_safe(parent_url)
    if base_ref is not None:
        base_ref = strformat.unicode_safe(base_ref)
    name = strformat.unicode_safe(name)
    # the absolute URL determines the scheme and thus the checker class
    url = absolute_url(base_url_stripped, base_ref, parent_url).lower()
    if ":" in url:
        scheme = url.split(":", 1)[0].lower()
    else:
        # broken url; avoid None schemes which confuse class detection
        scheme = None
    if not (url or name):
        # use filename as base url, with slash as path separator
        name = base_url.replace("\\", "/")
    allowed_schemes = aggregate.config["allowedschemes"]
    # ignore local PHP files with execution directives
    local_php = (parent_content_type == 'application/x-httpd-php' and
                 '<?' in base_url and '?>' in base_url and scheme == 'file')
    if local_php or (allowed_schemes and scheme not in allowed_schemes):
        klass = ignoreurl.IgnoreUrl
    else:
        # at recursion level zero the URL came from the command line,
        # where a bare path is assumed to be a local file
        assume_local_file = (recursion_level == 0)
        klass = get_urlclass_from(scheme, assume_local_file=assume_local_file)
    log.debug(LOG_CHECK, "%s handles url %s", klass.__name__, base_url)
    return klass(base_url, recursion_level, aggregate,
                 parent_url=parent_url, base_ref=base_ref,
                 line=line, column=column, page=page, name=name, extern=extern)
def get_urlclass_from (scheme, assume_local_file=False):
    """Return checker class for given URL scheme. If the scheme
    cannot be matched and assume_local_file is True, assume a local file.
    """
    # direct scheme-to-class dispatch table
    scheme_map = {
        "http": httpurl.HttpUrl,
        "https": httpurl.HttpUrl,
        "ftp": ftpurl.FtpUrl,
        "file": fileurl.FileUrl,
        "telnet": telneturl.TelnetUrl,
        "mailto": mailtourl.MailtoUrl,
        "nntp": nntpurl.NntpUrl,
        "news": nntpurl.NntpUrl,
        "snews": nntpurl.NntpUrl,
        "dns": dnsurl.DnsUrl,
        "itms-services": itmsservicesurl.ItmsServicesUrl,
    }
    if scheme in scheme_map:
        return scheme_map[scheme]
    if scheme and unknownurl.is_unknown_scheme(scheme):
        return unknownurl.UnknownUrl
    if assume_local_file:
        # scheme-less URLs from the command line are local files
        return fileurl.FileUrl
    return unknownurl.UnknownUrl
def get_index_html (urls):
    """
    Construct artificial index.html from given URLs.
    @param urls: URL strings
    @type urls: iterator of string
    """
    parts = ["<html>", "<body>"]
    for entry in urls:
        name = cgi.escape(entry)
        try:
            href = cgi.escape(urllib.quote(entry))
        except KeyError:
            # Some unicode entries raise KeyError.
            href = name
        parts.append('<a href="%s">%s</a>' % (href, name))
    parts.append("</body>")
    parts.append("</html>")
    return os.linesep.join(parts)
# all the URL classes
from . import (fileurl, unknownurl, ftpurl, httpurl, dnsurl,
mailtourl, telneturl, nntpurl, ignoreurl, itmsservicesurl)
| 6,221 | Python | .py | 162 | 32.611111 | 79 | 0.66435 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,283 | telneturl.py | wummel_linkchecker/linkcheck/checker/telneturl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle telnet: links.
"""
import telnetlib
from . import urlbase
from .. import log, LOG_CHECK
def encode(s, encoding="iso-8859-1", errors="ignore"):
    """Encode telnet data like username and password to bytes.

    Characters not representable in the target encoding are dropped
    by the default error handler."""
    return s.encode(encoding, errors)
class TelnetUrl (urlbase.UrlBase):
    """
    Url link with telnet scheme.
    """
    def build_url (self):
        """
        Call super.build_url(), set default telnet port and initialize
        the login credentials.
        """
        super(TelnetUrl, self).build_url()
        # default port
        if self.port is None:
            self.port = 23
        # set user/pass
        self.user, self.password = self.get_user_password()
    def local_check (self):
        """
        Warn about empty host names. Else call super.local_check().
        """
        if not self.host:
            self.set_result(_("Host is empty"), valid=False)
            return
        super(TelnetUrl, self).local_check()
    def check_connection (self):
        """
        Open a telnet connection and try to login. Expected login
        label is "login: ", expected password label is "Password: ".
        """
        self.url_connection = telnetlib.Telnet(timeout=self.aggregate.config["timeout"])
        if log.is_debug(LOG_CHECK):
            self.url_connection.set_debuglevel(1)
        self.url_connection.open(self.host, self.port)
        if self.user:
            # wait up to 10 seconds for the login prompt
            self.url_connection.read_until("login: ", 10)
            self.url_connection.write(encode(self.user)+"\n")
            # password is only sent when a user was given
            if self.password:
                self.url_connection.read_until("Password: ", 10)
                self.url_connection.write(encode(self.password)+"\n")
        # XXX how to tell if we are logged in??
        self.url_connection.write("exit\n")
    def can_get_content (self):
        """
        Telnet URLs have no content.
        @return: False
        @rtype: bool
        """
        return False
| 2,740 | Python | .py | 72 | 31.361111 | 88 | 0.647856 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,284 | ftpurl.py | wummel_linkchecker/linkcheck/checker/ftpurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle FTP links.
"""
import ftplib
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from .. import log, LOG_CHECK, LinkCheckerError, mimeutil
from . import proxysupport, httpurl, internpaturl, get_index_html
from .const import WARN_FTP_MISSING_SLASH
class FtpUrl (internpaturl.InternPatternUrl, proxysupport.ProxySupport):
    """
    Url link with ftp scheme.
    Checks FTP URLs by logging in, changing into the parent directory
    and listing the target file; directories are rendered as an
    artificial HTML index page for recursion.
    """
    def reset (self):
        """
        Initialize FTP url data.
        """
        super(FtpUrl, self).reset()
        # list of files for recursion
        self.files = []
        # last part of URL filename
        self.filename = None
        # default per RFC 2640; switched to utf-8 when FEAT announces it
        self.filename_encoding = 'iso-8859-1'
    def check_connection (self):
        """
        In case of proxy, delegate to HttpUrl. Else check in this
        order: login, changing directory, list the file.
        """
        # proxy support (we support only http)
        self.set_proxy(self.aggregate.config["proxy"].get(self.scheme))
        if self.proxy:
            # using a (HTTP) proxy: delegate the whole check to HttpUrl
            http = httpurl.HttpUrl(self.base_url,
                  self.recursion_level,
                  self.aggregate,
                  parent_url=self.parent_url,
                  base_ref=self.base_ref,
                  line=self.line,
                  column=self.column,
                  name=self.name)
            http.build_url()
            return http.check()
        self.login()
        self.negotiate_encoding()
        self.filename = self.cwd()
        self.listfile()
        self.files = []
        return None
    def login (self):
        """Log into ftp server and check the welcome message.
        @raise: LinkCheckerError on missing welcome or closed connection
        """
        self.url_connection = ftplib.FTP(timeout=self.aggregate.config["timeout"])
        if log.is_debug(LOG_CHECK):
            self.url_connection.set_debuglevel(1)
        try:
            self.url_connection.connect(self.host, self.port)
            _user, _password = self.get_user_password()
            if _user is None:
                # anonymous login
                self.url_connection.login()
            elif _password is None:
                self.url_connection.login(_user)
            else:
                self.url_connection.login(_user, _password)
            info = self.url_connection.getwelcome()
            if info:
                # note that the info may change every time a user logs in,
                # so don't add it to the url_data info.
                log.debug(LOG_CHECK, "FTP info %s", info)
            else:
                raise LinkCheckerError(_("Got no answer from FTP server"))
        except EOFError as msg:
            # BUGFIX: the %(msg)s template requires a mapping on the right
            # hand side; the old "% str(msg)" raised TypeError instead of
            # reporting the connection error.
            raise LinkCheckerError(
                _("Remote host has closed connection: %(msg)s") % {"msg": str(msg)})
    def negotiate_encoding (self):
        """Check if server can handle UTF-8 encoded filenames.
        See also RFC 2640."""
        try:
            features = self.url_connection.sendcmd("FEAT")
        except ftplib.error_perm as msg:
            # server does not support FEAT; keep the default encoding
            log.debug(LOG_CHECK, "Ignoring error when getting FTP features: %s" % msg)
        else:
            log.debug(LOG_CHECK, "FTP features %s", features)
            if " UTF-8" in features.splitlines():
                self.filename_encoding = "utf-8"
    def cwd (self):
        """
        Change to URL parent directory. Return filename of last path
        component.
        @return: filename of the last path component
        @rtype: string
        """
        path = self.urlparts[2].encode(self.filename_encoding, 'replace')
        dirname = path.strip('/')
        dirs = dirname.split('/')
        filename = dirs.pop()
        # walk down from the root one component at a time
        self.url_connection.cwd('/')
        for d in dirs:
            self.url_connection.cwd(d)
        return filename
    def listfile (self):
        """
        See if filename is in the current FTP directory.
        @raise: ftplib.error_perm if the file is not found
        """
        if not self.filename:
            return
        files = self.get_files()
        log.debug(LOG_CHECK, "FTP files %s", str(files))
        if self.filename in files:
            # file found
            return
        # it could be a directory if the trailing slash was forgotten
        if "%s/" % self.filename in files:
            if not self.url.endswith('/'):
                self.add_warning(
                         _("Missing trailing directory slash in ftp url."),
                         tag=WARN_FTP_MISSING_SLASH)
                self.url += '/'
            return
        raise ftplib.error_perm("550 File not found")
    def get_files (self):
        """Get list of filenames in directory. Subdirectories have an
        ending slash."""
        files = []
        def add_entry (line):
            """Parse list line and add the entry it points to to the file
            list."""
            log.debug(LOG_CHECK, "Directory entry %r", line)
            from ..ftpparse import ftpparse
            fpo = ftpparse(line)
            if fpo is not None and fpo["name"]:
                name = fpo["name"]
                if fpo["trycwd"]:
                    # entry is a directory; mark with trailing slash
                    name += "/"
                if fpo["trycwd"] or fpo["tryretr"]:
                    files.append(name)
        self.url_connection.dir(add_entry)
        return files
    def is_parseable (self):
        """See if URL target is parseable for recursion.
        Directories are always parseable (artificial index page)."""
        if self.is_directory():
            return True
        if self.content_type in self.ContentMimetypes:
            return True
        log.debug(LOG_CHECK, "URL with content type %r is not parseable.", self.content_type)
        return False
    def is_directory (self):
        """See if URL target is a directory."""
        # either the path is empty, or ends with a slash
        path = self.urlparts[2]
        return (not path) or path.endswith('/')
    def set_content_type (self):
        """Set URL content type, or an empty string if content
        type could not be found."""
        self.content_type = mimeutil.guess_mimetype(self.url, read=self.get_content)
    def read_content (self):
        """Return URL target content, or in case of directories a dummy HTML
        file with links to the files.
        @raise: LinkCheckerError if the file is larger than the
        configured maximum size."""
        if self.is_directory():
            self.url_connection.cwd(self.filename)
            self.files = self.get_files()
            # XXX limit number of files?
            data = get_index_html(self.files)
        else:
            # download file in BINARY mode
            ftpcmd = "RETR %s" % self.filename
            buf = StringIO()
            def stor_data (s):
                """Helper method storing given data"""
                # limit the download size
                # NOTE(review): self.max_size is presumably set by the base
                # class from the configuration -- confirm against urlbase.
                if (buf.tell() + len(s)) > self.max_size:
                    raise LinkCheckerError(_("FTP file size too large"))
                buf.write(s)
            self.url_connection.retrbinary(ftpcmd, stor_data)
            data = buf.getvalue()
            buf.close()
        return data
    def close_connection (self):
        """Release the open connection from the connection pool."""
        if self.url_connection is not None:
            try:
                self.url_connection.quit()
            except Exception:
                # best effort: ignore errors when closing the connection
                pass
            self.url_connection = None
| 7,943 | Python | .py | 202 | 29.143564 | 93 | 0.581359 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,285 | mailtourl.py | wummel_linkchecker/linkcheck/checker/mailtourl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle for mailto: links.
"""
import re
import urllib
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
from email._parseaddr import AddressList
from . import urlbase
from .. import log, LOG_CHECK, strformat, url as urlutil
from dns import resolver
from ..network import iputil
from .const import WARN_MAIL_NO_MX_HOST
def getaddresses (addr):
    """Return list of email addresses from given field value."""
    if not addr:
        return []
    parsed = [mail for name, mail in AddressList(addr).addresslist if mail]
    # fall back to the raw string when nothing could be parsed
    return parsed if parsed else [addr]
def is_quoted (addr):
    """Return True iff mail address string is quoted."""
    return addr[:1] == u'"' and addr[-1:] == u'"'
def is_literal (domain):
    """Return True iff domain string is a literal."""
    return domain[:1] == u'[' and domain[-1:] == u']'
# substitution removing backslash-escaped characters from a local part
_remove_quoted = re.compile(ur'\\.').sub
# matches a bare (unescaped) double quote or backslash
_quotes = re.compile(ur'["\\]')
def is_missing_quote (addr):
    """Return True iff mail address is not correctly quoted.
    The surrounding quotes are stripped and escaped characters removed
    before searching for stray quotes or backslashes."""
    return _quotes.match(_remove_quoted(u"", addr[1:-1]))
# list of CGI keys to search for email addresses
EMAIL_CGI_ADDRESS = ("to", "cc", "bcc")
EMAIL_CGI_SUBJECT = "subject"
class MailtoUrl (urlbase.UrlBase):
    """
    Url link with mailto scheme.
    """

    def build_url (self):
        """Call super.build_url(), extract list of mail addresses from URL,
        and check their syntax.
        """
        super(MailtoUrl, self).build_url()
        self.addresses = set()
        self.subject = None
        self.parse_addresses()
        if self.addresses:
            for addr in sorted(self.addresses):
                self.check_email_syntax(addr)
                if not self.valid:
                    break
        elif not self.subject:
            # neither addresses nor a subject: nothing to check
            self.add_warning(_("No mail addresses or email subject found in `%(url)s'.") %
                             {"url": self.url})

    def parse_addresses (self):
        """Parse all mail addresses out of the URL target. Also parses
        optional CGI headers like "?to=foo@example.org".
        Stores parsed addresses in the self.addresses set.
        """
        # cut off leading mailto: and unquote
        url = urllib.unquote(self.base_url[7:])
        # Scan for the first '?' that starts the CGI header part, using a
        # small state machine that skips '?' characters occurring inside
        # quoted strings, angle-bracketed addresses or after a backslash.
        mode = 0 # 0=default, 1=quote, 2=esc
        quote = None
        i = 0
        for i, c in enumerate(url):
            if mode == 0:
                if c == '?':
                    break
                elif c in '<"':
                    quote = c
                    mode = 1
                elif c == '\\':
                    mode = 2
            elif mode == 1:
                if c == '"' and quote == '"':
                    mode = 0
                elif c == '>' and quote == '<':
                    mode = 0
            elif mode == 2:
                # skip the escaped character and return to default mode
                mode = 0
        if i < (len(url) - 1):
            # found a '?': addresses come before it, CGI params after it
            self.addresses.update(getaddresses(url[:i]))
            try:
                headers = urlparse.parse_qs(url[(i+1):], strict_parsing=True)
                for key, vals in headers.items():
                    if key.lower() in EMAIL_CGI_ADDRESS:
                        # Only the first header value is added
                        self.addresses.update(getaddresses(urllib.unquote(vals[0])))
                    if key.lower() == EMAIL_CGI_SUBJECT:
                        self.subject = vals[0]
            except ValueError as err:
                self.add_warning(_("Error parsing CGI values: %s") % str(err))
        else:
            # no CGI part, the whole target is a list of addresses
            self.addresses.update(getaddresses(url))
        log.debug(LOG_CHECK, "addresses: %s", self.addresses)

    def check_email_syntax (self, mail):
        """Check email syntax. The relevant RFCs:

        - How to check names (memo):
          http://tools.ietf.org/html/rfc3696
        - Email address syntax
          http://tools.ietf.org/html/rfc2822
        - SMTP protocol
          http://tools.ietf.org/html/rfc5321#section-4.1.3
        - IPv6
          http://tools.ietf.org/html/rfc4291#section-2.2
        - Host syntax
          http://tools.ietf.org/html/rfc1123#section-2

        Sets an invalid result on the first violation found.
        """
        # length checks

        # restrict email length to 256 characters
        # http://www.rfc-editor.org/errata_search.php?eid=1003
        if len(mail) > 256:
            self.set_result(_("Mail address `%(addr)s' too long. Allowed 256 chars, was %(length)d chars.") %
                {"addr": mail, "length": len(mail)}, valid=False, overwrite=False)
            return
        if "@" not in mail:
            self.set_result(_("Missing `@' in mail address `%(addr)s'.") %
                {"addr": mail}, valid=False, overwrite=False)
            return
        # note: be sure to use rsplit since "@" can occur in local part
        local, domain = mail.rsplit("@", 1)
        if not local:
            self.set_result(_("Missing local part of mail address `%(addr)s'.") %
                {"addr": mail}, valid=False, overwrite=False)
            return
        if not domain:
            self.set_result(_("Missing domain part of mail address `%(addr)s'.") %
                {"addr": mail}, valid=False, overwrite=False)
            return
        if len(local) > 64:
            self.set_result(_("Local part of mail address `%(addr)s' too long. Allowed 64 chars, was %(length)d chars.") %
                {"addr": mail, "length": len(local)}, valid=False, overwrite=False)
            return
        if len(domain) > 255:
            # report the domain length, not the local part length
            self.set_result(_("Domain part of mail address `%(addr)s' too long. Allowed 255 chars, was %(length)d chars.") %
                {"addr": mail, "length": len(domain)}, valid=False, overwrite=False)
            return

        # local part syntax check

        # Rules taken from http://tools.ietf.org/html/rfc3696#section-3
        if is_quoted(local):
            if is_missing_quote(local):
                self.set_result(_("Unquoted double quote or backslash in mail address `%(addr)s'.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
        else:
            if local.startswith(u"."):
                self.set_result(_("Local part of mail address `%(addr)s' may not start with a dot.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
            if local.endswith(u"."):
                self.set_result(_("Local part of mail address `%(addr)s' may not end with a dot.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
            if u".." in local:
                self.set_result(_("Local part of mail address `%(addr)s' may not contain two dots.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
            for char in u'@ \\",[]':
                if char in local.replace(u"\\%s" % char, u""):
                    self.set_result(_("Local part of mail address `%(addr)s' contains unquoted character `%(char)s.") %
                        {"addr": mail, "char": char}, valid=False, overwrite=False)
                    return

        # domain part syntax check

        if is_literal(domain):
            # it's an IP address enclosed in brackets
            ip = domain[1:-1]
            if ip.startswith(u"IPv6:"):
                ip = ip[5:]
            if not iputil.is_valid_ip(ip):
                self.set_result(_("Domain part of mail address `%(addr)s' has invalid IP.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
        else:
            # it's a domain name
            if not urlutil.is_safe_domain(domain):
                self.set_result(_("Invalid domain part of mail address `%(addr)s'.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return
            if domain.endswith(".") or domain.split(".")[-1].isdigit():
                self.set_result(_("Invalid top level domain part of mail address `%(addr)s'.") %
                    {"addr": mail}, valid=False, overwrite=False)
                return

    def check_connection (self):
        """
        Verify a list of email addresses. If one address fails,
        the whole list will fail.

        For each mail address the MX DNS records are found.
        If no MX records are found, print a warning and try
        to look for A DNS records. If no A records are found either
        print an error.
        """
        for mail in sorted(self.addresses):
            self.check_smtp_domain(mail)
            if not self.valid:
                break

    def check_smtp_domain (self, mail):
        """
        Check a single mail address by resolving its mail host.
        """
        from dns.exception import DNSException
        log.debug(LOG_CHECK, "checking mail address %r", mail)
        mail = strformat.ascii_safe(mail)
        username, domain = mail.rsplit('@', 1)
        log.debug(LOG_CHECK, "looking up MX mailhost %r", domain)
        try:
            answers = resolver.query(domain, 'MX')
        except DNSException:
            answers = []
        if len(answers) == 0:
            self.add_warning(_("No MX mail host for %(domain)s found.") %
                            {'domain': domain},
                             tag=WARN_MAIL_NO_MX_HOST)
            # fall back to an A record lookup for the domain itself
            try:
                answers = resolver.query(domain, 'A')
            except DNSException:
                answers = []
            if len(answers) == 0:
                self.set_result(_("No host for %(domain)s found.") %
                                 {'domain': domain}, valid=False,
                                overwrite=True)
                return
            # set preference to zero
            mxdata = [(0, rdata.to_text(omit_final_dot=True))
                      for rdata in answers]
        else:
            from dns.rdtypes.mxbase import MXBase
            mxdata = [(rdata.preference,
                       rdata.exchange.to_text(omit_final_dot=True))
                       for rdata in answers if isinstance(rdata, MXBase)]
            if not mxdata:
                self.set_result(
                  _("Got invalid DNS answer %(answer)s for %(domain)s.") %
                   {'answer': answers, 'domain': domain}, valid=False,
                   overwrite=True)
                return
            # sort according to preference (lower preference means this
            # host should be preferred)
            mxdata.sort()
        # debug output; log the number of hosts actually collected
        log.debug(LOG_CHECK, "found %d MX mailhosts:", len(mxdata))
        for preference, host in mxdata:
            log.debug(LOG_CHECK, "MX host %r, preference %d", host, preference)
        self.set_result(_("Valid mail address syntax"))

    def set_cache_url(self):
        """
        The cache url is a comma separated list of emails.
        """
        emails = u",".join(sorted(self.addresses))
        self.cache_url = u"%s:%s" % (self.scheme, emails)

    def can_get_content (self):
        """
        mailto: URLs do not have any content

        @return: False
        @rtype: bool
        """
        return False
| 12,040 | Python | .py | 281 | 31.879004 | 126 | 0.55762 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,286 | nntpurl.py | wummel_linkchecker/linkcheck/checker/nntpurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle nntp: and news: links.
"""
import re
import time
import nntplib
import random
from . import urlbase
from .. import log, LinkCheckerError, LOG_CHECK
from .const import WARN_NNTP_NO_SERVER, WARN_NNTP_NO_NEWSGROUP
# seed the module RNG used for the randomized retry wait time in NntpUrl.wait()
random.seed()
class NntpUrl (urlbase.UrlBase):
    """
    Url link with NNTP scheme.
    """

    def check_connection (self):
        """
        Connect to NNTP server and try to request the URL article
        resource (if specified).
        """
        nntpserver = self.host or self.aggregate.config["nntpserver"]
        if not nntpserver:
            self.add_warning(
                  _("No NNTP server was specified, skipping this URL."),
                  tag=WARN_NNTP_NO_SERVER)
            return
        nntp = self._connect_nntp(nntpserver)
        group = self.urlparts[2]
        # strip leading slashes from the path to get the group or message id
        while group[:1] == '/':
            group = group[1:]
        if '@' in group:
            # a message id: request article info (resp, number mid)
            number = nntp.stat("<"+group+">")[1]
            self.add_info(_('Article number %(num)s found.') % {"num": number})
        else:
            # split off trailing article span
            group = group.split('/', 1)[0]
            if group:
                # request group info (resp, count, first, last, name)
                name = nntp.group(group)[4]
                self.add_info(_("News group %(name)s found.") % {"name": name})
            else:
                # group name is the empty string
                self.add_warning(_("No newsgroup specified in NNTP URL."),
                                 tag=WARN_NNTP_NO_NEWSGROUP)

    def _connect_nntp (self, nntpserver):
        """
        Connect to the NNTP server, retrying once if the server is busy.
        This is done only once per checking task. Also, the newly
        introduced error codes 504 and 505 (both indicating "Too busy,
        retry later") are caught.

        @raise: LinkCheckerError if no connection could be established
        """
        tries = 0
        nntp = None
        # stop retrying as soon as a connection has been established;
        # continuing the loop would open (and leak) a second connection
        while nntp is None and tries < 2:
            tries += 1
            try:
                nntp = nntplib.NNTP(nntpserver, usenetrc=False)
            except nntplib.NNTPTemporaryError:
                self.wait()
            except nntplib.NNTPPermanentError as msg:
                if re.compile("^50[45]").search(str(msg)):
                    # "too busy" codes are treated as temporary
                    self.wait()
                else:
                    raise
        if nntp is None:
            raise LinkCheckerError(
                _("NNTP server too busy; tried more than %d times.") % tries)
        if log.is_debug(LOG_CHECK):
            nntp.set_debuglevel(1)
        self.add_info(nntp.getwelcome())
        return nntp

    def wait (self):
        """Wait some time before trying to connect again."""
        time.sleep(random.randrange(10, 30))

    def can_get_content (self):
        """
        NNTP urls have no content.

        @return: False
        @rtype: bool
        """
        return False
| 3,617 | Python | .py | 97 | 28.618557 | 79 | 0.595557 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,287 | itmsservicesurl.py | wummel_linkchecker/linkcheck/checker/itmsservicesurl.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle itms-services URLs.
"""
from . import urlbase
from .. import log, LOG_CHECK
class ItmsServicesUrl(urlbase.UrlBase):
    """Apple iOS application download URLs (itms-services scheme)."""

    def check_syntax(self):
        """Check syntax and additionally require the url CGI parameter
        which holds the manifest location."""
        super(ItmsServicesUrl, self).check_syntax()
        # urlparts[3] is the query part of the URL
        if u"url=" not in self.urlparts[3]:
            self.set_result(_("Missing required url parameter"), valid=False)

    def local_check(self):
        """Disable connection and content checks; only log the check."""
        log.debug(LOG_CHECK, "Checking %s", unicode(self))

    def check_content(self):
        """Allow recursion to check the url CGI param."""
        return True

    def is_parseable(self):
        """This URL is parseable."""
        return True
| 1,537 | Python | .py | 38 | 36.315789 | 77 | 0.707105 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,288 | urlbase.py | wummel_linkchecker/linkcheck/checker/urlbase.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Base URL handler.
"""
import sys
import os
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
try:
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
import urllib
import time
import errno
import socket
import select
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from . import absolute_url, get_url_from
from .. import (log, LOG_CHECK,
strformat, LinkCheckerError, url as urlutil, trace, get_link_pat)
from ..network import iputil
from .const import (WARN_URL_EFFECTIVE_URL,
WARN_URL_ERROR_GETTING_CONTENT, WARN_URL_OBFUSCATED_IP,
WARN_URL_CONTENT_SIZE_ZERO, WARN_URL_CONTENT_SIZE_TOO_LARGE,
WARN_URL_WHITESPACE, URL_MAX_LENGTH, WARN_URL_TOO_LONG,
ExcList, ExcSyntaxList, ExcNoCacheList)
# helper alias for converting arbitrary values to a unicode string
unicode_safe = strformat.unicode_safe

# schemes that are invalid with an empty hostname
scheme_requires_host = ("ftp", "http", "telnet")
def urljoin (parent, url):
    """
    Join parent and url when url is relative; an already absolute url
    is returned unchanged.

    @return joined url
    """
    if not urlutil.url_is_absolute(url):
        url = urlparse.urljoin(parent, url)
    return url
def url_norm (url, encoding=None):
    """Wrapper for url.url_norm() to convert UnicodeError in
    LinkCheckerError."""
    try:
        return urlutil.url_norm(url, encoding=encoding)
    except UnicodeError:
        value = sys.exc_info()[1]
        msg = _("URL has unparsable domain name: %(name)s") % {"name": value}
        raise LinkCheckerError(msg)
class UrlBase (object):
    """An URL with additional information like validity etc."""

    # file types that can be parsed recursively,
    # mapping MIME content type to the internal parser name
    ContentMimetypes = {
        "text/html": "html",
        "application/xhtml+xml": "html",
        # Include PHP file which helps when checking local .php files.
        # It does not harm other URL schemes like HTTP since HTTP servers
        # should not send this content type. They send text/html instead.
        "application/x-httpd-php": "html",
        "text/css": "css",
        "application/x-shockwave-flash": "swf",
        "application/msword": "word",
        "text/plain+linkchecker": "text",
        "text/plain+opera": "opera",
        "text/plain+chromium": "chromium",
        "application/x-plist+safari": "safari",
        "text/vnd.wap.wml": "wml",
        "application/xml+sitemap": "sitemap",
        "application/xml+sitemapindex": "sitemapindex",
        "application/pdf": "pdf",
        "application/x-pdf": "pdf",
    }

    # Read in 16kb chunks
    ReadChunkBytes = 1024*16
    def __init__ (self, base_url, recursion_level, aggregate,
                  parent_url=None, base_ref=None, line=-1, column=-1, page=-1,
                  name=u"", url_encoding=None, extern=None):
        """
        Initialize check data, and store given variables.

        @param base_url: unquoted and possibly unnormed url
        @param recursion_level: on what check level lies the base url
        @param aggregate: aggregate instance
        @param parent_url: quoted and normed url of parent or None
        @param base_ref: quoted and normed url of <base href=""> or None
        @param line: line number of url in parent content
        @param column: column number of url in parent content
        @param page: page number of url in parent content
        @param name: name of url or empty
        @param url_encoding: encoding of URL or None
        @param extern: None or (is_extern, is_strict)
        """
        self.reset()
        self.init(base_ref, base_url, parent_url, recursion_level,
                  aggregate, line, column, page, name, url_encoding, extern)
        # may already set a (negative) result on syntax errors
        self.check_syntax()
        # a level-zero URL is a start URL; its domain defines what is intern
        if recursion_level == 0:
            self.add_intern_pattern()
        self.set_extern(self.url)
        if self.extern[0] and self.extern[1]:
            # strict extern URLs get only a syntax check
            self.add_info(_("The URL is outside of the domain "
                           "filter, checked only syntax."))
            if not self.has_result:
                self.set_result(_("filtered"))
    def init (self, base_ref, base_url, parent_url, recursion_level,
              aggregate, line, column, page, name, url_encoding, extern):
        """
        Initialize internal data; see __init__ for parameter semantics.
        """
        self.base_ref = base_ref
        if self.base_ref is not None:
            assert isinstance(self.base_ref, unicode), repr(self.base_ref)
        # strip surrounding whitespace; a warning is added below if any was stripped
        self.base_url = base_url.strip() if base_url else base_url
        if self.base_url is not None:
            assert isinstance(self.base_url, unicode), repr(self.base_url)
        self.parent_url = parent_url
        if self.parent_url is not None:
            assert isinstance(self.parent_url, unicode), repr(self.parent_url)
        self.recursion_level = recursion_level
        self.aggregate = aggregate
        self.line = line
        self.column = column
        self.page = page
        self.name = name
        assert isinstance(self.name, unicode), repr(self.name)
        self.encoding = url_encoding
        self.charset = None
        self.extern = extern
        if self.base_ref:
            assert not urlutil.url_needs_quoting(self.base_ref), \
                   "unquoted base reference URL %r" % self.base_ref
        if self.parent_url:
            assert not urlutil.url_needs_quoting(self.parent_url), \
                   "unquoted parent URL %r" % self.parent_url
        url = absolute_url(self.base_url, base_ref, parent_url)
        # assume file link if no scheme is found
        self.scheme = url.split(":", 1)[0].lower() or "file"
        # compare against the unstripped original to detect whitespace
        if self.base_url != base_url:
            self.add_warning(_("Leading or trailing whitespace in URL `%(url)s'.") %
                             {"url": base_url}, tag=WARN_URL_WHITESPACE)
    def reset (self):
        """
        Reset all variables to default values.
        """
        # self.url is constructed by self.build_url() out of base_url
        # and (base_ref or parent) as absolute and normed url.
        # This the real url we use when checking so it also referred to
        # as 'real url'
        self.url = None
        # a splitted version of url for convenience
        self.urlparts = None
        # the scheme, host, port and anchor part of url
        self.scheme = self.host = self.port = self.anchor = None
        # the result message string and flag
        self.result = u""
        self.has_result = False
        # valid or not
        self.valid = True
        # list of warnings (without duplicates); each entry is a (tag, message) tuple
        self.warnings = []
        # list of infos
        self.info = []
        # content size in bytes; -1 means unknown
        self.size = -1
        # last modification time of content in HTTP-date format as specified in RFC2616 chapter 3.3.1
        self.modified = None
        # download time in seconds; -1 means not downloaded
        self.dltime = -1
        # check time
        self.checktime = 0
        # connection object
        self.url_connection = None
        # data of url content,  (data == None) means no data is available
        self.data = None
        # cache url is set by build_url() calling set_cache_url()
        self.cache_url = None
        # extern flags (is_extern, is_strict)
        self.extern = None
        # flag if the result should be cached
        self.caching = True
        # title is either the URL or parsed from content
        self.title = None
        # flag if content should be checked or not
        self.do_check_content = True
        # MIME content type
        self.content_type = u""
        # URLs seen through redirections
        self.aliases = []
def set_result (self, msg, valid=True, overwrite=False):
"""
Set result string and validity.
"""
if self.has_result and not overwrite:
log.warn(LOG_CHECK,
"Double result %r (previous %r) for %s", msg, self.result, self)
else:
self.has_result = True
if not isinstance(msg, unicode):
log.warn(LOG_CHECK, "Non-unicode result for %s: %r", self, msg)
elif not msg:
log.warn(LOG_CHECK, "Empty result for %s", self)
self.result = msg
self.valid = valid
# free content data
self.data = None
def get_title (self):
"""Return title of page the URL refers to.
This is per default the filename or the URL."""
if self.title is None:
url = u""
if self.base_url:
url = self.base_url
elif self.url:
url = self.url
self.title = url
if "/" in url:
title = url.rsplit("/", 1)[1]
if title:
self.title = title
return self.title
def is_parseable (self):
"""
Return True iff content of this url is parseable.
"""
return False
def is_html (self):
"""Return True iff content of this url is HTML formatted."""
return self._is_ctype("html")
def is_css (self):
"""Return True iff content of this url is CSS stylesheet."""
return self._is_ctype("css")
def _is_ctype(self, ctype):
"""Return True iff content is valid and of the given type."""
if not self.valid:
return False
mime = self.content_type
return self.ContentMimetypes.get(mime) == ctype
def is_http (self):
"""Return True for http:// or https:// URLs."""
return self.scheme in ("http", "https")
def is_file (self):
"""Return True for file:// URLs."""
return self.scheme == "file"
def is_directory(self):
"""Return True if current URL represents a directory."""
return False
def is_local(self):
"""Return True for local (ie. file://) URLs."""
return self.is_file()
def add_warning (self, s, tag=None):
"""
Add a warning string.
"""
item = (tag, s)
if item not in self.warnings and \
tag not in self.aggregate.config["ignorewarnings"]:
self.warnings.append(item)
def add_info (self, s):
"""
Add an info string.
"""
if s not in self.info:
self.info.append(s)
def set_cache_url (self):
"""Set the URL to be used for caching."""
# remove anchor from cached target url since we assume
# URLs with different anchors to have the same content
self.cache_url = urlutil.urlunsplit(self.urlparts[:4]+[u''])
if self.cache_url is not None:
assert isinstance(self.cache_url, unicode), repr(self.cache_url)
    def check_syntax (self):
        """
        Called before self.check(), this function inspects the
        url syntax. Success enables further checking, failure
        immediately logs this url. Syntax checks must not
        use any network resources.
        """
        log.debug(LOG_CHECK, "checking syntax")
        if self.base_url is None:
            self.base_url = u""
        if not (self.base_url or self.parent_url):
            self.set_result(_("URL is empty"), valid=False)
            return
        try:
            self.build_url()
            self.check_url_warnings()
        # ExcSyntaxList is a list of exception classes, so convert it
        # into a tuple to be usable in an except clause
        except tuple(ExcSyntaxList) as msg:
            self.set_result(unicode_safe(msg), valid=False)
        else:
            # only successfully built URLs get a cache URL
            self.set_cache_url()
def check_url_warnings(self):
"""Check URL name and length."""
effectiveurl = urlutil.urlunsplit(self.urlparts)
if self.url != effectiveurl:
self.add_warning(_("Effective URL %(url)r.") %
{"url": effectiveurl},
tag=WARN_URL_EFFECTIVE_URL)
self.url = effectiveurl
if len(self.url) > URL_MAX_LENGTH and self.scheme != u"data":
args = dict(len=len(self.url), max=URL_MAX_LENGTH)
self.add_warning(_("URL length %(len)d is longer than %(max)d.") % args, tag=WARN_URL_TOO_LONG)
    def build_url (self):
        """
        Construct self.url and self.urlparts out of the given base
        url information self.base_url, self.parent_url and self.base_ref.
        """
        # norm base url - can raise UnicodeError from url.idna_encode()
        base_url, is_idn = url_norm(self.base_url, self.encoding)
        # make url absolute
        if self.base_ref:
            # use base reference as parent url
            if ":" not in self.base_ref:
                # some websites have a relative base reference
                self.base_ref = urljoin(self.parent_url, self.base_ref)
            self.url = urljoin(self.base_ref, base_url)
        elif self.parent_url:
            # strip the parent url anchor (the fragment part)
            urlparts = list(urlparse.urlsplit(self.parent_url))
            urlparts[4] = ""
            parent_url = urlutil.urlunsplit(urlparts)
            self.url = urljoin(parent_url, base_url)
        else:
            self.url = base_url
        # urljoin can unnorm the url path, so norm it again
        urlparts = list(urlparse.urlsplit(self.url))
        if urlparts[2]:
            # remove redundant "." and ".." path segments
            urlparts[2] = urlutil.collapse_segments(urlparts[2])
        self.url = urlutil.urlunsplit(urlparts)
        # split into (modifiable) list
        self.urlparts = strformat.url_unicode_split(self.url)
        # sets userinfo, host, port and anchor; may normalize the netloc
        self.build_url_parts()
        # and unsplit again
        self.url = urlutil.urlunsplit(self.urlparts)
    def build_url_parts (self):
        """Set userinfo, host, port and anchor from self.urlparts.
        Also checks for obfuscated IP addresses.
        """
        # check userinfo@host:port syntax
        self.userinfo, host = urllib.splituser(self.urlparts[1])
        # fall back to the default port of the scheme (0 if unknown)
        port = urlutil.default_ports.get(self.scheme, 0)
        # urlutil.splitport returns port None for an unparsable port value
        host, port = urlutil.splitport(host, port=port)
        if port is None:
            raise LinkCheckerError(_("URL host %(host)r has invalid port") %
                                {"host": host})
        self.port = port
        # set host lowercase
        self.host = host.lower()
        if self.scheme in scheme_requires_host:
            if not self.host:
                raise LinkCheckerError(_("URL has empty hostname"))
            self.check_obfuscated_ip()
        # rebuild the netloc, dropping an explicit default port
        if not self.port or self.port == urlutil.default_ports.get(self.scheme):
            host = self.host
        else:
            host = "%s:%d" % (self.host, self.port)
        if self.userinfo:
            self.urlparts[1] = "%s@%s" % (self.userinfo, host)
        else:
            self.urlparts[1] = host
        # safe anchor for later checking
        self.anchor = self.urlparts[4]
        if self.anchor is not None:
            assert isinstance(self.anchor, unicode), repr(self.anchor)
def check_obfuscated_ip (self):
"""Warn if host of this URL is obfuscated IP address."""
# check if self.host can be an IP address
# check for obfuscated IP address
if iputil.is_obfuscated_ip(self.host):
ips = iputil.resolve_host(self.host)
if ips:
self.host = ips[0]
self.add_warning(
_("URL %(url)s has obfuscated IP address %(ip)s") % \
{"url": self.base_url, "ip": ips[0]},
tag=WARN_URL_OBFUSCATED_IP)
def check (self):
"""Main check function for checking this URL."""
if self.aggregate.config["trace"]:
trace.trace_on()
try:
self.local_check()
except (socket.error, select.error):
# on Unix, ctrl-c can raise
# error: (4, 'Interrupted system call')
etype, value = sys.exc_info()[:2]
if etype == errno.EINTR:
raise KeyboardInterrupt(value)
else:
raise
    def local_check (self):
        """Local check function can be overridden in subclasses."""
        log.debug(LOG_CHECK, "Checking %s", unicode(self))
        # strict extern URLs should not be checked
        assert not self.extern[1], 'checking strict extern URL'
        # check connection
        log.debug(LOG_CHECK, "checking connection")
        try:
            self.check_connection()
            self.set_content_type()
            self.add_size_info()
            self.aggregate.plugin_manager.run_connection_plugins(self)
        # ExcList is a list of exception classes; convert for except clause
        except tuple(ExcList) as exc:
            value = self.handle_exception()
            # make nicer error msg for unknown hosts
            # NOTE(review): -2 appears to be getaddrinfo()'s
            # "name or service not known" error code - confirm
            if isinstance(exc, socket.error) and exc.args[0] == -2:
                value = _('Hostname not found')
            elif isinstance(exc, UnicodeError):
                # idna.encode(host) failed
                value = _('Bad hostname %(host)r: %(msg)s') % {'host': self.host, 'msg': str(value)}
            self.set_result(unicode_safe(value), valid=False)
    def check_content(self):
        """Check content of URL.
        @return: True if content can be parsed, else False
        """
        if self.do_check_content and self.valid:
            # check content and recursion
            try:
                if self.can_get_content():
                    # content plugins may download and inspect the data
                    self.aggregate.plugin_manager.run_content_plugins(self)
                if self.allows_recursion():
                    return True
            # ExcList is a list of exception classes; convert for except clause
            except tuple(ExcList):
                value = self.handle_exception()
                self.add_warning(_("could not get content: %(msg)s") %
                             {"msg": str(value)}, tag=WARN_URL_ERROR_GETTING_CONTENT)
        return False
def close_connection (self):
"""
Close an opened url connection.
"""
if self.url_connection is None:
# no connection is open
return
try:
self.url_connection.close()
except Exception:
# ignore close errors
pass
self.url_connection = None
    def handle_exception (self):
        """
        An exception occurred. Log it and set the cache flag.

        @return: the formatted and length-limited error message
        """
        etype, evalue = sys.exc_info()[:2]
        log.debug(LOG_CHECK, "Error in %s: %s %s", self.url, etype, evalue, exception=True)
        # note: etype must be the exact class, not a subclass
        if (etype in ExcNoCacheList) or \
           (etype == socket.error and evalue.args[0]==errno.EBADF) or \
            not evalue:
            # EBADF occurs when operating on an already socket
            # do not cache this (transient) result
            self.caching = False
        # format unicode message "<exception name>: <error message>"
        errmsg = unicode(etype.__name__)
        uvalue = strformat.unicode_safe(evalue)
        if uvalue:
            errmsg += u": %s" % uvalue
        # limit length to 240
        return strformat.limit(errmsg, length=240)
def check_connection (self):
"""
The basic connection check uses urlopen to initialize
a connection object.
"""
self.url_connection = urlopen(self.url)
def add_size_info (self):
"""Set size of URL content (if any)..
Should be overridden in subclasses."""
maxbytes = self.aggregate.config["maxfilesizedownload"]
if self.size > maxbytes:
self.add_warning(
_("Content size %(size)s is larger than %(maxbytes)s.") %
dict(size=strformat.strsize(self.size),
maxbytes=strformat.strsize(maxbytes)),
tag=WARN_URL_CONTENT_SIZE_TOO_LARGE)
def allows_simple_recursion(self):
"""Check recursion level and extern status."""
rec_level = self.aggregate.config["recursionlevel"]
if rec_level >= 0 and self.recursion_level >= rec_level:
log.debug(LOG_CHECK, "... no, maximum recursion level reached.")
return False
if self.extern[0]:
log.debug(LOG_CHECK, "... no, extern.")
return False
return True
    def allows_recursion (self):
        """
        Return True iff we can recurse into the url's content,
        checking validity, downloadability, recursion level, parse
        size limit, parseability and robots.txt rules in turn.
        """
        log.debug(LOG_CHECK, "checking recursion of %r ...", self.url)
        if not self.valid:
            log.debug(LOG_CHECK, "... no, invalid.")
            return False
        if not self.can_get_content():
            log.debug(LOG_CHECK, "... no, cannot get content.")
            return False
        if not self.allows_simple_recursion():
            return False
        if self.size > self.aggregate.config["maxfilesizeparse"]:
            log.debug(LOG_CHECK, "... no, maximum parse size.")
            return False
        if not self.is_parseable():
            log.debug(LOG_CHECK, "... no, not parseable.")
            return False
        if not self.content_allows_robots():
            log.debug(LOG_CHECK, "... no, robots.")
            return False
        log.debug(LOG_CHECK, "... yes, recursion.")
        return True
def content_allows_robots(self):
"""Returns True: only check robots.txt on HTTP links."""
return True
    def set_extern (self, url):
        """
        Match URL against extern and intern link patterns. If no pattern
        matches the URL is extern. Sets self.extern to a tuple (bool,
        bool) with content (is_extern, is_strict).

        @return: None
        """
        # the flags are stored as 0/1 integers; the first matching
        # pattern wins, extern patterns take precedence over intern ones
        if self.extern:
            return
        if not url:
            self.extern = (1, 1)
            return
        for entry in self.aggregate.config["externlinks"]:
            match = entry['pattern'].search(url)
            # a 'negate' entry matches when the pattern does NOT match
            if (entry['negate'] and not match) or \
               (match and not entry['negate']):
                log.debug(LOG_CHECK, "Extern URL %r", url)
                self.extern = (1, entry['strict'])
                return
        for entry in self.aggregate.config["internlinks"]:
            match = entry['pattern'].search(url)
            if (entry['negate'] and not match) or \
               (match and not entry['negate']):
                log.debug(LOG_CHECK, "Intern URL %r", url)
                self.extern = (0, 0)
                return
        # no pattern matched: extern; strict unless extern checking is on
        if self.aggregate.config['checkextern']:
            self.extern = (1, 0)
        else:
            self.extern = (1, 1)
def set_content_type (self):
"""Set content MIME type.
Should be overridden in subclasses."""
pass
def can_get_content (self):
"""Indicate wether url get_content() can be called."""
return self.size <= self.aggregate.config["maxfilesizedownload"]
    def get_content (self):
        """Precondition: url_connection is an opened URL.

        Downloads and caches the URL content in self.data, recording
        content size and download time; repeated calls return the
        cached data."""
        if self.data is None:
            log.debug(LOG_CHECK, "Get content of %r", self.url)
            t = time.time()
            self.data = self.read_content()
            self.size = len(self.data)
            self.dltime = time.time() - t
            if self.size == 0:
                self.add_warning(_("Content size is zero."),
                                 tag=WARN_URL_CONTENT_SIZE_ZERO)
            else:
                # account the downloaded bytes in the global statistics
                self.aggregate.add_downloaded_bytes(self.size)
        return self.data
    def read_content(self):
        """Return data for this URL. Can be overridden in subclasses.

        Reads the connection chunk-wise and aborts with LinkCheckerError
        as soon as the configured maximum download size would be
        exceeded."""
        buf = StringIO()
        data = self.read_content_chunk()
        while data:
            if buf.tell() + len(data) > self.aggregate.config["maxfilesizedownload"]:
                raise LinkCheckerError(_("File size too large"))
            buf.write(data)
            data = self.read_content_chunk()
        return buf.getvalue()
def read_content_chunk(self):
"""Read one chunk of content from this URL."""
return self.url_connection.read(self.ReadChunkBytes)
    def get_user_password (self):
        """Get tuple (user, password) from configured authentication.
        Both user and password can be None.
        """
        if self.userinfo:
            # URL itself has authentication info
            return urllib.splitpasswd(self.userinfo)
        # else look up a configured credential entry matching this URL
        return self.aggregate.config.get_user_password(self.url)
    def add_url (self, url, line=0, column=0, page=0, name=u"", base=None):
        """Add new URL to queue.

        @param url: the (possibly relative) URL found in this document
        @param line, column, page: position of the URL in this document
        @param name: link name or empty
        @param base: optional <base href=""> value of this document
        """
        if base:
            base_ref = urlutil.url_norm(base)[0]
        else:
            base_ref = None
        # recursion level increases by one for URLs found in this document
        url_data = get_url_from(url, self.recursion_level+1, self.aggregate,
              parent_url=self.url, base_ref=base_ref, line=line, column=column,
              page=page, name=name, parent_content_type=self.content_type)
        self.aggregate.urlqueue.put(url_data)
    def serialized (self, sep=os.linesep):
        """
        Return serialized url check data as unicode string.

        @param sep: separator placed between the data fields
        """
        return unicode_safe(sep).join([
            u"%s link" % self.scheme,
            u"base_url=%r" % self.base_url,
            u"parent_url=%r" % self.parent_url,
            u"base_ref=%r" % self.base_ref,
            u"recursion_level=%d" % self.recursion_level,
            u"url_connection=%s" % self.url_connection,
            u"line=%d" % self.line,
            u"column=%d" % self.column,
            u"page=%d" % self.page,
            u"name=%r" % self.name,
            u"anchor=%r" % self.anchor,
            u"cache_url=%s" % self.cache_url,
        ])
def get_intern_pattern (self, url=None):
"""Get pattern for intern URL matching.
@param url: the URL to set intern pattern for, else self.url
@ptype url: unicode or None
@return non-empty regex pattern or None
@rtype String or None
"""
return None
    def add_intern_pattern(self, url=None):
        """Add intern URL regex to config.

        On IDNA encoding errors the URL gets an invalid result instead.
        """
        try:
            pat = self.get_intern_pattern(url=url)
            if pat:
                log.debug(LOG_CHECK, "Add intern pattern %r", pat)
                self.aggregate.config['internlinks'].append(get_link_pat(pat))
        except UnicodeError as msg:
            res = _("URL has unparsable domain name: %(domain)s") % \
                {"domain": msg}
            self.set_result(res, valid=False)
def __unicode__(self):
"""
Get URL info.
@return: URL info
@rtype: unicode
"""
return self.serialized()
def __str__(self):
"""
Get URL info.
@return: URL info, encoded with the output logger encoding
@rtype: string
"""
s = unicode(self)
return self.aggregate.config['logger'].encode(s)
def __repr__ (self):
"""
Get URL info.
@return: URL info
@rtype: unicode
"""
return u"<%s>" % self.serialized(sep=u", ")
def to_wire_dict (self):
"""Return a simplified transport object for logging and caching.
The transport object must contain these attributes:
- url_data.valid: bool
Indicates if URL is valid
- url_data.result: unicode
Result string
- url_data.warnings: list of tuples (tag, warning message)
List of tagged warnings for this URL.
- url_data.name: unicode string or None
name of URL (eg. filename or link name)
- url_data.parent_url: unicode or None
Parent URL
- url_data.base_ref: unicode
HTML base reference URL of parent
- url_data.url: unicode
Fully qualified URL.
- url_data.domain: unicode
URL domain part.
- url_data.checktime: int
Number of seconds needed to check this link, default: zero.
- url_data.dltime: int
Number of seconds needed to download URL content, default: -1
- url_data.size: int
Size of downloaded URL content, default: -1
- url_data.info: list of unicode
Additional information about this URL.
- url_data.line: int
Line number of this URL at parent document, or -1
- url_data.column: int
Column number of this URL at parent document, or -1
- url_data.page: int
Page number of this URL at parent document, or -1
- url_data.cache_url: unicode
Cache url for this URL.
- url_data.content_type: unicode
MIME content type for URL content.
- url_data.level: int
Recursion level until reaching this URL from start URL
- url_data.last_modified: datetime
Last modification date of retrieved page (or None).
"""
return dict(valid=self.valid,
extern=self.extern[0],
result=self.result,
warnings=self.warnings[:],
name=self.name or u"",
title=self.get_title(),
parent_url=self.parent_url or u"",
base_ref=self.base_ref or u"",
base_url=self.base_url or u"",
url=self.url or u"",
domain=(self.urlparts[1] if self.urlparts else u""),
checktime=self.checktime,
dltime=self.dltime,
size=self.size,
info=self.info,
line=self.line,
column=self.column,
page=self.page,
cache_url=self.cache_url,
content_type=self.content_type,
level=self.recursion_level,
modified=self.modified,
)
def to_wire (self):
"""Return compact UrlData object with information from to_wire_dict().
"""
return CompactUrlData(self.to_wire_dict())
# Names of the UrlData attributes that are carried over into
# CompactUrlData wire objects (order also defines its __slots__).
urlDataAttr = """
    valid extern result warnings name title parent_url base_ref base_url
    url domain checktime dltime size info modified line column page
    cache_url content_type level
""".split()
class CompactUrlData (object):
    """Memory-efficient snapshot of selected UrlData attributes.

    Slots are used instead of a per-instance __dict__ to minimize
    memory usage.
    """

    __slots__ = urlDataAttr

    def __init__(self, wired_url_data):
        """Copy every attribute named in urlDataAttr from the given
        dictionary onto this instance."""
        for attrname in urlDataAttr:
            setattr(self, attrname, wired_url_data[attrname])
| 30,871 | Python | .py | 773 | 30.276843 | 107 | 0.588032 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,289 | checker.py | wummel_linkchecker/linkcheck/director/checker.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
URL checking functions.
"""
import copy
import time
from . import task
from ..cache import urlqueue
from .. import parser
# Interval in which each check thread looks if it's stopped.
QUEUE_POLL_INTERVALL_SECS = 1.0
def check_urls (urlqueue, logger):
    """Drain the given URL queue in the calling thread, checking each
    URL without any worker threads."""
    while not urlqueue.empty():
        entry = urlqueue.get()
        try:
            check_url(entry, logger)
        finally:
            urlqueue.task_done(entry)
def check_url(url_data, logger):
    """Check a single URL with logging.

    URLs that already carry a result are only logged. Otherwise the
    result cache is consulted first; on a cache miss the URL is checked,
    the result cached (also under any redirect aliases) and the content
    parsed recursively if requested. On a cache hit a copy of the cached
    result is adjusted to this URL's position/parent data.
    """
    if url_data.has_result:
        # result already determined (e.g. by an earlier error): just log it
        logger.log_url(url_data.to_wire())
    else:
        cache = url_data.aggregate.result_cache
        key = url_data.cache_url
        result = cache.get_result(key)
        if result is None:
            # check
            check_start = time.time()
            try:
                url_data.check()
                # check_content() tells whether the content should be parsed
                do_parse = url_data.check_content()
                url_data.checktime = time.time() - check_start
                # Add result to cache
                result = url_data.to_wire()
                cache.add_result(key, result)
                for alias in url_data.aliases:
                    # redirect aliases
                    cache.add_result(alias, result)
                # parse content recursively
                # XXX this could add new warnings which should be cached.
                if do_parse:
                    parser.parse_url(url_data)
            finally:
                # close/release possible open connection
                url_data.close_connection()
        else:
            # copy data from cache and adjust it to this occurrence of the
            # URL (position and parent differ from the cached check)
            result = copy.copy(result)
            result.parent_url = url_data.parent_url
            result.base_ref = url_data.base_ref or u""
            result.base_url = url_data.base_url or u""
            result.line = url_data.line
            result.column = url_data.column
            result.level = url_data.recursion_level
            result.name = url_data.name
        logger.log_url(result)
class Checker(task.LoggedCheckedTask):
    """Worker thread that repeatedly pulls URLs from the shared queue
    and checks them until stopped."""

    def __init__ (self, urlqueue, logger, add_request_session):
        """Store URL queue, logger, the original thread name and the
        per-thread request session factory."""
        super(Checker, self).__init__(logger)
        self.urlqueue = urlqueue
        self.origname = self.getName()
        self.add_request_session = add_request_session

    def run_checked (self):
        """Check URLs in the queue until the thread is stopped."""
        # construct per-thread HTTP/S requests session
        self.add_request_session()
        while not self.stopped(0):
            self.check_url()

    def check_url (self):
        """Fetch one entry from the queue (waiting at most
        QUEUE_POLL_INTERVALL_SECS) and check it; queue timeouts are
        ignored, all other errors reported as internal."""
        try:
            entry = self.urlqueue.get(timeout=QUEUE_POLL_INTERVALL_SECS)
            if entry is not None:
                try:
                    self.check_url_data(entry)
                finally:
                    self.urlqueue.task_done(entry)
                # restore the generic thread name after checking
                self.setName(self.origname)
        except urlqueue.Empty:
            pass
        except Exception:
            self.internal_error()

    def check_url_data (self, url_data):
        """Set a descriptive thread name and check one URL data instance."""
        if url_data.url is None:
            url = ""
        else:
            url = url_data.url.encode("ascii", "replace")
        self.setName("CheckThread-%s" % url)
        check_url(url_data, self.logger)
| 4,218 | Python | .py | 109 | 29.633028 | 75 | 0.606393 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,290 | interrupt.py | wummel_linkchecker/linkcheck/director/interrupt.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Status message handling"""
import time
from . import task
from .. import log, LOG_CHECK, strformat
class Interrupt (task.CheckedTask):
    """Thread that raises KeyboardInterrupt after a specified duration.

    This gives us a portable SIGALRM implementation.
    The elapsed time is checked every 5 seconds.
    """

    # Poll interval in seconds.
    WaitSeconds = 5

    def __init__ (self, duration):
        """Store the interrupt deadline.

        @param duration: raise KeyboardInterrupt after given number of seconds
        @ptype duration: int
        """
        super(Interrupt, self).__init__()
        self.duration = duration

    def run_checked (self):
        """Sleep in WaitSeconds chunks; once the configured duration has
        elapsed, log a warning and raise KeyboardInterrupt."""
        self.start_time = time.time()
        self.setName("Interrupt")
        while not self.stopped(self.WaitSeconds):
            elapsed = time.time() - self.start_time
            if elapsed > self.duration:
                log.warn(LOG_CHECK, "Interrupt after %s" % strformat.strduration_long(elapsed))
                raise KeyboardInterrupt()
| 1,819 | Python | .py | 42 | 38.309524 | 96 | 0.706712 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,291 | console.py | wummel_linkchecker/linkcheck/director/console.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Helpers for console output.
"""
from __future__ import print_function
import sys
import os
import time
from .. import i18n, configuration, strformat, better_exchook2
# Writers wrapping sys.stderr/sys.stdout that encode unicode output
# with the default system encoding before writing.
stderr = i18n.get_encoded_writer(out=sys.stderr)
stdout = i18n.get_encoded_writer()
def encode (text):
    """Encode text with the default encoding if it is Unicode; byte
    strings are returned unchanged."""
    if not isinstance(text, unicode):
        return text
    return text.encode(i18n.default_encoding, 'ignore')
class StatusLogger (object):
    """Standard status logger writing to a file object (stderr by default)."""

    def __init__ (self, fd=stderr):
        """Remember the file object used for logging."""
        self.fd = fd

    def log_status (self, checked, in_progress, queue, duration, num_urls):
        """Compose and write one status line to the file object."""
        active = _n("%2d thread active", "%2d threads active", in_progress) % \
            in_progress
        self.write(u"%s, " % active)
        queued = _n("%5d link queued", "%5d links queued", queue) % queue
        self.write(u"%s, " % queued)
        links = _n("%4d link", "%4d links", checked) % checked
        self.write(u"%s" % links)
        urls = _n("%3d URL", "%3d URLs", num_urls) % num_urls
        self.write(u" in %s checked, " % urls)
        runtime = _("runtime %s") % strformat.strduration_long(duration)
        self.writeln(runtime)
        self.flush()

    def write (self, msg):
        """Write message to the file object."""
        self.fd.write(msg)

    def writeln (self, msg):
        """Write message followed by a platform line break."""
        self.fd.write(u"%s%s" % (msg, unicode(os.linesep)))

    def flush (self):
        """Flush the file object."""
        self.fd.flush()
def internal_error (out=stderr, etype=None, evalue=None, tb=None):
    """Print internal error message, traceback and system/proxy/locale
    diagnostics (output defaults to stderr).
    @param out: file object to print to
    @param etype: exception type; defaults to the current exception's type
    @param evalue: exception value; defaults to the current exception's value
    @param tb: traceback; defaults to the current exception's traceback
    """
    print(os.linesep, file=out)
    print(_("""********** Oops, I did it again. *************
You have found an internal error in LinkChecker. Please write a bug report
at %s
and include the following information:
- the URL or file you are testing
- the system information below
When using the commandline client:
- your commandline arguments and any custom configuration files.
- the output of a debug run with option "-Dall"
Not disclosing some of the information above due to privacy reasons is ok.
I will try to help you nonetheless, but you have to give me something
I can work with ;) .
""") % configuration.SupportUrl, file=out)
    # fill in missing pieces from the exception currently being handled
    if etype is None:
        etype = sys.exc_info()[0]
    if evalue is None:
        evalue = sys.exc_info()[1]
    if tb is None:
        tb = sys.exc_info()[2]
    better_exchook2.better_exchook(etype, evalue, tb, out=out)
    print_app_info(out=out)
    print_proxy_info(out=out)
    print_locale_info(out=out)
    print(os.linesep,
      _("******** LinkChecker internal error, over and out ********"), file=out)
def print_env_info (key, out=stderr):
"""If given environment key is defined, print it out."""
value = os.getenv(key)
if value is not None:
print(key, "=", repr(value), file=out)
def print_proxy_info (out=stderr):
    """Print the proxy-related environment settings."""
    for envvar in ("http_proxy", "ftp_proxy", "no_proxy"):
        print_env_info(envvar, out=out)
def print_locale_info (out=stderr):
    """Print locale-related environment settings and the default locale."""
    for envvar in ("LANGUAGE", "LC_ALL", "LC_CTYPE", "LANG"):
        print_env_info(envvar, out=out)
    print(_("Default locale:"), i18n.get_locale(), file=out)
# Environment variables influencing the interpreter execution
# See python(1) man page.
PYTHON_ENV_VARS = tuple("PYTHON" + suffix for suffix in (
    'HOME', 'PATH', 'STARTUP', 'Y2K', 'OPTIMIZE', 'DEBUG',
    'DONTWRITEBYTECODE', 'INSPECT', 'IOENCODING', 'NOUSERSITE',
    'UNBUFFERED', 'VERBOSE', 'WARNINGS', 'HASHSEED',
))
def print_app_info (out=stderr):
"""Print system and application info (output defaults to stderr)."""
print(_("System info:"), file=out)
print(configuration.App, file=out)
print(_("Released on:"), configuration.ReleaseDate, file=out)
print(_("Python %(version)s on %(platform)s") %
{"version": sys.version, "platform": sys.platform}, file=out)
for key in PYTHON_ENV_VARS:
print_env_info(key, out=out)
print(configuration.get_modules_info(), file=out)
stime = strformat.strtime(time.time())
print(_("Local time:"), stime, file=out)
print(_("sys.argv:"), sys.argv, file=out)
def print_version (out=stdout):
    """Print program version, release date and copyright (output
    defaults to stdout)."""
    print(configuration.App, _("released"), configuration.ReleaseDate,
          file=out)
    print(configuration.Copyright, file=out)
| 5,590 | Python | .py | 138 | 35.782609 | 81 | 0.671518 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,292 | aggregator.py | wummel_linkchecker/linkcheck/director/aggregator.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Aggregate needed object instances for checker threads.
"""
import threading
import thread
import requests
import time
try:
import urlparse
except ImportError:
# Python 3
from urllib import parse as urlparse
import random
from .. import log, LOG_CHECK, strformat, LinkCheckerError
from ..decorators import synchronized
from ..cache import urlqueue
from ..htmlutil import formsearch
from . import logger, status, checker, interrupt
# Locks guarding Aggregate state shared between checker threads:
# the thread list, the per-host wait times and the downloaded-bytes counter.
_threads_lock = threading.RLock()
_hosts_lock = threading.RLock()
_downloadedbytes_lock = threading.RLock()
def new_request_session(config, cookies):
    """Create a new requests session configured with the given cookie jar,
    maximum redirect count and user agent string.
    @param config: configuration supplying maxhttpredirects, useragent
       and cookiefile
    @param cookies: cookie jar to install on the session, or None
    """
    session = requests.Session()
    if cookies:
        session.cookies = cookies
    session.max_redirects = config["maxhttpredirects"]
    session.headers.update({
        "User-Agent": config["useragent"],
    })
    if config["cookiefile"]:
        # NOTE(review): `cookies` here is the jar parameter (possibly None),
        # and neither None nor a requests cookie jar provides from_file();
        # this looks like it was meant to call a cookies *module* shadowed
        # by the parameter and will likely raise AttributeError when a
        # cookiefile is configured -- confirm and fix upstream.
        for cookie in cookies.from_file(config["cookiefile"]):
            session.cookies = requests.cookies.merge_cookies(session.cookies, cookie)
    return session
class Aggregate (object):
    """Store thread-safe data collections for checker threads."""
    def __init__ (self, config, urlqueue, robots_txt, plugin_manager,
          result_cache):
        """Store given link checking objects and initialize counters.
        @param config: the configuration dictionary
        @param urlqueue: shared queue of URLs to check
        @param robots_txt: robots.txt handler
        @param plugin_manager: manager for checker plugins
        @param result_cache: cache of finished check results
        """
        self.config = config
        self.urlqueue = urlqueue
        self.logger = logger.Logger(config)
        self.threads = []
        # thread id -> per-thread requests session
        self.request_sessions = {}
        self.robots_txt = robots_txt
        self.plugin_manager = plugin_manager
        self.result_cache = result_cache
        # host -> wall-clock time before which no new request may be sent
        self.times = {}
        self.cookies = None
        requests_per_second = config["maxrequestspersecond"]
        # min/max random delay between two requests to the same host
        self.wait_time_min = 1.0 / requests_per_second
        self.wait_time_max = max(self.wait_time_min + 0.5, 0.5)
        self.downloaded_bytes = 0
    def visit_loginurl(self):
        """Check for a configured login URL, submit the login form and
        store the resulting session cookies for later requests.
        @raise LinkCheckerError: if the login set no cookies
        """
        url = self.config["loginurl"]
        if not url:
            return
        user, password = self.config.get_user_password(url)
        session = requests.Session()
        # XXX user-agent header
        # XXX timeout
        response = session.get(url)
        cgiuser = self.config["loginuserfield"]
        cgipassword = self.config["loginpasswordfield"]
        # NOTE(review): search_form() presumably returns None when no
        # matching form is found -- the next line would then raise; confirm.
        form = formsearch.search_form(response.content, cgiuser, cgipassword,
            encoding=response.encoding)
        form.data[cgiuser] = user
        form.data[cgipassword] = password
        for key, value in self.config["loginextrafields"].items():
            form.data[key] = value
        formurl = urlparse.urljoin(url, form.url)
        response = session.post(formurl, data=form.data)
        self.cookies = session.cookies
        if len(self.cookies) == 0:
            raise LinkCheckerError("No cookies set by login URL %s" % url)
    @synchronized(_threads_lock)
    def start_threads (self):
        """Spawn threads for URL checking and status printing.
        With zero configured checker threads, URLs are checked
        synchronously in the calling thread instead."""
        if self.config["status"]:
            t = status.Status(self, self.config["status_wait_seconds"])
            t.start()
            self.threads.append(t)
        if self.config["maxrunseconds"]:
            t = interrupt.Interrupt(self.config["maxrunseconds"])
            t.start()
            self.threads.append(t)
        num = self.config["threads"]
        if num > 0:
            for dummy in range(num):
                t = checker.Checker(self.urlqueue, self.logger, self.add_request_session)
                self.threads.append(t)
                t.start()
        else:
            # no worker threads: check everything in this thread
            self.request_sessions[thread.get_ident()] = new_request_session(self.config, self.cookies)
            checker.check_urls(self.urlqueue, self.logger)
    @synchronized(_threads_lock)
    def add_request_session(self):
        """Add a request session for current thread."""
        session = new_request_session(self.config, self.cookies)
        self.request_sessions[thread.get_ident()] = session
    @synchronized(_threads_lock)
    def get_request_session(self):
        """Get the request session for current thread."""
        return self.request_sessions[thread.get_ident()]
    @synchronized(_hosts_lock)
    def wait_for_host(self, host):
        """Throttle requests to one host: sleep until the host's due
        time has passed, then schedule the next allowed request time
        with a random delay."""
        t = time.time()
        if host in self.times:
            due_time = self.times[host]
            if due_time > t:
                wait = due_time - t
                time.sleep(wait)
                t = time.time()
        wait_time = random.uniform(self.wait_time_min, self.wait_time_max)
        self.times[host] = t + wait_time
    @synchronized(_threads_lock)
    def print_active_threads (self):
        """Log all currently active check threads and a summary count."""
        debug = log.is_debug(LOG_CHECK)
        if debug:
            first = True
            for name in self.get_check_threads():
                if first:
                    log.info(LOG_CHECK, _("These URLs are still active:"))
                    first = False
                # strip the "CheckThread-" prefix, leaving the URL
                log.info(LOG_CHECK, name[12:])
        args = dict(
            num=len([x for x in self.threads if x.getName().startswith("CheckThread-")]),
            timeout=strformat.strduration_long(self.config["aborttimeout"]),
        )
        log.info(LOG_CHECK, _("%(num)d URLs are still active. After a timeout of %(timeout)s the active URLs will stop.") % args)
    @synchronized(_threads_lock)
    def get_check_threads(self):
        """Return iterator of the names of active checker threads.
        NOTE(review): as a generator, the synchronized decorator only
        guards generator creation, not the iteration itself."""
        for t in self.threads:
            name = t.getName()
            if name.startswith("CheckThread-"):
                yield name
    def cancel (self):
        """Empty the URL queue."""
        self.urlqueue.do_shutdown()
    def abort (self):
        """Print still-active URLs and empty the URL queue.
        @raise KeyboardInterrupt: if the queue does not drain within
           the configured abort timeout
        """
        self.print_active_threads()
        self.cancel()
        timeout = self.config["aborttimeout"]
        try:
            self.urlqueue.join(timeout=timeout)
        except urlqueue.Timeout:
            log.warn(LOG_CHECK, "Abort timed out after %d seconds, stopping application." % timeout)
            raise KeyboardInterrupt()
    @synchronized(_threads_lock)
    def remove_stopped_threads (self):
        """Remove the stopped threads from the internal thread list."""
        self.threads = [t for t in self.threads if t.is_alive()]
    @synchronized(_threads_lock)
    def finish (self):
        """Request all threads to stop."""
        if not self.urlqueue.empty():
            # This happens when all checker threads died.
            self.cancel()
        for t in self.threads:
            t.stop()
    @synchronized(_threads_lock)
    def is_finished (self):
        """Determine if checking is finished: queue empty and no
        threads left alive."""
        self.remove_stopped_threads()
        return self.urlqueue.empty() and not self.threads
    @synchronized(_downloadedbytes_lock)
    def add_downloaded_bytes(self, numbytes):
        """Add to number of downloaded bytes."""
        self.downloaded_bytes += numbytes
    def end_log_output(self, **kwargs):
        """Print ending output to log, including download and URL totals."""
        kwargs.update(dict(
            downloaded_bytes=self.downloaded_bytes,
            num_urls = len(self.result_cache),
        ))
        self.logger.end_log_output(**kwargs)
| 8,085 | Python | .py | 197 | 32.994924 | 129 | 0.638409 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,293 | status.py | wummel_linkchecker/linkcheck/director/status.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Status message handling"""
import time
from . import task
class Status (task.LoggedCheckedTask):
    """Thread that gathers and logs the status periodically."""
    def __init__ (self, aggregator, wait_seconds):
        """Initialize the status logger task.
        @param aggregator: aggregate providing the URL queue, the result
           cache and the configured status logger
        @ptype aggregator: aggregator.Aggregate
        @param wait_seconds: interval in seconds to report status
        @ptype wait_seconds: int
        """
        logger = aggregator.config.status_logger
        super(Status, self).__init__(logger)
        self.aggregator = aggregator
        self.wait_seconds = wait_seconds
        assert self.wait_seconds >= 1
    def run_checked (self):
        """Print periodic status messages until stopped."""
        self.start_time = time.time()
        self.setName("Status")
        # the first status should be after a second
        wait_seconds = 1
        first_wait = True
        while not self.stopped(wait_seconds):
            self.log_status()
            if first_wait:
                # after the first report, switch to the configured interval
                wait_seconds = self.wait_seconds
                first_wait = False
    def log_status (self):
        """Gather current queue/cache figures and log one status message."""
        duration = time.time() - self.start_time
        checked, in_progress, queue = self.aggregator.urlqueue.status()
        num_urls = len(self.aggregator.result_cache)
        self.logger.log_status(checked, in_progress, queue, duration, num_urls)
| 2,298 | Python | .py | 53 | 37.132075 | 79 | 0.683036 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,294 | __init__.py | wummel_linkchecker/linkcheck/director/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Management of checking a queue of links with several threads.
"""
import os
import thread
import time
from .. import log, LOG_CHECK, LinkCheckerInterrupt, plugins
from ..cache import urlqueue, robots_txt, results
from . import aggregator, console
def check_urls (aggregate):
    """Main check function; checks all configured URLs until interrupted
    with Ctrl-C.
    Visits the login URL first (if configured), starts log output, spawns
    the checker threads and waits for the queue to drain, translating the
    various error conditions into a clean shutdown.
    @param aggregate: aggregate holding queue, config, loggers and threads
    @return: None
    """
    try:
        aggregate.visit_loginurl()
    except Exception as msg:
        log.warn(LOG_CHECK, _("Error using login URL: %(msg)s.") % \
          dict(msg=msg))
        raise
    try:
        aggregate.logger.start_log_output()
    except Exception as msg:
        log.error(LOG_CHECK, _("Error starting log output: %(msg)s.") % \
          dict(msg=msg))
        raise
    try:
        if not aggregate.urlqueue.empty():
            aggregate.start_threads()
        # wait for the queue to be fully processed
        check_url(aggregate)
        aggregate.finish()
        aggregate.end_log_output()
    except LinkCheckerInterrupt:
        raise
    except KeyboardInterrupt:
        interrupt(aggregate)
    except thread.error:
        log.warn(LOG_CHECK,
            _("Could not start a new thread. Check that the current user" \
              " is allowed to start new threads."))
        abort(aggregate)
    except Exception:
        # Catching "Exception" is intentionally done. This saves the program
        # from libraries that raise all kinds of strange exceptions.
        console.internal_error()
        aggregate.logger.log_internal_error()
        abort(aggregate)
    # Not catched exceptions at this point are SystemExit and GeneratorExit,
    # and both should be handled by the calling layer.
def check_url (aggregate):
    """Wait until the URL queue is fully processed, periodically
    reaping dead checker threads."""
    done = False
    while not done:
        try:
            aggregate.urlqueue.join(timeout=30)
            done = True
        except urlqueue.Timeout:
            # Every 30 seconds drop finished threads; stop waiting
            # when no checker threads remain.
            aggregate.remove_stopped_threads()
            done = not any(aggregate.get_check_threads())
def interrupt (aggregate):
    """Interrupt execution and shut down, retrying the shutdown while
    further KeyboardInterrupts arrive."""
    finished = False
    while not finished:
        try:
            log.warn(LOG_CHECK,
                 _("interrupt; waiting for active threads to finish"))
            log.warn(LOG_CHECK,
                 _("another interrupt will exit immediately"))
            abort(aggregate)
            finished = True
        except KeyboardInterrupt:
            # another Ctrl-C during shutdown: start over
            pass
def abort (aggregate):
    """Ensure a clean shutdown; a further user interrupt escalates
    to a hard process exit."""
    while True:
        try:
            aggregate.abort()
            aggregate.finish()
            aggregate.end_log_output(interrupt=True)
            return
        except KeyboardInterrupt:
            log.warn(LOG_CHECK, _("user abort; force shutdown"))
            aggregate.end_log_output(interrupt=True)
            abort_now()
def abort_now ():
    """Force exit of current process without cleanup."""
    if os.name == 'posix':
        # Unix systems can use signals
        import signal
        os.kill(os.getpid(), signal.SIGTERM)
        time.sleep(1)
        # still alive after SIGTERM: kill without any chance to cleanup
        os.kill(os.getpid(), signal.SIGKILL)
    elif os.name == 'nt':
        # NT has os.abort()
        os.abort()
    else:
        # All other systems have os._exit() as best shot.
        os._exit(3)
def get_aggregate (config):
    """Build an aggregator instance wired up from the given configuration."""
    queue = urlqueue.UrlQueue(max_allowed_urls=config["maxnumurls"])
    robots = robots_txt.RobotsTxt(config["useragent"])
    plugin_manager = plugins.PluginManager(config)
    result_cache = results.ResultCache()
    return aggregator.Aggregate(config, queue, robots, plugin_manager,
                                result_cache)
| 4,546 | Python | .py | 123 | 29.95122 | 79 | 0.657674 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,295 | logger.py | wummel_linkchecker/linkcheck/director/logger.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Logger for aggregator instances"""
import threading
import thread
from ..decorators import synchronized
# Lock serializing log_url() and log_internal_error() across threads.
_lock = threading.Lock()
class Logger (object):
    """Thread safe multi-logger class used by aggregator instances."""
    def __init__ (self, config):
        """Initialize basic logging variables.
        @param config: configuration supplying the main logger, the file
           output loggers and the verbose/warnings flags
        """
        self.loggers = [config['logger']]
        self.loggers.extend(config['fileoutput'])
        self.verbose = config["verbose"]
        self.warnings = config["warnings"]
    def start_log_output (self):
        """
        Start output of all configured loggers.
        """
        for logger in self.loggers:
            logger.start_output()
    def end_log_output (self, **kwargs):
        """
        End output of all configured loggers.
        """
        for logger in self.loggers:
            logger.end_output(**kwargs)
    def do_print (self, url_data):
        """Determine if URL entry should be logged or not.
        Logged are: everything when verbose; entries carrying warnings
        when warnings are enabled; and all invalid entries.
        """
        if self.verbose:
            return True
        if self.warnings and url_data.warnings:
            return True
        return not url_data.valid
    @synchronized(_lock)
    def log_url (self, url_data):
        """Send new url to all configured loggers."""
        self.check_active_loggers()
        do_print = self.do_print(url_data)
        # Only send a transport object to the loggers, not the complete
        # object instance.
        for log in self.loggers:
            log.log_filter_url(url_data, do_print)
    @synchronized(_lock)
    def log_internal_error (self):
        """Document that an internal error occurred."""
        for logger in self.loggers:
            logger.log_internal_error()
    def check_active_loggers(self):
        """Check if all loggers are deactivated due to I/O errors."""
        for logger in self.loggers:
            if logger.is_active:
                break
        else:
            # for/else: the loop found no active logger at all, so
            # further checking is pointless -- interrupt the main thread.
            thread.interrupt_main()
| 2,702 | Python | .py | 69 | 32.57971 | 73 | 0.661204 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,296 | task.py | wummel_linkchecker/linkcheck/director/task.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import thread
from ..decorators import notimplemented
from .. import threader
from . import console
class CheckedTask (threader.StoppableThread):
    """Stoppable URL check task, handling error conditions while running."""
    def run (self):
        """Run run_checked(), translating KeyboardInterrupt into an
        interrupt of the main thread and reporting all other exceptions
        through internal_error()."""
        try:
            self.run_checked()
        except KeyboardInterrupt:
            thread.interrupt_main()
        except Exception:
            self.internal_error()
    @notimplemented
    def run_checked (self):
        """Overload in subclass."""
        pass
    @notimplemented
    def internal_error (self):
        """Overload in subclass."""
        pass
class LoggedCheckedTask (CheckedTask):
    """URL check task with a logger instance and internal error handling."""
    def __init__ (self, logger):
        """Initialize the base task and store the given logger.
        @param logger: logger object used by internal_error()
        """
        # Bugfix: super() must name this class, not CheckedTask --
        # super(CheckedTask, self) starts the MRO lookup *after*
        # CheckedTask and would silently skip any CheckedTask.__init__.
        super(LoggedCheckedTask, self).__init__()
        self.logger = logger
    def internal_error (self):
        """Log an internal error on console and the logger."""
        console.internal_error()
        self.logger.log_internal_error()
| 1,915 | Python | .py | 48 | 34.875 | 76 | 0.703445 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,297 | urlqueue.py | wummel_linkchecker/linkcheck/cache/urlqueue.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Handle a queue of URLs to check.
"""
import threading
import collections
from time import time as _time
from .. import log, LOG_CACHE
class Timeout(Exception):
    """Raised by join() when the given timeout expires."""
class Empty(Exception):
    """Raised by get() when no item became available in time."""
NUM_PUTS_CLEANUP = 10000
class UrlQueue (object):
"""A queue supporting several consumer tasks. The task_done() idea is
from the Python 2.5 implementation of Queue.Queue()."""
def __init__ (self, max_allowed_urls=None):
"""Initialize the queue state and task counters."""
# Note: don't put a maximum size on the queue since it would
# lead to deadlocks when all worker threads called put().
self.queue = collections.deque()
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the two conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
self.finished_tasks = 0
self.in_progress = 0
self.shutdown = False
# Each put() decreases the number of allowed puts.
# This way we can restrict the number of URLs that are checked.
if max_allowed_urls is not None and max_allowed_urls <= 0:
raise ValueError("Non-positive number of allowed URLs: %d" % max_allowed_urls)
self.max_allowed_urls = max_allowed_urls
self.num_puts = 0
def qsize (self):
"""Return the approximate size of the queue (not reliable!)."""
with self.mutex:
return len(self.queue)
def empty (self):
"""Return True if the queue is empty, False otherwise.
Result is thread-safe, but not reliable since the queue could have
been changed before the result is returned!"""
with self.mutex:
return self._empty()
def _empty (self):
"""Return True if the queue is empty, False otherwise.
Not thread-safe!"""
return not self.queue
def get (self, timeout=None):
"""Get first not-in-progress url from the queue and
return it. If no such url is available return None.
"""
with self.not_empty:
return self._get(timeout)
def _get (self, timeout):
"""Non thread-safe utility function of self.get() doing the real
work."""
if timeout is None:
while self._empty():
self.not_empty.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self._empty():
remaining = endtime - _time()
if remaining <= 0.0:
raise Empty()
self.not_empty.wait(remaining)
self.in_progress += 1
return self.queue.popleft()
def put (self, item):
"""Put an item into the queue.
Block if necessary until a free slot is available.
"""
with self.mutex:
self._put(item)
self.not_empty.notify()
def _put (self, url_data):
"""Put URL in queue, increase number of unfished tasks."""
if self.shutdown or self.max_allowed_urls == 0:
return
log.debug(LOG_CACHE, "queueing %s", url_data.url)
key = url_data.cache_url
cache = url_data.aggregate.result_cache
if url_data.has_result or cache.has_result(key):
self.queue.appendleft(url_data)
else:
assert key is not None, "no result for None key: %s" % url_data
if self.max_allowed_urls is not None:
self.max_allowed_urls -= 1
self.num_puts += 1
if self.num_puts >= NUM_PUTS_CLEANUP:
self.cleanup()
self.queue.append(url_data)
self.unfinished_tasks += 1
def cleanup(self):
"""Move cached elements to top."""
self.num_puts = 0
cached = []
for i, url_data in enumerate(self.queue):
key = url_data.cache_url
cache = url_data.aggregate.result_cache
if cache.has_result(key):
cached.append(i)
for pos in cached:
self._move_to_top(pos)
def _move_to_top(self, pos):
"""Move element at given position to top of queue."""
if pos > 0:
self.queue.rotate(-pos)
item = self.queue.popleft()
self.queue.rotate(pos)
self.queue.appendleft(item)
def task_done (self, url_data):
"""
Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
with self.all_tasks_done:
log.debug(LOG_CACHE, "task_done %s", url_data.url)
self.finished_tasks += 1
self.unfinished_tasks -= 1
self.in_progress -= 1
if self.unfinished_tasks <= 0:
if self.unfinished_tasks < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
def join (self, timeout=None):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
with self.all_tasks_done:
if timeout is None:
while self.unfinished_tasks:
self.all_tasks_done.wait()
else:
if timeout < 0:
raise ValueError("'timeout' must be a positive number")
endtime = _time() + timeout
while self.unfinished_tasks:
remaining = endtime - _time()
if remaining <= 0.0:
raise Timeout()
self.all_tasks_done.wait(remaining)
def do_shutdown (self):
"""Shutdown the queue by not accepting any more URLs."""
with self.mutex:
unfinished = self.unfinished_tasks - len(self.queue)
self.queue.clear()
if unfinished <= 0:
if unfinished < 0:
raise ValueError('shutdown is in error')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
self.shutdown = True
def status (self):
"""Get tuple (finished tasks, in progress, queue size)."""
# no need to acquire self.mutex since the numbers are unreliable anyways.
return (self.finished_tasks, self.in_progress, len(self.queue))
| 8,426 | Python | .py | 193 | 34.051813 | 90 | 0.612817 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,298 | results.py | wummel_linkchecker/linkcheck/cache/results.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Cache check results.
"""
from ..decorators import synchronized
from ..lock import get_lock
# lock object shared by the synchronized ResultCache methods below
cache_lock = get_lock("results_cache_lock")
class ResultCache(object):
    """
    Thread-safe cache of UrlData.to_wire() results.
    The cache is limited in size since we rather recheck the same URL
    multiple times instead of running out of memory.
    Format: {cache key (string) -> result (UrlData.to_wire())}
    """

    def __init__(self, max_size=100000):
        """Initialize result cache.

        @param max_size: maximum number of cached entries
        """
        # mapping {URL -> cached result}
        self.cache = {}
        self.max_size = max_size

    @synchronized(cache_lock)
    def get_result(self, key):
        """Return cached result or None if not found."""
        return self.cache.get(key)

    @synchronized(cache_lock)
    def add_result(self, key, result):
        """Add result object to cache with given key.
        The request is ignored when the cache is already full or the key
        is None.
        """
        # Bug fix: use >= so the cache never exceeds max_size entries;
        # the former > check allowed max_size+1 entries to be stored.
        if len(self.cache) >= self.max_size:
            return
        if key is not None:
            self.cache[key] = result

    def has_result(self, key):
        """Non-thread-safe function for fast containment checks."""
        return key in self.cache

    def __len__(self):
        """Get number of cached elements. This is not thread-safe and is
        likely to change before the returned value is used."""
        return len(self.cache)
| 2,223 | Python | .py | 56 | 34.75 | 73 | 0.687674 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |
22,299 | __init__.py | wummel_linkchecker/linkcheck/cache/__init__.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Store and provide cached data during checking in a thread-safe manner.
"""
| 859 | Python | .py | 19 | 44.210526 | 73 | 0.77381 | wummel/linkchecker | 1,417 | 234 | 200 | GPL-2.0 | 9/5/2024, 5:13:10 PM (Europe/Amsterdam) |