| id (int64, 0–458k) | file_name (stringlengths 4–119) | file_path (stringlengths 14–227) | content (stringlengths 24–9.96M) | size (int64, 24–9.96M) | language (stringclasses, 1 value) | extension (stringclasses, 14 values) | total_lines (int64, 1–219k) | avg_line_length (float64, 2.52–4.63M) | max_line_length (int64, 5–9.91M) | alphanum_fraction (float64, 0–1) | repo_name (stringlengths 7–101) | repo_stars (int64, 100–139k) | repo_forks (int64, 0–26.4k) | repo_open_issues (int64, 0–2.27k) | repo_license (stringclasses, 12 values) | repo_extraction_date (stringclasses, 433 values) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 8,900 | _adapters.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/_adapters.py |
from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
"""
Adapt a package spec to adapt the underlying loader.
"""
def __init__(self, spec, adapter=lambda spec: spec.loader):
self.spec = spec
self.loader = adapter(spec)
def __getattr__(self, name):
return getattr(self.spec, name)
class TraversableResourcesLoader:
"""
Adapt a loader to provide TraversableResources.
"""
def __init__(self, spec):
self.spec = spec
def get_resource_reader(self, name):
return CompatibilityFiles(self.spec)._native()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
"""
Adapter for an existing or non-existent resource reader
to provide a compatibility .files().
"""
class SpecPath(abc.Traversable):
"""
Path tied to a module spec.
Can be read and exposes the resource reader children.
"""
def __init__(self, spec, reader):
self._spec = spec
self._reader = reader
def iterdir(self):
if not self._reader:
return iter(())
return iter(
CompatibilityFiles.ChildPath(self._reader, path)
for path in self._reader.contents()
)
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
if not self._reader:
return CompatibilityFiles.OrphanPath(other)
return CompatibilityFiles.ChildPath(self._reader, other)
@property
def name(self):
return self._spec.name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
class ChildPath(abc.Traversable):
"""
Path tied to a resource reader child.
Can be read but doesn't expose any meaningful children.
"""
def __init__(self, reader, name):
self._reader = reader
self._name = name
def iterdir(self):
return iter(())
def is_file(self):
return self._reader.is_resource(self.name)
def is_dir(self):
return not self.is_file()
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(self.name, other)
@property
def name(self):
return self._name
def open(self, mode='r', *args, **kwargs):
return _io_wrapper(
self._reader.open_resource(self.name), mode, *args, **kwargs
)
class OrphanPath(abc.Traversable):
"""
Orphan path, not tied to a module spec or resource reader.
Can't be read and doesn't expose any meaningful children.
"""
def __init__(self, *path_parts):
if len(path_parts) < 1:
raise ValueError('Need at least one path part to construct a path')
self._path = path_parts
def iterdir(self):
return iter(())
def is_file(self):
return False
is_dir = is_file
def joinpath(self, other):
return CompatibilityFiles.OrphanPath(*self._path, other)
@property
def name(self):
return self._path[-1]
def open(self, mode='r', *args, **kwargs):
raise FileNotFoundError("Can't open orphan path")
def __init__(self, spec):
self.spec = spec
@property
def _reader(self):
with suppress(AttributeError):
return self.spec.loader.get_resource_reader(self.spec.name)
def _native(self):
"""
Return the native reader if it supports files().
"""
reader = self._reader
return reader if hasattr(reader, 'files') else self
def __getattr__(self, attr):
return getattr(self._reader, attr)
def files(self):
return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
"""
Construct a package spec with traversable compatibility
on the spec/loader/reader.
"""
return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
| 4,504 | Python | .py | 126 | 26.68254 | 87 | 0.594139 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
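A quick sketch of how the adapters in this row behave end to end: wrap_spec() rewraps a package's __spec__ so that even a loader with no resource reader still answers files(). This assumes importlib_resources is importable standalone; the throwaway demo_pkg module and its bare object() loader are illustrative, not part of the dataset row.

```python
import types
from importlib.machinery import ModuleSpec
from importlib_resources._adapters import CompatibilityFiles, wrap_spec

# A synthetic package whose loader offers no resource reader at all.
pkg = types.ModuleType('demo_pkg')
pkg.__spec__ = ModuleSpec('demo_pkg', loader=object(), is_package=True)

spec = wrap_spec(pkg)
reader = spec.loader.get_resource_reader('demo_pkg')
assert isinstance(reader, CompatibilityFiles)

files = reader.files()                 # a CompatibilityFiles.SpecPath
assert files.name == 'demo_pkg'
assert list(files.iterdir()) == []     # no underlying reader, so no children
assert isinstance(files.joinpath('missing.txt'), CompatibilityFiles.OrphanPath)
```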
| 8,901 | test_contents.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_contents.py |
import unittest
import importlib_resources as resources
from . import data01
from . import util
class ContentsTests:
expected = {
'__init__.py',
'binary.file',
'subdirectory',
'utf-16.file',
'utf-8.file',
}
def test_contents(self):
contents = {path.name for path in resources.files(self.data).iterdir()}
assert self.expected <= contents
class ContentsDiskTests(ContentsTests, unittest.TestCase):
def setUp(self):
self.data = data01
class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
pass
class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
expected = {
# no __init__ because of namespace design
# no subdirectory as incidental difference in fixture
'binary.file',
'utf-16.file',
'utf-8.file',
}
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
| 968 | Python | .py | 31 | 24.935484 | 79 | 0.668108 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,902 | test_read.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_read.py |
import unittest
import importlib_resources as resources
from . import data01
from . import util
from importlib import import_module
class CommonBinaryTests(util.CommonTests, unittest.TestCase):
def execute(self, package, path):
resources.files(package).joinpath(path).read_bytes()
class CommonTextTests(util.CommonTests, unittest.TestCase):
def execute(self, package, path):
resources.files(package).joinpath(path).read_text()
class ReadTests:
def test_read_bytes(self):
result = resources.files(self.data).joinpath('binary.file').read_bytes()
self.assertEqual(result, b'\0\1\2\3')
def test_read_text_default_encoding(self):
result = resources.files(self.data).joinpath('utf-8.file').read_text()
self.assertEqual(result, 'Hello, UTF-8 world!\n')
def test_read_text_given_encoding(self):
result = (
resources.files(self.data)
.joinpath('utf-16.file')
.read_text(encoding='utf-16')
)
self.assertEqual(result, 'Hello, UTF-16 world!\n')
def test_read_text_with_errors(self):
# Raises UnicodeError without the 'errors' argument.
target = resources.files(self.data) / 'utf-16.file'
self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
result = target.read_text(encoding='utf-8', errors='ignore')
self.assertEqual(
result,
'H\x00e\x00l\x00l\x00o\x00,\x00 '
'\x00U\x00T\x00F\x00-\x001\x006\x00 '
'\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
)
class ReadDiskTests(ReadTests, unittest.TestCase):
data = data01
class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
def test_read_submodule_resource(self):
submodule = import_module('ziptestdata.subdirectory')
result = resources.files(submodule).joinpath('binary.file').read_bytes()
self.assertEqual(result, b'\0\1\2\3')
def test_read_submodule_resource_by_name(self):
result = (
resources.files('ziptestdata.subdirectory')
.joinpath('binary.file')
.read_bytes()
)
self.assertEqual(result, b'\0\1\2\3')
class ReadNamespaceTests(ReadTests, unittest.TestCase):
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
if __name__ == '__main__':
unittest.main()
| 2,408 | Python | .py | 56 | 35.428571 | 80 | 0.666381 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,903 | test_resource.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_resource.py |
import sys
import unittest
import importlib_resources as resources
import uuid
import pathlib
from . import data01
from . import zipdata01, zipdata02
from . import util
from importlib import import_module
from ._compat import import_helper, unlink
class ResourceTests:
# Subclasses are expected to set the `data` attribute.
def test_is_file_exists(self):
target = resources.files(self.data) / 'binary.file'
self.assertTrue(target.is_file())
def test_is_file_missing(self):
target = resources.files(self.data) / 'not-a-file'
self.assertFalse(target.is_file())
def test_is_dir(self):
target = resources.files(self.data) / 'subdirectory'
self.assertFalse(target.is_file())
self.assertTrue(target.is_dir())
class ResourceDiskTests(ResourceTests, unittest.TestCase):
def setUp(self):
self.data = data01
class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
pass
def names(traversable):
return {item.name for item in traversable.iterdir()}
class ResourceLoaderTests(unittest.TestCase):
def test_resource_contents(self):
package = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C']
)
self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'})
def test_is_file(self):
package = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
)
self.assertTrue(resources.files(package).joinpath('B').is_file())
def test_is_dir(self):
package = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
)
self.assertTrue(resources.files(package).joinpath('D').is_dir())
def test_resource_missing(self):
package = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
)
self.assertFalse(resources.files(package).joinpath('Z').is_file())
class ResourceCornerCaseTests(unittest.TestCase):
def test_package_has_no_reader_fallback(self):
# Test odd ball packages which:
# 1. Do not have a ResourceReader as a loader
# 2. Are not on the file system
# 3. Are not in a zip file
module = util.create_package(
file=data01, path=data01.__file__, contents=['A', 'B', 'C']
)
# Give the module a dummy loader.
module.__loader__ = object()
# Give the module a dummy origin.
module.__file__ = '/path/which/shall/not/be/named'
module.__spec__.loader = module.__loader__
module.__spec__.origin = module.__file__
self.assertFalse(resources.files(module).joinpath('A').is_file())
class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase):
ZIP_MODULE = zipdata01 # type: ignore
def test_is_submodule_resource(self):
submodule = import_module('ziptestdata.subdirectory')
self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file())
def test_read_submodule_resource_by_name(self):
self.assertTrue(
resources.files('ziptestdata.subdirectory')
.joinpath('binary.file')
.is_file()
)
def test_submodule_contents(self):
submodule = import_module('ziptestdata.subdirectory')
self.assertEqual(
names(resources.files(submodule)), {'__init__.py', 'binary.file'}
)
def test_submodule_contents_by_name(self):
self.assertEqual(
names(resources.files('ziptestdata.subdirectory')),
{'__init__.py', 'binary.file'},
)
def test_as_file_directory(self):
with resources.as_file(resources.files('ziptestdata')) as data:
assert data.name == 'ziptestdata'
assert data.is_dir()
assert data.joinpath('subdirectory').is_dir()
assert len(list(data.iterdir()))
assert not data.parent.exists()
class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase):
ZIP_MODULE = zipdata02 # type: ignore
def test_unrelated_contents(self):
"""
        Test that a zip with two unrelated subpackages returns
        distinct resources. Ref python/importlib_resources#44.
"""
self.assertEqual(
names(resources.files('ziptestdata.one')),
{'__init__.py', 'resource1.txt'},
)
self.assertEqual(
names(resources.files('ziptestdata.two')),
{'__init__.py', 'resource2.txt'},
)
class DeletingZipsTest(unittest.TestCase):
"""Having accessed resources in a zip file should not keep an open
reference to the zip.
"""
ZIP_MODULE = zipdata01
def setUp(self):
modules = import_helper.modules_setup()
self.addCleanup(import_helper.modules_cleanup, *modules)
data_path = pathlib.Path(self.ZIP_MODULE.__file__)
data_dir = data_path.parent
self.source_zip_path = data_dir / 'ziptestdata.zip'
self.zip_path = pathlib.Path(f'{uuid.uuid4()}.zip').absolute()
self.zip_path.write_bytes(self.source_zip_path.read_bytes())
sys.path.append(str(self.zip_path))
self.data = import_module('ziptestdata')
def tearDown(self):
try:
sys.path.remove(str(self.zip_path))
except ValueError:
pass
try:
del sys.path_importer_cache[str(self.zip_path)]
del sys.modules[self.data.__name__]
except KeyError:
pass
try:
unlink(self.zip_path)
except OSError:
# If the test fails, this will probably fail too
pass
def test_iterdir_does_not_keep_open(self):
c = [item.name for item in resources.files('ziptestdata').iterdir()]
self.zip_path.unlink()
del c
def test_is_file_does_not_keep_open(self):
c = resources.files('ziptestdata').joinpath('binary.file').is_file()
self.zip_path.unlink()
del c
def test_is_file_failure_does_not_keep_open(self):
c = resources.files('ziptestdata').joinpath('not-present').is_file()
self.zip_path.unlink()
del c
@unittest.skip("Desired but not supported.")
def test_as_file_does_not_keep_open(self): # pragma: no cover
c = resources.as_file(resources.files('ziptestdata') / 'binary.file')
self.zip_path.unlink()
del c
def test_entered_path_does_not_keep_open(self):
# This is what certifi does on import to make its bundle
# available for the process duration.
c = resources.as_file(
resources.files('ziptestdata') / 'binary.file'
).__enter__()
self.zip_path.unlink()
del c
def test_read_binary_does_not_keep_open(self):
c = resources.files('ziptestdata').joinpath('binary.file').read_bytes()
self.zip_path.unlink()
del c
def test_read_text_does_not_keep_open(self):
c = resources.files('ziptestdata').joinpath('utf-8.file').read_text()
self.zip_path.unlink()
del c
class ResourceFromNamespaceTest01(unittest.TestCase):
site_dir = str(pathlib.Path(__file__).parent)
@classmethod
def setUpClass(cls):
sys.path.append(cls.site_dir)
@classmethod
def tearDownClass(cls):
sys.path.remove(cls.site_dir)
def test_is_submodule_resource(self):
self.assertTrue(
resources.files(import_module('namespacedata01'))
.joinpath('binary.file')
.is_file()
)
def test_read_submodule_resource_by_name(self):
self.assertTrue(
resources.files('namespacedata01').joinpath('binary.file').is_file()
)
def test_submodule_contents(self):
contents = names(resources.files(import_module('namespacedata01')))
try:
contents.remove('__pycache__')
except KeyError:
pass
self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
def test_submodule_contents_by_name(self):
contents = names(resources.files('namespacedata01'))
try:
contents.remove('__pycache__')
except KeyError:
pass
self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
if __name__ == '__main__':
unittest.main()
| 8,478 | Python | .py | 206 | 32.864078 | 85 | 0.630324 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
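The test_entered_path_does_not_keep_open case above references a real-world idiom: certifi enters an as_file() context at import time so its bundle has a concrete filesystem path for the life of the process. A non-runnable sketch of that pattern; somepkg and data.bin are hypothetical names, so this only works against a package that actually ships such a resource.

```python
import importlib_resources as resources

# Hypothetical package/resource names, for illustration only.
_ctx = resources.as_file(resources.files('somepkg') / 'data.bin')
bundle_path = _ctx.__enter__()   # extracted to a temp file when the package is zipped
# bundle_path stays valid for the process; the zip itself need not stay open.
```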
| 8,904 | _compat.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/_compat.py |
import os
try:
from test.support import import_helper # type: ignore
except ImportError:
# Python 3.9 and earlier
class import_helper: # type: ignore
from test.support import (
modules_setup,
modules_cleanup,
DirsOnSysPath,
CleanImport,
)
try:
from test.support import os_helper # type: ignore
except ImportError:
# Python 3.9 compat
class os_helper: # type:ignore
from test.support import temp_dir
try:
# Python 3.10
from test.support.os_helper import unlink
except ImportError:
from test.support import unlink as _unlink
def unlink(target):
return _unlink(os.fspath(target))
| 708 | Python | .py | 25 | 22.24 | 58 | 0.66716 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
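A small usage sketch for the shims above: whichever import branch wins, test code can rely on the same names. Assumes a CPython build that ships the test.support package, with os_helper and unlink in scope as defined in the file.

```python
import pathlib

# os_helper.temp_dir() and unlink() behave the same on Python 3.9
# and on 3.10+, regardless of which import branch was taken above.
with os_helper.temp_dir() as tmp:
    target = pathlib.Path(tmp) / 'scratch.bin'
    target.write_bytes(b'\x00')
    unlink(target)   # accepts any os.PathLike on both code paths
```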
| 8,905 | util.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/util.py |
import abc
import importlib
import io
import sys
import types
import pathlib
from . import data01
from . import zipdata01
from ..abc import ResourceReader
from ._compat import import_helper
from importlib.machinery import ModuleSpec
class Reader(ResourceReader):
def __init__(self, **kwargs):
vars(self).update(kwargs)
def get_resource_reader(self, package):
return self
def open_resource(self, path):
self._path = path
if isinstance(self.file, Exception):
raise self.file
return self.file
def resource_path(self, path_):
self._path = path_
if isinstance(self.path, Exception):
raise self.path
return self.path
def is_resource(self, path_):
self._path = path_
if isinstance(self.path, Exception):
raise self.path
def part(entry):
return entry.split('/')
return any(
len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents)
)
def contents(self):
if isinstance(self.path, Exception):
raise self.path
yield from self._contents
def create_package_from_loader(loader, is_package=True):
name = 'testingpackage'
module = types.ModuleType(name)
spec = ModuleSpec(name, loader, origin='does-not-exist', is_package=is_package)
module.__spec__ = spec
module.__loader__ = loader
return module
def create_package(file=None, path=None, is_package=True, contents=()):
return create_package_from_loader(
Reader(file=file, path=path, _contents=contents),
is_package,
)
class CommonTests(metaclass=abc.ABCMeta):
"""
Tests shared by test_open, test_path, and test_read.
"""
@abc.abstractmethod
def execute(self, package, path):
"""
Call the pertinent legacy API function (e.g. open_text, path)
on package and path.
"""
def test_package_name(self):
# Passing in the package name should succeed.
self.execute(data01.__name__, 'utf-8.file')
def test_package_object(self):
# Passing in the package itself should succeed.
self.execute(data01, 'utf-8.file')
def test_string_path(self):
# Passing in a string for the path should succeed.
path = 'utf-8.file'
self.execute(data01, path)
def test_pathlib_path(self):
# Passing in a pathlib.PurePath object for the path should succeed.
path = pathlib.PurePath('utf-8.file')
self.execute(data01, path)
def test_importing_module_as_side_effect(self):
# The anchor package can already be imported.
del sys.modules[data01.__name__]
self.execute(data01.__name__, 'utf-8.file')
def test_missing_path(self):
# Attempting to open or read or request the path for a
# non-existent path should succeed if open_resource
# can return a viable data stream.
bytes_data = io.BytesIO(b'Hello, world!')
package = create_package(file=bytes_data, path=FileNotFoundError())
self.execute(package, 'utf-8.file')
self.assertEqual(package.__loader__._path, 'utf-8.file')
def test_extant_path(self):
# Attempting to open or read or request the path when the
# path does exist should still succeed. Does not assert
# anything about the result.
bytes_data = io.BytesIO(b'Hello, world!')
# any path that exists
path = __file__
package = create_package(file=bytes_data, path=path)
self.execute(package, 'utf-8.file')
self.assertEqual(package.__loader__._path, 'utf-8.file')
def test_useless_loader(self):
package = create_package(file=FileNotFoundError(), path=FileNotFoundError())
with self.assertRaises(FileNotFoundError):
self.execute(package, 'utf-8.file')
class ZipSetupBase:
ZIP_MODULE = None
@classmethod
def setUpClass(cls):
data_path = pathlib.Path(cls.ZIP_MODULE.__file__)
data_dir = data_path.parent
cls._zip_path = str(data_dir / 'ziptestdata.zip')
sys.path.append(cls._zip_path)
cls.data = importlib.import_module('ziptestdata')
@classmethod
def tearDownClass(cls):
try:
sys.path.remove(cls._zip_path)
except ValueError:
pass
try:
del sys.path_importer_cache[cls._zip_path]
del sys.modules[cls.data.__name__]
except KeyError:
pass
try:
del cls.data
del cls._zip_path
except AttributeError:
pass
def setUp(self):
modules = import_helper.modules_setup()
self.addCleanup(import_helper.modules_cleanup, *modules)
class ZipSetup(ZipSetupBase):
ZIP_MODULE = zipdata01 # type: ignore
| 4,873 | Python | .py | 131 | 29.541985 | 88 | 0.640884 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
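The create_package()/Reader pair above is the backbone of the fixture tests; the sketch below mirrors ResourceLoaderTests.test_resource_contents from test_resource.py earlier in this table, and assumes the helpers from this file are in scope.

```python
import io
import importlib_resources as resources

# Reader/create_package as defined above.
pkg = create_package(file=io.BytesIO(b'payload'), path='unused',
                     contents=('a.txt', 'b.txt'))
names = {entry.name for entry in resources.files(pkg).iterdir()}
assert names == {'a.txt', 'b.txt'}
```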
| 8,906 | _path.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/_path.py |
import pathlib
import functools
####
# from jaraco.path 3.4
def build(spec, prefix=pathlib.Path()):
"""
Build a set of files/directories, as described by the spec.
Each key represents a pathname, and the value represents
the content. Content may be a nested directory.
>>> spec = {
... 'README.txt': "A README file",
... "foo": {
... "__init__.py": "",
... "bar": {
... "__init__.py": "",
... },
... "baz.py": "# Some code",
... }
... }
>>> tmpdir = getfixture('tmpdir')
>>> build(spec, tmpdir)
"""
for name, contents in spec.items():
create(contents, pathlib.Path(prefix) / name)
@functools.singledispatch
def create(content, path):
path.mkdir(exist_ok=True)
build(content, prefix=path) # type: ignore
@create.register
def _(content: bytes, path):
path.write_bytes(content)
@create.register
def _(content: str, path):
path.write_text(content)
# end from jaraco.path
####
| 1,039 | Python | .py | 36 | 24.805556 | 63 | 0.581395 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
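Beyond the doctest, build() is handy with a plain temporary directory; a minimal runnable sketch, assuming build()/create() from the file above are in scope.

```python
import pathlib
import tempfile

# build()/create() as vendored above from jaraco.path 3.4.
with tempfile.TemporaryDirectory() as tmp:
    build({'pkg': {'__init__.py': '', 'data.txt': 'hello'}}, prefix=tmp)
    assert (pathlib.Path(tmp) / 'pkg' / 'data.txt').read_text() == 'hello'
```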
| 8,907 | test_files.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_files.py |
import typing
import textwrap
import unittest
import warnings
import importlib
import contextlib
import importlib_resources as resources
from ..abc import Traversable
from . import data01
from . import util
from . import _path
from ._compat import os_helper, import_helper
@contextlib.contextmanager
def suppress_known_deprecation():
with warnings.catch_warnings(record=True) as ctx:
warnings.simplefilter('default', category=DeprecationWarning)
yield ctx
class FilesTests:
def test_read_bytes(self):
files = resources.files(self.data)
actual = files.joinpath('utf-8.file').read_bytes()
assert actual == b'Hello, UTF-8 world!\n'
def test_read_text(self):
files = resources.files(self.data)
actual = files.joinpath('utf-8.file').read_text(encoding='utf-8')
assert actual == 'Hello, UTF-8 world!\n'
@unittest.skipUnless(
hasattr(typing, 'runtime_checkable'),
"Only suitable when typing supports runtime_checkable",
)
def test_traversable(self):
assert isinstance(resources.files(self.data), Traversable)
def test_old_parameter(self):
"""
Files used to take a 'package' parameter. Make sure anyone
passing by name is still supported.
"""
with suppress_known_deprecation():
resources.files(package=self.data)
class OpenDiskTests(FilesTests, unittest.TestCase):
def setUp(self):
self.data = data01
class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
pass
class OpenNamespaceTests(FilesTests, unittest.TestCase):
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
class SiteDir:
def setUp(self):
self.fixtures = contextlib.ExitStack()
self.addCleanup(self.fixtures.close)
self.site_dir = self.fixtures.enter_context(os_helper.temp_dir())
self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir))
self.fixtures.enter_context(import_helper.CleanImport())
class ModulesFilesTests(SiteDir, unittest.TestCase):
def test_module_resources(self):
"""
A module can have resources found adjacent to the module.
"""
spec = {
'mod.py': '',
'res.txt': 'resources are the best',
}
_path.build(spec, self.site_dir)
import mod
actual = resources.files(mod).joinpath('res.txt').read_text()
assert actual == spec['res.txt']
class ImplicitContextFilesTests(SiteDir, unittest.TestCase):
def test_implicit_files(self):
"""
Without any parameter, files() will infer the location as the caller.
"""
spec = {
'somepkg': {
'__init__.py': textwrap.dedent(
"""
import importlib_resources as res
val = res.files().joinpath('res.txt').read_text()
"""
),
'res.txt': 'resources are the best',
},
}
_path.build(spec, self.site_dir)
assert importlib.import_module('somepkg').val == 'resources are the best'
if __name__ == '__main__':
unittest.main()
| 3,251 | Python | .py | 88 | 29.261364 | 81 | 0.645428 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,908 | update-zips.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/update-zips.py |
"""
Generate the zip test data files.
Run to build the tests/zipdataNN/ziptestdata.zip files from
files in tests/dataNN.
Replaces the file with the working copy, but does not commit anything
to the source repo.
"""
import contextlib
import os
import pathlib
import zipfile
def main():
"""
>>> from unittest import mock
>>> monkeypatch = getfixture('monkeypatch')
>>> monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock())
>>> print(); main() # print workaround for bpo-32509
<BLANKLINE>
...data01... -> ziptestdata/...
...
...data02... -> ziptestdata/...
...
"""
suffixes = '01', '02'
tuple(map(generate, suffixes))
def generate(suffix):
root = pathlib.Path(__file__).parent.relative_to(os.getcwd())
zfpath = root / f'zipdata{suffix}/ziptestdata.zip'
with zipfile.ZipFile(zfpath, 'w') as zf:
for src, rel in walk(root / f'data{suffix}'):
dst = 'ziptestdata' / pathlib.PurePosixPath(rel.as_posix())
print(src, '->', dst)
zf.write(src, dst)
def walk(datapath):
for dirpath, dirnames, filenames in os.walk(datapath):
with contextlib.suppress(ValueError):
dirnames.remove('__pycache__')
for filename in filenames:
res = pathlib.Path(dirpath) / filename
rel = res.relative_to(datapath)
yield res, rel
__name__ == '__main__' and main()
| 1,417 | Python | .py | 42 | 28.285714 | 71 | 0.638563 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,909 | test_open.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_open.py |
import unittest
import importlib_resources as resources
from . import data01
from . import util
class CommonBinaryTests(util.CommonTests, unittest.TestCase):
def execute(self, package, path):
target = resources.files(package).joinpath(path)
with target.open('rb'):
pass
class CommonTextTests(util.CommonTests, unittest.TestCase):
def execute(self, package, path):
target = resources.files(package).joinpath(path)
with target.open():
pass
class OpenTests:
def test_open_binary(self):
target = resources.files(self.data) / 'binary.file'
with target.open('rb') as fp:
result = fp.read()
self.assertEqual(result, b'\x00\x01\x02\x03')
def test_open_text_default_encoding(self):
target = resources.files(self.data) / 'utf-8.file'
with target.open() as fp:
result = fp.read()
self.assertEqual(result, 'Hello, UTF-8 world!\n')
def test_open_text_given_encoding(self):
target = resources.files(self.data) / 'utf-16.file'
with target.open(encoding='utf-16', errors='strict') as fp:
result = fp.read()
self.assertEqual(result, 'Hello, UTF-16 world!\n')
def test_open_text_with_errors(self):
# Raises UnicodeError without the 'errors' argument.
target = resources.files(self.data) / 'utf-16.file'
with target.open(encoding='utf-8', errors='strict') as fp:
self.assertRaises(UnicodeError, fp.read)
with target.open(encoding='utf-8', errors='ignore') as fp:
result = fp.read()
self.assertEqual(
result,
'H\x00e\x00l\x00l\x00o\x00,\x00 '
'\x00U\x00T\x00F\x00-\x001\x006\x00 '
'\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
)
def test_open_binary_FileNotFoundError(self):
target = resources.files(self.data) / 'does-not-exist'
self.assertRaises(FileNotFoundError, target.open, 'rb')
def test_open_text_FileNotFoundError(self):
target = resources.files(self.data) / 'does-not-exist'
self.assertRaises(FileNotFoundError, target.open)
class OpenDiskTests(OpenTests, unittest.TestCase):
def setUp(self):
self.data = data01
class OpenDiskNamespaceTests(OpenTests, unittest.TestCase):
def setUp(self):
from . import namespacedata01
self.data = namespacedata01
class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
| 2,565 | Python | .py | 60 | 34.8 | 67 | 0.654589 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,910 | test_compatibilty_files.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_compatibilty_files.py |
import io
import unittest
import importlib_resources as resources
from importlib_resources._adapters import (
CompatibilityFiles,
wrap_spec,
)
from . import util
class CompatibilityFilesTests(unittest.TestCase):
@property
def package(self):
bytes_data = io.BytesIO(b'Hello, world!')
return util.create_package(
file=bytes_data,
path='some_path',
contents=('a', 'b', 'c'),
)
@property
def files(self):
return resources.files(self.package)
def test_spec_path_iter(self):
self.assertEqual(
sorted(path.name for path in self.files.iterdir()),
['a', 'b', 'c'],
)
def test_child_path_iter(self):
self.assertEqual(list((self.files / 'a').iterdir()), [])
def test_orphan_path_iter(self):
self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])
def test_spec_path_is(self):
self.assertFalse(self.files.is_file())
self.assertFalse(self.files.is_dir())
def test_child_path_is(self):
self.assertTrue((self.files / 'a').is_file())
self.assertFalse((self.files / 'a').is_dir())
def test_orphan_path_is(self):
self.assertFalse((self.files / 'a' / 'a').is_file())
self.assertFalse((self.files / 'a' / 'a').is_dir())
self.assertFalse((self.files / 'a' / 'a' / 'a').is_file())
self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir())
def test_spec_path_name(self):
self.assertEqual(self.files.name, 'testingpackage')
def test_child_path_name(self):
self.assertEqual((self.files / 'a').name, 'a')
def test_orphan_path_name(self):
self.assertEqual((self.files / 'a' / 'b').name, 'b')
self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c')
def test_spec_path_open(self):
self.assertEqual(self.files.read_bytes(), b'Hello, world!')
self.assertEqual(self.files.read_text(), 'Hello, world!')
def test_child_path_open(self):
self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!')
self.assertEqual((self.files / 'a').read_text(), 'Hello, world!')
def test_orphan_path_open(self):
with self.assertRaises(FileNotFoundError):
(self.files / 'a' / 'b').read_bytes()
with self.assertRaises(FileNotFoundError):
(self.files / 'a' / 'b' / 'c').read_bytes()
def test_open_invalid_mode(self):
with self.assertRaises(ValueError):
self.files.open('0')
def test_orphan_path_invalid(self):
with self.assertRaises(ValueError):
CompatibilityFiles.OrphanPath()
def test_wrap_spec(self):
spec = wrap_spec(self.package)
self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles)
class CompatibilityFilesNoReaderTests(unittest.TestCase):
@property
def package(self):
return util.create_package_from_loader(None)
@property
def files(self):
return resources.files(self.package)
def test_spec_path_joinpath(self):
self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath)
| 3,260 | Python | .py | 77 | 34.727273 | 88 | 0.624763 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,911 | test_path.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_path.py |
import io
import unittest
import importlib_resources as resources
from . import data01
from . import util
class CommonTests(util.CommonTests, unittest.TestCase):
def execute(self, package, path):
with resources.as_file(resources.files(package).joinpath(path)):
pass
class PathTests:
def test_reading(self):
# Path should be readable.
# Test also implicitly verifies the returned object is a pathlib.Path
# instance.
target = resources.files(self.data) / 'utf-8.file'
with resources.as_file(target) as path:
self.assertTrue(path.name.endswith("utf-8.file"), repr(path))
# pathlib.Path.read_text() was introduced in Python 3.5.
with path.open('r', encoding='utf-8') as file:
text = file.read()
self.assertEqual('Hello, UTF-8 world!\n', text)
class PathDiskTests(PathTests, unittest.TestCase):
data = data01
def test_natural_path(self):
"""
Guarantee the internal implementation detail that
file-system-backed resources do not get the tempdir
treatment.
"""
target = resources.files(self.data) / 'utf-8.file'
with resources.as_file(target) as path:
assert 'data' in str(path)
class PathMemoryTests(PathTests, unittest.TestCase):
def setUp(self):
file = io.BytesIO(b'Hello, UTF-8 world!\n')
self.addCleanup(file.close)
self.data = util.create_package(
file=file, path=FileNotFoundError("package exists only in memory")
)
self.data.__spec__.origin = None
self.data.__spec__.has_location = False
class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase):
def test_remove_in_context_manager(self):
# It is not an error if the file that was temporarily stashed on the
# file system is removed inside the `with` stanza.
target = resources.files(self.data) / 'utf-8.file'
with resources.as_file(target) as path:
path.unlink()
if __name__ == '__main__':
unittest.main()
| 2,103 | Python | .py | 50 | 34.3 | 78 | 0.655223 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,912 | test_reader.py | rembo10_headphones/lib/pkg_resources/_vendor/importlib_resources/tests/test_reader.py |
import os.path
import sys
import pathlib
import unittest
from importlib import import_module
from importlib_resources.readers import MultiplexedPath, NamespaceReader
class MultiplexedPathTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = pathlib.Path(__file__).parent / 'namespacedata01'
cls.folder = str(path)
def test_init_no_paths(self):
with self.assertRaises(FileNotFoundError):
MultiplexedPath()
def test_init_file(self):
with self.assertRaises(NotADirectoryError):
MultiplexedPath(os.path.join(self.folder, 'binary.file'))
def test_iterdir(self):
contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
try:
contents.remove('__pycache__')
except (KeyError, ValueError):
pass
self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'})
def test_iterdir_duplicate(self):
data01 = os.path.abspath(os.path.join(__file__, '..', 'data01'))
contents = {
path.name for path in MultiplexedPath(self.folder, data01).iterdir()
}
for remove in ('__pycache__', '__init__.pyc'):
try:
contents.remove(remove)
except (KeyError, ValueError):
pass
self.assertEqual(
contents,
{'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
)
def test_is_dir(self):
self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)
def test_is_file(self):
self.assertEqual(MultiplexedPath(self.folder).is_file(), False)
def test_open_file(self):
path = MultiplexedPath(self.folder)
with self.assertRaises(FileNotFoundError):
path.read_bytes()
with self.assertRaises(FileNotFoundError):
path.read_text()
with self.assertRaises(FileNotFoundError):
path.open()
def test_join_path(self):
prefix = os.path.abspath(os.path.join(__file__, '..'))
data01 = os.path.join(prefix, 'data01')
path = MultiplexedPath(self.folder, data01)
self.assertEqual(
str(path.joinpath('binary.file'))[len(prefix) + 1 :],
os.path.join('namespacedata01', 'binary.file'),
)
self.assertEqual(
str(path.joinpath('subdirectory'))[len(prefix) + 1 :],
os.path.join('data01', 'subdirectory'),
)
self.assertEqual(
str(path.joinpath('imaginary'))[len(prefix) + 1 :],
os.path.join('namespacedata01', 'imaginary'),
)
self.assertEqual(path.joinpath(), path)
def test_join_path_compound(self):
path = MultiplexedPath(self.folder)
assert not path.joinpath('imaginary/foo.py').exists()
def test_repr(self):
self.assertEqual(
repr(MultiplexedPath(self.folder)),
f"MultiplexedPath('{self.folder}')",
)
def test_name(self):
self.assertEqual(
MultiplexedPath(self.folder).name,
os.path.basename(self.folder),
)
class NamespaceReaderTest(unittest.TestCase):
site_dir = str(pathlib.Path(__file__).parent)
@classmethod
def setUpClass(cls):
sys.path.append(cls.site_dir)
@classmethod
def tearDownClass(cls):
sys.path.remove(cls.site_dir)
def test_init_error(self):
with self.assertRaises(ValueError):
NamespaceReader(['path1', 'path2'])
def test_resource_path(self):
namespacedata01 = import_module('namespacedata01')
reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
self.assertEqual(
reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
)
self.assertEqual(
reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
)
def test_files(self):
namespacedata01 = import_module('namespacedata01')
reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
self.assertIsInstance(reader.files(), MultiplexedPath)
self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")
if __name__ == '__main__':
unittest.main()
| 4,480 | Python | .py | 109 | 32.284404 | 88 | 0.625489 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
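MultiplexedPath, exercised by the tests above, presents several directories as a single read-only Traversable. A runnable sketch with two throwaway directories (the names are illustrative), matching the iterdir/is_dir behavior the tests assert:

```python
import pathlib
import tempfile
from importlib_resources.readers import MultiplexedPath

with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
    (pathlib.Path(a) / 'x.txt').write_text('x')
    (pathlib.Path(b) / 'y.txt').write_text('y')
    merged = MultiplexedPath(a, b)
    # Children of both directories appear in one listing.
    assert {p.name for p in merged.iterdir()} == {'x.txt', 'y.txt'}
    assert merged.is_dir() and not merged.is_file()
```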
| 8,913 | _tokenizer.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_tokenizer.py |
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union
from .specifiers import Specifier
@dataclass
class Token:
name: str
text: str
position: int
class ParserSyntaxError(Exception):
"""The provided source text could not be parsed correctly."""
def __init__(
self,
message: str,
*,
source: str,
span: Tuple[int, int],
) -> None:
self.span = span
self.message = message
self.source = source
super().__init__()
def __str__(self) -> str:
marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
return "\n ".join([self.message, self.source, marker])
DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
"LEFT_PARENTHESIS": r"\(",
"RIGHT_PARENTHESIS": r"\)",
"LEFT_BRACKET": r"\[",
"RIGHT_BRACKET": r"\]",
"SEMICOLON": r";",
"COMMA": r",",
"QUOTED_STRING": re.compile(
r"""
(
('[^']*')
|
("[^"]*")
)
""",
re.VERBOSE,
),
"OP": r"(===|==|~=|!=|<=|>=|<|>)",
"BOOLOP": r"\b(or|and)\b",
"IN": r"\bin\b",
"NOT": r"\bnot\b",
"VARIABLE": re.compile(
r"""
\b(
python_version
|python_full_version
|os[._]name
|sys[._]platform
|platform_(release|system)
|platform[._](version|machine|python_implementation)
|python_implementation
|implementation_(name|version)
|extra
)\b
""",
re.VERBOSE,
),
"SPECIFIER": re.compile(
Specifier._operator_regex_str + Specifier._version_regex_str,
re.VERBOSE | re.IGNORECASE,
),
"AT": r"\@",
"URL": r"[^ \t]+",
"IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
"VERSION_PREFIX_TRAIL": r"\.\*",
"VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
"WS": r"[ \t]+",
"END": r"$",
}
class Tokenizer:
"""Context-sensitive token parsing.
Provides methods to examine the input stream to check whether the next token
matches.
"""
def __init__(
self,
source: str,
*,
rules: "Dict[str, Union[str, re.Pattern[str]]]",
) -> None:
self.source = source
self.rules: Dict[str, re.Pattern[str]] = {
name: re.compile(pattern) for name, pattern in rules.items()
}
self.next_token: Optional[Token] = None
self.position = 0
def consume(self, name: str) -> None:
"""Move beyond provided token name, if at current position."""
if self.check(name):
self.read()
def check(self, name: str, *, peek: bool = False) -> bool:
"""Check whether the next token has the provided name.
By default, if the check succeeds, the token *must* be read before
another check. If `peek` is set to `True`, the token is not loaded and
would need to be checked again.
"""
assert (
self.next_token is None
), f"Cannot check for {name!r}, already have {self.next_token!r}"
assert name in self.rules, f"Unknown token name: {name!r}"
expression = self.rules[name]
match = expression.match(self.source, self.position)
if match is None:
return False
if not peek:
self.next_token = Token(name, match[0], self.position)
return True
def expect(self, name: str, *, expected: str) -> Token:
"""Expect a certain token name next, failing with a syntax error otherwise.
The token is *not* read.
"""
if not self.check(name):
raise self.raise_syntax_error(f"Expected {expected}")
return self.read()
def read(self) -> Token:
"""Consume the next token and return it."""
token = self.next_token
assert token is not None
self.position += len(token.text)
self.next_token = None
return token
def raise_syntax_error(
self,
message: str,
*,
span_start: Optional[int] = None,
span_end: Optional[int] = None,
) -> NoReturn:
"""Raise ParserSyntaxError at the given position."""
span = (
self.position if span_start is None else span_start,
self.position if span_end is None else span_end,
)
raise ParserSyntaxError(
message,
source=self.source,
span=span,
)
@contextlib.contextmanager
def enclosing_tokens(
self, open_token: str, close_token: str, *, around: str
) -> Iterator[None]:
if self.check(open_token):
open_position = self.position
self.read()
else:
open_position = None
yield
if open_position is None:
return
if not self.check(close_token):
self.raise_syntax_error(
f"Expected matching {close_token} for {open_token}, after {around}",
span_start=open_position,
)
self.read()
| 5,292 | Python | .py | 162 | 23.925926 | 84 | 0.540588 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
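The Tokenizer is easiest to read through a tiny driver. A sketch assuming the module is importable as packaging._tokenizer (it is vendored here under pkg_resources._vendor); "requests>=2.0" is an arbitrary input:

```python
from packaging._tokenizer import DEFAULT_RULES, Tokenizer

tok = Tokenizer("requests>=2.0", rules=DEFAULT_RULES)
name = tok.expect("IDENTIFIER", expected="package name")
assert name.text == "requests"
tok.consume("WS")                      # no-op here: nothing to skip
spec = tok.expect("SPECIFIER", expected="version specifier")
assert spec.text == ">=2.0"
tok.expect("END", expected="end of input")
```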
| 8,914 | _structures.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_structures.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
| 1,431 | Python | .py | 39 | 30.205128 | 79 | 0.613139 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
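What the two sentinels buy you: they order against anything, which is how packaging.version's sort keys make a missing pre-, post-, or dev-segment sort before or after any concrete one. A minimal check:

```python
from packaging._structures import Infinity, NegativeInfinity

assert NegativeInfinity < 0 < Infinity
assert sorted([Infinity, 3, NegativeInfinity]) == [NegativeInfinity, 3, Infinity]
assert -Infinity is NegativeInfinity   # negation swaps the singletons
```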
| 8,915 | markers.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/markers.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import operator
import os
import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from ._parser import (
MarkerAtom,
MarkerList,
Op,
Value,
Variable,
parse_marker as _parse_marker,
)
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
Operator = Callable[[str, str], bool]
class InvalidMarker(ValueError):
"""
    An invalid marker was found; users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
    A name was used that does not exist in the environment.
"""
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:
assert isinstance(marker, (list, tuple, str))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to
    # skip the rest of this function so that we don't get extraneous () on
    # the outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: Dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Optional[Operator] = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
# PEP 685 – Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
groups: List[List[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: "sys._version_info") -> str:
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Dict[str, str]:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker:
def __init__(self, marker: str) -> None:
# Note: We create a Marker object without calling this constructor in
# packaging.requirements.Requirement. If any additional logic is
# added here, make sure to mirror/adapt Requirement.
try:
self._markers = _normalize_extra_values(_parse_marker(marker))
# The attribute `_markers` can be described in terms of a recursive type:
# MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
#
# For example, the following expression:
# python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
#
# is parsed into:
# [
# (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
# 'and',
# [
# (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
# 'or',
# (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
# ]
# ]
except ParserSyntaxError as e:
raise InvalidMarker(str(e)) from e
def __str__(self) -> str:
return _format_marker(self._markers)
def __repr__(self) -> str:
return f"<Marker('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Marker):
return NotImplemented
return str(self) == str(other)
def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
current_environment["extra"] = ""
if environment is not None:
current_environment.update(environment)
# The API used to allow setting extra to None. We need to handle this
# case for backwards compatibility.
if current_environment["extra"] is None:
current_environment["extra"] = ""
return _evaluate_markers(self._markers, current_environment)
| 8,208 | Python | .py | 204 | 32.607843 | 88 | 0.616893 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
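End-to-end use of the class above; Marker is public packaging API, so this sketch should run anywhere packaging is importable:

```python
from packaging.markers import Marker

m = Marker('python_version >= "3.8" and sys_platform != "win32"')
print(m.evaluate())                            # evaluated against this interpreter
print(m.evaluate({"sys_platform": "win32"}))   # override: second clause fails, so False
```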
| 8,916 | tags.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/tags.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import logging
import platform
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from . import _manylinux, _musllinux
logger = logging.getLogger(__name__)
PythonVersion = Sequence[int]
MacVersion = Tuple[int, int]
INTERPRETER_SHORT_NAMES: Dict[str, str] = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
}
_32_BIT_INTERPRETER = sys.maxsize <= 2**32
class Tag:
"""
A representation of the tag triple for a wheel.
Instances are considered immutable and thus are hashable. Equality checking
is also supported.
"""
__slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
def __init__(self, interpreter: str, abi: str, platform: str) -> None:
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
# The __hash__ of every single element in a Set[Tag] will be evaluated each time
        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
# times when scanning a page of links for packages with tags matching that
# Set[Tag]. Pre-computing the value here produces significant speedups for
# downstream consumers.
self._hash = hash((self._interpreter, self._abi, self._platform))
@property
def interpreter(self) -> str:
return self._interpreter
@property
def abi(self) -> str:
return self._abi
@property
def platform(self) -> str:
return self._platform
def __eq__(self, other: object) -> bool:
if not isinstance(other, Tag):
return NotImplemented
return (
(self._hash == other._hash) # Short-circuit ASAP for perf reasons.
and (self._platform == other._platform)
and (self._abi == other._abi)
and (self._interpreter == other._interpreter)
)
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f"{self._interpreter}-{self._abi}-{self._platform}"
def __repr__(self) -> str:
return f"<{self} @ {id(self)}>"
def parse_tag(tag: str) -> FrozenSet[Tag]:
"""
Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
Returning a set is required due to the possibility that the tag is a
compressed tag set.
"""
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
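# Editor's example (illustrative, not part of the original file): a compressed
# tag set expands to the cross-product, e.g.
#   sorted(str(t) for t in parse_tag("py2.py3-none-any"))
#   == ['py2-none-any', 'py3-none-any']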
def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
value: Union[int, str, None] = sysconfig.get_config_var(name)
if value is None and warn:
logger.debug(
"Config variable '%s' is unset, Python ABI tag may be incorrect", name
)
return value
def _normalize_string(string: str) -> str:
return string.replace(".", "_").replace("-", "_").replace(" ", "_")
def _abi3_applies(python_version: PythonVersion) -> bool:
"""
Determine if the Python version supports abi3.
PEP 384 was first implemented in Python 3.2.
"""
return len(python_version) > 1 and tuple(python_version) >= (3, 2)
def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
py_version = tuple(py_version) # To allow for version comparison.
abis = []
version = _version_nodot(py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = _get_config_var("Py_DEBUG", warn)
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append(f"cp{version}")
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def cpython_tags(
python_version: Optional[PythonVersion] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a CPython interpreter.
The tags consist of:
- cp<python_version>-<abi>-<platform>
- cp<python_version>-abi3-<platform>
- cp<python_version>-none-<platform>
- cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
    If python_version only specifies a major version then user-provided ABIs and
    the 'none' ABI tag will be used.
If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
their normal position and not at the beginning.
"""
if not python_version:
python_version = sys.version_info[:2]
interpreter = f"cp{_version_nodot(python_version[:2])}"
if abis is None:
if len(python_version) > 1:
abis = _cpython_abis(python_version, warn)
else:
abis = []
abis = list(abis)
# 'abi3' and 'none' are explicitly handled later.
for explicit_abi in ("abi3", "none"):
try:
abis.remove(explicit_abi)
except ValueError:
pass
platforms = list(platforms or platform_tags())
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
if _abi3_applies(python_version):
yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
if _abi3_applies(python_version):
for minor_version in range(python_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{version}".format(
version=_version_nodot((python_version[0], minor_version))
)
yield Tag(interpreter, "abi3", platform_)
def _generic_abi() -> List[str]:
"""
Return the ABI tag based on EXT_SUFFIX.
"""
# The following are examples of `EXT_SUFFIX`.
# We want to keep the parts which are related to the ABI and remove the
# parts which are related to the platform:
# - linux: '.cpython-310-x86_64-linux-gnu.so' => cp310
# - mac: '.cpython-310-darwin.so' => cp310
# - win: '.cp310-win_amd64.pyd' => cp310
# - win: '.pyd' => cp37 (uses _cpython_abis())
# - pypy: '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
# - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
# => graalpy_38_native
ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
parts = ext_suffix.split(".")
if len(parts) < 3:
        # CPython 3.7 and earlier use ".pyd" on Windows.
return _cpython_abis(sys.version_info[:2])
soabi = parts[1]
if soabi.startswith("cpython"):
# non-windows
abi = "cp" + soabi.split("-")[1]
elif soabi.startswith("cp"):
# windows
abi = soabi.split("-")[0]
elif soabi.startswith("pypy"):
abi = "-".join(soabi.split("-")[:2])
elif soabi.startswith("graalpy"):
abi = "-".join(soabi.split("-")[:3])
elif soabi:
# pyston, ironpython, others?
abi = soabi
else:
return []
return [_normalize_string(abi)]
def generic_tags(
interpreter: Optional[str] = None,
abis: Optional[Iterable[str]] = None,
platforms: Optional[Iterable[str]] = None,
*,
warn: bool = False,
) -> Iterator[Tag]:
"""
Yields the tags for a generic interpreter.
The tags consist of:
- <interpreter>-<abi>-<platform>
The "none" ABI will be added if it was not explicitly provided.
"""
if not interpreter:
interp_name = interpreter_name()
interp_version = interpreter_version(warn=warn)
interpreter = "".join([interp_name, interp_version])
if abis is None:
abis = _generic_abi()
else:
abis = list(abis)
platforms = list(platforms or platform_tags())
if "none" not in abis:
abis.append("none")
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
"""
Yields Python versions in descending order.
After the latest version, the major-only version will be yielded, and then
all previous versions of that major version.
"""
if len(py_version) > 1:
yield f"py{_version_nodot(py_version[:2])}"
yield f"py{py_version[0]}"
if len(py_version) > 1:
for minor in range(py_version[1] - 1, -1, -1):
yield f"py{_version_nodot((py_version[0], minor))}"
def compatible_tags(
python_version: Optional[PythonVersion] = None,
interpreter: Optional[str] = None,
platforms: Optional[Iterable[str]] = None,
) -> Iterator[Tag]:
"""
Yields the sequence of tags that are compatible with a specific version of Python.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any # ... if `interpreter` is provided.
- py*-none-any
"""
if not python_version:
python_version = sys.version_info[:2]
platforms = list(platforms or platform_tags())
for version in _py_interpreter_range(python_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
if interpreter:
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(python_version):
yield Tag(version, "none", "any")
def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
if cpu_arch in {"arm64", "x86_64"}:
formats.append("universal2")
if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
formats.append("universal")
return formats
def mac_platforms(
version: Optional[MacVersion] = None, arch: Optional[str] = None
) -> Iterator[str]:
"""
Yields the platform tags for a macOS system.
The `version` parameter is a two-item tuple specifying the macOS version to
generate platform tags for. The `arch` parameter is the CPU architecture to
generate platform tags for. Both parameters default to the appropriate value
for the current system.
"""
version_str, _, cpu_arch = platform.mac_ver()
if version is None:
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
if version == (10, 16):
# When built against an older macOS SDK, Python will report macOS 10.16
# instead of the real version.
version_str = subprocess.run(
[
sys.executable,
"-sS",
"-c",
"import platform; print(platform.mac_ver()[0])",
],
check=True,
env={"SYSTEM_VERSION_COMPAT": "0"},
stdout=subprocess.PIPE,
universal_newlines=True,
).stdout
version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
else:
version = version
if arch is None:
arch = _mac_arch(cpu_arch)
else:
arch = arch
if (10, 0) <= version and version < (11, 0):
# Prior to Mac OS 11, each yearly release of Mac OS bumped the
# "minor" version number. The major version was always 10.
for minor_version in range(version[1], -1, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=10, minor=minor_version, binary_format=binary_format
)
if version >= (11, 0):
# Starting with Mac OS 11, each yearly release bumps the major version
# number. The minor versions are now the midyear updates.
for major_version in range(version[0], 10, -1):
compat_version = major_version, 0
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=major_version, minor=0, binary_format=binary_format
)
if version >= (11, 0):
# Mac OS 11 on x86_64 is compatible with binaries from previous releases.
# Arm64 support was introduced in 11.0, so no Arm binaries from previous
# releases exist.
#
# However, the "universal2" binary format can have a
# macOS version earlier than 11.0 when the x86_64 part of the binary supports
# that version of macOS.
if arch == "x86_64":
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
else:
for minor_version in range(16, 3, -1):
compat_version = 10, minor_version
binary_format = "universal2"
yield "macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
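# Editor's note: an illustrative sketch, not part of the vendored module. For
# a hypothetical Intel Mac on macOS 10.15, the first few tags would be:
#
#     >>> list(mac_platforms((10, 15), "x86_64"))[:3]
#     ['macosx_10_15_x86_64', 'macosx_10_15_intel', 'macosx_10_15_fat64']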
def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
linux = _normalize_string(sysconfig.get_platform())
if is_32bit:
if linux == "linux_x86_64":
linux = "linux_i686"
elif linux == "linux_aarch64":
linux = "linux_armv7l"
_, arch = linux.split("_", 1)
yield from _manylinux.platform_tags(linux, arch)
yield from _musllinux.platform_tags(arch)
yield linux
def _generic_platforms() -> Iterator[str]:
yield _normalize_string(sysconfig.get_platform())
def platform_tags() -> Iterator[str]:
"""
Provides the platform tags for this installation.
"""
if platform.system() == "Darwin":
return mac_platforms()
elif platform.system() == "Linux":
return _linux_platforms()
else:
return _generic_platforms()
def interpreter_name() -> str:
"""
Returns the name of the running interpreter.
Some implementations have a reserved, two-letter abbreviation which will
be returned when appropriate.
"""
name = sys.implementation.name
return INTERPRETER_SHORT_NAMES.get(name) or name
def interpreter_version(*, warn: bool = False) -> str:
"""
Returns the version of the running interpreter.
"""
version = _get_config_var("py_version_nodot", warn=warn)
if version:
version = str(version)
else:
version = _version_nodot(sys.version_info[:2])
return version
def _version_nodot(version: PythonVersion) -> str:
return "".join(map(str, version))
def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
interp_name = interpreter_name()
if interp_name == "cp":
yield from cpython_tags(warn=warn)
else:
yield from generic_tags()
if interp_name == "pp":
interp = "pp3"
elif interp_name == "cp":
interp = "cp" + interpreter_version(warn=warn)
else:
interp = None
yield from compatible_tags(interpreter=interp)
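if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: print the tags for
    # the running interpreter in priority order, mirroring how an installer
    # ranks candidate wheels.
    for rank, tag in enumerate(sys_tags()):
        print(f"{rank:4d}  {tag}")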
| 18,106 | Python | .py | 456 | 32.070175 | 88 | 0.610364 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,917 | _manylinux.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_manylinux.py |
import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
from ._elffile import EIClass, EIData, ELFFile, EMachine
EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
try:
with open(path, "rb") as f:
yield ELFFile(f)
except (OSError, TypeError, ValueError):
yield None
def _is_linux_armhf(executable: str) -> bool:
# hard-float ABI can be detected from the ELF header of the running
# process
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.Arm
and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
)
def _is_linux_i686(executable: str) -> bool:
with _parse_elf(executable) as f:
return (
f is not None
and f.capacity == EIClass.C32
and f.encoding == EIData.Lsb
and f.machine == EMachine.I386
)
def _have_compatible_abi(executable: str, arch: str) -> bool:
if arch == "armv7l":
return _is_linux_armhf(executable)
if arch == "i686":
return _is_linux_i686(executable)
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
class _GLibCVersion(NamedTuple):
major: int
minor: int
def _glibc_version_string_confstr() -> Optional[str]:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
    # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
    # can't proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> Optional[str]:
"""Returns glibc version string, or None if not using glibc."""
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
RuntimeWarning,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
version_str = _glibc_version_string()
if version_str is None:
return (-1, -1)
return _parse_glibc_version(version_str)
# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
sys_glibc = _get_glibc_version()
if sys_glibc < version:
return False
# Check for presence of _manylinux module.
try:
import _manylinux # noqa
except ImportError:
return True
if hasattr(_manylinux, "manylinux_compatible"):
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
if result is not None:
return bool(result)
return True
if version == _GLibCVersion(2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
return bool(_manylinux.manylinux1_compatible)
if version == _GLibCVersion(2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
return bool(_manylinux.manylinux2010_compatible)
if version == _GLibCVersion(2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
return bool(_manylinux.manylinux2014_compatible)
return True
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
}
def platform_tags(linux: str, arch: str) -> Iterator[str]:
if not _have_compatible_abi(sys.executable, arch):
return
# Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16)
if arch in {"x86_64", "i686"}:
# On x86/i686 also oldest glibc to be supported is (2, 5).
too_old_glibc2 = _GLibCVersion(2, 4)
current_glibc = _GLibCVersion(*_get_glibc_version())
glibc_max_list = [current_glibc]
# We can assume compatibility across glibc major versions.
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
#
# Build a list of maximum glibc versions so that we can
# output the canonical list of all glibc from current_glibc
# down to too_old_glibc2, including all intermediary versions.
for glibc_major in range(current_glibc.major - 1, 1, -1):
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
for glibc_max in glibc_max_list:
if glibc_max.major == too_old_glibc2.major:
min_minor = too_old_glibc2.minor
else:
# For other glibc major versions oldest supported is (x, 0).
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
if _is_compatible(tag, arch, glibc_version):
yield linux.replace("linux", tag)
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_compatible(legacy_tag, arch, glibc_version):
yield linux.replace("linux", legacy_tag)
| 8,926 | Python | .py | 205 | 36.965854 | 88 | 0.665899 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,918 | _parser.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_parser.py |
"""Handwritten parser of dependency specifiers.
The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
"""
import ast
from typing import Any, List, NamedTuple, Optional, Tuple, Union
from ._tokenizer import DEFAULT_RULES, Tokenizer
class Node:
def __init__(self, value: str) -> None:
self.value = value
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"<{self.__class__.__name__}('{self}')>"
def serialize(self) -> str:
raise NotImplementedError
class Variable(Node):
def serialize(self) -> str:
return str(self)
class Value(Node):
def serialize(self) -> str:
return f'"{self}"'
class Op(Node):
def serialize(self) -> str:
return str(self)
MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
# mypy does not support recursive type definition
# https://github.com/python/mypy/issues/731
MarkerAtom = Any
MarkerList = List[Any]
class ParsedRequirement(NamedTuple):
name: str
url: str
extras: List[str]
specifier: str
marker: Optional[MarkerList]
# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
"""
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
"""
tokenizer.consume("WS")
name_token = tokenizer.expect(
"IDENTIFIER", expected="package name at the start of dependency specifier"
)
name = name_token.text
tokenizer.consume("WS")
extras = _parse_extras(tokenizer)
tokenizer.consume("WS")
url, specifier, marker = _parse_requirement_details(tokenizer)
tokenizer.expect("END", expected="end of dependency specifier")
return ParsedRequirement(name, url, extras, specifier, marker)
def _parse_requirement_details(
tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
"""
requirement_details = AT URL (WS requirement_marker?)?
| specifier WS? (requirement_marker)?
"""
specifier = ""
url = ""
marker = None
if tokenizer.check("AT"):
tokenizer.read()
tokenizer.consume("WS")
url_start = tokenizer.position
url = tokenizer.expect("URL", expected="URL after @").text
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
tokenizer.expect("WS", expected="whitespace after URL")
# The input might end after whitespace.
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer, span_start=url_start, after="URL and whitespace"
)
else:
specifier_start = tokenizer.position
specifier = _parse_specifier(tokenizer)
tokenizer.consume("WS")
if tokenizer.check("END", peek=True):
return (url, specifier, marker)
marker = _parse_requirement_marker(
tokenizer,
span_start=specifier_start,
after=(
"version specifier"
if specifier
else "name and no valid version specifier"
),
)
return (url, specifier, marker)
def _parse_requirement_marker(
tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
"""
requirement_marker = SEMICOLON marker WS?
"""
if not tokenizer.check("SEMICOLON"):
tokenizer.raise_syntax_error(
f"Expected end or semicolon (after {after})",
span_start=span_start,
)
tokenizer.read()
marker = _parse_marker(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
"""
extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
"""
if not tokenizer.check("LEFT_BRACKET", peek=True):
return []
with tokenizer.enclosing_tokens(
"LEFT_BRACKET",
"RIGHT_BRACKET",
around="extras",
):
tokenizer.consume("WS")
extras = _parse_extras_list(tokenizer)
tokenizer.consume("WS")
return extras
def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
"""
extras_list = identifier (wsp* ',' wsp* identifier)*
"""
extras: List[str] = []
if not tokenizer.check("IDENTIFIER"):
return extras
extras.append(tokenizer.read().text)
while True:
tokenizer.consume("WS")
if tokenizer.check("IDENTIFIER", peek=True):
tokenizer.raise_syntax_error("Expected comma between extra names")
elif not tokenizer.check("COMMA"):
break
tokenizer.read()
tokenizer.consume("WS")
extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
extras.append(extra_token.text)
return extras
def _parse_specifier(tokenizer: Tokenizer) -> str:
"""
specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
| WS? version_many WS?
"""
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="version specifier",
):
tokenizer.consume("WS")
parsed_specifiers = _parse_version_many(tokenizer)
tokenizer.consume("WS")
return parsed_specifiers
def _parse_version_many(tokenizer: Tokenizer) -> str:
"""
version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
"""
parsed_specifiers = ""
while tokenizer.check("SPECIFIER"):
span_start = tokenizer.position
parsed_specifiers += tokenizer.read().text
if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
tokenizer.raise_syntax_error(
".* suffix can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position + 1,
)
if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
tokenizer.raise_syntax_error(
"Local version label can only be used with `==` or `!=` operators",
span_start=span_start,
span_end=tokenizer.position,
)
tokenizer.consume("WS")
if not tokenizer.check("COMMA"):
break
parsed_specifiers += tokenizer.read().text
tokenizer.consume("WS")
return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))
def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
"""
marker = marker_atom (BOOLOP marker_atom)+
"""
expression = [_parse_marker_atom(tokenizer)]
while tokenizer.check("BOOLOP"):
token = tokenizer.read()
expr_right = _parse_marker_atom(tokenizer)
expression.extend((token.text, expr_right))
return expression
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
"""
marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
| WS? marker_item WS?
"""
tokenizer.consume("WS")
if tokenizer.check("LEFT_PARENTHESIS", peek=True):
with tokenizer.enclosing_tokens(
"LEFT_PARENTHESIS",
"RIGHT_PARENTHESIS",
around="marker expression",
):
tokenizer.consume("WS")
marker: MarkerAtom = _parse_marker(tokenizer)
tokenizer.consume("WS")
else:
marker = _parse_marker_item(tokenizer)
tokenizer.consume("WS")
return marker
def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
"""
marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
"""
tokenizer.consume("WS")
marker_var_left = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
marker_op = _parse_marker_op(tokenizer)
tokenizer.consume("WS")
marker_var_right = _parse_marker_var(tokenizer)
tokenizer.consume("WS")
return (marker_var_left, marker_op, marker_var_right)
def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
"""
marker_var = VARIABLE | QUOTED_STRING
"""
if tokenizer.check("VARIABLE"):
return process_env_var(tokenizer.read().text.replace(".", "_"))
elif tokenizer.check("QUOTED_STRING"):
return process_python_str(tokenizer.read().text)
else:
tokenizer.raise_syntax_error(
message="Expected a marker variable or quoted string"
)
def process_env_var(env_var: str) -> Variable:
if (
env_var == "platform_python_implementation"
or env_var == "python_implementation"
):
return Variable("platform_python_implementation")
else:
return Variable(env_var)
def process_python_str(python_str: str) -> Value:
value = ast.literal_eval(python_str)
return Value(str(value))
def _parse_marker_op(tokenizer: Tokenizer) -> Op:
"""
marker_op = IN | NOT IN | OP
"""
if tokenizer.check("IN"):
tokenizer.read()
return Op("in")
elif tokenizer.check("NOT"):
tokenizer.read()
tokenizer.expect("WS", expected="whitespace after 'not'")
tokenizer.expect("IN", expected="'in' after 'not'")
return Op("not in")
elif tokenizer.check("OP"):
return Op(tokenizer.read().text)
else:
return tokenizer.raise_syntax_error(
"Expected marker operator, one of "
"<=, <, !=, ==, >=, >, ~=, ===, in, not in"
)
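if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: parse a hypothetical
    # dependency specifier and show the pieces the parser extracts.
    req = parse_requirement('name[extra1,extra2]>=1.0,<2.0; python_version >= "3.8"')
    print("name:     ", req.name)
    print("extras:   ", req.extras)
    print("specifier:", req.specifier)
    print("marker:   ", req.marker)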
| 10,194 | Python | .py | 276 | 29.865942 | 88 | 0.614673 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,919 | utils.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/utils.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import re
from typing import FrozenSet, NewType, Tuple, Union, cast
from .tags import Tag, parse_tag
from .version import InvalidVersion, Version
BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)
class InvalidWheelFilename(ValueError):
"""
    An invalid wheel filename was found; users should refer to PEP 427.
"""
class InvalidSdistFilename(ValueError):
"""
    An invalid sdist filename was found; users should refer to the packaging user guide.
"""
_canonicalize_regex = re.compile(r"[-_.]+")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")
def canonicalize_name(name: str) -> NormalizedName:
# This is taken from PEP 503.
value = _canonicalize_regex.sub("-", name).lower()
return cast(NormalizedName, value)
def canonicalize_version(
version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
"""
This is very similar to Version.__str__, but has one subtle difference
with the way it handles the release segment.
"""
if isinstance(version, str):
try:
parsed = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
else:
parsed = version
parts = []
# Epoch
if parsed.epoch != 0:
parts.append(f"{parsed.epoch}!")
# Release segment
release_segment = ".".join(str(x) for x in parsed.release)
if strip_trailing_zero:
# NB: This strips trailing '.0's to normalize
release_segment = re.sub(r"(\.0)+$", "", release_segment)
parts.append(release_segment)
# Pre-release
if parsed.pre is not None:
parts.append("".join(str(x) for x in parsed.pre))
# Post-release
if parsed.post is not None:
parts.append(f".post{parsed.post}")
# Development release
if parsed.dev is not None:
parts.append(f".dev{parsed.dev}")
# Local version segment
if parsed.local is not None:
parts.append(f"+{parsed.local}")
return "".join(parts)
def parse_wheel_filename(
filename: str,
) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
if not filename.endswith(".whl"):
raise InvalidWheelFilename(
f"Invalid wheel filename (extension must be '.whl'): {filename}"
)
filename = filename[:-4]
dashes = filename.count("-")
if dashes not in (4, 5):
raise InvalidWheelFilename(
f"Invalid wheel filename (wrong number of parts): {filename}"
)
parts = filename.split("-", dashes - 2)
name_part = parts[0]
# See PEP 427 for the rules on escaping the project name
if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
raise InvalidWheelFilename(f"Invalid project name: {filename}")
name = canonicalize_name(name_part)
version = Version(parts[1])
if dashes == 5:
build_part = parts[2]
build_match = _build_tag_regex.match(build_part)
if build_match is None:
raise InvalidWheelFilename(
f"Invalid build number: {build_part} in '{filename}'"
)
build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
else:
build = ()
tags = parse_tag(parts[-1])
return (name, version, build, tags)
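# Editor's note: an illustrative sketch, not part of the vendored module,
# using a hypothetical filename:
#
#     >>> name, ver, build, tags = parse_wheel_filename("foo-1.0-py3-none-any.whl")
#     >>> (name, str(ver), build, sorted(str(t) for t in tags))
#     ('foo', '1.0', (), ['py3-none-any'])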
def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
if filename.endswith(".tar.gz"):
file_stem = filename[: -len(".tar.gz")]
elif filename.endswith(".zip"):
file_stem = filename[: -len(".zip")]
else:
raise InvalidSdistFilename(
f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
f" {filename}"
)
# We are requiring a PEP 440 version, which cannot contain dashes,
# so we split on the last dash.
name_part, sep, version_part = file_stem.rpartition("-")
if not sep:
raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
name = canonicalize_name(name_part)
version = Version(version_part)
return (name, version)
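if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: exercise both filename
    # parsers on hypothetical artifact names.
    print(parse_sdist_filename("foo_bar-1.0.tar.gz"))
    name, version, build, tags = parse_wheel_filename("foo_bar-1.0-1abc-py3-none-any.whl")
    print(name, version, build, sorted(str(t) for t in tags))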
| 4,355 | Python | .py | 112 | 32.696429 | 88 | 0.652349 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,920 | metadata.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/metadata.py |
import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import sys
import typing
from typing import Dict, List, Optional, Tuple, Union, cast
if sys.version_info >= (3, 8): # pragma: no cover
from typing import TypedDict
else: # pragma: no cover
if typing.TYPE_CHECKING:
from typing_extensions import TypedDict
else:
try:
from typing_extensions import TypedDict
except ImportError:
class TypedDict:
def __init_subclass__(*_args, **_kwargs):
pass
# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# format offers some very basic primitives in *some* way, then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
"""A dictionary of raw core metadata.
Each field in core metadata maps to a key of this dictionary (when data is
provided). The key is lower-case and underscores are used instead of dashes
compared to the equivalent core metadata field. Any core metadata field that
can be specified multiple times or can hold multiple values in a single
field have a key with a plural name.
Core metadata fields that can be specified multiple times are stored as a
list or dict depending on which is appropriate for the field. Any fields
which hold multiple values in a single field are stored as a list.
"""
# Metadata 1.0 - PEP 241
metadata_version: str
name: str
version: str
platforms: List[str]
summary: str
description: str
keywords: List[str]
home_page: str
author: str
author_email: str
license: str
# Metadata 1.1 - PEP 314
supported_platforms: List[str]
download_url: str
classifiers: List[str]
requires: List[str]
provides: List[str]
obsoletes: List[str]
# Metadata 1.2 - PEP 345
maintainer: str
maintainer_email: str
requires_dist: List[str]
provides_dist: List[str]
obsoletes_dist: List[str]
requires_python: str
requires_external: List[str]
project_urls: Dict[str, str]
# Metadata 2.0
# PEP 426 attempted to completely revamp the metadata format
# but got stuck without ever being able to build consensus on
# it and ultimately ended up withdrawn.
#
    # However, a number of tools had started emitting METADATA with
# `2.0` Metadata-Version, so for historical reasons, this version
# was skipped.
# Metadata 2.1 - PEP 566
description_content_type: str
provides_extra: List[str]
# Metadata 2.2 - PEP 643
dynamic: List[str]
# Metadata 2.3 - PEP 685
    # No new fields were added in PEP 685, just some edge cases were
    # tightened up to provide better interoperability.
_STRING_FIELDS = {
"author",
"author_email",
"description",
"description_content_type",
"download_url",
"home_page",
"license",
"maintainer",
"maintainer_email",
"metadata_version",
"name",
"requires_python",
"summary",
"version",
}
_LIST_STRING_FIELDS = {
"classifiers",
"dynamic",
"obsoletes",
"obsoletes_dist",
"platforms",
"provides",
"provides_dist",
"provides_extra",
"requires",
"requires_dist",
"requires_external",
"supported_platforms",
}
def _parse_keywords(data: str) -> List[str]:
"""Split a string of comma-separate keyboards into a list of keywords."""
return [k.strip() for k in data.split(",")]
def _parse_project_urls(data: List[str]) -> Dict[str, str]:
"""Parse a list of label/URL string pairings separated by a comma."""
urls = {}
for pair in data:
# Our logic is slightly tricky here as we want to try and do
# *something* reasonable with malformed data.
#
# The main thing that we have to worry about, is data that does
# not have a ',' at all to split the label from the Value. There
# isn't a singular right answer here, and we will fail validation
# later on (if the caller is validating) so it doesn't *really*
# matter, but since the missing value has to be an empty str
# and our return value is dict[str, str], if we let the key
# be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
        #
        # The other potential issue is that it's possible to have the
# same label multiple times in the metadata, with no solid "right"
# answer with what to do in that case. As such, we'll do the only
# thing we can, which is treat the field as unparseable and add it
# to our list of unparsed fields.
parts = [p.strip() for p in pair.split(",", 1)]
parts.extend([""] * (max(0, 2 - len(parts)))) # Ensure 2 items
# TODO: The spec doesn't say anything about if the keys should be
# considered case sensitive or not... logically they should
# be case-preserving and case-insensitive, but doing that
# would open up more cases where we might have duplicate
# entries.
label, url = parts
if label in urls:
# The label already exists in our set of urls, so this field
# is unparseable, and we can just add the whole thing to our
# unparseable data and stop processing it.
raise KeyError("duplicate labels in project urls")
urls[label] = url
return urls
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
"""Get the body of the message."""
# If our source is a str, then our caller has managed encodings for us,
# and we don't need to deal with it.
if isinstance(source, str):
payload: str = msg.get_payload()
return payload
# If our source is a bytes, then we're managing the encoding and we need
# to deal with it.
else:
bpayload: bytes = msg.get_payload(decode=True)
try:
return bpayload.decode("utf8", "strict")
except UnicodeDecodeError:
raise ValueError("payload in an invalid encoding")
# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.
# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
"author": "author",
"author-email": "author_email",
"classifier": "classifiers",
"description": "description",
"description-content-type": "description_content_type",
"download-url": "download_url",
"dynamic": "dynamic",
"home-page": "home_page",
"keywords": "keywords",
"license": "license",
"maintainer": "maintainer",
"maintainer-email": "maintainer_email",
"metadata-version": "metadata_version",
"name": "name",
"obsoletes": "obsoletes",
"obsoletes-dist": "obsoletes_dist",
"platform": "platforms",
"project-url": "project_urls",
"provides": "provides",
"provides-dist": "provides_dist",
"provides-extra": "provides_extra",
"requires": "requires",
"requires-dist": "requires_dist",
"requires-external": "requires_external",
"requires-python": "requires_python",
"summary": "summary",
"supported-platform": "supported_platforms",
"version": "version",
}
def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
"""Parse a distribution's metadata.
This function returns a two-item tuple of dicts. The first dict is of
recognized fields from the core metadata specification. Fields that can be
parsed and translated into Python's built-in types are converted
appropriately. All other fields are left as-is. Fields that are allowed to
appear multiple times are stored as lists.
The second dict contains all other fields from the metadata. This includes
any unrecognized fields. It also includes any fields which are expected to
be parsed into a built-in type but were not formatted appropriately. Finally,
any fields that are expected to appear only once but are repeated are
included in this dict.
"""
raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
unparsed: Dict[str, List[str]] = {}
if isinstance(data, str):
parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
else:
parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)
# We have to wrap parsed.keys() in a set, because in the case of multiple
# values for a key (a list), the key will appear multiple times in the
# list of keys, but we're avoiding that by using get_all().
for name in frozenset(parsed.keys()):
# Header names in RFC are case insensitive, so we'll normalize to all
# lower case to make comparisons easier.
name = name.lower()
# We use get_all() here, even for fields that aren't multiple use,
# because otherwise someone could have e.g. two Name fields, and we
# would just silently ignore it rather than doing something about it.
headers = parsed.get_all(name)
# The way the email module works when parsing bytes is that it
# unconditionally decodes the bytes as ascii using the surrogateescape
# handler. When you pull that data back out (such as with get_all() ),
# it looks to see if the str has any surrogate escapes, and if it does
# it wraps it in a Header object instead of returning the string.
#
# As such, we'll look for those Header objects, and fix up the encoding.
value = []
# Flag if we have run into any issues processing the headers, thus
# signalling that the data belongs in 'unparsed'.
valid_encoding = True
for h in headers:
# It's unclear if this can return more types than just a Header or
# a str, so we'll just assert here to make sure.
assert isinstance(h, (email.header.Header, str))
# If it's a header object, we need to do our little dance to get
# the real data out of it. In cases where there is invalid data
# we're going to end up with mojibake, but there's no obvious, good
# way around that without reimplementing parts of the Header object
# ourselves.
#
            # That should be fine since, if mojibake happens, this key is
# going into the unparsed dict anyways.
if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
# can be independently encoded, so we'll need to check each
# of them.
chunks: List[Tuple[bytes, Optional[str]]] = []
for bin, encoding in email.header.decode_header(h):
try:
bin.decode("utf8", "strict")
except UnicodeDecodeError:
# Enable mojibake.
encoding = "latin1"
valid_encoding = False
else:
encoding = "utf8"
chunks.append((bin, encoding))
# Turn our chunks back into a Header object, then let that
# Header object do the right thing to turn them into a
# string for us.
value.append(str(email.header.make_header(chunks)))
# This is already a string, so just add it.
else:
value.append(h)
# We've processed all of our values to get them into a list of str,
# but we may have mojibake data, in which case this is an unparsed
# field.
if not valid_encoding:
unparsed[name] = value
continue
raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
if raw_name is None:
# This is a bit of a weird situation, we've encountered a key that
# we don't know what it means, so we don't know whether it's meant
# to be a list or not.
#
# Since we can't really tell one way or another, we'll just leave it
# as a list, even though it may be a single item list, because that's
# what makes the most sense for email headers.
unparsed[name] = value
continue
# If this is one of our string fields, then we'll check to see if our
# value is a list of a single item. If it is then we'll assume that
# it was emitted as a single string, and unwrap the str from inside
# the list.
#
# If it's any other kind of data, then we haven't the faintest clue
# what we should parse it as, and we have to just add it to our list
# of unparsed stuff.
if raw_name in _STRING_FIELDS and len(value) == 1:
raw[raw_name] = value[0]
# If this is one of our list of string fields, then we can just assign
# the value, since email *only* has strings, and our get_all() call
# above ensures that this is a list.
elif raw_name in _LIST_STRING_FIELDS:
raw[raw_name] = value
# Special Case: Keywords
# The keywords field is implemented in the metadata spec as a str,
# but it conceptually is a list of strings, and is serialized using
# ", ".join(keywords), so we'll do some light data massaging to turn
# this into what it logically is.
elif raw_name == "keywords" and len(value) == 1:
raw[raw_name] = _parse_keywords(value[0])
# Special Case: Project-URL
# The project urls is implemented in the metadata spec as a list of
# specially-formatted strings that represent a key and a value, which
# is fundamentally a mapping, however the email format doesn't support
# mappings in a sane way, so it was crammed into a list of strings
# instead.
#
# We will do a little light data massaging to turn this into a map as
# it logically should be.
elif raw_name == "project_urls":
try:
raw[raw_name] = _parse_project_urls(value)
except KeyError:
unparsed[name] = value
# Nothing that we've done has managed to parse this, so it'll just
# throw it in our unparseable data and move on.
else:
unparsed[name] = value
# We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
# is the possibility of it being set both ways, in which case we put both
# in 'unparsed' since we don't know which is right.
try:
payload = _get_payload(parsed, data)
except ValueError:
unparsed.setdefault("description", []).append(
parsed.get_payload(decode=isinstance(data, bytes))
)
else:
if payload:
# Check to see if we've already got a description, if so then both
# it, and this body move to unparseable.
if "description" in raw:
description_header = cast(str, raw.pop("description"))
unparsed.setdefault("description", []).extend(
[description_header, payload]
)
elif "description" in unparsed:
unparsed["description"].append(payload)
else:
raw["description"] = payload
    # We need to cast our `raw` to a RawMetadata, because a TypedDict only
    # supports literal key names, whereas we compute our key names on purpose.
    # The way this function is implemented, though, our `TypedDict` can only
    # end up with valid key names.
return cast(RawMetadata, raw), unparsed
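if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: run a tiny, made-up
    # METADATA document through parse_email() and show the raw/unparsed split.
    sample = (
        "Metadata-Version: 2.1\n"
        "Name: example\n"
        "Version: 1.0\n"
        "Keywords: music,metadata\n"
        "Project-URL: Homepage, https://example.invalid\n"
        "\n"
        "A long description goes in the message body.\n"
    )
    raw, unparsed = parse_email(sample)
    print("raw:     ", raw)
    print("unparsed:", unparsed)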
| 16,397 | Python | .py | 364 | 37.123626 | 88 | 0.647257 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,921 | __init__.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/__init__.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "23.1"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014-2019 %s" % __author__
| 501 | Python | .py | 11 | 44.181818 | 79 | 0.703704 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,922 | _musllinux.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_musllinux.py |
"""PEP 656 support.
This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""
import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Optional
from ._elffile import ELFFile
class _MuslVersion(NamedTuple):
major: int
minor: int
def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
lines = [n for n in (n.strip() for n in output.splitlines()) if n]
if len(lines) < 2 or lines[0][:4] != "musl":
return None
m = re.match(r"Version (\d+)\.(\d+)", lines[1])
if not m:
return None
return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
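# Editor's note: an illustrative sketch, not part of the vendored module. The
# sample text mimics the loader banner documented in _get_musl_version():
#
#     >>> _parse_musl_version("musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader")
#     _MuslVersion(major=1, minor=2)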
@functools.lru_cache()
def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
"""Detect currently-running musl runtime version.
This is done by checking the specified executable's dynamic linking
information, and invoking the loader to parse its output for a version
string. If the loader is musl, the output would be something like::
musl libc (x86_64)
Version 1.2.2
Dynamic Program Loader
"""
try:
with open(executable, "rb") as f:
ld = ELFFile(f).interpreter
except (OSError, TypeError, ValueError):
return None
if ld is None or "musl" not in ld:
return None
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
return _parse_musl_version(proc.stderr)
def platform_tags(arch: str) -> Iterator[str]:
"""Generate musllinux tags compatible to the current platform.
:param arch: Should be the part of platform tag after the ``linux_``
prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
prerequisite for the current platform to be musllinux-compatible.
:returns: An iterator of compatible musllinux tags.
"""
sys_musl = _get_musl_version(sys.executable)
if sys_musl is None: # Python not dynamically linked against musl.
return
for minor in range(sys_musl.minor, -1, -1):
yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
if __name__ == "__main__": # pragma: no cover
import sysconfig
plat = sysconfig.get_platform()
assert plat.startswith("linux-"), "not linux"
print("plat:", plat)
print("musl:", _get_musl_version(sys.executable))
print("tags:", end=" ")
for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
print(t, end="\n ")
| 2,524 | Python | .py | 61 | 36.131148 | 80 | 0.671849 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,923 | _elffile.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/_elffile.py |
"""
ELF file parser.
This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""
import enum
import os
import struct
from typing import IO, Optional, Tuple
class ELFInvalid(ValueError):
pass
class EIClass(enum.IntEnum):
C32 = 1
C64 = 2
class EIData(enum.IntEnum):
Lsb = 1
Msb = 2
class EMachine(enum.IntEnum):
I386 = 3
S390 = 22
Arm = 40
X8664 = 62
AArc64 = 183
class ELFFile:
"""
Representation of an ELF executable.
"""
def __init__(self, f: IO[bytes]) -> None:
self._f = f
try:
ident = self._read("16B")
except struct.error:
raise ELFInvalid("unable to parse identification")
magic = bytes(ident[:4])
if magic != b"\x7fELF":
raise ELFInvalid(f"invalid magic: {magic!r}")
self.capacity = ident[4] # Format for program header (bitness).
self.encoding = ident[5] # Data structure encoding (endianness).
try:
            # e_fmt: Format for the remainder of the ELF header.
            # p_fmt: Format for a program header entry.
            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
e_fmt, self._p_fmt, self._p_idx = {
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
}[(self.capacity, self.encoding)]
except KeyError:
raise ELFInvalid(
f"unrecognized capacity ({self.capacity}) or "
f"encoding ({self.encoding})"
)
try:
(
_,
self.machine, # Architecture type.
_,
_,
self._e_phoff, # Offset of program header.
_,
self.flags, # Processor-specific flags.
_,
                self._e_phentsize, # Size of a program header entry.
                self._e_phnum, # Number of program header entries.
) = self._read(e_fmt)
except struct.error as e:
raise ELFInvalid("unable to parse machine and section information") from e
def _read(self, fmt: str) -> Tuple[int, ...]:
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
@property
def interpreter(self) -> Optional[str]:
"""
        The path recorded in the ``PT_INTERP`` program header.
"""
for index in range(self._e_phnum):
self._f.seek(self._e_phoff + self._e_phentsize * index)
try:
data = self._read(self._p_fmt)
except struct.error:
continue
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
continue
self._f.seek(data[self._p_idx[1]])
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
return None
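if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: on an ELF platform,
    # print the dynamic loader recorded in the running interpreter's binary.
    import sys

    try:
        with open(sys.executable, "rb") as f:
            elf = ELFFile(f)
            print("machine:    ", elf.machine)
            print("interpreter:", elf.interpreter)
    except (OSError, ELFInvalid) as exc:
        print("not a readable ELF executable:", exc)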
| 3,266 | Python | .py | 88 | 27.522727 | 86 | 0.545915 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,924 | requirements.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/requirements.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import urllib.parse
from typing import Any, List, Optional, Set
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
class InvalidRequirement(ValueError):
"""
    An invalid requirement was found; users should refer to PEP 508.
"""
class Requirement:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string: str) -> None:
try:
parsed = _parse_requirement(requirement_string)
except ParserSyntaxError as e:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
if parsed.url:
parsed_url = urllib.parse.urlparse(parsed.url)
if parsed_url.scheme == "file":
if urllib.parse.urlunparse(parsed_url) != parsed.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement(f"Invalid URL: {parsed.url}")
self.url: Optional[str] = parsed.url
else:
self.url = None
self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def __str__(self) -> str:
parts: List[str] = [self.name]
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
parts.append(f"[{formatted_extras}]")
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append(f"@ {self.url}")
            if self.marker:
                parts.append(" ")
if self.marker:
parts.append(f"; {self.marker}")
return "".join(parts)
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
self.name == other.name
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
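if __name__ == "__main__":  # pragma: no cover
    # Editor's sketch, not part of the vendored module: parse and re-render a
    # hypothetical PEP 508 requirement string.
    req = Requirement('headphones[api] >=0.5.2, <1.0 ; python_version >= "3.8"')
    print(req.name, sorted(req.extras), str(req.specifier))
    print(req)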
| 3,287 | Python | .py | 75 | 34.88 | 79 | 0.625627 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,925 | version.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/version.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.version import parse, Version
"""
import collections
import itertools
import re
from typing import Any, Callable, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
NegativeInfinityType,
Tuple[
Union[
SubLocalType,
Tuple[SubLocalType, str],
Tuple[NegativeInfinityType, SubLocalType],
],
...,
],
]
CmpKey = Tuple[
int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version: str) -> "Version":
"""Parse the given version string.
>>> parse('1.0.dev1')
<Version('1.0.dev1')>
:param version: The version string to parse.
:raises InvalidVersion: When the version string is not a valid version.
"""
return Version(version)
class InvalidVersion(ValueError):
"""Raised when a version string is not a valid version.
>>> Version("invalid")
Traceback (most recent call last):
...
packaging.version.InvalidVersion: Invalid version: 'invalid'
"""
class _BaseVersion:
_key: Tuple[Any, ...]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.
The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.
:meta hide-value:
"""
class Version(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_key: CmpKey
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be rounded-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
_epoch: int = self._version.epoch
return _epoch
@property
def release(self) -> Tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
_release: Tuple[int, ...] = self._version.release
return _release
@property
def pre(self) -> Optional[Tuple[str, int]]:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
@property
def post(self) -> Optional[int]:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1.2.3+abc.dev1").public
'1.2.3'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3+abc.dev1").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
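# Editorial note (not part of the original module): a brief, hypothetical
# doctest showing PEP 440 ordering in action: dev releases sort before
# pre-releases, which sort before the final release, which sorts before
# post-releases.
def _example_version_ordering():
    """
    >>> sorted(Version(v) for v in ["1.0.post1", "1.0", "1.0rc1", "1.0.dev0"])
    [<Version('1.0.dev0')>, <Version('1.0rc1')>, <Version('1.0')>, <Version('1.0.post1')>]
    """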
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
return None
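# Editorial note (not part of the original module): a hypothetical doctest
# showing the spelling and implicit-number normalization performed above.
def _example_letter_normalization():
    """
    >>> _parse_letter_version("ALPHA", None)
    ('a', 0)
    >>> _parse_letter_version("rev", "4")
    ('post', 4)
    >>> _parse_letter_version(None, "1")  # implicit post release, e.g. 1.0-1
    ('post', 1)
    """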
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
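# Editorial note (not part of the original module): a hypothetical doctest
# showing that numeric parts become ints (so they compare numerically) while
# the rest is lower-cased.
def _example_local_parsing():
    """
    >>> _parse_local_version("Ubuntu-1_foo")
    ('ubuntu', 1, 'foo')
    >>> _parse_local_version(None) is None
    True
    """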
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the
    # now-leading zeros until we come to something non-zero, then re-reverse
    # the rest back into the correct order and use that tuple as our sorting
    # key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
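# Editorial note (not part of the original module): two observable effects of
# the key construction above, sketched as a hypothetical doctest: trailing
# zeros do not affect release equality, and a bare dev release sorts before
# any pre-release of the same version.
def _example_cmpkey_effects():
    """
    >>> Version("1.0.0") == Version("1.0")
    True
    >>> Version("1.0.dev0") < Version("1.0a0")
    True
    """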
| 16,326 | Python | .py | 455 | 27.969231 | 88 | 0.573912 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,926 | specifiers.py | rembo10_headphones/lib/pkg_resources/_vendor/packaging/specifiers.py |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::
from packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
from packaging.version import Version
"""
import abc
import itertools
import re
from typing import (
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
TypeVar,
Union,
)
from .utils import canonicalize_version
from .version import Version
UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]
def _coerce_version(version: UnparsedVersion) -> Version:
if not isinstance(version, Version):
version = Version(version)
return version
class InvalidSpecifier(ValueError):
"""
Raised when attempting to create a :class:`Specifier` with a specifier
string that is invalid.
>>> Specifier("lolwat")
Traceback (most recent call last):
...
packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
"""
class BaseSpecifier(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __str__(self) -> str:
"""
Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Returns a boolean representing whether or not the two Specifier-like
objects are equal.
:param other: The other object to check against.
"""
@property
@abc.abstractmethod
def prereleases(self) -> Optional[bool]:
"""Whether or not pre-releases as a whole are allowed.
This can be set to either ``True`` or ``False`` to explicitly enable or disable
prereleases or it can be set to ``None`` (the default) to use default semantics.
"""
@prereleases.setter
def prereleases(self, value: bool) -> None:
"""Setter for :attr:`prereleases`.
:param value: The value to set.
"""
@abc.abstractmethod
def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""
        Takes an iterable of items and filters them so that only items
        contained within this specifier are yielded.
"""
class Specifier(BaseSpecifier):
"""This class abstracts handling of version specifiers.
.. tip::
        It is generally not required to instantiate this manually. You should
        instead prefer to work with :class:`SpecifierSet`, which can parse
        comma-separated version specifiers (which is what package metadata contains).
"""
_operator_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
"""
_version_regex_str = r"""
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s;)]* # The arbitrary version can be just about anything,
# we match everything except for whitespace, a
# semi-colon for marker support, and a closing paren
# since versions can be enclosed in them.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
# You cannot use a wild card and a pre-release, post-release, a dev or
# local version together so group them with a | and make them optional.
(?:
\.\* # Wild card syntax of .*
|
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
            # All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(alpha|beta|preview|pre|a|b|c|rc)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(
r"^\s*" + _operator_regex_str + _version_regex_str + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
"""Initialize a Specifier instance.
:param spec:
The string representation of a specifier which will be parsed and
normalized before use.
:param prereleases:
This tells the specifier if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given specifier is invalid (i.e. bad syntax).
"""
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
self._spec: Tuple[str, str] = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
# https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
@property # type: ignore[override]
def prereleases(self) -> bool:
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
if Version(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
@property
def operator(self) -> str:
"""The operator of this specifier.
>>> Specifier("==1.2.3").operator
'=='
"""
return self._spec[0]
@property
def version(self) -> str:
"""The version of this specifier.
>>> Specifier("==1.2.3").version
'1.2.3'
"""
return self._spec[1]
def __repr__(self) -> str:
"""A representation of the Specifier that shows all internal state.
>>> Specifier('>=1.0.0')
<Specifier('>=1.0.0')>
>>> Specifier('>=1.0.0', prereleases=False)
<Specifier('>=1.0.0', prereleases=False)>
>>> Specifier('>=1.0.0', prereleases=True)
<Specifier('>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the Specifier that can be round-tripped.
>>> str(Specifier('>=1.0.0'))
'>=1.0.0'
>>> str(Specifier('>=1.0.0', prereleases=False))
'>=1.0.0'
"""
return "{}{}".format(*self._spec)
@property
def _canonical_spec(self) -> Tuple[str, str]:
canonical_version = canonicalize_version(
self._spec[1],
strip_trailing_zero=(self._spec[0] != "~="),
)
return self._spec[0], canonical_version
def __hash__(self) -> int:
return hash(self._canonical_spec)
def __eq__(self, other: object) -> bool:
"""Whether or not the two Specifier-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
True
>>> (Specifier("==1.2.3", prereleases=False) ==
... Specifier("==1.2.3", prereleases=True))
True
>>> Specifier("==1.2.3") == "==1.2.3"
True
>>> Specifier("==1.2.3") == Specifier("==1.2.4")
False
>>> Specifier("==1.2.3") == Specifier("~=1.2.3")
False
"""
if isinstance(other, str):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def _get_operator(self, op: str) -> CallableOperator:
operator_callable: CallableOperator = getattr(
self, f"_compare_{self._operators[op]}"
)
return operator_callable
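    # Editorial note (not part of the original class): a hypothetical doctest
    # showing how each operator in the table above dispatches to its
    # ``_compare_*`` method.
    def _example_operator_dispatch(self):
        """
        >>> Specifier("~=2.2")._get_operator("~=").__name__
        '_compare_compatible'
        >>> Specifier(">1.0")._get_operator(">").__name__
        '_compare_greater_than'
        """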
def _compare_compatible(self, prospective: Version, spec: str) -> bool:
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore suffix segments.
prefix = ".".join(
list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
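    # Editorial note (not part of the original class): a hypothetical doctest
    # showing the equivalence described above, i.e. ``~=2.2`` behaving as
    # ``>=2.2,==2.*``.
    def _example_compatible_release(self):
        """
        >>> Specifier("~=2.2").contains("2.9")
        True
        >>> Specifier("~=2.2").contains("3.0")
        False
        >>> Specifier("~=2.2.3").contains("2.3.0")
        False
        """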
def _compare_equal(self, prospective: Version, spec: str) -> bool:
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
normalized_prospective = canonicalize_version(
prospective.public, strip_trailing_zero=False
)
# Get the normalized version string ignoring the trailing .*
normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
split_spec = _version_split(normalized_spec)
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
split_prospective = _version_split(normalized_prospective)
# 0-pad the prospective version before shortening it to get the correct
# shortened version.
padded_prospective, _ = _pad_version(split_prospective, split_spec)
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = padded_prospective[: len(split_spec)]
return shortened_prospective == split_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
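    # Editorial note (not part of the original class): a hypothetical doctest
    # contrasting prefix matching with plain equality, which ignores trailing
    # zeros.
    def _example_equality_matching(self):
        """
        >>> Specifier("==1.1.*").contains("1.1.7")
        True
        >>> Specifier("==1.1.*").contains("1.2.0")
        False
        >>> Specifier("==1.1").contains("1.1.0")
        True
        """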
def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
return not self._compare_equal(prospective, spec)
def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself is a
        # pre-release version, we do not accept pre-release versions for the
        # version mentioned in the specifier (e.g. <3.1 should not match
        # 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself is a
        # post-release version, we do not accept post-release versions for the
        # version mentioned in the specifier (e.g. >3.1 should not match
        # 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
return str(prospective).lower() == str(spec).lower()
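    # Editorial note (not part of the original class): a hypothetical doctest
    # showing that ``===`` is a plain case-insensitive string comparison with
    # none of the PEP 440 semantics.
    def _example_arbitrary_equality(self):
        """
        >>> Specifier("===1.0").contains("1.0")
        True
        >>> Specifier("===1.0").contains("1.0.0")
        False
        """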
def __contains__(self, item: Union[str, Version]) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in Specifier(">=1.2.3")
True
>>> Version("1.2.3") in Specifier(">=1.2.3")
True
>>> "1.0.0" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3")
False
>>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
True
"""
return self.contains(item)
def contains(
self, item: UnparsedVersion, prereleases: Optional[bool] = None
) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this Specifier. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> Specifier(">=1.2.3").contains("1.2.3")
True
>>> Specifier(">=1.2.3").contains(Version("1.2.3"))
True
>>> Specifier(">=1.2.3").contains("1.0.0")
False
>>> Specifier(">=1.2.3").contains("1.3.0a1")
False
>>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
True
>>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
True
"""
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version, this allows us to have a shortcut for
# "2.0" in Specifier(">=2")
normalized_item = _coerce_version(item)
        # Determine if we should be supporting prereleases in this specifier
        # or not. If we do not support prereleases, we can short-circuit here
        # when this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable: CallableOperator = self._get_operator(self.operator)
return operator_callable(normalized_item, self.version)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifier.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(Specifier().contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
['1.2.3', '1.3', <Version('1.4')>]
>>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
"""
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = _coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version: str) -> List[str]:
result: List[str] = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _is_not_suffix(segment: str) -> bool:
return not any(
segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
)
def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
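# Editorial note (not part of the original module): a hypothetical doctest
# showing the two helpers above: splitting out pre-release suffixes and
# zero-padding release segments to a common length.
def _example_split_and_pad():
    """
    >>> _version_split("1.2rc1")
    ['1', '2', 'rc1']
    >>> _pad_version(["1", "2"], ["1", "2", "0"])
    (['1', '2', '0'], ['1', '2', '0'])
    """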
class SpecifierSet(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
def __init__(
self, specifiers: str = "", prereleases: Optional[bool] = None
) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
            If the given ``specifiers`` are not parseable, then this exception
            will be raised.
"""
        # Split on `,` to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, turning each one into a Specifier.
parsed: Set[Specifier] = set()
for specifier in split_specifiers:
parsed.add(Specifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> Optional[bool]:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: Optional[bool] = None,
installed: Optional[bool] = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
# Ensure that our item is a Version instance.
if not isinstance(item, Version):
item = Version(item)
        # Determine if we're forcing a prerelease or not; if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
if installed and item.is_prerelease:
item = Version(item.base_version)
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
[]
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
versions in the set.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
        # Determine if we're forcing a prerelease or not; if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iter(iterable)
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: List[UnparsedVersionVar] = []
found_prereleases: List[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return iter(found_prereleases)
return iter(filtered)
| 39,206 | Python | .py | 831 | 36.799037 | 88 | 0.579742 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,927 | functools.py | rembo10_headphones/lib/pkg_resources/_vendor/jaraco/functools.py |
import functools
import time
import inspect
import collections
import types
import itertools
import warnings
import pkg_resources.extern.more_itertools
from typing import Callable, TypeVar
CallableT = TypeVar("CallableT", bound=Callable[..., object])
def compose(*funcs):
"""
Compose any number of unary functions into a single unary function.
>>> import textwrap
>>> expected = str.strip(textwrap.dedent(compose.__doc__))
>>> strip_and_dedent = compose(str.strip, textwrap.dedent)
>>> strip_and_dedent(compose.__doc__) == expected
True
Compose also allows the innermost function to take arbitrary arguments.
>>> round_three = lambda x: round(x, ndigits=3)
>>> f = compose(round_three, int.__truediv__)
>>> [f(3*x, x+1) for x in range(1,10)]
[1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
"""
def compose_two(f1, f2):
return lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs)
def method_caller(method_name, *args, **kwargs):
"""
Return a function that will call a named method on the
target object with optional positional and keyword
arguments.
>>> lower = method_caller('lower')
>>> lower('MyString')
'mystring'
"""
def call_method(target):
func = getattr(target, method_name)
return func(*args, **kwargs)
return call_method
def once(func):
"""
Decorate func so it's only ever called the first time.
    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and will be idempotent.
>>> add_three = once(lambda a: a+3)
>>> add_three(3)
6
>>> add_three(9)
6
>>> add_three('12')
6
To reset the stored value, simply clear the property ``saved_result``.
>>> del add_three.saved_result
>>> add_three(9)
12
>>> add_three(8)
12
Or invoke 'reset()' on it.
>>> add_three.reset()
>>> add_three(-3)
0
>>> add_three(0)
0
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(wrapper, 'saved_result'):
wrapper.saved_result = func(*args, **kwargs)
return wrapper.saved_result
wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
return wrapper
def method_cache(
method: CallableT,
cache_wrapper: Callable[
[CallableT], CallableT
] = functools.lru_cache(), # type: ignore[assignment]
) -> CallableT:
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
def wrapper(self: object, *args: object, **kwargs: object) -> object:
# it's the first call, replace the method with a cached, bound method
bound_method: CallableT = types.MethodType( # type: ignore[assignment]
method, self
)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
return ( # type: ignore[return-value]
_special_method_cache(method, cache_wrapper) or wrapper
)
def _special_method_cache(method, cache_wrapper):
"""
Because Python treats special methods differently, it's not
possible to use instance attributes to implement the cached
methods.
Instead, install the wrapper method under a different name
and return a simple proxy to that wrapper.
https://github.com/jaraco/jaraco.functools/issues/5
"""
name = method.__name__
special_names = '__getattr__', '__getitem__'
if name not in special_names:
return
wrapper_name = '__cached' + name
def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self):
bound = types.MethodType(method, self)
cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache)
else:
cache = getattr(self, wrapper_name)
return cache(*args, **kwargs)
return proxy
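# Editorial note (not part of the original library): a hypothetical doctest
# illustrating why the proxy above exists: special methods are looked up on
# the type, so the per-instance cache is stored under a private name instead.
def _example_special_method_cache():
    """
    >>> class Squares:
    ...     @method_cache
    ...     def __getitem__(self, n):
    ...         return n * n
    >>> s = Squares()
    >>> s[4]
    16
    >>> '__cached__getitem__' in vars(s)
    True
    """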
def apply(transform):
"""
Decorate a function with a transform function that is
invoked on results returned from the decorated function.
>>> @apply(reversed)
... def get_numbers(start):
... "doc for get_numbers"
... return range(start, start+3)
>>> list(get_numbers(4))
[6, 5, 4]
>>> get_numbers.__doc__
'doc for get_numbers'
"""
def wrap(func):
return functools.wraps(func)(compose(transform, func))
return wrap
def result_invoke(action):
r"""
Decorate a function with an action function that is
invoked on the results returned from the decorated
function (for its side-effect), then return the original
result.
>>> @result_invoke(print)
... def add_two(a, b):
... return a + b
>>> x = add_two(2, 3)
5
>>> x
5
"""
def wrap(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
action(result)
return result
return wrapper
return wrap
def invoke(f, *args, **kwargs):
"""
Call a function for its side effect after initialization.
The benefit of using the decorator instead of simply invoking a function
after defining it is that it makes explicit the author's intent for the
function to be called immediately. Whereas if one simply calls the
function immediately, it's less obvious if that was intentional or
incidental. It also avoids repeating the name - the two actions, defining
the function and calling it immediately are modeled separately, but linked
by the decorator construct.
The benefit of having a function construct (opposed to just invoking some
behavior inline) is to serve as a scope in which the behavior occurs. It
avoids polluting the global namespace with local variables, provides an
anchor on which to attach documentation (docstring), keeps the behavior
logically separated (instead of conceptually separated or not separated at
all), and provides potential to re-use the behavior for testing or other
purposes.
This function is named as a pithy way to communicate, "call this function
primarily for its side effect", or "while defining this function, also
take it aside and call it". It exists because there's no Python construct
for "define and call" (nor should there be, as decorators serve this need
just fine). The behavior happens immediately and synchronously.
>>> @invoke
... def func(): print("called")
called
>>> func()
called
Use functools.partial to pass parameters to the initial call
>>> @functools.partial(invoke, name='bingo')
... def func(name): print("called with", name)
called with bingo
"""
f(*args, **kwargs)
return f
def call_aside(*args, **kwargs):
"""
Deprecated name for invoke.
"""
warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning)
return invoke(*args, **kwargs)
class Throttler:
"""
Rate-limit a function (or other callable)
"""
def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler):
func = func.func
self.func = func
self.max_rate = max_rate
self.reset()
def reset(self):
self.last_called = 0
def __call__(self, *args, **kwargs):
self._wait()
return self.func(*args, **kwargs)
def _wait(self):
"ensure at least 1/max_rate seconds from last call"
elapsed = time.time() - self.last_called
must_wait = 1 / self.max_rate - elapsed
time.sleep(max(0, must_wait))
self.last_called = time.time()
def __get__(self, obj, type=None):
return first_invoke(self._wait, functools.partial(self.func, obj))
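# Editorial note (not part of the original library): a hypothetical doctest
# showing that wrapping a Throttler in another Throttler rebinds the
# underlying callable (per the ``isinstance`` check in ``__init__``) rather
# than stacking rate limits.
def _example_throttler_unwrap():
    """
    >>> inner = Throttler(print, max_rate=30)
    >>> outer = Throttler(inner, max_rate=2)
    >>> outer.func is print
    True
    """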
def first_invoke(func1, func2):
"""
Return a function that when invoked will invoke func1 without
any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result.
"""
def wrapper(*args, **kwargs):
func1()
return func2(*args, **kwargs)
return wrapper
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
"""
Given a callable func, trap the indicated exceptions
for up to 'retries' times, invoking cleanup on the
exception. On the final attempt, allow any exceptions
to propagate.
"""
attempts = itertools.count() if retries == float('inf') else range(retries)
for attempt in attempts:
try:
return func()
except trap:
cleanup()
return func()
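# Editorial note (not part of the original library): a hypothetical doctest
# showing that ``retries=2`` means two trapped attempts plus a final attempt
# whose exceptions propagate.
def _example_retry_call():
    """
    >>> calls = []
    >>> def flaky():
    ...     calls.append(1)
    ...     if len(calls) < 3:
    ...         raise ValueError("not yet")
    ...     return "ok"
    >>> retry_call(flaky, retries=2, trap=ValueError)
    'ok'
    >>> len(calls)
    3
    """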
def retry(*r_args, **r_kwargs):
"""
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs)
return wrapper
return decorate
def print_yielded(func):
"""
Convert a generator into a function that prints all yielded elements
>>> @print_yielded
... def x():
... yield 3; yield None
>>> x()
3
None
"""
print_all = functools.partial(map, print)
    # Use the fully-qualified vendored name; the bare ``more_itertools`` name
    # is not bound by the ``import pkg_resources.extern.more_itertools``
    # statement at the top of this module.
    print_results = compose(
        pkg_resources.extern.more_itertools.consume, print_all, func
    )
return functools.wraps(func)(print_results)
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper
def assign_params(func, namespace):
"""
Assign parameters from namespace where func solicits.
>>> def func(x, y=3):
... print(x, y)
>>> assigned = assign_params(func, dict(x=2, z=4))
>>> assigned()
2 3
The usual errors are raised if a function doesn't receive
its required parameters:
>>> assigned = assign_params(func, dict(y=3, z=4))
>>> assigned()
Traceback (most recent call last):
TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
"""
sig = inspect.signature(func)
params = sig.parameters.keys()
call_ns = {k: namespace[k] for k in params if k in namespace}
return functools.partial(func, **call_ns)
def save_method_args(method):
"""
Wrap a method such that when it is called, the args and kwargs are
saved on the method.
>>> class MyClass:
... @save_method_args
... def method(self, a, b):
... print(a, b)
>>> my_ob = MyClass()
>>> my_ob.method(1, 2)
1 2
>>> my_ob._saved_method.args
(1, 2)
>>> my_ob._saved_method.kwargs
{}
>>> my_ob.method(a=3, b='foo')
3 foo
>>> my_ob._saved_method.args
()
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
True
    The arguments are stored on the instance, allowing
    different instances to save different args.
>>> your_ob = MyClass()
>>> your_ob.method({str('x'): 3}, b=[4])
{'x': 3} [4]
>>> your_ob._saved_method.args
({'x': 3},)
>>> my_ob._saved_method.args
()
"""
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
attr_name = '_saved_' + method.__name__
attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr)
return method(self, *args, **kwargs)
return wrapper
def except_(*exceptions, replace=None, use=None):
"""
Replace the indicated exceptions, if raised, with the indicated
literal replacement or evaluated expression (if present).
>>> safe_int = except_(ValueError)(int)
>>> safe_int('five')
>>> safe_int('5')
5
Specify a literal replacement with ``replace``.
>>> safe_int_r = except_(ValueError, replace=0)(int)
>>> safe_int_r('five')
0
Provide an expression to ``use`` to pass through particular parameters.
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
>>> safe_int_pt('five')
'five'
"""
def decorate(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
try:
return eval(use)
except TypeError:
return replace
return wrapper
return decorate
| 15,056 | Python | .py | 429 | 29.137529 | 88 | 0.636966 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,928 | context.py | rembo10_headphones/lib/pkg_resources/_vendor/jaraco/context.py |
import os
import subprocess
import contextlib
import functools
import tempfile
import shutil
import operator
import warnings
@contextlib.contextmanager
def pushd(dir):
"""
>>> tmp_path = getfixture('tmp_path')
>>> with pushd(tmp_path):
... assert os.getcwd() == os.fspath(tmp_path)
>>> assert os.getcwd() != os.fspath(tmp_path)
"""
orig = os.getcwd()
os.chdir(dir)
try:
yield dir
finally:
os.chdir(orig)
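# Editorial note (not part of the original library): a fixture-free,
# hypothetical doctest for ``pushd`` using only the standard library.
def _example_pushd():
    """
    >>> import tempfile
    >>> before = os.getcwd()
    >>> with tempfile.TemporaryDirectory() as d:
    ...     with pushd(d):
    ...         os.path.samefile(os.getcwd(), d)
    True
    >>> os.getcwd() == before
    True
    """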
@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
"""
Get a tarball, extract it, change to that directory, yield, then
clean up.
`runner` is the function to invoke commands.
`pushd` is a context manager for changing the directory.
"""
if target_dir is None:
target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
if runner is None:
runner = functools.partial(subprocess.check_call, shell=True)
else:
warnings.warn("runner parameter is deprecated", DeprecationWarning)
    # In the tar command, use --strip-components=1 to strip the first path and
    # then use -C to cause the files to be extracted to {target_dir}. This
    # ensures that we always know where the files were extracted.
runner('mkdir {target_dir}'.format(**vars()))
try:
getter = 'wget {url} -O -'
extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
cmd = ' | '.join((getter, extract))
runner(cmd.format(compression=infer_compression(url), **vars()))
with pushd(target_dir):
yield target_dir
finally:
runner('rm -Rf {target_dir}'.format(**vars()))
def infer_compression(url):
"""
Given a URL or filename, infer the compression code for tar.
>>> infer_compression('http://foo/bar.tar.gz')
'z'
>>> infer_compression('http://foo/bar.tgz')
'z'
>>> infer_compression('file.bz')
'j'
>>> infer_compression('file.xz')
'J'
"""
# cheat and just assume it's the last two characters
compression_indicator = url[-2:]
mapping = dict(gz='z', bz='j', xz='J')
# Assume 'z' (gzip) if no match
return mapping.get(compression_indicator, 'z')
@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
"""
Create a temporary directory context. Pass a custom remover
to override the removal behavior.
>>> import pathlib
>>> with temp_dir() as the_dir:
... assert os.path.isdir(the_dir)
... _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents')
>>> assert not os.path.exists(the_dir)
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
remover(temp_dir)
@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
"""
Check out the repo indicated by url.
If dest_ctx is supplied, it should be a context manager
to yield the target directory for the check out.
"""
exe = 'git' if 'git' in url else 'hg'
with dest_ctx() as repo_dir:
cmd = [exe, 'clone', url, repo_dir]
if branch:
cmd.extend(['--branch', branch])
devnull = open(os.path.devnull, 'w')
stdout = devnull if quiet else None
subprocess.check_call(cmd, stdout=stdout)
yield repo_dir
@contextlib.contextmanager
def null():
"""
A null context suitable to stand in for a meaningful context.
>>> with null() as value:
... assert value is None
"""
yield
class ExceptionTrap:
"""
A context manager that will catch certain exceptions and provide an
indication they occurred.
>>> with ExceptionTrap() as trap:
... raise Exception()
>>> bool(trap)
True
>>> with ExceptionTrap() as trap:
... pass
>>> bool(trap)
False
>>> with ExceptionTrap(ValueError) as trap:
... raise ValueError("1 + 1 is not 3")
>>> bool(trap)
True
>>> trap.value
ValueError('1 + 1 is not 3')
>>> trap.tb
<traceback object at ...>
>>> with ExceptionTrap(ValueError) as trap:
... raise Exception()
Traceback (most recent call last):
...
Exception
>>> bool(trap)
False
"""
exc_info = None, None, None
def __init__(self, exceptions=(Exception,)):
self.exceptions = exceptions
def __enter__(self):
return self
@property
def type(self):
return self.exc_info[0]
@property
def value(self):
return self.exc_info[1]
@property
def tb(self):
return self.exc_info[2]
def __exit__(self, *exc_info):
type = exc_info[0]
matches = type and issubclass(type, self.exceptions)
if matches:
self.exc_info = exc_info
return matches
def __bool__(self):
return bool(self.type)
def raises(self, func, *, _test=bool):
"""
Wrap func and replace the result with the truth
value of the trap (True if an exception occurred).
First, give the decorator an alias to support Python 3.8
syntax.
>>> raises = ExceptionTrap(ValueError).raises
Now decorate a function that always fails.
>>> @raises
... def fail():
... raise ValueError('failed')
>>> fail()
True
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
with ExceptionTrap(self.exceptions) as trap:
func(*args, **kwargs)
return _test(trap)
return wrapper
def passes(self, func):
"""
Wrap func and replace the result with the truth
value of the trap (True if no exception).
First, give the decorator an alias to support Python 3.8
syntax.
>>> passes = ExceptionTrap(ValueError).passes
Now decorate a function that always fails.
>>> @passes
... def fail():
... raise ValueError('failed')
>>> fail()
False
"""
return self.raises(func, _test=operator.not_)
class suppress(contextlib.suppress, contextlib.ContextDecorator):
"""
A version of contextlib.suppress with decorator support.
>>> @suppress(KeyError)
... def key_error():
... {}['']
>>> key_error()
"""
class on_interrupt(contextlib.ContextDecorator):
"""
Replace a KeyboardInterrupt with SystemExit(1)
>>> def do_interrupt():
... raise KeyboardInterrupt()
>>> on_interrupt('error')(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 1
>>> on_interrupt('error', code=255)(do_interrupt)()
Traceback (most recent call last):
...
SystemExit: 255
>>> on_interrupt('suppress')(do_interrupt)()
>>> with __import__('pytest').raises(KeyboardInterrupt):
... on_interrupt('ignore')(do_interrupt)()
"""
def __init__(
self,
action='error',
# py3.7 compat
# /,
code=1,
):
self.action = action
self.code = code
def __enter__(self):
return self
def __exit__(self, exctype, excinst, exctb):
if exctype is not KeyboardInterrupt or self.action == 'ignore':
return
elif self.action == 'error':
raise SystemExit(self.code) from excinst
return self.action == 'suppress'
| 7,460 | Python | .py | 234 | 25.538462 | 85 | 0.610429 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,929 | __init__.py | rembo10_headphones/lib/pkg_resources/_vendor/jaraco/text/__init__.py |
import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from pkg_resources.extern.importlib_resources import files # type: ignore
from pkg_resources.extern.jaraco.functools import compose, method_cache
from pkg_resources.extern.jaraco.context import ExceptionTrap
def substitution(old, new):
"""
Return a function that will perform a substitution on a string
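For example:
>>> substitution('foo', 'bar')('foofoo')
'barbar'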
"""
return lambda s: s.replace(old, new)
def multi_substitution(*substitutions):
"""
Take a sequence of pairs specifying substitutions, and create
a function that performs those substitutions.
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
'baz'
"""
substitutions = itertools.starmap(substitution, substitutions)
# compose function applies last function first, so reverse the
# substitutions to get the expected order.
substitutions = reversed(tuple(substitutions))
return compose(*substitutions)
class FoldedCase(str):
"""
A case-insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use ``in_``:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
# Python 3.8 compatibility
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
@_unicode_trap.passes
def is_decodable(value):
r"""
Return True if the supplied value is decodable (using the default
encoding).
>>> is_decodable(b'\xff')
False
>>> is_decodable(b'\x32')
True
"""
value.decode()
def is_binary(value):
r"""
Return True if the value appears to be binary (that is, it's a byte
string and isn't decodable).
>>> is_binary(b'\xff')
True
>>> is_binary('\xff')
False
"""
return isinstance(value, bytes) and not is_decodable(value)
def trim(s):
r"""
Trim something like a docstring to remove the whitespace that
is common due to indentation and formatting.
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
'foo = bar\n\tbar = baz'
"""
return textwrap.dedent(s).strip()
def wrap(s):
"""
Wrap lines of text, retaining existing newlines as
paragraph markers.
>>> print(wrap(lorem_ipsum))
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
<BLANKLINE>
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
"""
paragraphs = s.splitlines()
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
return '\n\n'.join(wrapped)
def unwrap(s):
r"""
Given a multi-line string, return an unwrapped version.
>>> wrapped = wrap(lorem_ipsum)
>>> wrapped.count('\n')
20
>>> unwrapped = unwrap(wrapped)
>>> unwrapped.count('\n')
1
>>> print(unwrapped)
Lorem ipsum dolor sit amet, consectetur adipiscing ...
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
"""
paragraphs = re.split(r'\n\n+', s)
cleaned = (para.replace('\n', ' ') for para in paragraphs)
return '\n'.join(cleaned)
class Splitter(object):
"""object that will split a string with the given arguments for each call
>>> s = Splitter(',')
>>> s('hello, world, this is your, master calling')
['hello', ' world', ' this is your', ' master calling']
"""
def __init__(self, *args):
self.args = args
def __call__(self, s):
return s.split(*self.args)
def indent(string, prefix=' ' * 4):
"""
>>> indent('foo')
' foo'
"""
return prefix + string
class WordSet(tuple):
"""
Given an identifier, return the words that identifier represents,
whether in camel case, underscore-separated, etc.
>>> WordSet.parse("camelCase")
('camel', 'Case')
>>> WordSet.parse("under_sep")
('under', 'sep')
Acronyms should be retained
>>> WordSet.parse("firstSNL")
('first', 'SNL')
>>> WordSet.parse("you_and_I")
('you', 'and', 'I')
>>> WordSet.parse("A simple test")
('A', 'simple', 'test')
Multiple caps should not interfere with the first cap of another word.
>>> WordSet.parse("myABCClass")
('my', 'ABC', 'Class')
The result is a WordSet, so you can get the form you need.
>>> WordSet.parse("myABCClass").underscore_separated()
'my_ABC_Class'
>>> WordSet.parse('a-command').camel_case()
'ACommand'
>>> WordSet.parse('someIdentifier').lowered().space_separated()
'some identifier'
Slices of the result should return another WordSet.
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
'out_of_context'
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
'word set'
>>> example = WordSet.parse('figured it out')
>>> example.headless_camel_case()
'figuredItOut'
>>> example.dash_separated()
'figured-it-out'
"""
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
def capitalized(self):
return WordSet(word.capitalize() for word in self)
def lowered(self):
return WordSet(word.lower() for word in self)
def camel_case(self):
return ''.join(self.capitalized())
def headless_camel_case(self):
words = iter(self)
first = next(words).lower()
new_words = itertools.chain((first,), WordSet(words).camel_case())
return ''.join(new_words)
def underscore_separated(self):
return '_'.join(self)
def dash_separated(self):
return '-'.join(self)
def space_separated(self):
return ' '.join(self)
def trim_right(self, item):
"""
Remove the item from the end of the set.
>>> WordSet.parse('foo bar').trim_right('foo')
('foo', 'bar')
>>> WordSet.parse('foo bar').trim_right('bar')
('foo',)
>>> WordSet.parse('').trim_right('bar')
()
"""
return self[:-1] if self and self[-1] == item else self
def trim_left(self, item):
"""
Remove the item from the beginning of the set.
>>> WordSet.parse('foo bar').trim_left('foo')
('bar',)
>>> WordSet.parse('foo bar').trim_left('bar')
('foo', 'bar')
>>> WordSet.parse('').trim_left('bar')
()
"""
return self[1:] if self and self[0] == item else self
def trim(self, item):
"""
>>> WordSet.parse('foo bar').trim('foo')
('bar',)
"""
return self.trim_left(item).trim_right(item)
def __getitem__(self, item):
result = super(WordSet, self).__getitem__(item)
if isinstance(item, slice):
result = WordSet(result)
return result
@classmethod
def parse(cls, identifier):
matches = cls._pattern.finditer(identifier)
return WordSet(match.group(0) for match in matches)
@classmethod
def from_class_name(cls, subject):
return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
"""
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
return ''.join(texts)
class SeparatedValues(str):
"""
A string separated by a separator. Overrides __iter__ for getting
the values.
>>> list(SeparatedValues('a,b,c'))
['a', 'b', 'c']
Whitespace is stripped and empty values are discarded.
>>> list(SeparatedValues(' a, b , c, '))
['a', 'b', 'c']
"""
separator = ','
def __iter__(self):
parts = self.split(self.separator)
return filter(None, (part.strip() for part in parts))
class Stripper:
r"""
Given a series of lines, find the common prefix and strip it from them.
>>> lines = [
... 'abcdefg\n',
... 'abc\n',
... 'abcde\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
'abc'
>>> list(res.lines)
['defg\n', '\n', 'de\n']
If no prefix is common, nothing should be stripped.
>>> lines = [
... 'abcd\n',
... '1234\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
''
>>> list(res.lines)
['abcd\n', '1234\n']
"""
def __init__(self, prefix, lines):
self.prefix = prefix
self.lines = map(self, lines)
@classmethod
def strip_prefix(cls, lines):
prefix_lines, lines = itertools.tee(lines)
prefix = functools.reduce(cls.common_prefix, prefix_lines)
return cls(prefix, lines)
def __call__(self, line):
if not self.prefix:
return line
null, prefix, rest = line.partition(self.prefix)
return rest
@staticmethod
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
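For example:
>>> Stripper.common_prefix('abcd', 'abef')
'ab'
>>> Stripper.common_prefix('abcd', 'xyz')
''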
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index]
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
"""
null, prefix, rest = text.rpartition(prefix)
return rest
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
"""
rest, suffix, null = text.partition(suffix)
return rest
def normalize_newlines(text):
r"""
Replace alternate newlines with the canonical newline.
>>> normalize_newlines('Lorem Ipsum\u2029')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\r\n')
'Lorem Ipsum\n'
>>> normalize_newlines('Lorem Ipsum\x85')
'Lorem Ipsum\n'
"""
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
pattern = '|'.join(newlines)
return re.sub(pattern, '\n', text)
def _nonblank(str):
return str and not str.startswith('#')
@functools.singledispatch
def yield_lines(iterable):
r"""
Yield valid lines of a string or iterable.
>>> list(yield_lines(''))
[]
>>> list(yield_lines(['foo', 'bar']))
['foo', 'bar']
>>> list(yield_lines('foo\nbar'))
['foo', 'bar']
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
['foo', 'baz #comment']
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
['foo', 'bar', 'baz', 'bing']
"""
return itertools.chain.from_iterable(map(yield_lines, iterable))
@yield_lines.register(str)
def _(text):
return filter(_nonblank, map(str.strip, text.splitlines()))
def drop_comment(line):
"""
Drop comments.
>>> drop_comment('foo # bar')
'foo'
A hash without a space may be in a URL.
>>> drop_comment('http://example.com/foo#bar')
'http://example.com/foo#bar'
"""
return line.partition(' #')[0]
def join_continuation(lines):
r"""
Join lines continued by a trailing backslash.
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
['foobar', 'baz']
>>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
['foobarbaz']
Not sure why, but...
The character preceding the backslash is also elided.
>>> list(join_continuation(['goo\\', 'dly']))
['godly']
A terrible idea, but...
If no line is available to continue, suppress the lines.
>>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
['foo']
"""
lines = iter(lines)
for item in lines:
while item.endswith('\\'):
try:
item = item[:-2].strip() + next(lines)
except StopIteration:
return
yield item
| 15,526 | Python | .py | 446 | 29.002242 | 78 | 0.622697 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,930 | utc.py | rembo10_headphones/lib/tempora/utc.py |
"""
Facilities for common time operations in UTC.
Inspired by the `utc project <https://pypi.org/project/utc>`_.
>>> dt = now()
>>> dt == fromtimestamp(dt.timestamp())
True
>>> dt.tzinfo
datetime.timezone.utc
>>> from time import time as timestamp
>>> now().timestamp() - timestamp() < 0.1
True
>>> (now() - fromtimestamp(timestamp())).total_seconds() < 0.1
True
>>> datetime(2018, 6, 26, 0).tzinfo
datetime.timezone.utc
>>> time(0, 0).tzinfo
datetime.timezone.utc
"""
import datetime as std
import functools
__all__ = ['now', 'fromtimestamp', 'datetime', 'time']
now = functools.partial(std.datetime.now, std.timezone.utc)
fromtimestamp = functools.partial(std.datetime.fromtimestamp, tz=std.timezone.utc)
datetime = functools.partial(std.datetime, tzinfo=std.timezone.utc)
time = functools.partial(std.time, tzinfo=std.timezone.utc)
| 846 | Python | .py | 25 | 32.4 | 82 | 0.739506 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,931 | schedule.py | rembo10_headphones/lib/tempora/schedule.py |
"""
Classes for calling functions on a schedule. Has time zone support.
For example, to run a job at 08:00 every morning in 'Asia/Calcutta':
>>> job = lambda: print("time is now", datetime.datetime())
>>> time = datetime.time(8, tzinfo=pytz.timezone('Asia/Calcutta'))
>>> cmd = PeriodicCommandFixedDelay.daily_at(time, job)
>>> sched = InvokeScheduler()
>>> sched.add(cmd)
>>> while True: # doctest: +SKIP
... sched.run_pending()
... time.sleep(.1)
"""
import datetime
import numbers
import abc
import bisect
import pytz
def now():
"""
Provide the current timezone-aware datetime.
A client may override this function to change the default behavior,
such as to use local time or timezone-naïve times.
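The default result is UTC-aware:
>>> now().tzinfo
<UTC>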
"""
return datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
def from_timestamp(ts):
"""
Convert a numeric timestamp to a timezone-aware datetime.
A client may override this function to change the default behavior,
such as to use local time or timezone-naïve times.
"""
return datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
class DelayedCommand(datetime.datetime):
"""
A command to be executed after some delay (seconds or timedelta).
"""
@classmethod
def from_datetime(cls, other):
return cls(
other.year,
other.month,
other.day,
other.hour,
other.minute,
other.second,
other.microsecond,
other.tzinfo,
)
@classmethod
def after(cls, delay, target):
if not isinstance(delay, datetime.timedelta):
delay = datetime.timedelta(seconds=delay)
due_time = now() + delay
cmd = cls.from_datetime(due_time)
cmd.delay = delay
cmd.target = target
return cmd
@staticmethod
def _from_timestamp(input):
"""
If input is a real number, interpret it as a Unix timestamp
(seconds since the epoch, in UTC) and return a timezone-aware
datetime object. Otherwise return input unchanged.
"""
if not isinstance(input, numbers.Real):
return input
return from_timestamp(input)
@classmethod
def at_time(cls, at, target):
"""
Construct a DelayedCommand to come due at `at`, where `at` may be
a datetime or timestamp.
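For example, a command due an hour from now is not yet due:
>>> cmd = DelayedCommand.at_time(now() + datetime.timedelta(hours=1), None)
>>> cmd.due()
False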
"""
at = cls._from_timestamp(at)
cmd = cls.from_datetime(at)
cmd.delay = at - now()
cmd.target = target
return cmd
def due(self):
return now() >= self
class PeriodicCommand(DelayedCommand):
"""
Like a delayed command, but expect this command to run every delay
seconds.
"""
def _next_time(self):
"""
Add delay to self, localized
"""
return self._localize(self + self.delay)
@staticmethod
def _localize(dt):
"""
Rely on pytz.localize to ensure new result honors DST.
"""
try:
tz = dt.tzinfo
return tz.localize(dt.replace(tzinfo=None))
except AttributeError:
return dt
def next(self):
cmd = self.__class__.from_datetime(self._next_time())
cmd.delay = self.delay
cmd.target = self.target
return cmd
def __setattr__(self, key, value):
if key == 'delay' and not value > datetime.timedelta():
raise ValueError(
"A PeriodicCommand must have a positive, " "non-zero delay."
)
super(PeriodicCommand, self).__setattr__(key, value)
class PeriodicCommandFixedDelay(PeriodicCommand):
"""
Like a periodic command, but don't calculate the delay based on
the current time. Instead use a fixed delay following the initial
run.
"""
@classmethod
def at_time(cls, at, delay, target):
"""
>>> cmd = PeriodicCommandFixedDelay.at_time(0, 30, None)
>>> cmd.delay.total_seconds()
30.0
"""
at = cls._from_timestamp(at)
cmd = cls.from_datetime(at)
if isinstance(delay, numbers.Number):
delay = datetime.timedelta(seconds=delay)
cmd.delay = delay
cmd.target = target
return cmd
@classmethod
def daily_at(cls, at, target):
"""
Schedule a command to run at a specific time each day.
>>> from tempora import utc
>>> noon = utc.time(12, 0)
>>> cmd = PeriodicCommandFixedDelay.daily_at(noon, None)
>>> cmd.delay.total_seconds()
86400.0
"""
daily = datetime.timedelta(days=1)
# convert when to the next datetime matching this time
when = datetime.datetime.combine(datetime.date.today(), at)
when -= daily
while when < now():
when += daily
return cls.at_time(cls._localize(when), daily, target)
class Scheduler:
"""
A rudimentary abstract scheduler accepting DelayedCommands
and dispatching them on schedule.
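A minimal sketch: commands queue until they come due.
>>> sched = InvokeScheduler()
>>> sched.add(DelayedCommand.after(60, lambda: None))
>>> sched.run_pending()
>>> len(sched.queue)
1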
"""
def __init__(self):
self.queue = []
def add(self, command):
assert isinstance(command, DelayedCommand)
bisect.insort(self.queue, command)
def run_pending(self):
while self.queue:
command = self.queue[0]
if not command.due():
break
self.run(command)
if isinstance(command, PeriodicCommand):
self.add(command.next())
del self.queue[0]
@abc.abstractmethod
def run(self, command):
"""
Run the command
"""
class InvokeScheduler(Scheduler):
"""
Command targets are functions to be invoked on schedule.
"""
def run(self, command):
command.target()
class CallbackScheduler(Scheduler):
"""
Command targets are passed to a dispatch callable on schedule.
"""
def __init__(self, dispatch):
super().__init__()
self.dispatch = dispatch
def run(self, command):
self.dispatch(command.target)
| 6,065 | Python | .py | 186 | 25.139785 | 76 | 0.616004 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,932 | __init__.py | rembo10_headphones/lib/tempora/__init__.py |
"Objects and routines pertaining to date and time (tempora)"
import datetime
import time
import re
import numbers
import functools
import warnings
import contextlib
from jaraco.functools import once
class Parser:
"""
*deprecated*
Datetime parser: parses a date-time string using multiple possible
formats.
>>> p = Parser(('%H%M', '%H:%M'))
>>> tuple(p.parse('1319'))
(1900, 1, 1, 13, 19, 0, 0, 1, -1)
>>> dateParser = Parser(('%m/%d/%Y', '%Y-%m-%d', '%d-%b-%Y'))
>>> tuple(dateParser.parse('2003-12-20'))
(2003, 12, 20, 0, 0, 0, 5, 354, -1)
>>> tuple(dateParser.parse('16-Dec-1994'))
(1994, 12, 16, 0, 0, 0, 4, 350, -1)
>>> tuple(dateParser.parse('5/19/2003'))
(2003, 5, 19, 0, 0, 0, 0, 139, -1)
>>> dtParser = Parser(('%Y-%m-%d %H:%M:%S', '%a %b %d %H:%M:%S %Y'))
>>> tuple(dtParser.parse('2003-12-20 19:13:26'))
(2003, 12, 20, 19, 13, 26, 5, 354, -1)
>>> tuple(dtParser.parse('Tue Jan 20 16:19:33 2004'))
(2004, 1, 20, 16, 19, 33, 1, 20, -1)
Be forewarned, a ValueError will be raised if more than one format
matches:
>>> Parser(('%H%M', '%H%M%S')).parse('732')
Traceback (most recent call last):
...
ValueError: More than one format string matched target 732.
>>> Parser(('%H',)).parse('22:21')
Traceback (most recent call last):
...
ValueError: No format strings matched the target 22:21.
"""
formats = ('%m/%d/%Y', '%m/%d/%y', '%Y-%m-%d', '%d-%b-%Y', '%d-%b-%y')
"some common default formats"
def __init__(self, formats=None):
warnings.warn("Use dateutil.parser", DeprecationWarning)
if formats:
self.formats = formats
def parse(self, target):
self.target = target
results = tuple(filter(None, map(self._parse, self.formats)))
del self.target
if not results:
tmpl = "No format strings matched the target {target}."
raise ValueError(tmpl.format(**locals()))
if not len(results) == 1:
tmpl = "More than one format string matched target {target}."
raise ValueError(tmpl.format(**locals()))
return results[0]
def _parse(self, format):
try:
result = time.strptime(self.target, format)
except ValueError:
result = False
return result
# some useful constants
osc_per_year = 290091329207984000
"""
mean vernal equinox year expressed in oscillations of atomic cesium at the
year 2000 (see http://webexhibits.org/calendars/timeline.html for more info).
"""
osc_per_second = 9192631770
seconds_per_second = 1
seconds_per_year = 31556940
seconds_per_minute = 60
minutes_per_hour = 60
hours_per_day = 24
seconds_per_hour = seconds_per_minute * minutes_per_hour
seconds_per_day = seconds_per_hour * hours_per_day
days_per_year = seconds_per_year / seconds_per_day
thirty_days = datetime.timedelta(days=30)
# these values provide useful averages
six_months = datetime.timedelta(days=days_per_year / 2)
seconds_per_month = seconds_per_year / 12
hours_per_month = hours_per_day * days_per_year / 12
@once
def _needs_year_help():
"""
Some versions of Python render %Y with only three characters :(
https://bugs.python.org/issue39103
"""
return len(datetime.date(900, 1, 1).strftime('%Y')) != 4
def ensure_datetime(ob):
"""
Given a datetime or date or time object from the ``datetime``
module, always return a datetime using default values.
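For example:
>>> ensure_datetime(datetime.date(2004, 1, 2))
datetime.datetime(2004, 1, 2, 0, 0)
>>> ensure_datetime(datetime.time(9, 30))
datetime.datetime(1900, 1, 1, 9, 30)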
"""
if isinstance(ob, datetime.datetime):
return ob
date = time = ob
if isinstance(ob, datetime.date):
time = datetime.time()
if isinstance(ob, datetime.time):
date = datetime.date(1900, 1, 1)
return datetime.datetime.combine(date, time)
def strftime(fmt, t):
"""
Portable strftime.
In the stdlib, strftime has `known portability problems
<https://bugs.python.org/issue13305>`_. This function
aims to smooth over those issues and provide a
consistent experience across the major platforms.
>>> strftime('%Y', datetime.datetime(1890, 1, 1))
'1890'
>>> strftime('%Y', datetime.datetime(900, 1, 1))
'0900'
Supports time.struct_time, tuples, and datetime.datetime objects.
>>> strftime('%Y-%m-%d', (1976, 5, 7))
'1976-05-07'
Also supports date objects
>>> strftime('%Y', datetime.date(1976, 5, 7))
'1976'
Also supports milliseconds using %s.
>>> strftime('%s', datetime.time(microsecond=20000))
'020'
Also supports microseconds (3 digits) using %µ
>>> strftime('%µ', datetime.time(microsecond=123456))
'456'
Historically, %u was used for microseconds, but now
it honors the value rendered by stdlib.
>>> strftime('%u', datetime.date(1976, 5, 7))
'5'
Also supports microseconds (6 digits) using %f
>>> strftime('%f', datetime.time(microsecond=23456))
'023456'
Even supports time values on date objects (discouraged):
>>> strftime('%f', datetime.date(1976, 1, 1))
'000000'
>>> strftime('%µ', datetime.date(1976, 1, 1))
'000'
>>> strftime('%s', datetime.date(1976, 1, 1))
'000'
And vice-versa:
>>> strftime('%Y', datetime.time())
'1900'
"""
if isinstance(t, (time.struct_time, tuple)):
t = datetime.datetime(*t[:6])
t = ensure_datetime(t)
subs = (
('%s', '%03d' % (t.microsecond // 1000)),
('%µ', '%03d' % (t.microsecond % 1000)),
)
if _needs_year_help(): # pragma: nocover
subs += (('%Y', '%04d' % t.year),)
def doSub(s, sub):
return s.replace(*sub)
def doSubs(s):
return functools.reduce(doSub, subs, s)
fmt = '%%'.join(map(doSubs, fmt.split('%%')))
return t.strftime(fmt)
def datetime_mod(dt, period, start=None):
"""
Find the time which is the specified date/time truncated to the time delta
relative to the start date/time.
By default, the start time is midnight of the same day as the specified
date/time.
>>> datetime_mod(datetime.datetime(2004, 1, 2, 3),
... datetime.timedelta(days = 1.5),
... start = datetime.datetime(2004, 1, 1))
datetime.datetime(2004, 1, 1, 0, 0)
>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),
... datetime.timedelta(days = 1.5),
... start = datetime.datetime(2004, 1, 1))
datetime.datetime(2004, 1, 2, 12, 0)
>>> datetime_mod(datetime.datetime(2004, 1, 2, 13),
... datetime.timedelta(days = 7),
... start = datetime.datetime(2004, 1, 1))
datetime.datetime(2004, 1, 1, 0, 0)
>>> datetime_mod(datetime.datetime(2004, 1, 10, 13),
... datetime.timedelta(days = 7),
... start = datetime.datetime(2004, 1, 1))
datetime.datetime(2004, 1, 8, 0, 0)
"""
if start is None:
# use midnight of the same day
start = datetime.datetime.combine(dt.date(), datetime.time())
# calculate the difference between the specified time and the start date.
delta = dt - start
# now aggregate the delta and the period into microseconds
# Use microseconds because that's the highest precision of these time
# pieces. Also, using microseconds ensures perfect precision (no floating
# point errors).
def get_time_delta_microseconds(td):
return (td.days * seconds_per_day + td.seconds) * 1000000 + td.microseconds
delta, period = map(get_time_delta_microseconds, (delta, period))
offset = datetime.timedelta(microseconds=delta % period)
# the result is the original specified time minus the offset
result = dt - offset
return result
def datetime_round(dt, period, start=None):
"""
Find the nearest even period for the specified date/time.
>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 11, 13),
... datetime.timedelta(hours = 1))
datetime.datetime(2004, 11, 13, 8, 0)
>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 31, 13),
... datetime.timedelta(hours = 1))
datetime.datetime(2004, 11, 13, 9, 0)
>>> datetime_round(datetime.datetime(2004, 11, 13, 8, 30),
... datetime.timedelta(hours = 1))
datetime.datetime(2004, 11, 13, 9, 0)
"""
result = datetime_mod(dt, period, start)
if abs(dt - result) >= period // 2:
result += period
return result
def get_nearest_year_for_day(day):
"""
Returns the nearest year to now inferred from a Julian date.
>>> freezer = getfixture('freezer')
>>> freezer.move_to('2019-05-20')
>>> get_nearest_year_for_day(20)
2019
>>> get_nearest_year_for_day(340)
2018
>>> freezer.move_to('2019-12-15')
>>> get_nearest_year_for_day(20)
2020
"""
now = time.gmtime()
result = now.tm_year
# if the day is far greater than today, it must be from last year
if day - now.tm_yday > 365 // 2:
result -= 1
# if the day is far less than today, it must be for next year.
if now.tm_yday - day > 365 // 2:
result += 1
return result
def gregorian_date(year, julian_day):
"""
Gregorian Date is defined as a year and a julian day (1-based
index into the days of the year).
>>> gregorian_date(2007, 15)
datetime.date(2007, 1, 15)
"""
result = datetime.date(year, 1, 1)
result += datetime.timedelta(days=julian_day - 1)
return result
def get_period_seconds(period):
"""
return the number of seconds in the specified period
>>> get_period_seconds('day')
86400
>>> get_period_seconds(86400)
86400
>>> get_period_seconds(datetime.timedelta(hours=24))
86400
>>> get_period_seconds('day + os.system("rm -Rf *")')
Traceback (most recent call last):
...
ValueError: period not in (second, minute, hour, day, month, year)
"""
if isinstance(period, str):
try:
name = 'seconds_per_' + period.lower()
result = globals()[name]
except KeyError:
msg = "period not in (second, minute, hour, day, month, year)"
raise ValueError(msg)
elif isinstance(period, numbers.Number):
result = period
elif isinstance(period, datetime.timedelta):
result = period.days * get_period_seconds('day') + period.seconds
else:
raise TypeError('period must be a string or integer')
return result
def get_date_format_string(period):
"""
For a given period (e.g. 'month', 'day', or some numeric interval
such as 3600 (in secs)), return the format string that can be
used with strftime to format that time to specify the times
across that interval, but no more detailed.
For example,
>>> get_date_format_string('month')
'%Y-%m'
>>> get_date_format_string(3600)
'%Y-%m-%d %H'
>>> get_date_format_string('hour')
'%Y-%m-%d %H'
>>> get_date_format_string(None)
Traceback (most recent call last):
...
TypeError: period must be a string or integer
>>> get_date_format_string('garbage')
Traceback (most recent call last):
...
ValueError: period not in (second, minute, hour, day, month, year)
"""
# handle the special case of 'month' which doesn't have
# a static interval in seconds
if isinstance(period, str) and period.lower() == 'month':
return '%Y-%m'
file_period_secs = get_period_seconds(period)
format_pieces = ('%Y', '-%m-%d', ' %H', '-%M', '-%S')
seconds_per_second = 1
intervals = (
seconds_per_year,
seconds_per_day,
seconds_per_hour,
seconds_per_minute,
seconds_per_second,
)
mods = list(map(lambda interval: file_period_secs % interval, intervals))
format_pieces = format_pieces[: mods.index(0) + 1]
return ''.join(format_pieces)
def divide_timedelta_float(td, divisor):
"""
Divide a timedelta by a float value
>>> one_day = datetime.timedelta(days=1)
>>> half_day = datetime.timedelta(days=.5)
>>> divide_timedelta_float(one_day, 2.0) == half_day
True
>>> divide_timedelta_float(one_day, 2) == half_day
True
"""
warnings.warn("Use native division", DeprecationWarning)
return td / divisor
def calculate_prorated_values():
"""
>>> monkeypatch = getfixture('monkeypatch')
>>> import builtins
>>> monkeypatch.setattr(builtins, 'input', lambda prompt: '3/hour')
>>> calculate_prorated_values()
per minute: 0.05
per hour: 3.0
per day: 72.0
per month: 2191.454166666667
per year: 26297.45
"""
rate = input("Enter the rate (3/hour, 50/month)> ")
for period, value in _prorated_values(rate):
print("per {period}: {value}".format(**locals()))
def _prorated_values(rate):
"""
Given a rate (a string in units per unit time), return that same
rate for various time periods.
>>> for period, value in _prorated_values('20/hour'):
... print('{period}: {value:0.3f}'.format(**locals()))
minute: 0.333
hour: 20.000
day: 480.000
month: 14609.694
year: 175316.333
"""
res = re.match(r'(?P<value>[\d.]+)/(?P<period>\w+)$', rate).groupdict()
value = float(res['value'])
value_per_second = value / get_period_seconds(res['period'])
for period in ('minute', 'hour', 'day', 'month', 'year'):
period_value = value_per_second * get_period_seconds(period)
yield period, period_value
def parse_timedelta(str):
"""
Take a string representing a span of time and parse it to a time delta.
Accepts any string of comma-separated numbers each with a unit indicator.
>>> parse_timedelta('1 day')
datetime.timedelta(days=1)
>>> parse_timedelta('1 day, 30 seconds')
datetime.timedelta(days=1, seconds=30)
>>> parse_timedelta('47.32 days, 20 minutes, 15.4 milliseconds')
datetime.timedelta(days=47, seconds=28848, microseconds=15400)
Supports weeks, months, years
>>> parse_timedelta('1 week')
datetime.timedelta(days=7)
>>> parse_timedelta('1 year, 1 month')
datetime.timedelta(days=395, seconds=58685)
Note that months and years are strict intervals, not aligned
to a calendar:
>>> now = datetime.datetime.now()
>>> later = now + parse_timedelta('1 year')
>>> diff = later.replace(year=now.year) - now
>>> diff.seconds
20940
>>> parse_timedelta('14 seconds foo')
Traceback (most recent call last):
...
ValueError: Unexpected 'foo'
Supports abbreviations:
>>> parse_timedelta('1s')
datetime.timedelta(seconds=1)
>>> parse_timedelta('1sec')
datetime.timedelta(seconds=1)
>>> parse_timedelta('5min1sec')
datetime.timedelta(seconds=301)
>>> parse_timedelta('1 ms')
datetime.timedelta(microseconds=1000)
>>> parse_timedelta('1 µs')
datetime.timedelta(microseconds=1)
>>> parse_timedelta('1 us')
datetime.timedelta(microseconds=1)
And supports the common colon-separated duration:
>>> parse_timedelta('14:00:35.362')
datetime.timedelta(seconds=50435, microseconds=362000)
TODO: Should this be 14 hours or 14 minutes?
>>> parse_timedelta('14:00')
datetime.timedelta(seconds=50400)
>>> parse_timedelta('14:00 minutes')
Traceback (most recent call last):
...
ValueError: Cannot specify units with composite delta
Nanoseconds get rounded to the nearest microsecond:
>>> parse_timedelta('600 ns')
datetime.timedelta(microseconds=1)
>>> parse_timedelta('.002 µs, 499 ns')
datetime.timedelta(microseconds=1)
"""
return _parse_timedelta_nanos(str).resolve()
def _parse_timedelta_nanos(str):
parts = re.finditer(r'(?P<value>[\d.:]+)\s?(?P<unit>[^\W\d_]+)?', str)
chk_parts = _check_unmatched(parts, str)
deltas = map(_parse_timedelta_part, chk_parts)
return sum(deltas, _Saved_NS())
def _check_unmatched(matches, text):
"""
Ensure no words appear in unmatched text.
"""
def check_unmatched(unmatched):
found = re.search(r'\w+', unmatched)
if found:
raise ValueError(f"Unexpected {found.group(0)!r}")
pos = 0
for match in matches:
check_unmatched(text[pos : match.start()])
yield match
pos = match.end()
check_unmatched(text[match.end() :])
_unit_lookup = {
'µs': 'microsecond',
'µsec': 'microsecond',
'us': 'microsecond',
'usec': 'microsecond',
'micros': 'microsecond',
'ms': 'millisecond',
'msec': 'millisecond',
'millis': 'millisecond',
's': 'second',
'sec': 'second',
'h': 'hour',
'hr': 'hour',
'm': 'minute',
'min': 'minute',
'w': 'week',
'wk': 'week',
'd': 'day',
'ns': 'nanosecond',
'nsec': 'nanosecond',
'nanos': 'nanosecond',
}
def _resolve_unit(raw_match):
if raw_match is None:
return 'second'
text = raw_match.lower()
return _unit_lookup.get(text, text)
def _parse_timedelta_composite(raw_value, unit):
if unit != 'seconds':
raise ValueError("Cannot specify units with composite delta")
values = raw_value.split(':')
units = 'hours', 'minutes', 'seconds'
composed = ' '.join(f'{value} {unit}' for value, unit in zip(values, units))
return _parse_timedelta_nanos(composed)
def _parse_timedelta_part(match):
unit = _resolve_unit(match.group('unit'))
if not unit.endswith('s'):
unit += 's'
raw_value = match.group('value')
if ':' in raw_value:
return _parse_timedelta_composite(raw_value, unit)
value = float(raw_value)
if unit == 'months':
unit = 'years'
value = value / 12
if unit == 'years':
unit = 'days'
value = value * days_per_year
return _Saved_NS.derive(unit, value)
class _Saved_NS:
"""
Bundle a timedelta with nanoseconds.
>>> _Saved_NS.derive('microseconds', .001)
_Saved_NS(td=datetime.timedelta(0), nanoseconds=1)
"""
td = datetime.timedelta()
nanoseconds = 0
multiplier = dict(
seconds=1000000000,
milliseconds=1000000,
microseconds=1000,
)
def __init__(self, **kwargs):
vars(self).update(kwargs)
@classmethod
def derive(cls, unit, value):
if unit == 'nanoseconds':
return _Saved_NS(nanoseconds=value)
res = _Saved_NS(td=datetime.timedelta(**{unit: value}))
with contextlib.suppress(KeyError):
res.nanoseconds = int(value * cls.multiplier[unit]) % 1000
return res
def __add__(self, other):
return _Saved_NS(
td=self.td + other.td, nanoseconds=self.nanoseconds + other.nanoseconds
)
def resolve(self):
"""
Resolve any nanoseconds into the microseconds field,
discarding any nanosecond resolution (but honoring partial
microseconds).
"""
addl_micros = round(self.nanoseconds / 1000)
return self.td + datetime.timedelta(microseconds=addl_micros)
def __repr__(self):
return f'_Saved_NS(td={self.td!r}, nanoseconds={self.nanoseconds!r})'
def divide_timedelta(td1, td2):
"""
Get the ratio of two timedeltas
>>> one_day = datetime.timedelta(days=1)
>>> one_hour = datetime.timedelta(hours=1)
>>> divide_timedelta(one_hour, one_day) == 1 / 24
True
"""
warnings.warn("Use native division", DeprecationWarning)
return td1 / td2
def date_range(start=None, stop=None, step=None):
"""
Much like the built-in function range, but works with dates
>>> range_items = date_range(
... datetime.datetime(2005,12,21),
... datetime.datetime(2005,12,25),
... )
>>> my_range = tuple(range_items)
>>> datetime.datetime(2005,12,21) in my_range
True
>>> datetime.datetime(2005,12,22) in my_range
True
>>> datetime.datetime(2005,12,25) in my_range
False
>>> from_now = date_range(stop=datetime.datetime(2099, 12, 31))
>>> next(from_now)
datetime.datetime(...)
"""
if step is None:
step = datetime.timedelta(days=1)
if start is None:
start = datetime.datetime.now()
while start < stop:
yield start
start += step
| 20,192 | Python | .py | 555 | 30.74955 | 83 | 0.632909 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,933 | timing.py | rembo10_headphones/lib/tempora/timing.py |
import datetime
import functools
import numbers
import time
import collections.abc
import contextlib
import jaraco.functools
class Stopwatch:
"""
A simple stopwatch which starts automatically.
>>> w = Stopwatch()
>>> _1_sec = datetime.timedelta(seconds=1)
>>> w.split() < _1_sec
True
>>> import time
>>> time.sleep(1.0)
>>> w.split() >= _1_sec
True
>>> w.stop() >= _1_sec
True
>>> w.reset()
>>> w.start()
>>> w.split() < _1_sec
True
It should be possible to launch the Stopwatch in a context:
>>> with Stopwatch() as watch:
... assert isinstance(watch.split(), datetime.timedelta)
In that case, the watch is stopped when the context is exited,
so to read the elapsed time:
>>> watch.elapsed
datetime.timedelta(...)
>>> watch.elapsed.seconds
0
"""
def __init__(self):
self.reset()
self.start()
def reset(self):
self.elapsed = datetime.timedelta(0)
with contextlib.suppress(AttributeError):
del self.start_time
def start(self):
self.start_time = datetime.datetime.utcnow()
def stop(self):
stop_time = datetime.datetime.utcnow()
self.elapsed += stop_time - self.start_time
del self.start_time
return self.elapsed
def split(self):
local_duration = datetime.datetime.utcnow() - self.start_time
return self.elapsed + local_duration
# context manager support
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
class IntervalGovernor:
"""
Decorate a function to only allow it to be called once per
min_interval. Otherwise, it returns None.
>>> gov = IntervalGovernor(30)
>>> gov.min_interval.total_seconds()
30.0
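A sketch of gating a callable (calls inside the window return None):
>>> fn = IntervalGovernor(30)(lambda: 'ran')
>>> fn()
'ran'
>>> fn() is None
True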
"""
def __init__(self, min_interval):
if isinstance(min_interval, numbers.Number):
min_interval = datetime.timedelta(seconds=min_interval)
self.min_interval = min_interval
self.last_call = None
def decorate(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
allow = not self.last_call or self.last_call.split() > self.min_interval
if allow:
self.last_call = Stopwatch()
return func(*args, **kwargs)
return wrapper
__call__ = decorate
class Timer(Stopwatch):
"""
Watch for a target elapsed time.
>>> t = Timer(0.1)
>>> t.expired()
False
>>> __import__('time').sleep(0.15)
>>> t.expired()
True
"""
def __init__(self, target=float('Inf')):
self.target = self._accept(target)
super(Timer, self).__init__()
@staticmethod
def _accept(target):
"""
Accept None or ∞ or datetime or numeric for target
>>> Timer._accept(datetime.timedelta(seconds=30))
30.0
>>> Timer._accept(None)
inf
"""
if isinstance(target, datetime.timedelta):
target = target.total_seconds()
if target is None:
# treat None as infinite target
target = float('Inf')
return target
def expired(self):
return self.split().total_seconds() > self.target
class BackoffDelay(collections.abc.Iterator):
"""
Exponential backoff delay.
Useful for defining delays between retries. Consider for use
with ``jaraco.functools.retry_call`` as the cleanup.
Default behavior has no effect; a delay or jitter must
be supplied for the call to be non-degenerate.
>>> bd = BackoffDelay()
>>> bd()
>>> bd()
The following instance will delay 10ms for the first call,
20ms for the second, etc.
>>> bd = BackoffDelay(delay=0.01, factor=2)
>>> bd()
>>> bd()
Inspect and adjust the state of the delay anytime.
>>> bd.delay
0.04
>>> bd.delay = 0.01
Set limit to prevent the delay from exceeding bounds.
>>> bd = BackoffDelay(delay=0.01, factor=2, limit=0.015)
>>> bd()
>>> bd.delay
0.015
To reset the backoff, simply call ``.reset()``:
>>> bd.reset()
>>> bd.delay
0.01
Iterate on the object to retrieve/advance the delay values.
>>> next(bd)
0.01
>>> next(bd)
0.015
>>> import itertools
>>> tuple(itertools.islice(bd, 3))
(0.015, 0.015, 0.015)
Limit may be a callable taking a number and returning
the limited number.
>>> at_least_one = lambda n: max(n, 1)
>>> bd = BackoffDelay(delay=0.01, factor=2, limit=at_least_one)
>>> next(bd)
0.01
>>> next(bd)
1
Pass a jitter to add or subtract seconds to the delay.
>>> bd = BackoffDelay(jitter=0.01)
>>> next(bd)
0
>>> next(bd)
0.01
Jitter may be a callable. To supply a non-deterministic jitter
between -0.5 and 0.5, consider:
>>> import random
>>> jitter=functools.partial(random.uniform, -0.5, 0.5)
>>> bd = BackoffDelay(jitter=jitter)
>>> next(bd)
0
>>> 0 <= next(bd) <= 0.5
True
"""
delay = 0
factor = 1
"Multiplier applied to delay"
jitter = 0
"Number or callable returning extra seconds to add to delay"
@jaraco.functools.save_method_args
def __init__(self, delay=0, factor=1, limit=float('inf'), jitter=0):
self.delay = delay
self.factor = factor
if isinstance(limit, numbers.Number):
limit_ = limit
def limit(n):
return max(0, min(limit_, n))
self.limit = limit
if isinstance(jitter, numbers.Number):
jitter_ = jitter
def jitter():
return jitter_
self.jitter = jitter
def __call__(self):
time.sleep(next(self))
def __next__(self):
delay = self.delay
self.bump()
return delay
def __iter__(self):
return self
def bump(self):
self.delay = self.limit(self.delay * self.factor + self.jitter())
def reset(self):
saved = self._saved___init__
self.__init__(*saved.args, **saved.kwargs)
| 6,197 | Python | .py | 201 | 24.114428 | 84 | 0.601079 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,934 | __main__.py | rembo10_headphones/lib/certifi/__main__.py |
import argparse
from certifi import contents, where
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()
if args.contents:
print(contents())
else:
print(where())
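# Usage sketch: `python -m certifi` prints the path to the bundled
# cacert.pem; `python -m certifi -c` prints the bundle's contents.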
| 243 | Python | .py | 9 | 24.777778 | 60 | 0.748918 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,935 | core.py | rembo10_headphones/lib/certifi/core.py |
# -*- coding: utf-8 -*-
"""
certifi.py
~~~~~~~~~~
This module returns the installation location of cacert.pem or its contents.
"""
import os
try:
from importlib.resources import path as get_path, read_text
_CACERT_CTX = None
_CACERT_PATH = None
def where():
# This is slightly terrible, but we want to delay extracting the file
# in cases where we're inside of a zipimport situation until someone
# actually calls where(), but we don't want to re-extract the file
# on every call of where(), so we'll do it once then store it in a
# global variable.
global _CACERT_CTX
global _CACERT_PATH
if _CACERT_PATH is None:
# This is slightly janky, the importlib.resources API wants you to
# manage the cleanup of this file, so it doesn't actually return a
# path, it returns a context manager that will give you the path
# when you enter it and will do any cleanup when you leave it. In
# the common case of not needing a temporary file, it will just
# return the file system location and the __exit__() is a no-op.
#
# We also have to hold onto the actual context manager, because
# it will do the cleanup whenever it gets garbage collected, so
# we will also store that at the global level as well.
_CACERT_CTX = get_path("certifi", "cacert.pem")
_CACERT_PATH = str(_CACERT_CTX.__enter__())
return _CACERT_PATH
except ImportError:
# This fallback will work for Python versions prior to 3.7 that lack the
# importlib.resources module but relies on the existing `where` function
# so won't address issues with environments like PyOxidizer that don't set
# __file__ on modules.
def read_text(_module, _path, encoding="ascii"):
with open(where(), "r", encoding=encoding) as data:
return data.read()
# If we don't have importlib.resources, then we will just do the old logic
# of assuming we're on the filesystem and munge the path directly.
def where():
f = os.path.dirname(__file__)
return os.path.join(f, "cacert.pem")
def contents():
return read_text("certifi", "cacert.pem", encoding="ascii")
| 2,303 | Python | .py | 48 | 40.395833 | 78 | 0.652251 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,936 | error.py | rembo10_headphones/lib/twitter/error.py |
#!/usr/bin/env python
class TwitterError(Exception):
"""Base class for Twitter errors"""
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class PythonTwitterDeprecationWarning(DeprecationWarning):
"""Base class for python-twitter deprecation warnings"""
pass
class PythonTwitterDeprecationWarning330(PythonTwitterDeprecationWarning):
"""Warning for features to be removed in version 3.3.0"""
pass
class PythonTwitterDeprecationWarning340(PythonTwitterDeprecationWarning):
"""Warning for features to be removed in version 3.4.0"""
pass
| 659 | Python | .py | 16 | 36.375 | 74 | 0.760252 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

8,937 | models.py | rembo10_headphones/lib/twitter/models.py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from calendar import timegm
try:
from rfc822 import parsedate
except ImportError:
from email.utils import parsedate
class TwitterModel(object):
""" Base class from which all twitter models will inherit. """
def __init__(self, **kwargs):
self.param_defaults = {}
def __str__(self):
""" Returns a string representation of TwitterModel. By default
this is the same as AsJsonString(). """
return self.AsJsonString()
def __eq__(self, other):
return other and self.AsDict() == other.AsDict()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if hasattr(self, 'id'):
return hash(self.id)
else:
raise TypeError('unhashable type: {} (no id attribute)'
.format(type(self)))
def AsJsonString(self, ensure_ascii=True):
""" Returns the TwitterModel as a JSON string based on key/value
pairs returned from the AsDict() method. """
return json.dumps(self.AsDict(), ensure_ascii=ensure_ascii, sort_keys=True)
def AsDict(self):
""" Create a dictionary representation of the object. Please see inline
comments on construction when dictionaries contain TwitterModels. """
data = {}
for (key, value) in self.param_defaults.items():
# If the value is a list, we need to create a list to hold the
# dicts created by an object supporting the AsDict() method,
# i.e., if it inherits from TwitterModel. If the item in the list
# doesn't support the AsDict() method, then we assign the value
# directly. An example being a list of Media objects contained
# within a Status object.
if isinstance(getattr(self, key, None), (list, tuple, set)):
data[key] = list()
for subobj in getattr(self, key, None):
if getattr(subobj, 'AsDict', None):
data[key].append(subobj.AsDict())
else:
data[key].append(subobj)
# Not a list, *but still a subclass of TwitterModel*, so
# we can assign data[key] directly with the AsDict()
# method of the object. An example being a Status object contained
# within a User object.
elif getattr(getattr(self, key, None), 'AsDict', None):
data[key] = getattr(self, key).AsDict()
# If the value doesn't have an AsDict() method, i.e., it's not
# something that subclasses TwitterModel, then we can use direct
# assignment.
elif getattr(self, key, None):
data[key] = getattr(self, key, None)
return data
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
""" Create a new instance based on a JSON dict. Any kwargs should be
supplied by the inherited, calling class.
Args:
data: A JSON dict, as converted from the JSON in the twitter API.
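A minimal sketch using the Hashtag model defined below:
>>> Hashtag.NewFromJsonDict({'text': 'python'})
Hashtag(Text='python')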
"""
json_data = data.copy()
if kwargs:
for key, val in kwargs.items():
json_data[key] = val
c = cls(**json_data)
c._json = data
return c
class Media(TwitterModel):
"""A class representing the Media component of a tweet. """
def __init__(self, **kwargs):
self.param_defaults = {
'display_url': None,
'expanded_url': None,
'ext_alt_text': None,
'id': None,
'media_url': None,
'media_url_https': None,
'sizes': None,
'type': None,
'url': None,
'video_info': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Media(ID={media_id}, Type={media_type}, DisplayURL='{url}')".format(
media_id=self.id,
media_type=self.type,
url=self.display_url)
class List(TwitterModel):
"""A class representing the List structure used by the twitter API. """
def __init__(self, **kwargs):
self.param_defaults = {
'description': None,
'following': None,
'full_name': None,
'id': None,
'member_count': None,
'mode': None,
'name': None,
'slug': None,
'subscriber_count': None,
'uri': None,
'user': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if 'user' in kwargs:
self.user = User.NewFromJsonDict(kwargs.get('user'))
def __repr__(self):
return "List(ID={list_id}, FullName={full_name!r}, Slug={slug}, User={user})".format(
list_id=self.id,
full_name=self.full_name,
slug=self.slug,
user=self.user.screen_name)
class Category(TwitterModel):
"""A class representing the suggested user category structure. """
def __init__(self, **kwargs):
self.param_defaults = {
'name': None,
'size': None,
'slug': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Category(Name={name!r}, Slug={slug}, Size={size})".format(
name=self.name,
slug=self.slug,
size=self.size)
class DirectMessage(TwitterModel):
"""A class representing a Direct Message. """
def __init__(self, **kwargs):
self.param_defaults = {
'created_at': None,
'id': None,
'recipient_id': None,
'sender_id': None,
'text': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
if self.text and len(self.text) > 140:
text = "{text}[...]".format(text=self.text[:140])
else:
text = self.text
return "DirectMessage(ID={dm_id}, Sender={sender}, Created={time}, Text='{text!r}')".format(
dm_id=self.id,
sender=self.sender_id,
time=self.created_at,
text=text)
class Trend(TwitterModel):
""" A class representing a trending topic. """
def __init__(self, **kwargs):
self.param_defaults = {
'events': None,
'name': None,
'promoted_content': None,
'query': None,
'timestamp': None,
'url': None,
'tweet_volume': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Trend(Name={0!r}, Time={1}, URL={2})".format(
self.name,
self.timestamp,
self.url)
@property
def volume(self):
return self.tweet_volume
class Hashtag(TwitterModel):
""" A class representing a twitter hashtag. """
def __init__(self, **kwargs):
self.param_defaults = {
'text': None
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "Hashtag(Text={text!r})".format(
text=self.text)
class Url(TwitterModel):
""" A class representing an URL contained in a tweet. """
def __init__(self, **kwargs):
self.param_defaults = {
'expanded_url': None,
'url': None}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "URL(URL={url}, ExpandedURL={eurl})".format(
url=self.url,
eurl=self.expanded_url)
class UserStatus(TwitterModel):
""" A class representing the UserStatus structure. This is an abbreviated
form of the twitter.User object. """
_connections = {'following': False,
'followed_by': False,
'following_received': False,
'following_requested': False,
'blocking': False,
'muting': False}
def __init__(self, **kwargs):
self.param_defaults = {
'blocking': False,
'followed_by': False,
'following': False,
'following_received': False,
'following_requested': False,
'id': None,
'id_str': None,
'muting': False,
'name': None,
'screen_name': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if 'connections' in kwargs:
for param in self._connections:
if param in kwargs['connections']:
setattr(self, param, True)
@property
def connections(self):
return {'following': self.following,
'followed_by': self.followed_by,
'following_received': self.following_received,
'following_requested': self.following_requested,
'blocking': self.blocking,
'muting': self.muting}
def __repr__(self):
connections = [param for param in self.connections if getattr(self, param)]
return "UserStatus(ID={uid}, ScreenName={sn}, Connections=[{conn}])".format(
uid=self.id,
sn=self.screen_name,
conn=", ".join(connections))
class User(TwitterModel):
"""A class representing the User structure. """
def __init__(self, **kwargs):
self.param_defaults = {
'contributors_enabled': None,
'created_at': None,
'default_profile': None,
'default_profile_image': None,
'description': None,
'email': None,
'favourites_count': None,
'followers_count': None,
'following': None,
'friends_count': None,
'geo_enabled': None,
'id': None,
'id_str': None,
'lang': None,
'listed_count': None,
'location': None,
'name': None,
'notifications': None,
'profile_background_color': None,
'profile_background_image_url': None,
'profile_background_image_url_https': None,
'profile_background_tile': None,
'profile_banner_url': None,
'profile_image_url': None,
'profile_image_url_https': None,
'profile_link_color': None,
'profile_sidebar_border_color': None,
'profile_sidebar_fill_color': None,
'profile_text_color': None,
'profile_use_background_image': None,
'protected': None,
'screen_name': None,
'status': None,
'statuses_count': None,
'time_zone': None,
'url': None,
'utc_offset': None,
'verified': None,
'withheld_in_countries': None,
'withheld_scope': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
def __repr__(self):
return "User(ID={uid}, ScreenName={sn})".format(
uid=self.id,
sn=self.screen_name)
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
from twitter import Status
if data.get('status', None):
status = Status.NewFromJsonDict(data.get('status'))
return super(cls, cls).NewFromJsonDict(data=data, status=status)
else:
return super(cls, cls).NewFromJsonDict(data=data)
class Status(TwitterModel):
"""A class representing the Status structure used by the twitter API.
"""
def __init__(self, **kwargs):
self.param_defaults = {
'contributors': None,
'coordinates': None,
'created_at': None,
'current_user_retweet': None,
'favorite_count': None,
'favorited': None,
'full_text': None,
'geo': None,
'hashtags': None,
'id': None,
'id_str': None,
'in_reply_to_screen_name': None,
'in_reply_to_status_id': None,
'in_reply_to_user_id': None,
'lang': None,
'location': None,
'media': None,
'place': None,
'possibly_sensitive': None,
'quoted_status': None,
'quoted_status_id': None,
'quoted_status_id_str': None,
'retweet_count': None,
'retweeted': None,
'retweeted_status': None,
'scopes': None,
'source': None,
'text': None,
'truncated': None,
'urls': None,
'user': None,
'user_mentions': None,
'withheld_copyright': None,
'withheld_in_countries': None,
'withheld_scope': None,
}
for (param, default) in self.param_defaults.items():
setattr(self, param, kwargs.get(param, default))
if kwargs.get('full_text', None):
self.tweet_mode = 'extended'
else:
self.tweet_mode = 'compatibility'
@property
def created_at_in_seconds(self):
""" Get the time this status message was posted, in seconds since
the epoch (1 Jan 1970).
Returns:
int: The time this status message was posted, in seconds since
the epoch.
"""
return timegm(parsedate(self.created_at))
def __repr__(self):
""" A string representation of this twitter.Status instance.
The return value is the ID of status, username and datetime.
Returns:
string: A string representation of this twitter.Status instance with
the ID of status, username and datetime.
"""
if self.tweet_mode == 'extended':
text = self.full_text
else:
text = self.text
if self.user:
return "Status(ID={0}, ScreenName={1}, Created={2}, Text={3!r})".format(
self.id,
self.user.screen_name,
self.created_at,
text)
else:
return u"Status(ID={0}, Created={1}, Text={2!r})".format(
self.id,
self.created_at,
text)
@classmethod
def NewFromJsonDict(cls, data, **kwargs):
""" Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
"""
current_user_retweet = None
hashtags = None
media = None
quoted_status = None
retweeted_status = None
urls = None
user = None
user_mentions = None
# for loading extended tweets from the streaming API.
if 'extended_tweet' in data:
for k, v in data['extended_tweet'].items():
data[k] = v
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
if 'current_user_retweet' in data:
current_user_retweet = data['current_user_retweet']['id']
if 'quoted_status' in data:
quoted_status = Status.NewFromJsonDict(data.get('quoted_status'))
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
if 'media' in data['entities']:
media = [Media.NewFromJsonDict(m) for m in data['entities']['media']]
# the new extended entities
if 'extended_entities' in data:
if 'media' in data['extended_entities']:
media = [Media.NewFromJsonDict(m) for m in data['extended_entities']['media']]
return super(cls, cls).NewFromJsonDict(data=data,
current_user_retweet=current_user_retweet,
hashtags=hashtags,
media=media,
quoted_status=quoted_status,
retweeted_status=retweeted_status,
urls=urls,
user=user,
user_mentions=user_mentions)
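def _example_status_parsing():
    # Illustrative usage sketch, not part of the original module, using a
    # hypothetical, heavily trimmed statuses payload. Assumes the TwitterModel
    # base class forwards dict keys and kwargs to the constructor.
    raw = {
        'id': 1,
        'created_at': 'Wed Aug 29 17:12:58 +0000 2012',
        'text': 'hello #python',
        'user': {'id': 2, 'screen_name': 'alice'},
        'entities': {'hashtags': [{'text': 'python'}]},
    }
    status = Status.NewFromJsonDict(raw)
    # Nested entities are promoted to model instances:
    assert status.user.screen_name == 'alice'
    assert [h.text for h in status.hashtags] == ['python']
    # created_at_in_seconds converts Twitter's timestamp to epoch seconds:
    assert status.created_at_in_seconds == 1346260378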
| 17,490 | Python | .py | 433 | 28.561201 | 100 | 0.539478 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,938 | ratelimit.py | rembo10_headphones/lib/twitter/ratelimit.py |
from collections import namedtuple
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from twitter.twitter_utils import enf_type
EndpointRateLimit = namedtuple('EndpointRateLimit',
['limit', 'remaining', 'reset'])
ResourceEndpoint = namedtuple('ResourceEndpoint', ['regex', 'resource'])
GEO_ID_PLACE_ID = ResourceEndpoint(re.compile(r'/geo/id/\d+'), "/geo/id/:place_id")
SAVED_SEARCHES_DESTROY_ID = ResourceEndpoint(re.compile(r'/saved_searches/destroy/\d+'), "/saved_searches/destroy/:id")
SAVED_SEARCHES_SHOW_ID = ResourceEndpoint(re.compile(r'/saved_searches/show/\d+'), "/saved_searches/show/:id")
STATUSES_RETWEETS_ID = ResourceEndpoint(re.compile(r'/statuses/retweets/\d+'), "/statuses/retweets/:id")
STATUSES_SHOW_ID = ResourceEndpoint(re.compile(r'/statuses/show'), "/statuses/show/:id")
USERS_SHOW_ID = ResourceEndpoint(re.compile(r'/users/show'), "/users/show/:id")
USERS_SUGGESTIONS_SLUG = ResourceEndpoint(re.compile(r'/users/suggestions/\w+$'), "/users/suggestions/:slug")
USERS_SUGGESTIONS_SLUG_MEMBERS = ResourceEndpoint(re.compile(r'/users/suggestions/.+/members'), "/users/suggestions/:slug/members")
NON_STANDARD_ENDPOINTS = [
GEO_ID_PLACE_ID,
SAVED_SEARCHES_DESTROY_ID,
SAVED_SEARCHES_SHOW_ID,
STATUSES_RETWEETS_ID,
STATUSES_SHOW_ID,
USERS_SHOW_ID,
USERS_SUGGESTIONS_SLUG,
USERS_SUGGESTIONS_SLUG_MEMBERS,
]
class RateLimit(object):
""" Object to hold the rate limit status of various endpoints for
the twitter.Api object.
This object is generally attached to the API as Api.rate_limit, but is not
created until the user makes a method call that uses _RequestUrl() or calls
    Api.InitializeRateLimit(), after which it gets created and populated with
rate limit data from Twitter.
Calling Api.InitializeRateLimit() populates the object with all of the
rate limits for the endpoints defined by Twitter; more info is available
here:
https://dev.twitter.com/rest/public/rate-limits
https://dev.twitter.com/rest/public/rate-limiting
https://dev.twitter.com/rest/reference/get/application/rate_limit_status
Once a resource (i.e., an endpoint) has been requested, Twitter's response
will contain the current rate limit status as part of the headers, i.e.::
x-rate-limit-limit
x-rate-limit-remaining
x-rate-limit-reset
``limit`` is the generic limit for that endpoint, ``remaining`` is how many
more times you can make a call to that endpoint, and ``reset`` is the time
(in seconds since the epoch) until remaining resets to its default for that
endpoint.
    Generally speaking, each endpoint has a 15-minute window, and endpoints
    allow either 180 or 15 requests per window. According to Twitter, any
endpoint not defined in the rate limit chart or the response from a GET
request to ``application/rate_limit_status.json`` should be assumed to be
15 requests per 15 minutes.
"""
def __init__(self, **kwargs):
""" Instantiates the RateLimitObject. Takes a json dict as
kwargs and maps to the object's dictionary. So for something like:
{"resources": {
"help": {
/help/privacy": {
"limit": 15,
"remaining": 15,
"reset": 1452254278
}
}
}
}
the RateLimit object will have an attribute 'resources' from which you
can perform a lookup like:
api.rate_limit.get('help').get('/help/privacy')
and a dictionary of limit, remaining, and reset will be returned.
"""
self.__dict__['resources'] = {}
self.__dict__.update(kwargs)
@staticmethod
def url_to_resource(url):
""" Take a fully qualified URL and attempts to return the rate limit
resource family corresponding to it. For example:
>>> RateLimit.url_to_resource('https://api.twitter.com/1.1/statuses/lookup.json?id=317')
        '/statuses/lookup'
Args:
url (str): URL to convert to a resource family.
Returns:
string: Resource family corresponding to the URL.
"""
resource = urlparse(url).path.replace('/1.1', '').replace('.json', '')
for non_std_endpoint in NON_STANDARD_ENDPOINTS:
if re.match(non_std_endpoint.regex, resource):
return non_std_endpoint.resource
return resource
def set_unknown_limit(self, url, limit, remaining, reset):
return self.set_limit(url, limit, remaining, reset)
def set_limit(self, url, limit, remaining, reset):
""" If a resource family is unknown, add it to the object's
dictionary. This is to deal with new endpoints being added to
        the API, but not necessarily reflected in the information returned
        by the ``/account/rate_limit_status.json`` endpoint.
For example, if Twitter were to add an endpoint
``/puppies/lookup.json``, the RateLimit object would create a resource
family ``puppies`` and add ``/puppies/lookup`` as the endpoint, along
with whatever limit, remaining hits available, and reset time would be
applicable to that resource+endpoint pair.
Args:
url (str):
URL of the endpoint being fetched.
limit (int):
Max number of times a user or app can hit the endpoint
before being rate limited.
remaining (int):
Number of times a user or app can access the endpoint
before being rate limited.
reset (int):
Epoch time at which the rate limit window will reset.
"""
endpoint = self.url_to_resource(url)
resource_family = endpoint.split('/')[1]
new_endpoint = {endpoint: {
"limit": enf_type('limit', int, limit),
"remaining": enf_type('remaining', int, remaining),
"reset": enf_type('reset', int, reset)
}}
if not self.resources.get(resource_family, None):
self.resources[resource_family] = {}
self.__dict__['resources'][resource_family].update(new_endpoint)
return self.get_limit(url)
def get_limit(self, url):
""" Gets a EndpointRateLimit object for the given url.
Args:
url (str, optional):
URL of the endpoint for which to return the rate limit
status.
Returns:
namedtuple: EndpointRateLimit object containing rate limit
information.
"""
endpoint = self.url_to_resource(url)
resource_family = endpoint.split('/')[1]
try:
family_rates = self.resources.get(resource_family).get(endpoint)
except AttributeError:
return EndpointRateLimit(limit=15, remaining=15, reset=0)
if not family_rates:
self.set_unknown_limit(url, limit=15, remaining=15, reset=0)
return EndpointRateLimit(limit=15, remaining=15, reset=0)
return EndpointRateLimit(family_rates['limit'],
family_rates['remaining'],
family_rates['reset'])
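def _example_rate_limit():
    # Illustrative usage sketch, not part of the original module.
    url = 'https://api.twitter.com/1.1/statuses/lookup.json?id=317'
    assert RateLimit.url_to_resource(url) == '/statuses/lookup'
    # Non-standard endpoints collapse to their parameterised resource:
    assert RateLimit.url_to_resource(
        'https://api.twitter.com/1.1/geo/id/123.json') == '/geo/id/:place_id'
    rate_limit = RateLimit()
    # Values as they would arrive in the x-rate-limit-* response headers:
    rate_limit.set_limit(url, limit=900, remaining=899, reset=1452254278)
    assert rate_limit.get_limit(url) == EndpointRateLimit(900, 899, 1452254278)
    # Unknown endpoints fall back to the conservative 15-per-window default:
    assert rate_limit.get_limit(
        'https://api.twitter.com/1.1/puppies/lookup.json').limit == 15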
| 7,389 | Python | .py | 149 | 40.114094 | 131 | 0.646846 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,939 | twitter_utils.py | rembo10_headphones/lib/twitter/twitter_utils.py |
# encoding: utf-8
from __future__ import unicode_literals
import mimetypes
import os
import re
import sys
from tempfile import NamedTemporaryFile
from unicodedata import normalize
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import requests
from twitter import TwitterError
import twitter
if sys.version_info < (3,):
range = xrange
if sys.version_info > (3,):
unicode = str
CHAR_RANGES = [
range(0, 4351),
range(8192, 8205),
range(8208, 8223),
range(8242, 8247)]
TLDS = [
"ac", "ad", "ae", "af", "ag", "ai", "al", "am", "an", "ao", "aq", "ar",
"as", "at", "au", "aw", "ax", "az", "ba", "bb", "bd", "be", "bf", "bg",
"bh", "bi", "bj", "bl", "bm", "bn", "bo", "bq", "br", "bs", "bt", "bv",
"bw", "by", "bz", "ca", "cc", "cd", "cf", "cg", "ch", "ci", "ck", "cl",
"cm", "cn", "co", "cr", "cu", "cv", "cw", "cx", "cy", "cz", "de", "dj",
"dk", "dm", "do", "dz", "ec", "ee", "eg", "eh", "er", "es", "et", "eu",
"fi", "fj", "fk", "fm", "fo", "fr", "ga", "gb", "gd", "ge", "gf", "gg",
"gh", "gi", "gl", "gm", "gn", "gp", "gq", "gr", "gs", "gt", "gu", "gw",
"gy", "hk", "hm", "hn", "hr", "ht", "hu", "id", "ie", "il", "im", "in",
"io", "iq", "ir", "is", "it", "je", "jm", "jo", "jp", "ke", "kg", "kh",
"ki", "km", "kn", "kp", "kr", "kw", "ky", "kz", "la", "lb", "lc", "li",
"lk", "lr", "ls", "lt", "lu", "lv", "ly", "ma", "mc", "md", "me", "mf",
"mg", "mh", "mk", "ml", "mm", "mn", "mo", "mp", "mq", "mr", "ms", "mt",
"mu", "mv", "mw", "mx", "my", "mz", "na", "nc", "ne", "nf", "ng", "ni",
"nl", "no", "np", "nr", "nu", "nz", "om", "pa", "pe", "pf", "pg", "ph",
"pk", "pl", "pm", "pn", "pr", "ps", "pt", "pw", "py", "qa", "re", "ro",
"rs", "ru", "rw", "sa", "sb", "sc", "sd", "se", "sg", "sh", "si", "sj",
"sk", "sl", "sm", "sn", "so", "sr", "ss", "st", "su", "sv", "sx", "sy",
"sz", "tc", "td", "tf", "tg", "th", "tj", "tk", "tl", "tm", "tn", "to",
"tp", "tr", "tt", "tv", "tw", "tz", "ua", "ug", "uk", "um", "us", "uy",
"uz", "va", "vc", "ve", "vg", "vi", "vn", "vu", "wf", "ws", "ye", "yt",
"za", "zm", "zw", "ελ", "бел", "мкд", "мон", "рф", "срб", "укр", "қаз",
"հայ", "الاردن", "الجزائر", "السعودية", "المغرب", "امارات", "ایران", "بھارت",
"تونس", "سودان", "سورية", "عراق", "عمان", "فلسطين", "قطر", "مصر",
"مليسيا", "پاکستان", "भारत", "বাংলা", "ভারত", "ਭਾਰਤ", "ભારત",
"இந்தியா", "இலங்கை", "சிங்கப்பூர்", "భారత్", "ලංකා", "ไทย",
"გე", "中国", "中國", "台湾", "台灣", "新加坡", "澳門", "香港", "한국", "neric:",
"abb", "abbott", "abogado", "academy", "accenture", "accountant",
"accountants", "aco", "active", "actor", "ads", "adult", "aeg", "aero",
"afl", "agency", "aig", "airforce", "airtel", "allfinanz", "alsace",
"amsterdam", "android", "apartments", "app", "aquarelle", "archi", "army",
"arpa", "asia", "associates", "attorney", "auction", "audio", "auto",
"autos", "axa", "azure", "band", "bank", "bar", "barcelona", "barclaycard",
"barclays", "bargains", "bauhaus", "bayern", "bbc", "bbva", "bcn", "beer",
"bentley", "berlin", "best", "bet", "bharti", "bible", "bid", "bike",
"bing", "bingo", "bio", "biz", "black", "blackfriday", "bloomberg", "blue",
"bmw", "bnl", "bnpparibas", "boats", "bond", "boo", "boots", "boutique",
"bradesco", "bridgestone", "broker", "brother", "brussels", "budapest",
"build", "builders", "business", "buzz", "bzh", "cab", "cafe", "cal",
"camera", "camp", "cancerresearch", "canon", "capetown", "capital",
"caravan", "cards", "care", "career", "careers", "cars", "cartier",
"casa", "cash", "casino", "cat", "catering", "cba", "cbn", "ceb", "center",
"ceo", "cern", "cfa", "cfd", "chanel", "channel", "chat", "cheap",
"chloe", "christmas", "chrome", "church", "cisco", "citic", "city",
"claims", "cleaning", "click", "clinic", "clothing", "cloud", "club",
"coach", "codes", "coffee", "college", "cologne", "com", "commbank",
"community", "company", "computer", "condos", "construction", "consulting",
"contractors", "cooking", "cool", "coop", "corsica", "country", "coupons",
"courses", "credit", "creditcard", "cricket", "crown", "crs", "cruises",
"cuisinella", "cymru", "cyou", "dabur", "dad", "dance", "date", "dating",
"datsun", "day", "dclk", "deals", "degree", "delivery", "delta",
"democrat", "dental", "dentist", "desi", "design", "dev", "diamonds",
"diet", "digital", "direct", "directory", "discount", "dnp", "docs",
"dog", "doha", "domains", "doosan", "download", "drive", "durban", "dvag",
"earth", "eat", "edu", "education", "email", "emerck", "energy",
"engineer", "engineering", "enterprises", "epson", "equipment", "erni",
"esq", "estate", "eurovision", "eus", "events", "everbank", "exchange",
"expert", "exposed", "express", "fage", "fail", "faith", "family", "fan",
"fans", "farm", "fashion", "feedback", "film", "finance", "financial",
"firmdale", "fish", "fishing", "fit", "fitness", "flights", "florist",
"flowers", "flsmidth", "fly", "foo", "football", "forex", "forsale",
"forum", "foundation", "frl", "frogans", "fund", "furniture", "futbol",
"fyi", "gal", "gallery", "game", "garden", "gbiz", "gdn", "gent",
"genting", "ggee", "gift", "gifts", "gives", "giving", "glass", "gle",
"global", "globo", "gmail", "gmo", "gmx", "gold", "goldpoint", "golf",
"goo", "goog", "google", "gop", "gov", "graphics", "gratis", "green",
"gripe", "group", "guge", "guide", "guitars", "guru", "hamburg", "hangout",
"haus", "healthcare", "help", "here", "hermes", "hiphop", "hitachi", "hiv",
"hockey", "holdings", "holiday", "homedepot", "homes", "honda", "horse",
"host", "hosting", "hoteles", "hotmail", "house", "how", "hsbc", "ibm",
"icbc", "ice", "icu", "ifm", "iinet", "immo", "immobilien", "industries",
"infiniti", "info", "ing", "ink", "institute", "insure", "int",
"international", "investments", "ipiranga", "irish", "ist", "istanbul",
"itau", "iwc", "java", "jcb", "jetzt", "jewelry", "jlc", "jll", "jobs",
"joburg", "jprs", "juegos", "kaufen", "kddi", "kim", "kitchen", "kiwi",
"koeln", "komatsu", "krd", "kred", "kyoto", "lacaixa", "lancaster", "land",
"lasalle", "lat", "latrobe", "law", "lawyer", "lds", "lease", "leclerc",
"legal", "lexus", "lgbt", "liaison", "lidl", "life", "lighting", "limited",
"limo", "link", "live", "lixil", "loan", "loans", "lol", "london", "lotte",
"lotto", "love", "ltda", "lupin", "luxe", "luxury", "madrid", "maif",
"maison", "man", "management", "mango", "market", "marketing", "markets",
"marriott", "mba", "media", "meet", "melbourne", "meme", "memorial", "men",
"menu", "miami", "microsoft", "mil", "mini", "mma", "mobi", "moda", "moe",
"mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow",
"motorcycles", "mov", "movie", "movistar", "mtn", "mtpc", "museum",
"nadex", "nagoya", "name", "navy", "nec", "net", "netbank", "network",
"neustar", "new", "news", "nexus", "ngo", "nhk", "nico", "ninja", "nissan",
"nokia", "nra", "nrw", "ntt", "nyc", "office", "okinawa", "omega", "one",
"ong", "onl", "online", "ooo", "oracle", "orange", "org", "organic",
"osaka", "otsuka", "ovh", "page", "panerai", "paris", "partners", "parts",
"party", "pet", "pharmacy", "philips", "photo", "photography", "photos",
"physio", "piaget", "pics", "pictet", "pictures", "pink", "pizza", "place",
"play", "plumbing", "plus", "pohl", "poker", "porn", "post", "praxi",
"press", "pro", "prod", "productions", "prof", "properties", "property",
"pub", "qpon", "quebec", "racing", "realtor", "realty", "recipes", "red",
"redstone", "rehab", "reise", "reisen", "reit", "ren", "rent", "rentals",
"repair", "report", "republican", "rest", "restaurant", "review",
"reviews", "rich", "ricoh", "rio", "rip", "rocks", "rodeo", "rsvp", "ruhr",
"run", "ryukyu", "saarland", "sakura", "sale", "samsung", "sandvik",
"sandvikcoromant", "sanofi", "sap", "sarl", "saxo", "sca", "scb",
"schmidt", "scholarships", "school", "schule", "schwarz", "science",
"scor", "scot", "seat", "seek", "sener", "services", "sew", "sex", "sexy",
"shiksha", "shoes", "show", "shriram", "singles", "site", "ski", "sky",
"skype", "sncf", "soccer", "social", "software", "sohu", "solar",
"solutions", "sony", "soy", "space", "spiegel", "spreadbetting", "srl",
"starhub", "statoil", "studio", "study", "style", "sucks", "supplies",
"supply", "support", "surf", "surgery", "suzuki", "swatch", "swiss",
"sydney", "systems", "taipei", "tatamotors", "tatar", "tattoo", "tax",
"taxi", "team", "tech", "technology", "tel", "telefonica", "temasek",
"tennis", "thd", "theater", "tickets", "tienda", "tips", "tires", "tirol",
"today", "tokyo", "tools", "top", "toray", "toshiba", "tours", "town",
"toyota", "toys", "trade", "trading", "training", "travel", "trust", "tui",
"ubs", "university", "uno", "uol", "vacations", "vegas", "ventures",
"vermögensberater", "vermögensberatung", "versicherung", "vet", "viajes",
"video", "villas", "vin", "vision", "vista", "vistaprint", "vlaanderen",
"vodka", "vote", "voting", "voto", "voyage", "wales", "walter", "wang",
"watch", "webcam", "website", "wed", "wedding", "weir", "whoswho", "wien",
"wiki", "williamhill", "win", "windows", "wine", "wme", "work", "works",
"world", "wtc", "wtf", "xbox", "xerox", "xin", "xperia", "xxx", "xyz",
"yachts", "yandex", "yodobashi", "yoga", "yokohama", "youtube", "zip",
"zone", "zuerich", "дети", "ком", "москва", "онлайн", "орг", "рус", "сайт",
"קום", "بازار", "شبكة", "كوم", "موقع", "कॉम", "नेट", "संगठन", "คอม",
"みんな", "グーグル", "コム", "世界", "中信", "中文网", "企业", "佛山", "信息",
"健康", "八卦", "公司", "公益", "商城", "商店", "商标", "在线", "大拿", "娱乐",
"工行", "广东", "慈善", "我爱你", "手机", "政务", "政府", "新闻", "时尚", "机构",
"淡马锡", "游戏", "点看", "移动", "组织机构", "网址", "网店", "网络", "谷歌", "集团",
"飞利浦", "餐厅", "닷넷", "닷컴", "삼성", "onion"]
URL_REGEXP = re.compile((
r'('
r'^(?!(https?://|www\.)?\.|ftps?://|([0-9]+\.){{1,3}}\d+)' # exclude urls that start with "."
r'(?:https?://|www\.)*^(?!.*@)(?:[\w+-_]+[.])' # beginning of url
r'(?:{0}\b' # all tlds
r'(?:[:0-9]))' # port numbers & close off TLDs
r'(?:[\w+\/]?[a-z0-9!\*\'\(\);:&=\+\$/%#\[\]\-_\.,~?])*' # path/query params
r')').format(r'\b|'.join(TLDS)), re.U | re.I | re.X)
def calc_expected_status_length(status, short_url_length=23):
""" Calculates the length of a tweet, taking into account Twitter's
replacement of URLs with https://t.co links.
Args:
status: text of the status message to be posted.
        short_url_length: the current length of Twitter's https://t.co links.
Returns:
Expected length of the status message as an integer.
"""
status_length = 0
if isinstance(status, bytes):
status = unicode(status)
for word in re.split(r'\s', status):
if is_url(word):
status_length += short_url_length
else:
for character in word:
if any([ord(normalize("NFC", character)) in char_range for char_range in CHAR_RANGES]):
status_length += 1
else:
status_length += 2
status_length += len(re.findall(r'\s', status))
return status_length
def is_url(text):
""" Checks to see if a bit of text is a URL.
Args:
text: text to check.
Returns:
Boolean of whether the text should be treated as a URL or not.
"""
return bool(re.findall(URL_REGEXP, text))
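def _example_status_length():
    # Illustrative usage sketch, not part of the original module. Twitter
    # wraps every link in a t.co URL, so each URL is expected to count as
    # short_url_length (23) characters regardless of its real length.
    text = 'check this out https://example.com/a/very/long/path'
    # 12 word characters + 3 spaces + 23 for the URL = 38 (expected)
    assert calc_expected_status_length(text) == 38
    assert is_url('https://example.com/a/very/long/path')
    assert not is_url('plain words')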
def http_to_file(http):
data_file = NamedTemporaryFile()
req = requests.get(http, stream=True)
for chunk in req.iter_content(chunk_size=1024 * 1024):
data_file.write(chunk)
return data_file
def parse_media_file(passed_media, async_upload=False):
""" Parses a media file and attempts to return a file-like object and
information about the media file.
Args:
        passed_media: media file to parse.
        async_upload: whether the media will be uploaded asynchronously
            (affects the size limits applied to videos).
Returns:
file-like object, the filename of the media file, the file size, and
the type of media.
"""
img_formats = ['image/jpeg',
'image/png',
'image/bmp',
'image/webp']
long_img_formats = [
'image/gif'
]
video_formats = ['video/mp4',
'video/quicktime']
# If passed_media is a string, check if it points to a URL, otherwise,
# it should point to local file. Create a reference to a file obj for
# each case such that data_file ends up with a read() method.
if not hasattr(passed_media, 'read'):
if passed_media.startswith('http'):
data_file = http_to_file(passed_media)
filename = os.path.basename(urlparse(passed_media).path)
else:
data_file = open(os.path.realpath(passed_media), 'rb')
filename = os.path.basename(passed_media)
# Otherwise, if a file object was passed in the first place,
# create the standard reference to media_file (i.e., rename it to fp).
else:
if passed_media.mode not in ['rb', 'rb+', 'w+b']:
            raise TwitterError('File mode must be "rb", "rb+" or "w+b"')
filename = os.path.basename(passed_media.name)
data_file = passed_media
data_file.seek(0, 2)
file_size = data_file.tell()
try:
data_file.seek(0)
    except Exception:
        # some file-like objects (e.g. streaming responses) are not seekable
        pass
media_type = mimetypes.guess_type(os.path.basename(filename))[0]
if media_type is not None:
if media_type in img_formats and file_size > 5 * 1048576:
raise TwitterError({'message': 'Images must be less than 5MB.'})
elif media_type in long_img_formats and file_size > 15 * 1048576:
raise TwitterError({'message': 'GIF Image must be less than 15MB.'})
elif media_type in video_formats and not async_upload and file_size > 15 * 1048576:
raise TwitterError({'message': 'Videos must be less than 15MB.'})
elif media_type in video_formats and async_upload and file_size > 512 * 1048576:
raise TwitterError({'message': 'Videos must be less than 512MB.'})
elif media_type not in img_formats and media_type not in video_formats and media_type not in long_img_formats:
raise TwitterError({'message': 'Media type could not be determined.'})
return data_file, filename, file_size, media_type
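def _example_parse_media_file():
    # Illustrative usage sketch, not part of the original module; 'photo.jpg'
    # is a hypothetical local file opened in a supported binary mode.
    with open('photo.jpg', 'rb') as media:
        data_file, filename, file_size, media_type = parse_media_file(media)
        # media_type is guessed from the filename (here 'image/jpeg') and
        # file_size has already been checked against the 5MB image cap.
        print(filename, file_size, media_type)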
def enf_type(field, _type, val):
""" Checks to see if a given val for a field (i.e., the name of the field)
is of the proper _type. If it is not, raises a TwitterError with a brief
explanation.
Args:
field:
Name of the field you are checking.
_type:
Type that the value should be returned as.
val:
Value to convert to _type.
Returns:
val converted to type _type.
"""
try:
return _type(val)
except ValueError:
raise TwitterError({
'message': '"{0}" must be type {1}'.format(field, _type.__name__)
})
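def _example_enf_type():
    # Illustrative usage sketch, not part of the original module.
    assert enf_type('count', int, '15') == 15  # coercible values pass through
    try:
        enf_type('count', int, 'fifteen')      # non-coercible values raise
    except TwitterError as error:
        print(error)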
def parse_arg_list(args, attr):
out = []
if isinstance(args, (str, unicode)):
out.append(args)
elif isinstance(args, twitter.User):
out.append(getattr(args, attr))
elif isinstance(args, (list, tuple)):
for item in args:
if isinstance(item, (str, unicode)):
out.append(item)
elif isinstance(item, twitter.User):
out.append(getattr(item, attr))
return ",".join([str(item) for item in out])
| 16,403 | Python | .py | 285 | 49.364912 | 118 | 0.545197 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |

| 8,940 | api.py | rembo10_headphones/lib/twitter/api.py |
#!/usr/bin/env python
#
#
# Copyright 2007-2016, 2018 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API"""
from __future__ import division
from __future__ import print_function
import json
import sys
import gzip
import time
import base64
import re
import logging
import requests
from requests_oauthlib import OAuth1, OAuth2
import io
import warnings
from uuid import uuid4
import os
try:
# python 3
from urllib.parse import urlparse, urlunparse, urlencode, quote_plus
from urllib.request import __version__ as urllib_version
except ImportError:
from urlparse import urlparse, urlunparse
from urllib import urlencode, quote_plus
from urllib import __version__ as urllib_version
from twitter import (
__version__,
_FileCache,
Category,
DirectMessage,
List,
Status,
Trend,
User,
UserStatus,
)
from twitter.ratelimit import RateLimit
from twitter.twitter_utils import (
calc_expected_status_length,
is_url,
parse_media_file,
enf_type,
parse_arg_list)
from twitter.error import (
TwitterError,
PythonTwitterDeprecationWarning330,
)
if sys.version_info > (3,):
long = int # pylint: disable=invalid-name,redefined-builtin
CHARACTER_LIMIT = 280
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
logger = logging.getLogger(__name__)
class Api(object):
"""A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print([s.text for s in statuses])
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print([u.name for u in users])
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print(status.text)
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetHomeTimeline()
>>> api.GetStatus(status_id)
>>> api.GetStatuses(status_ids)
>>> api.DestroyStatus(status_id)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(message_id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.LookupFriendship(user)
>>> api.VerifyCredentials()
"""
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
consumer_key=None,
consumer_secret=None,
access_token_key=None,
access_token_secret=None,
application_only_auth=False,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE,
base_url=None,
stream_url=None,
upload_url=None,
chunk_size=1024 * 1024,
use_gzip_compression=False,
debugHTTP=False,
timeout=None,
sleep_on_rate_limit=False,
tweet_mode='compat',
proxies=None):
"""Instantiate a new twitter.Api object.
Args:
consumer_key (str):
Your Twitter user's consumer_key.
consumer_secret (str):
Your Twitter user's consumer_secret.
access_token_key (str):
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret (str):
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
application_only_auth:
Use Application-Only Auth instead of User Auth.
Defaults to False [Optional]
input_encoding (str, optional):
The encoding used to encode input strings.
          request_headers (dict, optional):
A dictionary of additional HTTP request headers.
cache (object, optional):
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching.
base_url (str, optional):
The base URL to use to contact the Twitter API.
Defaults to https://api.twitter.com.
stream_url (str, optional):
The base URL to use for streaming endpoints.
Defaults to 'https://stream.twitter.com/1.1'.
upload_url (str, optional):
The base URL to use for uploads. Defaults to 'https://upload.twitter.com/1.1'.
chunk_size (int, optional):
Chunk size to use for chunked (multi-part) uploads of images/videos/gifs.
            Defaults to 1MB. Anything under 16KB risks generating an
            excessive number of requests when uploading 15MB files.
use_gzip_compression (bool, optional):
            Set to True to enable gzip compression for any call
made to Twitter. Defaults to False.
debugHTTP (bool, optional):
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False.
timeout (int, optional):
Set timeout (in seconds) of the http/https requests. If None the
requests lib default will be used. Defaults to None.
sleep_on_rate_limit (bool, optional):
Whether to sleep an appropriate amount of time if a rate limit is hit for
an endpoint.
tweet_mode (str, optional):
Whether to use the new (as of Sept. 2016) extended tweet mode. See docs for
details. Choices are ['compatibility', 'extended'].
proxies (dict, optional):
A dictionary of proxies for the request to pass through, if not specified
allows requests lib to use environmental variables for proxy if any.
"""
# check to see if the library is running on a Google App Engine instance
# see GAE.rst for more information
if os.environ:
if 'APPENGINE_RUNTIME' in os.environ.keys():
# Adapter ensures requests use app engine's urlfetch
import requests_toolbelt.adapters.appengine
requests_toolbelt.adapters.appengine.monkeypatch()
# App Engine does not like this caching strategy, disable caching
cache = None
self.SetCache(cache)
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._input_encoding = input_encoding
self._use_gzip = use_gzip_compression
self._debugHTTP = debugHTTP
self._shortlink_size = 19
if timeout and timeout < 30:
warnings.warn("Warning: The Twitter streaming API sends 30s keepalives, the given timeout is shorter!")
self._timeout = timeout
self.__auth = None
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
self.rate_limit = RateLimit()
self.sleep_on_rate_limit = sleep_on_rate_limit
self.tweet_mode = tweet_mode
self.proxies = proxies
if base_url is None:
self.base_url = 'https://api.twitter.com/1.1'
else:
self.base_url = base_url
if stream_url is None:
self.stream_url = 'https://stream.twitter.com/1.1'
else:
self.stream_url = stream_url
if upload_url is None:
self.upload_url = 'https://upload.twitter.com/1.1'
else:
self.upload_url = upload_url
self.chunk_size = chunk_size
if self.chunk_size < 1024 * 16:
warnings.warn((
"A chunk size lower than 16384 may result in too many "
"requests to the Twitter API when uploading videos. You are "
"strongly advised to increase it above 16384"))
if (consumer_key and not
(application_only_auth or all([access_token_key, access_token_secret]))):
raise TwitterError({'message': "Missing oAuth Consumer Key or Access Token"})
self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret,
application_only_auth)
if debugHTTP:
try:
import http.client as http_client # python3
except ImportError:
import httplib as http_client # python2
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
@staticmethod
def GetAppOnlyAuthToken(consumer_key, consumer_secret):
"""
Generate a Bearer Token from consumer_key and consumer_secret
"""
key = quote_plus(consumer_key)
secret = quote_plus(consumer_secret)
bearer_token = base64.b64encode('{}:{}'.format(key, secret).encode('utf8'))
post_headers = {
'Authorization': 'Basic {0}'.format(bearer_token.decode('utf8')),
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
}
res = requests.post(url='https://api.twitter.com/oauth2/token',
data={'grant_type': 'client_credentials'},
headers=post_headers)
bearer_creds = res.json()
return bearer_creds
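    # Example (assumed usage, not part of the original source): the returned
    # dict follows Twitter's OAuth2 token response, e.g.
    #   >>> creds = Api.GetAppOnlyAuthToken('my_consumer_key', 'my_consumer_secret')
    #   >>> creds['token_type'], creds['access_token']
    #   ('bearer', 'AAAA...')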
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key=None,
access_token_secret=None,
application_only_auth=False):
"""Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
application_only_auth:
Whether to generate a bearer token and use Application-Only Auth
"""
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
if application_only_auth:
self._bearer_token = self.GetAppOnlyAuthToken(consumer_key, consumer_secret)
self.__auth = OAuth2(token=self._bearer_token)
else:
auth_list = [consumer_key, consumer_secret,
access_token_key, access_token_secret]
if all(auth_list):
self.__auth = OAuth1(consumer_key, consumer_secret,
access_token_key, access_token_secret)
self._config = None
def GetHelpConfiguration(self):
"""Get basic help configuration details from Twitter.
Args:
None
Returns:
dict: Sets self._config and returns dict of help config values.
"""
if self._config is None:
url = '%s/help/configuration.json' % self.base_url
resp = self._RequestUrl(url, 'GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
self._config = data
return self._config
def GetShortUrlLength(self, https=False):
"""Returns number of characters reserved per URL included in a tweet.
Args:
https (bool, optional):
If True, return number of characters reserved for https urls
or, if False, return number of character reserved for http urls.
Returns:
(int): Number of characters reserved per URL.
"""
config = self.GetHelpConfiguration()
if https:
return config['short_url_length_https']
else:
return config['short_url_length']
def ClearCredentials(self):
"""Clear any credentials for this instance
"""
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self._bearer_token = None
self.__auth = None # for request upgrade
def GetSearch(self,
term=None,
raw_query=None,
geocode=None,
since_id=None,
max_id=None,
until=None,
since=None,
count=15,
lang=None,
locale=None,
result_type="mixed",
include_entities=None,
return_json=False):
"""Return twitter search results for a given term. You must specify one
of term, geocode, or raw_query.
Args:
term (str, optional):
Term to search by. Optional if you include geocode.
raw_query (str, optional):
A raw query as a string. This should be everything after the "?" in
the URL (i.e., the query parameters). You are responsible for all
type checking and ensuring that the query string is properly
            formatted, as it will only be URL-encoded before being passed directly
to Twitter with no other checks performed. For advanced usage only.
*This will override any other parameters passed*
since_id (int, optional):
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID.
until (str, optional):
Returns tweets generated before the given date. Date should be
formatted as YYYY-MM-DD.
since (str, optional):
Returns tweets generated since the given date. Date should be
formatted as YYYY-MM-DD.
geocode (str or list or tuple, optional):
Geolocation within which to search for tweets. Can be either a
string in the form of "latitude,longitude,radius" where latitude
and longitude are floats and radius is a string such as "1mi" or
"1km" ("mi" or "km" are the only units allowed). For example:
>>> api.GetSearch(geocode="37.781157,-122.398720,1mi").
Otherwise, you can pass a list of either floats or strings for
lat/long and a string for radius:
>>> api.GetSearch(geocode=[37.781157, -122.398720, "1mi"])
>>> # or:
>>> api.GetSearch(geocode=(37.781157, -122.398720, "1mi"))
>>> # or:
>>> api.GetSearch(geocode=("37.781157", "-122.398720", "1mi"))
count (int, optional):
            Number of results to return. Default is 15; the maximum
            Twitter returns is 100, irrespective of the value passed.
lang (str, optional):
Language for results as ISO 639-1 code. Default is None
(all languages).
locale (str, optional):
Language of the search query. Currently only 'ja' is effective.
This is intended for language-specific consumers and the default
should work in the majority of cases.
result_type (str, optional):
Type of result which should be returned. Default is "mixed".
Valid options are "mixed, "recent", and "popular".
include_entities (bool, optional):
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags.
return_json (bool, optional):
            If True, raw JSON data will be returned instead of a list of
            twitter.Status instances.
Returns:
list: A sequence of twitter.Status instances, one for each message
containing the term, within the bounds of the geocoded area, or
given by the raw_query.
"""
url = '%s/search/tweets.json' % self.base_url
parameters = {}
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if until:
parameters['until'] = enf_type('until', str, until)
if since:
parameters['since'] = enf_type('since', str, since)
if lang:
parameters['lang'] = enf_type('lang', str, lang)
if locale:
parameters['locale'] = enf_type('locale', str, locale)
if term is None and geocode is None and raw_query is None:
return []
if term is not None:
parameters['q'] = term
if geocode is not None:
if isinstance(geocode, list) or isinstance(geocode, tuple):
parameters['geocode'] = ','.join([str(geo) for geo in geocode])
else:
parameters['geocode'] = enf_type('geocode', str, geocode)
if include_entities:
parameters['include_entities'] = enf_type('include_entities',
bool,
include_entities)
parameters['count'] = enf_type('count', int, count)
if result_type in ["mixed", "popular", "recent"]:
parameters['result_type'] = result_type
if raw_query is not None:
url = "{url}?{raw_query}".format(
url=url,
raw_query=raw_query)
resp = self._RequestUrl(url, 'GET')
else:
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [Status.NewFromJsonDict(x) for x in data.get('statuses', '')]
def GetUsersSearch(self,
term=None,
page=1,
count=20,
include_entities=None):
"""Return twitter user search results for a given term.
Args:
term:
Term to search by.
page:
Page of results to return. Default is 1
[Optional]
count:
Number of results to return. Default is 20
[Optional]
include_entities:
If True, each tweet will include a node called "entities,".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
Returns:
A sequence of twitter.User instances, one for each message containing
the term
"""
# Build request parameters
parameters = {}
if term is not None:
parameters['q'] = term
if page != 1:
parameters['page'] = page
if include_entities:
parameters['include_entities'] = 1
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
# Make and send requests
url = '%s/users/search.json' % self.base_url
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [User.NewFromJsonDict(x) for x in data]
def GetTrendsCurrent(self, exclude=None):
"""Get the current top trending topics (global)
Args:
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
"""
return self.GetTrendsWoeid(woeid=1, exclude=exclude)
def GetTrendsWoeid(self, woeid, exclude=None):
"""Return the top 10 trending topics for a specific WOEID, if trending
information is available for it.
Args:
woeid:
the Yahoo! Where On Earth ID for a location.
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
"""
url = '%s/trends/place.json' % (self.base_url)
parameters = {'id': woeid}
if exclude:
parameters['exclude'] = exclude
resp = self._RequestUrl(url, verb='GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
trends = []
timestamp = data[0]['as_of']
for trend in data[0]['trends']:
trends.append(Trend.NewFromJsonDict(trend, timestamp=timestamp))
return trends
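    # Example (assumed usage, not part of the original source; 2459115 is the
    # WOEID for New York City):
    #   >>> trends = api.GetTrendsWoeid(woeid=2459115)
    #   >>> [t.name for t in trends]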
def GetUserSuggestionCategories(self):
""" Return the list of suggested user categories, this can be used in
GetUserSuggestion function
Returns:
A list of categories
"""
url = '%s/users/suggestions.json' % (self.base_url)
resp = self._RequestUrl(url, verb='GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
categories = []
for category in data:
categories.append(Category.NewFromJsonDict(category))
return categories
def GetUserSuggestion(self, category):
""" Returns a list of users in a category
Args:
category:
The Category object to limit the search by
Returns:
A list of users in that category
"""
url = '%s/users/suggestions/%s.json' % (self.base_url, category.slug)
resp = self._RequestUrl(url, verb='GET')
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
users = []
for user in data['users']:
users.append(User.NewFromJsonDict(user))
return users
def GetHomeTimeline(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
exclude_replies=False,
contributor_details=False,
include_entities=True):
"""Fetch a collection of the most recent Tweets and retweets posted
by the authenticating user and the users they follow.
The home timeline is central to how most users interact with Twitter.
Args:
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
When True, each tweet returned in a timeline will include a user
object including only the status authors numerical ID. Omit this
parameter to receive the complete user object. [Optional]
exclude_replies:
This parameter will prevent replies from appearing in the
returned timeline. Using exclude_replies with the count
            parameter will mean you will receive up to count tweets -
this is because the count parameter retrieves that many
tweets before filtering out retweets and replies. [Optional]
contributor_details:
This parameter enhances the contributors element of the
status response to include the screen_name of the contributor.
By default only the user_id of the contributor is included. [Optional]
include_entities:
            The entities node will be omitted when set to False.
            This node offers a variety of metadata about the tweet in a
            discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
"""
url = '%s/statuses/home_timeline.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 200:
raise TwitterError({'message': "'count' may not be greater than 200"})
except ValueError:
raise TwitterError({'message': "'count' must be an integer"})
parameters['count'] = count
if since_id:
try:
parameters['since_id'] = int(since_id)
except ValueError:
raise TwitterError({'message': "'since_id' must be an integer"})
if max_id:
try:
parameters['max_id'] = int(max_id)
except ValueError:
raise TwitterError({'message': "'max_id' must be an integer"})
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
if contributor_details:
parameters['contributor_details'] = 1
if not include_entities:
parameters['include_entities'] = 'false'
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
def GetUserTimeline(self,
user_id=None,
screen_name=None,
since_id=None,
max_id=None,
count=None,
include_rts=True,
trim_user=False,
exclude_replies=False):
"""Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
user_id (int, optional):
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
screen_name (str, optional):
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
since_id (int, optional):
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID.
count (int, optional):
Specifies the number of statuses to retrieve. May not be
greater than 200.
include_rts (bool, optional):
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets.
trim_user (bool, optional):
            If True, statuses will contain only the numerical user ID.
Otherwise a full user object will be returned for each status.
exclude_replies (bool, optional)
If True, this will prevent replies from appearing in the returned
timeline. Using exclude_replies with the count parameter will mean you
            will receive up to count tweets - this is because the count parameter
retrieves that many tweets before filtering out retweets and replies.
This parameter is only supported for JSON and XML responses.
Returns:
A sequence of Status instances, one for each message up to count
"""
url = '%s/statuses/user_timeline.json' % (self.base_url)
parameters = {}
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if count:
parameters['count'] = enf_type('count', int, count)
parameters['include_rts'] = enf_type('include_rts', bool, include_rts)
parameters['trim_user'] = enf_type('trim_user', bool, trim_user)
parameters['exclude_replies'] = enf_type('exclude_replies', bool, exclude_replies)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self,
status_id,
trim_user=False,
include_my_retweet=True,
include_entities=True,
include_ext_alt_text=True):
"""Returns a single status message, specified by the status_id parameter.
Args:
status_id:
The numeric ID of the status you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object. [Optional]
include_my_retweet:
When set to True, any Tweets returned that have been retweeted by
the authenticating user will include an additional
current_user_retweet node, containing the ID of the source status
for the retweet. [Optional]
include_entities:
            If False, the entities node will be omitted.
            This node offers a variety of metadata about the tweet in a
            discrete structure, including: user_mentions, urls, and
            hashtags. [Optional]
          include_ext_alt_text:
            When set to True, includes the alt text of media entities, if
            one has been defined. [Optional]
Returns:
A twitter.Status instance representing that status message
"""
url = '%s/statuses/show.json' % (self.base_url)
parameters = {
'id': enf_type('status_id', int, status_id),
'trim_user': enf_type('trim_user', bool, trim_user),
'include_my_retweet': enf_type('include_my_retweet', bool, include_my_retweet),
'include_entities': enf_type('include_entities', bool, include_entities),
'include_ext_alt_text': enf_type('include_ext_alt_text', bool, include_ext_alt_text)
}
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetStatuses(self,
status_ids,
trim_user=False,
include_entities=True,
map=False):
"""Returns a list of status messages, specified by the status_ids parameter.
Args:
status_ids:
A list of the numeric ID of the statuses you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object. [Optional]
include_entities:
            If False, the entities node will be omitted.
            This node offers a variety of metadata about the tweet in a
            discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
map:
If True, returns a dictionary with status id as key and returned
status data (or None if tweet does not exist or is inaccessible)
as value. Otherwise returns an unordered list of successfully
retrieved Tweets. [Optional]
Returns:
A dictionary or unordered list (depending on the parameter 'map') of
twitter Status instances representing the status messages.
"""
url = '%s/statuses/lookup.json' % (self.base_url)
map = enf_type('map', bool, map)
if map:
result = {}
else:
result = []
offset = 0
parameters = {
'trim_user': enf_type('trim_user', bool, trim_user),
'include_entities': enf_type('include_entities', bool, include_entities),
'map': map
}
while offset < len(status_ids):
parameters['id'] = ','.join([str(enf_type('status_id', int, status_id)) for status_id in status_ids[offset:offset + 100]])
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if map:
result.update({int(key): (Status.NewFromJsonDict(value) if value else None) for key, value in data['id'].items()})
else:
result += [Status.NewFromJsonDict(dataitem) for dataitem in data]
offset += 100
return result
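    # Example (assumed usage, not part of the original source): IDs are sent
    # in batches of 100 per request; with map=True, protected or deleted
    # tweets come back as None.
    #   >>> statuses = api.GetStatuses(status_ids, map=True)
    #   >>> missing = [sid for sid, status in statuses.items() if status is None]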
def GetStatusOembed(self,
status_id=None,
url=None,
maxwidth=None,
hide_media=False,
hide_thread=False,
omit_script=False,
align=None,
related=None,
lang=None):
"""Returns information allowing the creation of an embedded representation of a
Tweet on third party sites.
Specify tweet by the id or url parameter.
Args:
status_id:
The numeric ID of the status you are trying to embed.
url:
The url of the status you are trying to embed.
maxwidth:
The maximum width in pixels that the embed should be rendered at.
This value is constrained to be between 250 and 550 pixels. [Optional]
hide_media:
Specifies whether the embedded Tweet should automatically expand images. [Optional]
hide_thread:
Specifies whether the embedded Tweet should automatically show the original
message in the case that the embedded Tweet is a reply. [Optional]
omit_script:
Specifies whether the embedded Tweet HTML should include a <script>
element pointing to widgets.js. [Optional]
align:
Specifies whether the embedded Tweet should be left aligned, right aligned,
or centered in the page. [Optional]
related:
            A comma-separated string of related screen names. [Optional]
lang:
Language code for the rendered embed. [Optional]
Returns:
A dictionary with the response.
"""
request_url = '%s/statuses/oembed.json' % (self.base_url)
parameters = {}
if status_id is not None:
try:
parameters['id'] = int(status_id)
except ValueError:
raise TwitterError({'message': "'status_id' must be an integer."})
elif url is not None:
parameters['url'] = url
else:
raise TwitterError({'message': "Must specify either 'status_id' or 'url'"})
if maxwidth is not None:
parameters['maxwidth'] = maxwidth
if hide_media is True:
parameters['hide_media'] = 'true'
if hide_thread is True:
parameters['hide_thread'] = 'true'
if omit_script is True:
parameters['omit_script'] = 'true'
if align is not None:
if align not in ('left', 'center', 'right', 'none'):
raise TwitterError({'message': "'align' must be 'left', 'center', 'right', or 'none'"})
parameters['align'] = align
if related:
if not isinstance(related, str):
raise TwitterError({'message': "'related' should be a string of comma separated screen names"})
parameters['related'] = related
if lang is not None:
if not isinstance(lang, str):
raise TwitterError({'message': "'lang' should be string instance"})
parameters['lang'] = lang
resp = self._RequestUrl(request_url, 'GET', data=parameters, enforce_auth=False)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def DestroyStatus(self, status_id, trim_user=False):
"""Destroys the status specified by the required ID parameter.
The authenticating user must be the author of the specified
status.
Args:
status_id (int):
The numerical ID of the status you're trying to destroy.
trim_user (bool, optional):
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Returns:
A twitter.Status instance representing the destroyed status message
"""
url = '%s/statuses/destroy/%s.json' % (self.base_url, status_id)
post_data = {
'id': enf_type('status_id', int, status_id),
'trim_user': enf_type('trim_user', bool, trim_user)
}
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def PostUpdate(self,
status,
media=None,
media_additional_owners=None,
media_category=None,
in_reply_to_status_id=None,
auto_populate_reply_metadata=False,
exclude_reply_user_ids=None,
latitude=None,
longitude=None,
place_id=None,
display_coordinates=False,
trim_user=False,
verify_status_length=True,
attachment_url=None):
"""Post a twitter status message from the authenticated user.
https://dev.twitter.com/docs/api/1.1/post/statuses/update
Args:
status (str):
The message text to be posted. Must be less than or equal to
CHARACTER_LIMIT characters.
media (int, str, fp, optional):
A URL, a local file, or a file-like object (something with a
read() method), or a list of any combination of the above.
media_additional_owners (list, optional):
A list of user ids representing Twitter users that should be able
to use the uploaded media in their tweets. If you pass a list of
media, then additional_owners will apply to each object. If you
need more granular control, please use the UploadMedia* methods.
media_category (str, optional):
Only for use with the AdsAPI. See
https://dev.twitter.com/ads/creative/promoted-video-overview if
this applies to your application.
in_reply_to_status_id (int, optional):
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored.
auto_populate_reply_metadata (bool, optional):
Automatically include the @usernames of the users mentioned or
participating in the tweet to which this tweet is in reply.
exclude_reply_user_ids (list, optional):
Remove given user_ids (*not* @usernames) from the tweet's
automatically generated reply metadata.
attachment_url (str, optional):
URL to an attachment resource: one to four photos, a GIF,
video, Quote Tweet, or DM deep link. If not specified and
media parameter is not None, we will attach the first media
object as the attachment URL. If a bad URL is passed, Twitter
will raise an error.
latitude (float, optional):
Latitude coordinate of the tweet in degrees. Will only work
in conjunction with longitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting.
longitude (float, optional):
Longitude coordinate of the tweet in degrees. Will only work
in conjunction with latitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting.
place_id (int, optional):
A place in the world. These IDs can be retrieved from
GET geo/reverse_geocode.
display_coordinates (bool, optional):
Whether or not to put a pin on the exact coordinates a tweet
has been sent from.
trim_user (bool, optional):
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
verify_status_length (bool, optional):
If True, a hard error is raised when the status is longer than
CHARACTER_LIMIT characters. If False, the API will attempt to
post the status regardless of length.
Returns:
(twitter.Status) A twitter.Status instance representing the
message posted.
"""
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
if verify_status_length and calc_expected_status_length(u_status) > CHARACTER_LIMIT:
raise TwitterError("Text must be less than or equal to CHARACTER_LIMIT characters.")
if auto_populate_reply_metadata and not in_reply_to_status_id:
raise TwitterError("If auto_populate_reply_metadata is True, you must set in_reply_to_status_id")
parameters = {
'status': u_status,
'in_reply_to_status_id': in_reply_to_status_id,
'auto_populate_reply_metadata': auto_populate_reply_metadata,
'place_id': place_id,
'display_coordinates': display_coordinates,
'trim_user': trim_user,
'exclude_reply_user_ids': ','.join([str(u) for u in exclude_reply_user_ids or []]),
}
if attachment_url:
parameters['attachment_url'] = attachment_url
if media:
chunked_types = ['video/mp4', 'video/quicktime', 'image/gif']
media_ids = []
if isinstance(media, (int, long)):
media_ids.append(media)
elif isinstance(media, list):
for media_file in media:
# If you want to pass just a media ID, it should be an int
if isinstance(media_file, (int, long)):
media_ids.append(media_file)
continue
_, _, file_size, media_type = parse_media_file(media_file)
if (media_type == 'image/gif' or media_type == 'video/mp4') and len(media) > 1:
raise TwitterError(
'You cannot post more than 1 GIF or 1 video in a single status.')
if file_size > self.chunk_size or media_type in chunked_types:
media_id = self.UploadMediaChunked(
media=media_file,
additional_owners=media_additional_owners,
media_category=media_category)
else:
media_id = self.UploadMediaSimple(
media=media_file,
additional_owners=media_additional_owners,
media_category=media_category)
media_ids.append(media_id)
else:
_, _, file_size, media_type = parse_media_file(media)
if file_size > self.chunk_size or media_type in chunked_types:
media_ids.append(self.UploadMediaChunked(
media, media_additional_owners, media_category=media_category
))
else:
media_ids.append(self.UploadMediaSimple(
media, media_additional_owners, media_category=media_category
))
parameters['media_ids'] = ','.join([str(mid) for mid in media_ids])
if latitude is not None and longitude is not None:
parameters['lat'] = str(latitude)
parameters['long'] = str(longitude)
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
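# Usage sketch (illustrative): posting a status with a single video file,
# assuming an authenticated twitter.Api instance `api` and a hypothetical
# local file path. Videos and GIFs are routed through the chunked upload
# path above; note that only one GIF or video may accompany a status.
#
#     status = api.PostUpdate('New release!', media='demo.mp4')
#     print(status.id)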
def UploadMediaSimple(self,
media,
additional_owners=None,
media_category=None):
""" Upload a media file to Twitter in one request. Used for small file
uploads that do not require chunked uploads.
Args:
media:
File-like object to upload.
additional_owners:
Additional Twitter users that are allowed to use the uploaded
media. Should be a list of integers. Twitter caps the number of
additional owners at 100.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API. Raises
TwitterError if the upload fails.
"""
url = '%s/media/upload.json' % self.upload_url
parameters = {}
media_fp, _, _, _ = parse_media_file(media)
parameters['media'] = media_fp.read()
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
return data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded.'})
def PostMediaMetadata(self,
media_id,
alt_text=None):
"""Provide addtional data for uploaded media.
Args:
media_id:
ID of a previously uploaded media item.
alt_text:
Image Alternate Text.
"""
url = '%s/media/metadata/create.json' % self.upload_url
parameters = {}
parameters['media_id'] = media_id
if alt_text:
parameters['alt_text'] = {"text": alt_text}
resp = self._RequestUrl(url, 'POST', json=parameters)
return resp
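# Usage sketch (illustrative): attaching alt text to an image before
# posting it, with a hypothetical file path and media ID flow.
#
#     media_id = api.UploadMediaSimple('chart.png')
#     api.PostMediaMetadata(media_id, alt_text='Quarterly revenue chart')
#     api.PostUpdate('Q3 numbers', media=media_id)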
def _UploadMediaChunkedInit(self,
media,
additional_owners=None,
media_category=None):
"""Start a chunked upload to Twitter.
Args:
media:
File-like object to upload.
additional_owners:
Additional Twitter users that are allowed to use the uploaded
media. Should be a list of integers. Twitter caps the number of
additional owners at 100.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
tuple: media_id (returned from Twitter), a file-handle object (i.e.,
has a .read() method), and the filename of the media file.
"""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media, async_upload=True)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
parameters['additional_owners'] = additional_owners
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
raise TwitterError({'message': 'Media could not be uploaded'})
return (media_id, media_fp, filename)
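# Wire-level sketch (illustrative): the INIT request built above posts form
# fields resembling
#
#     command=INIT&media_type=video/mp4&total_bytes=1048576
#
# plus optional additional_owners / media_category fields; Twitter replies
# with the media_id that the APPEND and FINALIZE steps reuse.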
def _UploadMediaChunkedAppend(self,
media_id,
media_fp,
filename):
"""Appends (i.e., actually uploads) media file to Twitter.
Args:
media_id (int):
ID of the media file received from Init method.
media_fp (file):
File-like object representing media file (must have .read() method)
filename (str):
Filename of the media file being uploaded.
Returns:
True if successful. Raises otherwise.
"""
url = '%s/media/upload.json' % self.upload_url
boundary = "--{0}".format(uuid4().hex).encode('utf-8')
media_id_bytes = str(media_id).encode('utf-8')
headers = {'Content-Type': 'multipart/form-data; boundary={0}'.format(
boundary.decode('utf8')[2:]
)}
segment_id = 0
while True:
try:
data = media_fp.read(self.chunk_size)
except ValueError:
break
if not data:
break
body = [
boundary,
b'Content-Disposition: form-data; name="command"',
b'',
b'APPEND',
boundary,
b'Content-Disposition: form-data; name="media_id"',
b'',
media_id_bytes,
boundary,
b'Content-Disposition: form-data; name="segment_index"',
b'',
str(segment_id).encode('utf-8'),
boundary,
'Content-Disposition: form-data; name="media"; filename="{0!r}"'.format(filename).encode('utf8'),
b'Content-Type: application/octet-stream',
b'',
data,
boundary + b'--'
]
body_data = b'\r\n'.join(body)
headers['Content-Length'] = str(len(body_data))
resp = self._RequestChunkedUpload(url=url,
headers=headers,
data=body_data)
# The body of the response should be blank, but the normal decoding
# raises a JSONDecodeError, so we should only do error checking
# if the response is not blank.
if resp.content.decode('utf-8'):
return self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
segment_id += 1
try:
media_fp.close()
except Exception:
pass
return True
def _UploadMediaChunkedFinalize(self, media_id):
"""Finalize chunked upload to Twitter.
Args:
media_id (int):
ID of the media file for which to finalize the upload.
Returns:
json: JSON string of data from Twitter.
"""
url = '%s/media/upload.json' % self.upload_url
parameters = {
'command': 'FINALIZE',
'media_id': media_id
}
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def UploadMediaChunked(self,
media,
additional_owners=None,
media_category=None):
"""Upload a media file to Twitter in multiple requests.
Args:
media:
File-like object to upload.
additional_owners:
Additional Twitter users that are allowed to use the uploaded
media. Should be a list of integers. Twitter caps the number of
additional owners at 100.
media_category:
Category with which to identify media upload. Only use with Ads
API & video files.
Returns:
media_id:
ID of the uploaded media returned by the Twitter API. Raises if
unsuccessful.
"""
media_id, media_fp, filename = self._UploadMediaChunkedInit(media=media,
additional_owners=additional_owners,
media_category=media_category)
append = self._UploadMediaChunkedAppend(media_id=media_id,
media_fp=media_fp,
filename=filename)
if not append:
raise TwitterError('Media could not be uploaded.')
data = self._UploadMediaChunkedFinalize(media_id)
try:
return data['media_id']
except KeyError:
raise TwitterError('Media could not be uploaded.')
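# The chunked flow is INIT (declare size and type), APPEND (upload the
# bytes in self.chunk_size segments), then FINALIZE. A hedged usage
# sketch, with a hypothetical file path:
#
#     media_id = api.UploadMediaChunked('clip.mp4')
#     api.PostUpdate('New clip', media=media_id)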
def _TweetTextWrap(self,
status,
char_lim=CHARACTER_LIMIT):
if not self._config:
self.GetHelpConfiguration()
tweets = []
line = []
line_length = 0
words = re.split(r'\s', status)
if len(words) == 1 and not is_url(words[0]):
if len(words[0]) > char_lim:
raise TwitterError("Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(words[0]), char_lim))
else:
tweets.append(words[0])
return tweets
for word in words:
if len(word) > char_lim:
raise TwitterError("Unable to split status into tweetable parts. Word was: {0}/{1}".format(len(word), char_lim))
new_len = line_length
if is_url(word):
new_len = line_length + self._config['short_url_length_https'] + 1
else:
new_len += len(word) + 1
if new_len > char_lim:
tweets.append(' '.join(line))
line = [word]
line_length = new_len - line_length
else:
line.append(word)
line_length = new_len
tweets.append(' '.join(line))
return tweets
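# Behaviour sketch (illustrative): the wrapper splits on whitespace and
# charges URLs at the t.co https length from help/configuration, so a long
# message comes back as a list of chunks that each fit within char_lim:
#
#     chunks = api._TweetTextWrap(' '.join(['word'] * 100))
#     assert all(len(c) <= CHARACTER_LIMIT for c in chunks)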
def PostUpdates(self,
status,
continuation=None,
**kwargs):
"""Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than CHARACTER_LIMIT characters.
Args:
status:
The message text to be posted.
May be longer than CHARACTER_LIMIT characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A list of twitter.Status instances representing the messages posted.
"""
results = list()
if continuation is None:
continuation = ''
char_limit = CHARACTER_LIMIT - len(continuation)
tweets = self._TweetTextWrap(status=status, char_lim=char_limit)
if len(tweets) == 1:
results.append(self.PostUpdate(status=tweets[0], **kwargs))
return results
for tweet in tweets[0:-1]:
results.append(self.PostUpdate(status=tweet + continuation, **kwargs))
results.append(self.PostUpdate(status=tweets[-1], **kwargs))
return results
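# Usage sketch (illustrative): threading a long message with an ellipsis
# continuation on all but the final tweet; `long_text` is hypothetical.
#
#     statuses = api.PostUpdates(long_text, continuation=u'\u2026')
#     for s in statuses:
#         print(s.id)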
def PostRetweet(self, status_id, trim_user=False):
"""Retweet a tweet with the Retweet API.
Args:
status_id:
The numerical id of the tweet that will be retweeted
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the original tweet with retweet details embedded.
"""
try:
if int(status_id) <= 0:
raise TwitterError({'message': "'status_id' must be a positive number"})
except ValueError:
raise TwitterError({'message': "'status_id' must be an integer"})
url = '%s/statuses/retweet/%s.json' % (self.base_url, status_id)
data = {'id': status_id}
if trim_user:
data['trim_user'] = 'true'
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetUserRetweets(self,
count=None,
since_id=None,
max_id=None,
trim_user=False):
"""Fetch the sequence of retweets made by the authenticated user.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
"""
return self.GetUserTimeline(
since_id=since_id,
count=count,
max_id=max_id,
trim_user=trim_user,
exclude_replies=True,
include_rts=True)
def GetReplies(self,
since_id=None,
count=None,
max_id=None,
trim_user=False):
"""Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
"""
return self.GetUserTimeline(since_id=since_id, count=count, max_id=max_id, trim_user=trim_user,
exclude_replies=False, include_rts=False)
def GetRetweets(self,
statusid,
count=None,
trim_user=False):
"""Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid (int):
The ID of the tweet for which retweets should be searched for
count (int, optional):
The number of status messages to retrieve.
trim_user (bool, optional):
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
Returns:
A list of twitter.Status instances, which are retweets of statusid
"""
url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid)
parameters = {
'trim_user': enf_type('trim_user', bool, trim_user),
}
if count:
parameters['count'] = enf_type('count', int, count)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
def GetRetweeters(self,
status_id,
cursor=None,
count=100,
stringify_ids=False):
"""Returns a collection of up to 100 user IDs belonging to users who have
retweeted the tweet specified by the status_id parameter.
Args:
status_id:
The tweet's numerical ID.
cursor:
Breaks the IDs into pages of no more than 100. [Optional]
count:
The number of retweeter IDs to retrieve per page, up to 100. [Optional]
stringify_ids:
Returns the IDs as unicode strings. [Optional]
Returns:
A list of user IDs
"""
url = '%s/statuses/retweeters/ids.json' % (self.base_url)
parameters = {
'id': enf_type('id', int, status_id),
'stringify_ids': enf_type('stringify_ids', bool, stringify_ids),
'count': count,
}
result = []
while True:
if cursor:
try:
parameters['cursor'] = int(cursor)
except ValueError:
raise TwitterError({'message': "cursor must be an integer"})
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
result += data['ids']
# Follow the cursor until Twitter reports no further pages.
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
cursor = data['next_cursor']
else:
break
return result
def GetRetweetsOfMe(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
include_entities=True,
include_user_entities=True):
"""Returns up to 100 of the most recent tweets of the user that have been
retweeted by others.
Args:
count:
The number of retweets to retrieve, up to 100.
Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than
(newer than) this ID. [Optional]
max_id:
Returns results with an ID less than or equal
to this ID. [Optional]
trim_user:
When True, the user object for each tweet will
only be an ID. [Optional]
include_entities:
When True, the tweet entities will be included. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.Status instances, one for each of the
authenticated user's tweets that has been retweeted by others.
"""
url = '%s/statuses/retweets_of_me.json' % self.base_url
if count is not None:
try:
if int(count) > 100:
raise TwitterError({'message': "'count' may not be greater than 100"})
except ValueError:
raise TwitterError({'message': "'count' must be an integer"})
parameters = {
'count': count,
'since_id': since_id,
'max_id': max_id,
'trim_user': bool(trim_user),
'include_entities': bool(include_entities),
'include_user_entities': bool(include_user_entities),
}
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return [Status.NewFromJsonDict(s) for s in data]
def _GetBlocksMutesPaged(self,
endpoint,
action,
cursor=-1,
skip_status=False,
include_entities=True,
stringify_ids=False):
""" Fetch a page of the users (as twitter.User instances)
blocked or muted by the currently authenticated user.
Args:
endpoint (str):
Either "mute" or "block".
action (str):
Either 'list' or 'ids', depending on whether you want fully-hydrated
twitter.User objects or a list of user IDs as ints.
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each user.
"""
urls = {
'mute': {
'list': '%s/mutes/users/list.json' % self.base_url,
'ids': '%s/mutes/users/ids.json' % self.base_url
},
'block': {
'list': '%s/blocks/list.json' % self.base_url,
'ids': '%s/blocks/ids.json' % self.base_url
}
}
url = urls[endpoint][action]
result = []
parameters = {
'skip_status': bool(skip_status),
'include_entities': bool(include_entities),
'stringify_ids': bool(stringify_ids),
'cursor': cursor,
}
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if action == 'ids':
result += data.get('ids', [])
else:
result += [User.NewFromJsonDict(x) for x in data['users']]
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
def GetBlocks(self,
skip_status=False,
include_entities=False):
""" Fetch the sequence of all users (as twitter.User instances),
blocked by the currently authenticated user.
Args:
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
A list of twitter.User instances, one for each blocked user.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, users = self.GetBlocksPaged(
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetBlocksPaged(self,
cursor=-1,
skip_status=False,
include_entities=False):
""" Fetch a page of the users (as twitter.User instances)
blocked by the currently authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each blocked user.
"""
return self._GetBlocksMutesPaged(endpoint='block',
action='list',
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
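# Usage sketch (illustrative): manual cursoring with the paged variant,
# mirroring the loop GetBlocks runs internally.
#
#     cursor, blocked = -1, []
#     while True:
#         cursor, prev, users = api.GetBlocksPaged(cursor=cursor)
#         blocked.extend(users)
#         if cursor == 0 or cursor == prev:
#             break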
def GetBlocksIDs(self,
stringify_ids=False):
"""Fetch the sequence of all user IDs blocked by the
currently authenticated user.
Args:
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
A list of user IDs for all blocked users.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, user_ids = self.GetBlocksIDsPaged(
cursor=cursor,
stringify_ids=stringify_ids)
result += user_ids
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetBlocksIDsPaged(self,
cursor=-1,
stringify_ids=False):
""" Fetch a page of the user IDs blocked by the currently
authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of blocked users that you want to return.
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
next_cursor, previous_cursor, list of user IDs of blocked users.
"""
return self._GetBlocksMutesPaged(endpoint='block',
action='ids',
cursor=cursor,
stringify_ids=stringify_ids)
def GetMutes(self,
skip_status=False,
include_entities=False):
""" Fetch the sequence of all users (as twitter.User instances),
muted by the currently authenticated user.
Args:
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
A list of twitter.User instances, one for each muted user.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, users = self.GetMutesPaged(
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetMutesPaged(self,
cursor=-1,
skip_status=False,
include_entities=False):
""" Fetch a page of the users (as twitter.User instances)
muted by the currently authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of muted users that you want to return.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
When True, the user entities will be included.
Returns:
next_cursor, previous_cursor, list of twitter.User instances,
one for each muted user.
"""
return self._GetBlocksMutesPaged(endpoint='mute',
action='list',
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
def GetMutesIDs(self,
stringify_ids=False):
"""Fetch the sequence of all user IDs muted by the
currently authenticated user.
Args:
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
A list of user IDs for all muted users.
"""
result = []
cursor = -1
while True:
next_cursor, previous_cursor, user_ids = self.GetMutesIDsPaged(
cursor=cursor,
stringify_ids=stringify_ids)
result += user_ids
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetMutesIDsPaged(self,
cursor=-1,
stringify_ids=False):
""" Fetch a page of the user IDs muted by the currently
authenticated user.
Args:
cursor (int, optional):
Should be set to -1 if you want the first page, thereafter denotes
the page of muted users that you want to return.
stringify_ids (bool, optional):
If True user IDs will be returned as strings rather than integers.
Returns:
next_cursor, previous_cursor, list of user IDs of muted users.
"""
return self._GetBlocksMutesPaged(endpoint='mute',
action='ids',
cursor=cursor,
stringify_ids=stringify_ids)
def _BlockMute(self,
action,
endpoint,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Create or destroy a block or mute on behalf of the authenticated user.
Args:
action (str):
Either 'create' or 'destroy'.
endpoint (str):
Either 'block' or 'mute'.
user_id (int, optional):
The numerical ID of the user to block/mute.
screen_name (str, optional):
The screen name of the user to block/mute.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to True, the blocked/muted user's statuses will not be
included with the returned User object.
Returns:
twitter.User: twitter.User object representing the blocked/muted user.
"""
urls = {
'block': {
'create': '%s/blocks/create.json' % (self.base_url),
'destroy': '%s/blocks/destroy.json' % (self.base_url),
},
'mute': {
'create': '%s/mutes/users/create.json' % (self.base_url),
'destroy': '%s/mutes/users/destroy.json' % (self.base_url)
}
}
url = urls[endpoint][action]
post_data = {}
if user_id:
post_data['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
post_data['screen_name'] = screen_name
else:
raise TwitterError("You must specify either a user_id or screen_name")
if include_entities:
post_data['include_entities'] = enf_type('include_entities', bool, include_entities)
if skip_status:
post_data['skip_status'] = enf_type('skip_status', bool, skip_status)
resp = self._RequestUrl(url, 'POST', data=post_data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def CreateBlock(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Blocks the user specified by either user_id or screen_name.
Args:
user_id (int, optional):
The numerical ID of the user to block.
screen_name (str, optional):
The screen name of the user to block.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to True, the blocked user's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the blocked user.
"""
return self._BlockMute(action='create',
endpoint='block',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
def DestroyBlock(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Unlocks the user specified by either user_id or screen_name.
Args:
user_id (int, optional)
The numerical ID of the user to block.
screen_name (str, optional):
The screen name of the user to block.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to False, the blocked User's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the blocked user.
"""
return self._BlockMute(action='destroy',
endpoint='block',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
def CreateMute(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Mutes the user specified by either user_id or screen_name.
Args:
user_id (int, optional):
The numerical ID of the user to mute.
screen_name (str, optional):
The screen name of the user to mute.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to True, the muted user's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the muted user.
"""
return self._BlockMute(action='create',
endpoint='mute',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
def DestroyMute(self,
user_id=None,
screen_name=None,
include_entities=True,
skip_status=False):
"""Unlocks the user specified by either user_id or screen_name.
Args:
user_id (int, optional)
The numerical ID of the user to mute.
screen_name (str, optional):
The screen name of the user to mute.
include_entities (bool, optional):
The entities node will not be included if set to False.
skip_status (bool, optional):
When set to False, the muted User's statuses will not be included
with the returned User object.
Returns:
A twitter.User instance representing the muted user.
"""
return self._BlockMute(action='destroy',
endpoint='mute',
user_id=user_id,
screen_name=screen_name,
include_entities=include_entities,
skip_status=skip_status)
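# Usage sketch (illustrative): the four public wrappers above all funnel
# into _BlockMute; a mute round-trip with a hypothetical screen name:
#
#     api.CreateMute(screen_name='noisy_account')
#     api.DestroyMute(screen_name='noisy_account')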
def _GetIDsPaged(self,
url,
user_id,
screen_name,
cursor,
stringify_ids,
count):
"""
This is the lowest level paging logic for fetching IDs. It is used
solely by GetFollowerIDsPaged and GetFriendIDsPaged. It is not intended
for other use.
See GetFollowerIDsPaged or GetFriendIDsPaged for an explanation of the
input arguments.
"""
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if count is not None:
parameters['count'] = count
parameters['stringify_ids'] = stringify_ids
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if 'ids' in data:
result.extend([x for x in data['ids']])
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
return next_cursor, previous_cursor, result
def GetFollowerIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return a list of one page followers.
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user IDs to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of user ids,
one for each follower
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetIDsPaged(url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count)
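# Usage sketch (illustrative): callers own the cursor loop for the paged
# ID methods; -1 requests the first page.
#
#     cursor, ids = -1, []
#     while True:
#         cursor, prev, page = api.GetFollowerIDsPaged(
#             screen_name='twitter', cursor=cursor)
#         ids.extend(page)
#         if cursor == 0 or cursor == prev:
#             break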
def GetFriendIDsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
stringify_ids=False,
count=5000):
"""Make a cursor driven call to return the list of all friends
The caller is responsible for handling the cursor value and looping
to gather all of the data
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user IDs to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of user IDs,
one for each friend
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetIDsPaged(url,
user_id,
screen_name,
cursor,
stringify_ids,
count)
def _GetFriendFollowerIDs(self,
url=None,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Common method for GetFriendIDs and GetFollowerIDs """
count = 5000
cursor = -1
result = []
if total_count:
total_count = enf_type('total_count', int, total_count)
if total_count and total_count < count:
count = total_count
while True:
if total_count is not None and len(result) + count > total_count:
break
next_cursor, previous_cursor, data = self._GetIDsPaged(
url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count)
result.extend([x for x in data])
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def GetFollowerIDs(self,
user_id=None,
screen_name=None,
cursor=None,
stringify_ids=False,
count=None,
total_count=None):
"""Returns a list of twitter user id's for every person
that is following the specified user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of
integers. [Optional]
count:
The number of user IDs to retrieve per API request. Please be aware
that this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many
followers and you don't want to get rate limited. The data returned
might contain fewer UIDs than total_count if total_count is not a
multiple of count (5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/followers/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url=url,
user_id=user_id,
screen_name=screen_name,
cursor=cursor,
stringify_ids=stringify_ids,
count=count,
total_count=total_count)
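# Usage sketch (illustrative): capping the fetch for a large account. With
# pages of 5000, a total_count of 10000 yields at most two pages of IDs.
#
#     ids = api.GetFollowerIDs(screen_name='twitter', total_count=10000)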
def GetFriendIDs(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
stringify_ids=False,
total_count=None):
""" Fetch a sequence of user ids, one for each friend.
Returns a list of all the given user's friends' IDs. If no user_id or
screen_name is given, the friends will be those of the authenticated
user.
Args:
user_id:
The id of the user to retrieve the id list for. [Optional]
screen_name:
The screen_name of the user to retrieve the id list for. [Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits. [Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of integers.
[Optional]
count:
The number of user IDs to retrieve per API request. Please be aware that
this might get you rate-limited if set to a small number.
By default Twitter will retrieve 5000 UIDs per call. [Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many
followers and you don't want to get rate limited. The data returned
might contain fewer UIDs than total_count if total_count is not a
multiple of count (5000 by default). [Optional]
Returns:
A list of integers, one for each user id.
"""
url = '%s/friends/ids.json' % self.base_url
return self._GetFriendFollowerIDs(url,
user_id,
screen_name,
cursor,
count,
stringify_ids,
total_count)
def _GetFriendsFollowersPaged(self,
url=None,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of 1 page of friends
or followers.
Args:
url:
Endpoint from which to get data. Either
base_url+'/followers/list.json' or base_url+'/friends/list.json'.
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
if user_id and screen_name:
warnings.warn(
"If both user_id and screen_name are specified, Twitter will "
"return the followers of the user specified by screen_name, "
"however this behavior is undocumented by Twitter and might "
"change without warning.", stacklevel=2)
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
try:
parameters['count'] = int(count)
except ValueError:
raise TwitterError({'message': "count must be an integer"})
parameters['skip_status'] = skip_status
parameters['include_user_entities'] = include_user_entities
parameters['cursor'] = cursor
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if 'users' in data:
users = [User.NewFromJsonDict(user) for user in data['users']]
else:
users = []
if 'next_cursor' in data:
next_cursor = data['next_cursor']
else:
next_cursor = 0
if 'previous_cursor' in data:
previous_cursor = data['previous_cursor']
else:
previous_cursor = 0
return next_cursor, previous_cursor, users
def GetFollowersPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all followers
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
def GetFriendsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=200,
skip_status=False,
include_user_entities=True):
"""Make a cursor driven call to return the list of all friends.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a current maximum of
200. Defaults to 200. [Optional]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
next_cursor, previous_cursor, data sequence of twitter.User
instances, one for each friend
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowersPaged(url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
def _GetFriendsFollowers(self,
url=None,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
""" Fetch the sequence of twitter.User instances, one for each friend
or follower.
Args:
url:
URL to get. Either base_url + ('/followers/list.json' or
'/friends/list.json').
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend or follower
"""
if cursor is not None or count is not None:
warnings.warn(
"Use of 'cursor' and 'count' parameters are deprecated as of "
"python-twitter 3.0. Please use GetFriendsPaged or "
"GetFollowersPaged instead.",
PythonTwitterDeprecationWarning330)
count = 200
cursor = -1
result = []
if total_count:
try:
total_count = int(total_count)
except ValueError:
raise TwitterError({'message': "total_count must be an integer"})
if total_count <= 200:
count = total_count
while True:
if total_count is not None and len(result) + count > total_count:
break
next_cursor, previous_cursor, data = self._GetFriendsFollowersPaged(
url,
user_id,
screen_name,
cursor,
count,
skip_status,
include_user_entities)
if next_cursor:
cursor = next_cursor
result.extend(data)
if next_cursor == 0 or next_cursor == previous_cursor:
break
return result
def GetFollowers(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each follower.
If both user_id and screen_name are specified, this call will return
the followers of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items. [Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each follower
"""
url = '%s/followers/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def GetFriends(self,
user_id=None,
screen_name=None,
cursor=None,
count=None,
total_count=None,
skip_status=False,
include_user_entities=True):
"""Fetch the sequence of twitter.User instances, one for each friend.
If both user_id and screen_name are specified, this call will return
the friends of the user specified by screen_name, however this
behavior is undocumented by Twitter and may change without warning.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
count:
The number of users to return per page, up to a maximum of 200.
Defaults to 200. [Optional]
total_count:
The upper bound of number of users to return, defaults to None.
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend
"""
url = '%s/friends/list.json' % self.base_url
return self._GetFriendsFollowers(url,
user_id,
screen_name,
cursor,
count,
total_count,
skip_status,
include_user_entities)
def UsersLookup(self,
user_id=None,
screen_name=None,
users=None,
include_entities=True,
return_json=False):
"""Fetch extended information for the specified users.
Users may be specified as lists of user_ids, screen_names, or
twitter.User objects. The list of users that are queried is the
union of all specified parameters.
No more than 100 users may be given per request.
Args:
user_id (int, list, optional):
A list of user_ids to retrieve extended information.
screen_name (str, list, optional):
A list of screen_names to retrieve extended information.
users (list, optional):
A list of twitter.User objects to retrieve extended information.
include_entities (bool, optional):
The entities node that may appear within embedded statuses will be
excluded when set to False.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A list of twitter.User objects for the requested users
"""
if not any([user_id, screen_name, users]):
raise TwitterError("Specify at least one of user_id, screen_name, or users.")
url = '%s/users/lookup.json' % self.base_url
parameters = {
'include_entities': include_entities
}
uids = list()
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if len(uids):
parameters['user_id'] = ','.join([str(u) for u in uids])
if screen_name:
parameters['screen_name'] = parse_arg_list(screen_name, 'screen_name')
if len(uids) > 100:
raise TwitterError("No more than 100 users may be requested per request.")
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [User.NewFromJsonDict(u) for u in data]
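# Usage sketch (illustrative): the queried set is the union of user_id,
# screen_name, and users; the IDs and names below are hypothetical.
#
#     users = api.UsersLookup(user_id=[12, 13],
#                             screen_name=['jack', 'biz'])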
def GetUser(self,
user_id=None,
screen_name=None,
include_entities=True,
return_json=False):
"""Returns a single user.
Args:
user_id (int, optional):
The id of the user to retrieve.
screen_name (str, optional):
The screen name of the user for whom to return results.
Either a user_id or screen_name is required for this method.
include_entities (bool, optional):
The entities node will be omitted when set to False.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
A twitter.User instance representing that user
"""
url = '%s/users/show.json' % (self.base_url)
parameters = {
'include_entities': include_entities
}
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return User.NewFromJsonDict(data)
def GetDirectMessages(self,
since_id=None,
max_id=None,
count=None,
include_entities=True,
skip_status=False,
full_text=False,
page=None,
return_json=False):
"""Returns a list of the direct messages sent to the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
include_entities:
The entities node will be omitted when set to False.
[Optional]
skip_status:
When set to True statuses will not be included in the returned user
objects. [Optional]
full_text:
When set to True full message will be included in the returned message
object if message length is bigger than CHARACTER_LIMIT characters. [Optional]
page:
If you want more than 200 messages, you can use this to fetch 20
messages at a time. Call it repeatedly, incrementing the page value,
until it returns nothing. The count option cannot be used with it.
The first page value is 1, not 0.
return_json (bool, optional):
If True, JSON data will be returned instead of twitter.DirectMessage instances.
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages.json' % self.base_url
parameters = {
'full_text': bool(full_text),
'include_entities': bool(include_entities),
'max_id': max_id,
'since_id': since_id,
'skip_status': bool(skip_status),
}
if count:
parameters['count'] = enf_type('count', int, count)
if page:
parameters['page'] = enf_type('page', int, page)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [DirectMessage.NewFromJsonDict(x) for x in data]
def GetSentDirectMessages(self,
since_id=None,
max_id=None,
count=None,
page=None,
include_entities=True,
return_json=False):
"""Returns a list of the direct messages sent by the authenticating user.
Args:
since_id (int, optional):
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available.
max_id (int, optional):
Returns results with an ID less than (that is, older than) or
equal to the specified ID.
count (int, optional):
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied.
page (int, optional):
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_entities (bool, optional):
The entities node will be omitted when set to False.
return_json (bool, optional):
If True, JSON data will be returned instead of twitter.DirectMessage instances.
Returns:
A sequence of twitter.DirectMessage instances
"""
url = '%s/direct_messages/sent.json' % self.base_url
parameters = {
'include_entities': bool(include_entities),
'max_id': max_id,
'since_id': since_id,
}
if count:
parameters['count'] = enf_type('count', int, count)
if page:
parameters['page'] = enf_type('page', int, page)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self,
text,
user_id=None,
screen_name=None,
return_json=False):
"""Post a twitter direct message from the authenticated user.
Args:
text: The message text to be posted.
user_id:
The ID of the user who should receive the direct message. [Optional]
screen_name:
The screen name of the user who should receive the direct message.
[Optional]
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.DirectMessage
Returns:
A twitter.DirectMessage instance representing the message posted
"""
url = '%s/direct_messages/events/new.json' % self.base_url
# Hack to allow some sort of backwards compatibility with older versions
# part of the fix for Issue #587
if user_id is None and screen_name is not None:
user_id = self.GetUser(screen_name=screen_name).id
event = {
'event': {
'type': 'message_create',
'message_create': {
'target': {
'recipient_id': user_id,
},
'message_data': {
'text': text
}
}
}
}
resp = self._RequestUrl(url, 'POST', json=event)
data = resp.json()
if return_json:
return data
else:
dm = DirectMessage(
created_at=data['event']['created_timestamp'],
id=data['event']['id'],
recipient_id=data['event']['message_create']['target']['recipient_id'],
sender_id=data['event']['message_create']['sender_id'],
text=data['event']['message_create']['message_data']['text'],
)
dm._json = data
return dm
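# Usage sketch (illustrative): the new-style DM endpoint takes a nested
# event payload; this wrapper builds it from a recipient user_id, or looks
# one up from a screen name, e.g. (hypothetical recipient):
#
#     dm = api.PostDirectMessage('hello!', screen_name='jack')
#     print(dm.text)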
def DestroyDirectMessage(self, message_id, include_entities=True, return_json=False):
"""Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
message_id:
The id of the direct message to be destroyed
return_json (bool, optional):
If True, JSON data will be returned instead of a twitter.DirectMessage instance.
Returns:
A twitter.DirectMessage instance representing the message destroyed
"""
url = '%s/direct_messages/destroy.json' % self.base_url
data = {
'id': enf_type('message_id', int, message_id),
'include_entities': enf_type('include_entities', bool, include_entities)
}
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user_id=None, screen_name=None, follow=True, retweets=True, **kwargs):
"""Befriends the user specified by the user_id or screen_name.
Args:
user_id (int, optional):
A user_id to follow
screen_name (str, optional):
A screen_name to follow
follow (bool, optional):
Set to False to disable notifications for the target user
retweets (bool, optional):
Enable or disable retweets from the target user.
Returns:
A twitter.User instance representing the befriended user.
"""
return self._AddOrEditFriendship(user_id=user_id,
screen_name=screen_name,
follow=follow,
retweets=retweets,
**kwargs)
def _AddOrEditFriendship(self,
user_id=None,
screen_name=None,
uri_end='create',
follow_key='follow',
follow=True,
**kwargs):
"""Shared method for Create/Update Friendship."""
url = '%s/friendships/%s.json' % (self.base_url, uri_end)
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
        # Twitter expects a JSON boolean ('true'/'false') for this flag.
        data[follow_key] = json.dumps(follow)
data.update(**kwargs)
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
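    # Both CreateFriendship and UpdateFriendship delegate to the helper
    # above; only the endpoint and the key under which `follow` is sent
    # differ. A sketch with a placeholder screen name:
    #
    #     user = api.CreateFriendship(screen_name='example_user')
    #     user = api.UpdateFriendship(screen_name='example_user', follow=False)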
def UpdateFriendship(self,
user_id=None,
screen_name=None,
follow=True,
retweets=True,
**kwargs):
"""Updates a friendship with the user specified by the user_id or screen_name.
Args:
user_id (int, optional):
A user_id to update
screen_name (str, optional):
A screen_name to update
          follow (bool, optional):
            Set to False to disable notifications for the target user.
            Sent to the API as the 'device' parameter.
          retweets (bool, optional):
            Enable or disable retweets from the target user.
Returns:
A twitter.User instance representing the befriended user.
"""
return self._AddOrEditFriendship(user_id=user_id,
screen_name=screen_name,
follow=follow,
follow_key='device',
retweets=retweets,
uri_end='update',
**kwargs)
def DestroyFriendship(self, user_id=None, screen_name=None):
"""Discontinues friendship with a user_id or screen_name.
Args:
user_id:
A user_id to unfollow [Optional]
screen_name:
A screen_name to unfollow [Optional]
Returns:
A twitter.User instance representing the discontinued friend.
"""
url = '%s/friendships/destroy.json' % self.base_url
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def ShowFriendship(self,
source_user_id=None,
source_screen_name=None,
target_user_id=None,
target_screen_name=None):
"""Returns information about the relationship between the two users.
        Args:
          source_user_id:
            The user_id of the subject user [Optional]
          source_screen_name:
            The screen_name of the subject user [Optional]
          target_user_id:
            The user_id of the target user [Optional]
          target_screen_name:
            The screen_name of the target user [Optional]
Returns:
A Twitter Json structure.
"""
url = '%s/friendships/show.json' % self.base_url
data = {}
if source_user_id:
data['source_id'] = source_user_id
elif source_screen_name:
data['source_screen_name'] = source_screen_name
else:
raise TwitterError({'message': "Specify at least one of source_user_id or source_screen_name."})
if target_user_id:
data['target_id'] = target_user_id
elif target_screen_name:
data['target_screen_name'] = target_screen_name
else:
raise TwitterError({'message': "Specify at least one of target_user_id or target_screen_name."})
resp = self._RequestUrl(url, 'GET', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
def LookupFriendship(self,
user_id=None,
screen_name=None,
return_json=False):
"""Lookup friendship status for user to authed user.
        Users may be specified as lists of user_ids, screen_names, or
        twitter.User objects; the set of users queried is the union of all
        specified parameters. Up to 100 users may be specified.
Args:
user_id (int, User, or list of ints or Users, optional):
A list of user_ids to retrieve extended information.
screen_name (string, User, or list of strings or Users, optional):
A list of screen_names to retrieve extended information.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.UserStatus
        Returns:
          list: A list of twitter.UserStatus instances representing the
          friendship status between the specified users and the authenticated
          user.
"""
url = '%s/friendships/lookup.json' % (self.base_url)
parameters = {}
if user_id:
if isinstance(user_id, (list, tuple)):
uids = list()
for user in user_id:
if isinstance(user, User):
uids.append(user.id)
else:
uids.append(enf_type('user_id', int, user))
parameters['user_id'] = ",".join([str(uid) for uid in uids])
else:
if isinstance(user_id, User):
parameters['user_id'] = user_id.id
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
if screen_name:
if isinstance(screen_name, (list, tuple)):
sn_list = list()
for user in screen_name:
if isinstance(user, User):
sn_list.append(user.screen_name)
else:
sn_list.append(enf_type('screen_name', str, user))
parameters['screen_name'] = ','.join(sn_list)
else:
if isinstance(screen_name, User):
parameters['screen_name'] = screen_name.screen_name
else:
parameters['screen_name'] = enf_type('screen_name', str, screen_name)
if not user_id and not screen_name:
raise TwitterError("Specify at least one of user_id or screen_name.")
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [UserStatus.NewFromJsonDict(x) for x in data]
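    # Sketch: user_id and screen_name accept single values, twitter.User
    # objects, or lists mixing both. The screen names are placeholders, and
    # the attribute access assumes the twitter.UserStatus model.
    #
    #     statuses = api.LookupFriendship(screen_name=['example_a', 'example_b'])
    #     for status in statuses:
    #         print(status)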
def IncomingFriendship(self,
cursor=None,
stringify_ids=None):
"""Returns a collection of user IDs belonging to users who have
pending request to follow the authenticated user.
Args:
cursor:
breaks the ids into pages of no more than 5000.
stringify_ids:
returns the IDs as unicode strings. [Optional]
Returns:
A list of user IDs
"""
url = '%s/friendships/incoming.json' % (self.base_url)
parameters = {}
if stringify_ids:
parameters['stringify_ids'] = 'true'
        result = []
        while True:
            if cursor:
                try:
                    # The cursor selects which page of ids to fetch next.
                    parameters['cursor'] = int(cursor)
                except ValueError:
                    raise TwitterError({'message': "cursor must be an integer"})
            resp = self._RequestUrl(url, 'GET', data=parameters)
            data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
            result += [x for x in data['ids']]
            if 'next_cursor' in data:
                if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                    break
                else:
                    cursor = data['next_cursor']
            else:
                break
        return result
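    # Sketch: the method pages through the endpoint internally and returns
    # the accumulated ids in one list.
    #
    #     pending_ids = api.IncomingFriendship(stringify_ids=True)
    #     print(len(pending_ids))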
def OutgoingFriendship(self,
cursor=None,
stringify_ids=None):
"""Returns a collection of user IDs for every protected user
for whom the authenticated user has a pending follow request.
Args:
cursor:
breaks the ids into pages of no more than 5000.
stringify_ids:
returns the IDs as unicode strings. [Optional]
Returns:
A list of user IDs
"""
url = '%s/friendships/outgoing.json' % (self.base_url)
parameters = {}
if stringify_ids:
parameters['stringify_ids'] = 'true'
        result = []
        while True:
            if cursor:
                try:
                    # The cursor selects which page of ids to fetch next.
                    parameters['cursor'] = int(cursor)
                except ValueError:
                    raise TwitterError({'message': "cursor must be an integer"})
            resp = self._RequestUrl(url, 'GET', data=parameters)
            data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
            result += [x for x in data['ids']]
            if 'next_cursor' in data:
                if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
                    break
                else:
                    cursor = data['next_cursor']
            else:
                break
        return result
def CreateFavorite(self,
status=None,
status_id=None,
include_entities=True):
"""Favorites the specified status object or id as the authenticating user.
Returns the favorite status when successful.
Args:
status_id (int, optional):
The id of the twitter status to mark as a favorite.
status (twitter.Status, optional):
The twitter.Status object to mark as a favorite.
include_entities (bool, optional):
The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-marked favorite.
"""
url = '%s/favorites/create.json' % self.base_url
data = {}
if status_id:
data['id'] = status_id
elif status:
data['id'] = status.id
else:
raise TwitterError({'message': "Specify status_id or status"})
data['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def DestroyFavorite(self,
status=None,
status_id=None,
include_entities=True):
"""Un-Favorites the specified status object or id as the authenticating user.
Returns the un-favorited status when successful.
        Args:
          status_id (int, optional):
            The id of the twitter status to un-favorite.
          status (twitter.Status, optional):
            The twitter.Status object to un-favorite.
          include_entities (bool, optional):
            The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
"""
url = '%s/favorites/destroy.json' % self.base_url
data = {}
if status_id:
data['id'] = status_id
elif status:
data['id'] = status.id
else:
raise TwitterError({'message': "Specify status_id or status"})
data['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return Status.NewFromJsonDict(data)
def GetFavorites(self,
user_id=None,
screen_name=None,
count=None,
since_id=None,
max_id=None,
include_entities=True,
return_json=False):
"""Return a list of Status objects representing favorited tweets.
Returns up to 200 most recent tweets for the authenticated user.
Args:
user_id (int, optional):
Specifies the ID of the user for whom to return the
favorites. Helpful for disambiguating when a valid user ID
is also a valid screen name.
screen_name (str, optional):
Specifies the screen name of the user for whom to return the
favorites. Helpful for disambiguating when a valid screen
name is also a user ID.
since_id (int, optional):
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID.
count (int, optional):
Specifies the number of statuses to retrieve. May not be
greater than 200.
include_entities (bool, optional):
The entities node will be omitted when set to False.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.Status
Returns:
A sequence of Status instances, one for each favorited tweet up to count
"""
parameters = {}
url = '%s/favorites/list.json' % self.base_url
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if count:
parameters['count'] = enf_type('count', int, count)
parameters['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [Status.NewFromJsonDict(x) for x in data]
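    # Sketch with a placeholder screen name: fetch the ten most recent
    # favorites and print their text.
    #
    #     for status in api.GetFavorites(screen_name='example_user', count=10):
    #         print(status.text)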
def GetMentions(self,
count=None,
since_id=None,
max_id=None,
trim_user=False,
contributor_details=False,
include_entities=True,
return_json=False):
"""Returns the 20 most recent mentions (status containing @screen_name)
for the authenticating user.
Args:
count:
Specifies the number of tweets to try and retrieve, up to a maximum of
200. The value of count is best thought of as a limit to the number of
tweets to return because suspended or deleted content is removed after
the count has been applied. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than
(that is, older than) the specified ID. [Optional]
trim_user:
When set to True, each tweet returned in a timeline will include a user
object including only the status authors numerical ID. Omit this
parameter to receive the complete user object. [Optional]
contributor_details:
If set to True, this parameter enhances the contributors element of the
status response to include the screen_name of the contributor. By
default only the user_id of the contributor is included. [Optional]
          include_entities:
            The entities node will be omitted when set to False. [Optional]
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.Status
Returns:
A sequence of twitter.Status instances, one for each mention of the user.
"""
url = '%s/statuses/mentions_timeline.json' % self.base_url
parameters = {
'contributor_details': bool(contributor_details),
'include_entities': bool(include_entities),
'max_id': max_id,
'since_id': since_id,
'trim_user': bool(trim_user),
}
if count:
parameters['count'] = enf_type('count', int, count)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [Status.NewFromJsonDict(x) for x in data]
@staticmethod
def _IDList(list_id, slug, owner_id, owner_screen_name):
parameters = {}
if list_id is not None:
parameters['list_id'] = enf_type('list_id', int, list_id)
elif slug is not None:
parameters['slug'] = slug
if owner_id is not None:
parameters['owner_id'] = enf_type('owner_id', int, owner_id)
elif owner_screen_name is not None:
parameters['owner_screen_name'] = owner_screen_name
else:
raise TwitterError({'message': (
'If specifying a list by slug, an owner_id or '
'owner_screen_name must also be given.')})
else:
raise TwitterError({'message': (
'Either list_id or slug and one of owner_id and '
'owner_screen_name must be passed.')})
return parameters
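    # The helper above accepts either of the two ways the API identifies a
    # list. A sketch of the parameters it produces (the values are made up):
    #
    #     Api._IDList(list_id=123456, slug=None,
    #                 owner_id=None, owner_screen_name=None)
    #     # -> {'list_id': 123456}
    #     Api._IDList(list_id=None, slug='my-list',
    #                 owner_id=None, owner_screen_name='example_user')
    #     # -> {'slug': 'my-list', 'owner_screen_name': 'example_user'}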
def CreateList(self, name, mode=None, description=None):
"""Creates a new list with the give name for the authenticated user.
Args:
name (str):
New name for the list
mode (str, optional):
'public' or 'private'. Defaults to 'public'.
description (str, optional):
Description of the list.
Returns:
twitter.list.List: A twitter.List instance representing the new list
"""
url = '%s/lists/create.json' % self.base_url
parameters = {'name': name}
if mode is not None:
parameters['mode'] = mode
if description is not None:
parameters['description'] = description
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def DestroyList(self,
owner_screen_name=None,
owner_id=None,
list_id=None,
slug=None):
"""Destroys the list identified by list_id or slug and one of
owner_screen_name or owner_id.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.list.List: A twitter.List instance representing the
removed list.
"""
url = '%s/lists/destroy.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def CreateSubscription(self,
owner_screen_name=None,
owner_id=None,
list_id=None,
slug=None):
"""Creates a subscription to a list by the authenticated user.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.user.User: A twitter.User instance representing the user subscribed
"""
url = '%s/lists/subscribers/create.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def DestroySubscription(self,
owner_screen_name=None,
owner_id=None,
list_id=None,
slug=None):
"""Destroys the subscription to a list for the authenticated user.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify the
list owner using the owner_id or owner_screen_name parameters.
Returns:
twitter.list.List: A twitter.List instance representing
the removed list.
"""
url = '%s/lists/subscribers/destroy.json' % (self.base_url)
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def ShowSubscription(self,
owner_screen_name=None,
owner_id=None,
list_id=None,
slug=None,
user_id=None,
screen_name=None,
include_entities=False,
skip_status=False,
return_json=False):
"""Check if the specified user is a subscriber of the specified list.
Returns the user if they are subscriber.
Args:
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested
by a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested
by a slug.
list_id (int, optional):
The numerical ID of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical ID.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
          user_id (int, optional):
            The user_id of the user to check for subscription.
            If not given, then screen_name is required.
          screen_name (str, optional):
            The screen_name of the user to check for subscription.
            If not given, then user_id is required.
          include_entities (bool, optional):
            If False, the timeline will not contain additional metadata.
            Defaults to False.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
return_json (bool, optional):
If True JSON data will be returned, instead of twitter.User
Returns:
twitter.user.User: A twitter.User instance representing the user
requested.
"""
url = '%s/lists/subscribers/show.json' % (self.base_url)
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_entities:
parameters['include_entities'] = True
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return User.NewFromJsonDict(data)
def GetSubscriptions(self,
user_id=None,
screen_name=None,
count=20,
cursor=-1,
return_json=False):
"""Obtain a collection of the lists the specified user is
subscribed to. If neither user_id or screen_name is specified, the
data returned will be for the authenticated user.
The list will contain a maximum of 20 lists per page by default.
Does not include the user's own lists.
Args:
          user_id (int, optional):
            The ID of the user for whom to return results.
          screen_name (str, optional):
            The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page.
No more than 1000 results will ever be returned in a single
page. Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the
list sequence from. Use the value of -1 to start at the
beginning. Twitter will return in the result the values for
next_cursor and previous_cursor.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.List
Returns:
twitter.list.List: A sequence of twitter.List instances,
one for each list
"""
url = '%s/lists/subscriptions.json' % (self.base_url)
parameters = {}
parameters['cursor'] = enf_type('cursor', int, cursor)
parameters['count'] = enf_type('count', int, count)
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetMemberships(self,
user_id=None,
screen_name=None,
count=20,
cursor=-1,
filter_to_owned_lists=False,
return_json=False):
"""Obtain the lists the specified user is a member of. If no user_id or
screen_name is specified, the data returned will be for the
authenticated user.
Returns a maximum of 20 lists per page by default.
Args:
          user_id (int, optional):
            The ID of the user for whom to return results.
          screen_name (str, optional):
            The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page.
No more than 1000 results will ever be returned in a single page.
Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the list
sequence from. Use the value of -1 to start at the beginning.
Twitter will return in the result the values for next_cursor and
previous_cursor.
filter_to_owned_lists (bool, optional):
Set to True to return only the lists the authenticating user
owns, and the user specified by user_id or screen_name is a
member of. Default value is False.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.List
Returns:
list: A list of twitter.List instances, one for each list in which
the user specified by user_id or screen_name is a member
"""
url = '%s/lists/memberships.json' % (self.base_url)
parameters = {}
if cursor is not None:
parameters['cursor'] = enf_type('cursor', int, cursor)
if count is not None:
parameters['count'] = enf_type('count', int, count)
if filter_to_owned_lists:
parameters['filter_to_owned_lists'] = enf_type(
'filter_to_owned_lists', bool, filter_to_owned_lists)
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetListsList(self,
screen_name=None,
user_id=None,
reverse=False,
return_json=False):
"""Returns all lists the user subscribes to, including their own.
If no user_id or screen_name is specified, the data returned will be
for the authenticated user.
Args:
screen_name (str, optional):
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
user_id (int, optional):
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
          reverse (bool, optional):
            If False, the owned lists will be returned first, otherwise
            subscribed lists will be at the top. Returns a maximum of 100
            entries regardless. Defaults to False.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.List
Returns:
list: A sequence of twitter.List instances.
"""
url = '%s/lists/list.json' % (self.base_url)
parameters = {}
if user_id:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
parameters['screen_name'] = screen_name
if reverse:
parameters['reverse'] = enf_type('reverse', bool, reverse)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [List.NewFromJsonDict(x) for x in data]
def GetListTimeline(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
since_id=None,
max_id=None,
count=None,
include_rts=True,
include_entities=True,
return_json=False):
"""Fetch the sequence of Status messages for a given List ID.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
          slug (str, optional):
            The slug name for the list to retrieve. If you specify a slug
            instead of a list_id, then you also have to provide either an
            owner_screen_name or an owner_id.
owner_id (int, optional):
Specifies the ID of the user for whom to return the
list timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
owner_screen_name (str, optional):
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
since_id (int, optional):
Returns results with an ID greater than (that is, more recent than)
the specified ID. There are limits to the number of Tweets which
can be accessed through the API.
If the limit of Tweets has occurred since the since_id, the
since_id will be forced to the oldest ID available.
max_id (int, optional):
Returns only statuses with an ID less than (that is, older than) or
equal to the specified ID.
count (int, optional):
Specifies the number of statuses to retrieve.
May not be greater than 200.
include_rts (bool, optional):
If True, the timeline will contain native retweets (if they exist)
in addition to the standard stream of tweets.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to True.
          return_json (bool, optional):
            If True JSON data will be returned, instead of twitter.Status
Returns:
list: A list of twitter.status.Status instances, one for each
message up to count.
"""
url = '%s/lists/statuses.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if since_id:
parameters['since_id'] = enf_type('since_id', int, since_id)
if max_id:
parameters['max_id'] = enf_type('max_id', int, max_id)
if count:
parameters['count'] = enf_type('count', int, count)
if not include_rts:
parameters['include_rts'] = enf_type('include_rts', bool, include_rts)
if not include_entities:
parameters['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
if return_json:
return data
else:
return [Status.NewFromJsonDict(x) for x in data]
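    # Sketch with placeholder names: fetch the five most recent statuses
    # from a list addressed by slug and owner.
    #
    #     statuses = api.GetListTimeline(slug='python-news',
    #                                    owner_screen_name='example_user',
    #                                    count=5)
    #     for status in statuses:
    #         print(status.id, status.text)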
def GetListMembersPaged(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
cursor=-1,
count=100,
skip_status=False,
include_entities=True):
"""Fetch the sequence of twitter.User instances, one for each member
of the given list_id or slug.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
          slug (str, optional):
            The slug name for the list to retrieve. If you specify a slug
            instead of a list_id, then you also have to provide either an
            owner_screen_name or an owner_id.
owner_id (int, optional):
Specifies the ID of the user for whom to return the
list timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
owner_screen_name (str, optional):
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
cursor (int, optional):
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
include_entities (bool, optional):
If False, the timeline will not contain additional metadata.
Defaults to True.
        Returns:
          tuple: A (next_cursor, previous_cursor, users) triple, where users
          is a sequence of twitter.user.User instances for the requested
          page of the twitter.list.List.
"""
url = '%s/lists/members.json' % self.base_url
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if count:
parameters['count'] = enf_type('count', int, count)
if cursor:
parameters['cursor'] = enf_type('cursor', int, cursor)
parameters['skip_status'] = enf_type('skip_status', bool, skip_status)
parameters['include_entities'] = enf_type('include_entities', bool, include_entities)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
users = [User.NewFromJsonDict(user) for user in data.get('users', [])]
return next_cursor, previous_cursor, users
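    # Sketch: manual paging with the method above; GetListMembers below
    # wraps exactly this loop. The list id is a placeholder.
    #
    #     cursor = -1
    #     while True:
    #         next_cursor, prev_cursor, users = api.GetListMembersPaged(
    #             list_id=123456, cursor=cursor)
    #         for user in users:
    #             print(user.screen_name)
    #         if next_cursor == 0 or next_cursor == prev_cursor:
    #             break
    #         cursor = next_cursor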
def GetListMembers(self,
list_id=None,
slug=None,
owner_id=None,
owner_screen_name=None,
skip_status=False,
include_entities=False):
"""Fetch the sequence of twitter.User instances, one for each member
of the given list_id or slug.
Args:
list_id (int, optional):
Specifies the ID of the list to retrieve.
          slug (str, optional):
            The slug name for the list to retrieve. If you specify a slug
            instead of a list_id, then you also have to provide either an
            owner_screen_name or an owner_id.
owner_id (int, optional):
Specifies the ID of the user for whom to return the
list timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name.
owner_screen_name (str, optional):
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID.
skip_status (bool, optional):
If True the statuses will not be returned in the user items.
          include_entities (bool, optional):
            If False, the timeline will not contain additional metadata.
            Defaults to False.
Returns:
list: A sequence of twitter.user.User instances, one for each
member of the twitter.list.List.
"""
cursor = -1
result = []
while True:
next_cursor, previous_cursor, users = self.GetListMembersPaged(
list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name,
cursor=cursor,
skip_status=skip_status,
include_entities=include_entities)
result += users
if next_cursor == 0 or next_cursor == previous_cursor:
break
else:
cursor = next_cursor
return result
def CreateListsMember(self,
list_id=None,
slug=None,
user_id=None,
screen_name=None,
owner_screen_name=None,
owner_id=None):
"""Add a new member (or list of members) to the specified list.
Args:
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify the
list owner using the owner_id or owner_screen_name parameters.
user_id (int, optional):
The user_id or a list of user_id's to add to the list.
If not given, then screen_name is required.
screen_name (str, optional):
The screen_name or a list of screen_name's to add to the list.
If not given, then user_id is required.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested by
a slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested by
a slug.
Returns:
twitter.list.List: A twitter.List instance representing the list
subscribed to.
"""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
            if isinstance(user_id, (list, tuple)):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
            if isinstance(screen_name, (list, tuple)):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/create_all.json' % self.base_url
else:
url = '%s/lists/members/create.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
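    # Sketch: passing a list of ids routes the call to the bulk
    # members/create_all endpoint, while a single id uses members/create.
    # The ids and name below are placeholders.
    #
    #     api.CreateListsMember(list_id=123456, user_id=[111, 222, 333])
    #     api.CreateListsMember(list_id=123456, screen_name='example_user')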
def DestroyListsMember(self,
list_id=None,
slug=None,
owner_screen_name=None,
owner_id=None,
user_id=None,
screen_name=None):
"""Destroys the subscription to a list for the authenticated user.
Args:
list_id (int, optional):
The numerical id of the list.
slug (str, optional):
You can identify a list by its slug instead of its numerical id.
If you decide to do so, note that you'll also have to specify
the list owner using the owner_id or owner_screen_name parameters.
owner_screen_name (str, optional):
The screen_name of the user who owns the list being requested by a
slug.
owner_id (int, optional):
The user ID of the user who owns the list being requested by a slug.
user_id (int, optional):
The user_id or a list of user_id's to remove from the list.
If not given, then screen_name is required.
          screen_name (str, optional):
            The screen_name or a list of screen_names to remove from the list.
            If not given, then user_id is required.
Returns:
twitter.list.List: A twitter.List instance representing the
removed list.
"""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
            if isinstance(user_id, (list, tuple)):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
                parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
            if isinstance(screen_name, (list, tuple)):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/destroy_all.json' % self.base_url
else:
url = '%s/lists/members/destroy.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def GetListsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=20):
""" Fetch the sequence of lists for a user. If no user_id or
screen_name is passed, the data returned will be for the
authenticated user.
Args:
          user_id (int, optional):
            The ID of the user for whom to return results.
          screen_name (str, optional):
            The screen name of the user for whom to return results.
count (int, optional):
The amount of results to return per page. No more than 1000 results
will ever be returned in a single page. Defaults to 20.
cursor (int, optional):
The "page" value that Twitter will use to start building the list
sequence from. Use the value of -1 to start at the beginning.
Twitter will return in the result the values for next_cursor and
previous_cursor.
Returns:
next_cursor (int), previous_cursor (int), list of twitter.List
instances, one for each list
"""
url = '%s/lists/ownerships.json' % self.base_url
parameters = {}
if user_id is not None:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name is not None:
parameters['screen_name'] = screen_name
if count is not None:
parameters['count'] = enf_type('count', int, count)
parameters['cursor'] = enf_type('cursor', int, cursor)
resp = self._RequestUrl(url, 'GET', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
next_cursor = data.get('next_cursor', 0)
previous_cursor = data.get('previous_cursor', 0)
lists = [List.NewFromJsonDict(x) for x in data.get('lists', [])]
return next_cursor, previous_cursor, lists
def GetLists(self,
user_id=None,
screen_name=None):
"""Fetch the sequence of lists for a user. If no user_id or screen_name
is passed, the data returned will be for the authenticated user.
        Args:
          user_id:
            The ID of the user for whom to return results. [Optional]
          screen_name:
            The screen name of the user for whom to return results.
            [Optional]
Returns:
A sequence of twitter.List instances, one for each list
"""
result = []
cursor = -1
while True:
next_cursor, prev_cursor, lists = self.GetListsPaged(
user_id=user_id,
screen_name=screen_name,
cursor=cursor)
result += lists
if next_cursor == 0 or next_cursor == prev_cursor:
break
else:
cursor = next_cursor
return result
def UpdateProfile(self,
name=None,
profileURL=None,
location=None,
description=None,
profile_link_color=None,
include_entities=False,
skip_status=False):
"""Update's the authenticated user's profile data.
Args:
name (str, optional):
Full name associated with the profile.
profileURL (str, optional):
URL associated with the profile.
Will be prepended with "http://" if not present.
location (str, optional):
The city or country describing where the user of the account is located.
The contents are not normalized or geocoded in any way.
description (str, optional):
A description of the user owning the account.
          profile_link_color (str, optional):
            Hex value of the profile color theme, formatted without '#' or
            '0x'. Ex: FF00FF
include_entities (bool, optional):
The entities node will be omitted when set to False.
skip_status (bool, optional):
When set to either True, t or 1 then statuses will not be included
in the returned user objects.
Returns:
A twitter.User instance representing the modified user.
"""
url = '%s/account/update_profile.json' % (self.base_url)
data = {
'name': name,
'url': profileURL,
'location': location,
'description': description,
'profile_link_color': profile_link_color,
'include_entities': include_entities,
'skip_status': skip_status,
}
resp = self._RequestUrl(url, 'POST', data=data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
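    # Sketch: update only selected fields; keyword arguments left as None
    # are dropped from the form body by requests, so in practice the other
    # profile fields are not modified. The values are placeholders.
    #
    #     me = api.UpdateProfile(name='Example Name',
    #                            description='Hello from python-twitter')
    #     print(me.name)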
def UpdateImage(self,
image,
include_entities=False,
skip_status=False):
"""Update a User's profile image. Change may not be immediately
reflected due to image processing on Twitter's side.
Args:
image (str):
Location of local image file to use.
include_entities (bool, optional):
Include the entities node in the return data.
skip_status (bool, optional):
Include the User's last Status in the User entity returned.
        Returns:
          True if the upload was accepted by Twitter; raises TwitterError
          on known failure responses.
"""
url = '%s/account/update_profile_image.json' % (self.base_url)
with open(image, 'rb') as image_file:
encoded_image = base64.b64encode(image_file.read())
data = {
'image': encoded_image
}
if include_entities:
data['include_entities'] = 1
if skip_status:
data['skip_status'] = 1
resp = self._RequestUrl(url, 'POST', data=data)
if resp.status_code in [200, 201, 202]:
return True
        if resp.status_code == 400:
            raise TwitterError({'message': "Image data could not be processed"})
        if resp.status_code == 422:
            raise TwitterError({'message': "The image could not be resized or is too large."})
        raise TwitterError({'message': "Unknown image upload issue"})
def UpdateBanner(self,
image,
include_entities=False,
skip_status=False):
"""Updates the authenticated users profile banner.
Args:
image:
Location of image in file system
include_entities:
If True, each tweet will include a node called "entities."
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
Returns:
A twitter.List instance representing the list subscribed to
"""
url = '%s/account/update_profile_banner.json' % (self.base_url)
with open(image, 'rb') as image_file:
encoded_image = base64.b64encode(image_file.read())
data = {
# When updated for API v1.1 use image, not banner
# https://dev.twitter.com/docs/api/1.1/post/account/update_profile_banner
# 'image': encoded_image
'banner': encoded_image
}
if include_entities:
data['include_entities'] = 1
if skip_status:
data['skip_status'] = 1
resp = self._RequestUrl(url, 'POST', data=data)
if resp.status_code in [200, 201, 202]:
return True
if resp.status_code == 400:
raise TwitterError({'message': "Image data could not be processed"})
if resp.status_code == 422:
raise TwitterError({'message': "The image could not be resized or is too large."})
raise TwitterError({'message': "Unkown banner image upload issue"})
def GetStreamSample(self, delimited=False, stall_warnings=True):
"""Returns a small sample of public statuses.
Args:
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
Returns:
A Twitter stream
"""
url = '%s/statuses/sample.json' % self.stream_url
parameters = {
'delimited': bool(delimited),
'stall_warnings': bool(stall_warnings)
}
resp = self._RequestStream(url, 'GET', data=parameters)
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
def GetStreamFilter(self,
follow=None,
track=None,
locations=None,
languages=None,
delimited=None,
stall_warnings=None,
filter_level=None):
"""Returns a filtered view of public statuses.
Args:
follow:
A list of user IDs to track. [Optional]
track:
A list of expressions to track. [Optional]
locations:
A list of Longitude,Latitude pairs (as strings) specifying
bounding boxes for the tweets' origin. [Optional]
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
languages:
A list of Languages.
Will only return Tweets that have been detected as being
written in the specified languages. [Optional]
filter_level:
Specifies level of filtering applied to stream.
Set to None, 'low' or 'medium'. [Optional]
Returns:
A twitter stream
"""
if all((follow is None, track is None, locations is None)):
raise ValueError({'message': "No filter parameters specified."})
url = '%s/statuses/filter.json' % self.stream_url
data = {}
if follow is not None:
data['follow'] = ','.join(follow)
if track is not None:
data['track'] = ','.join(track)
if locations is not None:
data['locations'] = ','.join(locations)
if delimited is not None:
data['delimited'] = str(delimited)
if stall_warnings is not None:
data['stall_warnings'] = str(stall_warnings)
if languages is not None:
data['language'] = ','.join(languages)
if filter_level is not None:
data['filter_level'] = filter_level
resp = self._RequestStream(url, 'POST', data=data)
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
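    # Sketch: the streaming methods are generators that yield one parsed
    # JSON object per line; iterate and break when done. The track term is
    # a placeholder.
    #
    #     for msg in api.GetStreamFilter(track=['python']):
    #         if 'text' in msg:
    #             print(msg['text'])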
def GetUserStream(self,
replies='all',
withuser='user',
track=None,
locations=None,
delimited=None,
stall_warnings=None,
stringify_friend_ids=False,
filter_level=None,
session=None,
include_keepalive=False):
"""Returns the data from the user stream.
Args:
replies:
Specifies whether to return additional @replies in the stream.
Defaults to 'all'.
withuser:
Specifies whether to return information for just the authenticating
user, or include messages from accounts the user follows. [Optional]
track:
A list of expressions to track. [Optional]
          locations:
            A list of Longitude,Latitude pairs (as strings) specifying
            bounding boxes for the tweets' origin. [Optional]
delimited:
Specifies a message length. [Optional]
stall_warnings:
Set to True to have Twitter deliver stall warnings. [Optional]
stringify_friend_ids:
Specifies whether to send the friends list preamble as an array of
integers or an array of strings. [Optional]
          filter_level:
            Specifies level of filtering applied to stream.
            Set to None, 'low' or 'medium'. [Optional]
          session:
            A requests.Session object to use for the streaming connection.
            [Optional]
          include_keepalive:
            If True, yield None for every keep-alive newline Twitter sends,
            so callers can detect a stalled connection. [Optional]
        Returns:
          A twitter stream
        """
url = 'https://userstream.twitter.com/1.1/user.json'
data = {}
if stringify_friend_ids:
data['stringify_friend_ids'] = 'true'
if replies is not None:
data['replies'] = replies
if withuser is not None:
data['with'] = withuser
if track is not None:
data['track'] = ','.join(track)
if locations is not None:
data['locations'] = ','.join(locations)
if delimited is not None:
data['delimited'] = str(delimited)
if stall_warnings is not None:
data['stall_warnings'] = str(stall_warnings)
if filter_level is not None:
data['filter_level'] = filter_level
resp = self._RequestStream(url, 'POST', data=data, session=session)
# The Twitter streaming API sends keep-alive newlines every 30s if there has not been other
# traffic, and specifies that streams should only be reset after three keep-alive ticks.
#
# The original implementation of this API didn't expose keep-alive signals to the user,
# making it difficult to determine whether the connection should be hung up or not.
#
# https://dev.twitter.com/streaming/overview/connecting
for line in resp.iter_lines():
if line:
data = self._ParseAndCheckTwitter(line.decode('utf-8'))
yield data
elif include_keepalive:
yield None
def VerifyCredentials(self, include_entities=None, skip_status=None, include_email=None):
"""Returns a twitter.User instance if the authenticating user is valid.
Args:
          include_entities:
            The entities node will be omitted when set to False.
skip_status:
When set to either true, t or 1 statuses will not be included in the
returned user object.
include_email:
Use of this parameter requires whitelisting.
When set to true email will be returned in the user objects as a string.
If the user does not have an email address on their account, or if the
email address is un-verified, null will be returned. If your app is
not whitelisted, then the 'email' key will not be present in the json
response.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
"""
url = '%s/account/verify_credentials.json' % self.base_url
data = {
'include_entities': enf_type('include_entities', bool, include_entities),
'skip_status': enf_type('skip_status', bool, skip_status),
'include_email': 'true' if enf_type('include_email', bool, include_email) else 'false',
}
resp = self._RequestUrl(url, 'GET', data)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return User.NewFromJsonDict(data)
def SetCache(self, cache):
"""Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the twitter._FileCache
"""
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
"""Override the default urllib implementation.
Args:
urllib:
An instance that supports the same API as the urllib2 module
"""
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
"""Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
"""
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
"""Override the default user agent.
Args:
          user_agent:
            A string that should be sent to the server as the user-agent.
"""
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
"""Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
"""
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
"""Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server.
New source values are authorized on a case by case basis by the
Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
"""
self._default_params['source'] = source
def InitializeRateLimit(self):
""" Make a call to the Twitter API to get the rate limit
status for the currently authenticated user or application.
Returns:
None.
"""
_sleep = self.sleep_on_rate_limit
if self.sleep_on_rate_limit:
self.sleep_on_rate_limit = False
url = '%s/application/rate_limit_status.json' % self.base_url
resp = self._RequestUrl(url, 'GET') # No-Cache
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
self.sleep_on_rate_limit = _sleep
self.rate_limit = RateLimit(**data)
def CheckRateLimit(self, url):
""" Checks a URL to see the rate limit status for that endpoint.
Args:
url (str):
URL to check against the current rate limits.
Returns:
namedtuple: EndpointRateLimit namedtuple.
"""
if not self.rate_limit.__dict__.get('resources', None):
self.InitializeRateLimit()
if url:
limit = self.rate_limit.get_limit(url)
return limit
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
filtered_elements = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(filtered_elements)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlunparse((scheme, netloc, path, params, query, fragment))
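    # Sketch of what the URL builder produces; the endpoint and parameters
    # are illustrative only.
    #
    #     api._BuildUrl('https://api.twitter.com/1.1/search/tweets.json',
    #                   extra_params={'q': 'python'})
    #     # -> 'https://api.twitter.com/1.1/search/tweets.json?q=python'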
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(urllib_version, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
    @staticmethod
    def _DecompressGzippedResponse(response):
        raw_data = response.read()
        if response.headers.get('content-encoding', None) == 'gzip':
            # gzip.GzipFile requires a binary buffer; the body is bytes.
            url_data = gzip.GzipFile(fileobj=io.BytesIO(raw_data)).read()
        else:
            url_data = raw_data
        return url_data
@staticmethod
def _EncodeParameters(parameters):
"""Return a string in key=value&key=value form.
Values of None are not included in the output string.
Args:
parameters (dict): dictionary of query parameters to be converted into a
string for encoding and sending to Twitter.
Returns:
A URL-encoded string in "key=value&key=value" form
"""
if parameters is None:
return None
if not isinstance(parameters, dict):
raise TwitterError("`parameters` must be a dict.")
else:
params = dict()
for k, v in parameters.items():
if v is not None:
if getattr(v, 'encode', None):
v = v.encode('utf8')
params.update({k: v})
return urlencode(params)
def _ParseAndCheckTwitter(self, json_data):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error.
This is a purely defensive check because during some Twitter
network outages it will return an HTML failwhale page.
"""
try:
data = json.loads(json_data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json_data:
raise TwitterError({'message': "Capacity Error"})
if "<title>Twitter / Error</title>" in json_data:
raise TwitterError({'message': "Technical Error"})
if "Exceeded connection limit for user" in json_data:
raise TwitterError({'message': "Exceeded connection limit for user"})
if "Error 401 Unauthorized" in json_data:
raise TwitterError({'message': "Unauthorized"})
raise TwitterError({'Unknown error': '{0}'.format(json_data)})
self._CheckForTwitterError(data)
return data
@staticmethod
def _CheckForTwitterError(data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data (dict):
A python dict created from the Twitter json response
Raises:
(twitter.TwitterError): TwitterError wrapping the twitter error
message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
def _RequestChunkedUpload(self, url, headers, data):
try:
return requests.post(
url,
headers=headers,
data=data,
auth=self.__auth,
timeout=self._timeout,
proxies=self.proxies
)
except requests.RequestException as e:
raise TwitterError(str(e))
def _RequestUrl(self, url, verb, data=None, json=None, enforce_auth=True):
"""Request a url.
Args:
url:
The web location we want to retrieve.
verb:
Either POST or GET.
data:
A dict of (str, unicode) key/value pairs.
Returns:
A JSON object.
"""
if enforce_auth:
if not self.__auth:
raise TwitterError("The twitter.Api instance must be authenticated.")
if url and self.sleep_on_rate_limit:
limit = self.CheckRateLimit(url)
if limit.remaining == 0:
try:
stime = max(int(limit.reset - time.time()) + 10, 0)
logger.debug('Rate limited requesting [%s], sleeping for [%s]', url, stime)
time.sleep(stime)
except ValueError:
pass
if not data:
data = {}
if verb == 'POST':
if data:
if 'media_ids' in data:
url = self._BuildUrl(url, extra_params={'media_ids': data['media_ids']})
resp = requests.post(url, data=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
elif 'media' in data:
resp = requests.post(url, files=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
else:
resp = requests.post(url, data=data, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
elif json:
resp = requests.post(url, json=json, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
else:
resp = 0 # POST request, but without data or json
elif verb == 'GET':
data['tweet_mode'] = self.tweet_mode
url = self._BuildUrl(url, extra_params=data)
resp = requests.get(url, auth=self.__auth, timeout=self._timeout, proxies=self.proxies)
else:
resp = 0 # if not a POST or GET request
        if url and self.rate_limit and hasattr(resp, 'headers'):
limit = resp.headers.get('x-rate-limit-limit', 0)
remaining = resp.headers.get('x-rate-limit-remaining', 0)
reset = resp.headers.get('x-rate-limit-reset', 0)
self.rate_limit.set_limit(url, limit, remaining, reset)
return resp
def _RequestStream(self, url, verb, data=None, session=None):
"""Request a stream of data.
Args:
url:
The web location we want to retrieve.
verb:
Either POST or GET.
data:
A dict of (str, unicode) key/value pairs.
Returns:
A twitter stream.
"""
session = session or requests.Session()
if verb == 'POST':
try:
return session.post(url, data=data, stream=True,
auth=self.__auth,
timeout=self._timeout,
proxies=self.proxies)
except requests.RequestException as e:
raise TwitterError(str(e))
if verb == 'GET':
url = self._BuildUrl(url, extra_params=data)
try:
return session.get(url, stream=True, auth=self.__auth,
timeout=self._timeout, proxies=self.proxies)
except requests.RequestException as e:
raise TwitterError(str(e))
return 0 # if not a POST or GET request
# ==== parse_tweet.py | rembo10_headphones/lib/twitter/parse_tweet.py ====
#!/usr/bin/env python
import re
class Emoticons:
POSITIVE = ["*O", "*-*", "*O*", "*o*", "* *",
":P", ":D", ":d", ":p",
";P", ";D", ";d", ";p",
":-)", ";-)", ":=)", ";=)",
":<)", ":>)", ";>)", ";=)",
"=}", ":)", "(:;)",
"(;", ":}", "{:", ";}",
"{;:]",
"[;", ":')", ";')", ":-3",
"{;", ":]",
";-3", ":-x", ";-x", ":-X",
";-X", ":-}", ";-=}", ":-]",
";-]", ":-.)",
"^_^", "^-^"]
NEGATIVE = [":(", ";(", ":'(",
"=(", "={", "):", ");",
")':", ")';", ")=", "}=",
";-{{", ";-{", ":-{{", ":-{",
":-(", ";-(",
":,)", ":'{",
"[:", ";]"
]
class ParseTweet(object):
# compile once on import
regexp = {"RT": "^RT", "MT": r"^MT", "ALNUM": r"(@[a-zA-Z0-9_]+)",
"HASHTAG": r"(#[\w\d]+)", "URL": r"([https://|http://]?[a-zA-Z\d\/]+[\.]+[a-zA-Z\d\/\.]+)",
"SPACES": r"\s+"}
regexp = dict((key, re.compile(value)) for key, value in regexp.items())
def __init__(self, timeline_owner, tweet):
""" timeline_owner : twitter handle of user account. tweet - 140 chars from feed; object does all computation on construction
properties:
RT, MT - boolean
URLs - list of URL
Hashtags - list of tags
"""
self.Owner = timeline_owner
self.tweet = tweet
self.UserHandles = ParseTweet.getUserHandles(tweet)
self.Hashtags = ParseTweet.getHashtags(tweet)
self.URLs = ParseTweet.getURLs(tweet)
self.RT = ParseTweet.getAttributeRT(tweet)
self.MT = ParseTweet.getAttributeMT(tweet)
self.Emoticon = ParseTweet.getAttributeEmoticon(tweet)
# additional intelligence
if (self.RT and len(self.UserHandles) > 0): # change the owner of tweet?
self.Owner = self.UserHandles[0]
return
def __str__(self):
""" for display method """
return "owner %s, urls: %d, hashtags %d, user_handles %d, len_tweet %d, RT = %s, MT = %s" % \
(self.Owner, len(self.URLs), len(self.Hashtags), len(self.UserHandles), len(self.tweet), self.RT, self.MT)
@staticmethod
def getAttributeEmoticon(tweet):
""" see if tweet is contains any emoticons, +ve, -ve or neutral """
emoji = list()
for tok in re.split(ParseTweet.regexp["SPACES"], tweet.strip()):
if tok in Emoticons.POSITIVE:
emoji.append(tok)
continue
if tok in Emoticons.NEGATIVE:
emoji.append(tok)
return emoji
@staticmethod
def getAttributeRT(tweet):
""" see if tweet is a RT """
return re.search(ParseTweet.regexp["RT"], tweet.strip()) is not None
@staticmethod
def getAttributeMT(tweet):
""" see if tweet is a MT """
return re.search(ParseTweet.regexp["MT"], tweet.strip()) is not None
@staticmethod
def getUserHandles(tweet):
""" given a tweet we try and extract all user handles in order of occurrence"""
return re.findall(ParseTweet.regexp["ALNUM"], tweet)
@staticmethod
def getHashtags(tweet):
""" return all hashtags"""
return re.findall(ParseTweet.regexp["HASHTAG"], tweet)
@staticmethod
def getURLs(tweet):
r""" URL : [http://]?[\w\.?/]+"""
return re.findall(ParseTweet.regexp["URL"], tweet)
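# --- Illustrative sketch (not part of the upstream module) ---
# A minimal, hedged demonstration of ParseTweet; the handle and tweet text
# below are made up for illustration.
if __name__ == '__main__':
    demo = ParseTweet("@alice", "RT @bob: loving #python :) http://example.com")
    print(demo)             # summary via __str__ (owner becomes @bob for the RT)
    print(demo.Hashtags)    # ['#python']
    print(demo.Emoticon)    # [':)']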
| 3,607 | Python | .py | 85 | 31.811765 | 133 | 0.475905 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,942 | __init__.py | rembo10_headphones/lib/twitter/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2018 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API."""
from __future__ import absolute_import
__author__ = 'The Python-Twitter Developers'
__email__ = 'python-twitter@googlegroups.com'
__copyright__ = 'Copyright (c) 2007-2016 The Python-Twitter Developers'
__license__ = 'Apache License 2.0'
__version__ = '3.5'
__url__ = 'https://github.com/bear/python-twitter'
__download_url__ = 'https://pypi.python.org/pypi/python-twitter'
__description__ = 'A Python wrapper around the Twitter API'
import json # noqa
try:
from hashlib import md5 # noqa
except ImportError:
from md5 import md5 # noqa
from ._file_cache import _FileCache # noqa
from .error import TwitterError # noqa
from .parse_tweet import ParseTweet # noqa
from .models import ( # noqa
Category, # noqa
DirectMessage, # noqa
Hashtag, # noqa
List, # noqa
Media, # noqa
Trend, # noqa
Url, # noqa
User, # noqa
UserStatus, # noqa
Status # noqa
)
from .api import Api # noqa
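# --- Illustrative sketch (not part of the upstream module) ---
# A hedged example of the typical entry point; the credential strings are
# placeholders, not real values.
#
#     import twitter
#     api = twitter.Api(consumer_key='...', consumer_secret='...',
#                       access_token_key='...', access_token_secret='...')
#     api.VerifyCredentials()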
| 2,137 | Python | .py | 47 | 43.276596 | 74 | 0.556196 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,943 | _file_cache.py | rembo10_headphones/lib/twitter/_file_cache.py |
#!/usr/bin/env python
import errno
import os
import tempfile
from hashlib import md5
class _FileCacheError(Exception):
"""Base exception class for FileCache related errors"""
class _FileCache(object):
DEPTH = 3
def __init__(self, root_directory=None):
self._InitializeRootDirectory(root_directory)
def Get(self, key):
path = self._GetPath(key)
if os.path.exists(path):
with open(path) as f:
return f.read()
else:
return None
def Set(self, key, data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self, key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
"""Attempt to find the username in a cross-platform fashion."""
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError):
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
try:
os.mkdir(root_directory)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(root_directory):
# directory already exists
pass
else:
# exists but is a file, or no permissions, or...
raise
self._root_directory = root_directory
def _GetPath(self, key):
try:
hashed_key = md5(key.encode('utf-8')).hexdigest()
        except (TypeError, AttributeError):
            # key is already bytes (or not encodable); hashlib's md5 exposes
            # no .new(), so hash the raw key directly
            hashed_key = md5(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self, hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
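# --- Illustrative sketch (not part of the upstream module) ---
# A minimal, hedged demonstration of the cache; keys are md5-hashed and
# sharded DEPTH directory levels deep under a per-user temp directory.
if __name__ == '__main__':
    cache = _FileCache()
    cache.Set('greeting', 'hello')
    assert cache.Get('greeting') == 'hello'
    print(cache.GetCachedTime('greeting'))  # mtime of the cache file
    cache.Remove('greeting')
    assert cache.Get('greeting') is None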
| 3,337 | Python | .py | 86 | 27.94186 | 81 | 0.572798 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,944 | models.py | rembo10_headphones/lib/requests/models.py |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from urllib3.fields import RequestField
from urllib3.filepost import encode_multipart_formdata
from urllib3.util import parse_url
from urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError,
InvalidJSONError)
from .exceptions import JSONDecodeError as RequestsJSONDecodeError
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring, JSONDecodeError)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
elif hasattr(fp, 'read'):
fdata = fp.read()
elif fp is None:
continue
else:
fdata = fp
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/psf/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No scheme supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith((u'*', u'.')):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
try:
body = complexjson.dumps(json, allow_nan=False)
except ValueError as ve:
raise InvalidJSONError(ve, request=self)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, Mapping))
])
if is_stream:
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
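# --- Illustrative sketch (not part of the module) ---
# A hedged example of the documented low-level prepare/send flow implemented
# by the class above; httpbin.org is used as a stand-in endpoint.
#
#     import requests
#     req = requests.Request('POST', 'https://httpbin.org/post',
#                            data={'k': 'v'}, headers={'X-Demo': '1'})
#     prepped = req.prepare()          # runs the prepare_* pipeline above
#     prepped.headers['X-Demo'] = '2'  # PreparedRequest stays fully mutable
#     with requests.Session() as s:
#         resp = s.send(prepped, timeout=5)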
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
self._content = False
self._content_consumed = False
self._next = None
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
#: This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __nonzero__(self):
"""Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
"""Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
"""
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect."""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def next(self):
"""Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
return self._next
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the charset_normalizer or chardet libraries."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
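    # --- Illustrative sketch (not part of the module) ---
    # A hedged example of consuming iter_lines() above with streaming enabled;
    # httpbin.org is a stand-in endpoint.
    #
    #     r = requests.get('https://httpbin.org/stream/3', stream=True)
    #     for line in r.iter_lines(chunk_size=512):
    #         if line:                  # filter out keep-alive blank lines
    #             print(line.decode('utf-8'))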
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``charset_normalizer`` or ``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
r"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises requests.exceptions.JSONDecodeError: If the response body does not
contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using charset_normalizer to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
try:
return complexjson.loads(self.text, **kwargs)
except JSONDecodeError as e:
# Catch JSON-related errors and raise as requests.JSONDecodeError
# This aliases json.JSONDecodeError and simplejson.JSONDecodeError
if is_py2: # e is a ValueError
raise RequestsJSONDecodeError(e.message)
else:
raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
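    # --- Illustrative sketch (not part of the module) ---
    # A hedged example of paginating with the parsed ``links`` mapping above;
    # the GitHub API is one well-known sender of Link headers.
    #
    #     r = requests.get('https://api.github.com/search/code?q=requests')
    #     next_url = r.links.get('next', {}).get('url')  # None-safe lookup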
def raise_for_status(self):
"""Raises :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
| 35,051 | Python | .py | 786 | 34.005089 | 119 | 0.599566 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,945 | cookies.py | rembo10_headphones/lib/requests/cookies.py |
# -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
from ._internal_utils import to_native_string
from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
try:
import threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = to_native_string(self._r.headers['Host'], encoding='utf-8')
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""
Produce an appropriate Cookie header string to be sent with `request`, or None.
:rtype: str
"""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name != name:
continue
if domain is not None and domain != cookie.domain:
continue
if path is not None and path != cookie.path:
continue
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific.
"""
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1).
"""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
"""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar.
.. seealso:: itervalues() and iteritems().
"""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar.
.. seealso:: values() and items().
"""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar.
.. seealso:: iterkeys() and iteritems().
"""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar.
.. seealso:: keys() and items().
"""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar.
.. seealso:: iterkeys() and itervalues().
"""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
vanilla python dict of key value pairs.
.. seealso:: keys() and values().
"""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise.
:rtype: bool
"""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements.
:rtype: dict
"""
dictionary = {}
for cookie in iter(self):
if (
(domain is None or cookie.domain == domain) and
(path is None or cookie.path == path)
):
dictionary[cookie.name] = cookie.value
return dictionary
def __contains__(self, name):
try:
return super(RequestsCookieJar, self).__contains__(name)
except CookieConflictError:
return True
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1).
"""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead.
"""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``.
"""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:raises KeyError: if cookie is not found
:raises CookieConflictError: if there are multiple cookies
that match name and optionally domain and path
:return: cookie.value
"""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.set_policy(self.get_policy())
new_cj.update(self)
return new_cj
def get_policy(self):
"""Return the CookiePolicy instance used."""
return self._policy
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = {
'version': 0,
'name': name,
'value': value,
'port': None,
'domain': '',
'path': '/',
'secure': False,
'expires': None,
'discard': True,
'comment': None,
'comment_url': None,
'rest': {'HttpOnly': None},
'rfc2109': False,
}
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
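# --- Illustrative sketch (not part of the module) ---
# A hedged example of building a cookie by hand and reading it back;
# the name, value, and domain are made up.
#
#     jar = RequestsCookieJar()
#     jar.set_cookie(create_cookie('session', 'abc123',
#                                  domain='example.com', path='/', secure=True))
#     jar.get('session', domain='example.com')   # -> 'abc123'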
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
try:
expires = int(time.time() + int(morsel['max-age']))
except ValueError:
raise TypeError('max-age: %s must be integer' % morsel['max-age'])
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = calendar.timegm(
time.strptime(morsel['expires'], time_template)
)
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
:rtype: CookieJar
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
:rtype: CookieJar
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
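# --- Illustrative sketch (not part of the module) ---
# A hedged demonstration of the two helpers above; merging a dict never
# overwrites cookies already in the jar (overwrite=False):
#
#     jar = cookiejar_from_dict({'a': '1'})
#     jar = merge_cookies(jar, {'a': 'ignored', 'b': '2'})
#     jar['a'], jar['b']   # -> ('1', '2')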
| 18,430 | Python | .py | 438 | 33.828767 | 111 | 0.636933 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,946 | sessions.py | rembo10_headphones/lib/requests/sessions.py |
# -*- coding: utf-8 -*-
"""
requests.sessions
~~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import sys
import time
from datetime import timedelta
from collections import OrderedDict
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body, resolve_proxies
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
if sys.platform == 'win32':
try: # Python 3.4+
preferred_clock = time.perf_counter
except AttributeError: # Earlier than Python 3.
preferred_clock = time.clock
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
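# --- Illustrative sketch (not part of the module) ---
# A hedged walk-through of merge_setting above: request values win, and a
# request value of None deletes the session key entirely.
#
#     session_headers = {'Accept': 'application/json', 'X-Drop': 'yes'}
#     request_headers = {'X-Drop': None, 'X-Extra': '1'}
#     merge_setting(request_headers, session_headers)
#     # -> {'Accept': 'application/json', 'X-Extra': '1'}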
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
# Due to the nature of how requests processes redirects this method will
# be called at least once upon the original response and at least twice
# on each subsequent redirect response (if any).
# If a custom mixin is used to handle this logic, it may be advantageous
# to cache the redirect location onto the response object as a private
# attribute.
if resp.is_redirect:
location = resp.headers['location']
            # Currently the underlying http module on py3 decodes headers
            # in latin1, but empirical evidence suggests that latin1 is very
            # rarely used with non-ASCII characters in HTTP headers.
            # It is more likely that a UTF-8 header is received than a latin1 one.
            # This causes incorrect handling of UTF-8 encoded location headers.
            # To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def should_strip_auth(self, old_url, new_url):
"""Decide whether Authorization header should be removed when redirecting"""
old_parsed = urlparse(old_url)
new_parsed = urlparse(new_url)
if old_parsed.hostname != new_parsed.hostname:
return True
# Special case: allow http -> https redirect when using the standard
# ports. This isn't specified by RFC 7235, but is kept to avoid
# breaking backwards compatibility with older versions of requests
# that allowed any redirects on the same host.
if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
return False
# Handle default port usage corresponding to scheme.
changed_port = old_parsed.port != new_parsed.port
changed_scheme = old_parsed.scheme != new_parsed.scheme
default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
if (not changed_scheme and old_parsed.port in default_port
and new_parsed.port in default_port):
return False
# Standard case: root URI must match
return changed_port or changed_scheme
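    # --- Illustrative sketch (not part of the module) ---
    # Hedged examples of the policy above, with `mixin` standing in for any
    # object exposing should_strip_auth (e.g. a Session):
    #
    #     mixin.should_strip_auth('http://example.com/a',
    #                             'https://example.com/b')   # False: http->https upgrade
    #     mixin.should_strip_auth('https://example.com/a',
    #                             'https://other.com/b')     # True: host changed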
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
previous_fragment = urlparse(req.url).fragment
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = ':'.join([to_native_string(parsed_rurl.scheme), url])
# Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
parsed = urlparse(url)
if parsed.fragment == '' and previous_fragment:
parsed = parsed._replace(fragment=previous_fragment)
elif parsed.fragment:
previous_fragment = parsed.fragment
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/psf/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/psf/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
headers.pop('Cookie', None)
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
# If we get redirected to a new host, we should strip out any
# authentication headers.
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
headers = prepared_request.headers
scheme = urlparse(prepared_request.url).scheme
new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# https://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
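    # Illustrative sketch (not part of requests): given a stub response that
    # only carries a ``status_code``, the rewrite rules above turn a POST
    # into a GET for a 303 (and likewise for 302, or 301 on POST).
    # >>> class _Stub: status_code = 303
    # >>> req = PreparedRequest(); req.method = 'POST'
    # >>> SessionRedirectMixin().rebuild_method(req, _Stub())
    # >>> req.method
    # 'GET'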
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
... s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
#: Defaults to `True`, requiring requests to verify the TLS certificate at the
#: remote end.
#: If verify is set to `False`, requests will accept any TLS certificate
#: presented by the server, and will ignore hostname mismatches and/or
#: expired certificates, which will make your application vulnerable to
#: man-in-the-middle (MitM) attacks.
#: Only set this to `False` for testing.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
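    # Illustrative sketch (not part of requests): the prepare/send flow this
    # method enables, letting you inspect or tweak the PreparedRequest
    # before any I/O happens. The extra header is purely hypothetical.
    # >>> s = Session()
    # >>> req = Request(method='GET', url='https://httpbin.org/get')
    # >>> prepped = s.prepare_request(req)
    # >>> prepped.headers['X-Trace-Id'] = 'demo'  # hypothetical header
    # >>> resp = s.send(prepped)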
def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``. When set to
``False``, requests will accept any TLS certificate presented by
the server, and will ignore hostname mismatches and/or expired
certificates, which will make your application vulnerable to
man-in-the-middle (MitM) attacks. Setting verify to ``False``
may be useful during local development or testing.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
if 'proxies' not in kwargs:
kwargs['proxies'] = resolve_proxies(
request, self.proxies, self.trust_env
)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Resolve redirects if allowed.
if allow_redirects:
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
history = [resp for resp in gen]
else:
history = []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for {!r}".format(url))
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
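    # Illustrative sketch (not part of requests): mounting a transport
    # adapter for one host; thanks to the reordering above, get_adapter()
    # matches the longest (most specific) prefix first.
    # >>> s = Session()
    # >>> s.mount('https://api.example.com', HTTPAdapter(max_retries=3))
    # >>> s.get_adapter('https://api.example.com/v1/x')  # the retry adapter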
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
| 29,835 | Python | .py | 613 | 38.657423 | 106 | 0.631572 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,947 | help.py | rembo10_headphones/lib/requests/help.py |
"""Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import idna
import urllib3
from . import __version__ as requests_version
try:
import charset_normalizer
except ImportError:
charset_normalizer = None
try:
import chardet
except ImportError:
chardet = None
try:
from urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and version.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def info():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
urllib3_info = {'version': urllib3.__version__}
charset_normalizer_info = {'version': None}
chardet_info = {'version': None}
if charset_normalizer:
charset_normalizer_info = {'version': charset_normalizer.__version__}
if chardet:
chardet_info = {'version': chardet.__version__}
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
idna_info = {
'version': getattr(idna, '__version__', ''),
}
system_ssl = ssl.OPENSSL_VERSION_NUMBER
system_ssl_info = {
'version': '%x' % system_ssl if system_ssl is not None else ''
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': system_ssl_info,
'using_pyopenssl': pyopenssl is not None,
'using_charset_normalizer': chardet is None,
'pyOpenSSL': pyopenssl_info,
'urllib3': urllib3_info,
'chardet': chardet_info,
'charset_normalizer': charset_normalizer_info,
'cryptography': cryptography_info,
'idna': idna_info,
'requests': {
'version': requests_version,
},
}
def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2))
if __name__ == '__main__':
main()
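# Illustrative usage (not part of this module): thanks to the __main__
# guard above, a standard install can print the report with
# ``python -m requests.help`` (the module path differs in this vendored
# copy). Programmatically:
# >>> report = info()
# >>> sorted(report)[:3]
# ['chardet', 'charset_normalizer', 'cryptography']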
| 3,968 | Python | .py | 113 | 27.99115 | 78 | 0.630577 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,948 | __version__.py | rembo10_headphones/lib/requests/__version__.py |
# .-. .-. .-. . . .-. .-. .-. .-.
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'https://requests.readthedocs.io'
__version__ = '2.27.1'
__build__ = 0x022701
__author__ = 'Kenneth Reitz'
__author_email__ = 'me@kennethreitz.org'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2022 Kenneth Reitz'
__cake__ = u'\u2728 \U0001f370 \u2728'
| 441 | Python | .py | 13 | 32.846154 | 46 | 0.503513 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,949 | api.py | rembo10_headphones/lib/requests/api.py |
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'https://httpbin.org/get')
>>> req
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('options', url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes. If
`allow_redirects` is not provided, it will be set to `False` (as
opposed to the default :meth:`request` behavior).
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
| 6,402 | Python | .py | 121 | 47.46281 | 139 | 0.676277 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,950 | auth.py | rembo10_headphones/lib/requests/auth.py |
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import threading
import warnings
from base64 import b64encode
from .compat import urlparse, str, basestring
from .cookies import extract_cookies_to_jar
from ._internal_utils import to_native_string
from .utils import parse_dict_header
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
# "I want us to put a big-ol' comment on top of it that
# says that this behaviour is dumb but we need to preserve
# it because people are relying on it."
# - Lukasa
#
# These are here solely to maintain backwards compatibility
# for things like ints. This will be removed in 3.0.0.
if not isinstance(username, basestring):
warnings.warn(
"Non-string usernames will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(username),
category=DeprecationWarning,
)
username = str(username)
if not isinstance(password, basestring):
warnings.warn(
"Non-string passwords will no longer be supported in Requests "
"3.0.0. Please convert the object you've passed in ({!r}) to "
"a string or bytes object in the near future to avoid "
"problems.".format(type(password)),
category=DeprecationWarning,
)
password = str(password)
# -- End Removal --
if isinstance(username, str):
username = username.encode('latin1')
if isinstance(password, str):
password = password.encode('latin1')
authstr = 'Basic ' + to_native_string(
b64encode(b':'.join((username, password))).strip()
)
return authstr
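# Illustrative sketch (not part of requests): the value produced above is
# the standard base64 encoding of "username:password" (RFC 7617).
# >>> _basic_auth_str('user', 'pass')
# 'Basic dXNlcjpwYXNz'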
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
# Keep state in per-thread local storage
self._thread_local = threading.local()
def init_per_thread_state(self):
# Ensure state is initialized just once per-thread
if not hasattr(self._thread_local, 'init'):
self._thread_local.init = True
self._thread_local.last_nonce = ''
self._thread_local.nonce_count = 0
self._thread_local.chal = {}
self._thread_local.pos = None
self._thread_local.num_401_calls = None
def build_digest_header(self, method, url):
"""
:rtype: str
"""
realm = self._thread_local.chal['realm']
nonce = self._thread_local.chal['nonce']
qop = self._thread_local.chal.get('qop')
algorithm = self._thread_local.chal.get('algorithm')
opaque = self._thread_local.chal.get('opaque')
hash_utf8 = None
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
elif _algorithm == 'SHA-256':
def sha256_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha256(x).hexdigest()
hash_utf8 = sha256_utf8
elif _algorithm == 'SHA-512':
def sha512_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha512(x).hexdigest()
hash_utf8 = sha512_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
        #: path is the request-uri defined in RFC 2616 and must not be empty
path = p_parsed.path or "/"
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self._thread_local.last_nonce:
self._thread_local.nonce_count += 1
else:
self._thread_local.nonce_count = 1
ncvalue = '%08x' % self._thread_local.nonce_count
s = str(self._thread_local.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if not qop:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (
nonce, ncvalue, cnonce, 'auth', HA2
)
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self._thread_local.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
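    # Illustrative sketch (not part of requests): with qop="auth" the
    # response digest built above reduces to the worked example from
    # RFC 2617 section 3.5 (MD5, fixed nc/cnonce for reproducibility):
    # >>> md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
    # >>> HA1 = md5('Mufasa:testrealm@host.com:Circle Of Life')
    # >>> HA2 = md5('GET:/dir/index.html')
    # >>> md5(':'.join([HA1, 'dcd98b7102dd2f0e8b11d0f600bfb0c093',
    # ...               '00000001', '0a4f113b', 'auth', HA2]))
    # '6629fae49393a05397450978507c4ef1'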
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
self._thread_local.num_401_calls = 1
def handle_401(self, r, **kwargs):
"""
Takes the given response and tries digest-auth, if needed.
:rtype: requests.Response
"""
# If response is not 4xx, do not auth
# See https://github.com/psf/requests/issues/3772
if not 400 <= r.status_code < 500:
self._thread_local.num_401_calls = 1
return r
if self._thread_local.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self._thread_local.pos)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
self._thread_local.num_401_calls += 1
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
self._thread_local.num_401_calls = 1
return r
def __call__(self, r):
# Initialize per-thread state, if needed
self.init_per_thread_state()
# If we have a saved nonce, skip the 401
if self._thread_local.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self._thread_local.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self._thread_local.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
self._thread_local.num_401_calls = 1
return r
def __eq__(self, other):
return all([
self.username == getattr(other, 'username', None),
self.password == getattr(other, 'password', None)
])
def __ne__(self, other):
return not self == other
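# Illustrative sketch (not part of this module): typical usage. The first
# request draws a 401 challenge, which handle_401 answers transparently.
# >>> import requests
# >>> url = 'https://httpbin.org/digest-auth/auth/user/pass'
# >>> requests.get(url, auth=HTTPDigestAuth('user', 'pass'))
# <Response [200]>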
| 10,207 | Python | .py | 246 | 31.849593 | 88 | 0.580893 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,951 | utils.py | rembo10_headphones/lib/requests/utils.py |
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from urllib3.util import make_headers
from urllib3.util import parse_url
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported here for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment, Mapping)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
DEFAULT_PORTS = {'http': 80, 'https': 443}
# Ensure that ', ' is used to preserve previous delimiter behavior.
DEFAULT_ACCEPT_ENCODING = ", ".join(
re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
)
if sys.platform == 'win32':
# provide a proxy_bypass version on Windows without DNS lookups
def proxy_bypass_registry(host):
try:
if is_py3:
import winreg
else:
import _winreg as winreg
except ImportError:
return False
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
# ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
proxyEnable = int(winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0])
# ProxyOverride is almost always a string
proxyOverride = winreg.QueryValueEx(internetSettings,
'ProxyOverride')[0]
except OSError:
return False
if not proxyEnable or not proxyOverride:
return False
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(';')
# now check if we match one of the registry values.
for test in proxyOverride:
if test == '<local>':
if '.' not in host:
return True
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
if re.match(test, host, re.I):
return True
return False
def proxy_bypass(host): # noqa
"""Return True, if the host should be bypassed.
Checks proxy settings gathered from the environment, if specified,
or the registry.
"""
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_registry(host)
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = None
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except (io.UnsupportedOperation, AttributeError):
# AttributeError is a surprising exception, seeing as how we've just checked
# that `hasattr(o, 'fileno')`. It happens for objects obtained via
# `Tarfile.extractfile()`, per issue 5229.
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
if total_length is not None:
current_position = total_length
else:
if hasattr(o, 'seek') and total_length is None:
# StringIO and BytesIO have seek but no usable fileno
try:
# seek to end of file
o.seek(0, 2)
total_length = o.tell()
# seek back to current position to support
# partially read file-like objects
o.seek(current_position or 0)
except (OSError, IOError):
total_length = 0
if total_length is None:
total_length = 0
return max(0, total_length - current_position)
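# Illustrative sketch (not part of requests): super_len() reports the
# length *remaining* from the current read position.
# >>> buf = io.BytesIO(b'hello')
# >>> super_len(buf)
# 5
# >>> _ = buf.read(2)
# >>> super_len(buf)
# 3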
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
netrc_file = os.environ.get('NETRC')
if netrc_file is not None:
netrc_locations = (netrc_file,)
else:
netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES)
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in netrc_locations:
try:
loc = os.path.expanduser(f)
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See https://bugs.python.org/issue20164 &
# https://github.com/psf/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode` dance is
        # used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# App Engine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def extract_zipped_paths(path):
"""Replace nonexistent paths that look like they refer to a member of a zip
archive with the location of an extracted copy of the target, or else
just return the provided path unchanged.
"""
if os.path.exists(path):
# this is already a valid path, no need to do anything further
return path
# find the first valid part of the provided path and treat that as a zip archive
# assume the rest of the path is the name of a member in the archive
archive, member = os.path.split(path)
while archive and not os.path.exists(archive):
archive, prefix = os.path.split(archive)
if not prefix:
# If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
# we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
break
member = '/'.join([prefix, member])
if not zipfile.is_zipfile(archive):
return path
zip_file = zipfile.ZipFile(archive)
if member not in zip_file.namelist():
return path
# we have a valid zip archive and a valid member of that archive
tmp = tempfile.gettempdir()
extracted_path = os.path.join(tmp, member.split('/')[-1])
if not os.path.exists(extracted_path):
        # use read + write to avoid creating nested folders; we only want the file, and this sidesteps an mkdir race condition
with atomic_open(extracted_path) as file_handler:
file_handler.write(zip_file.read(member))
return extracted_path
@contextlib.contextmanager
def atomic_open(filename):
"""Write a file to the disk in an atomic fashion"""
replacer = os.rename if sys.version_info[0] == 2 else os.replace
tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
with os.fdopen(tmp_descriptor, 'wb') as tmp_handler:
yield tmp_handler
replacer(tmp_name, filename)
except BaseException:
os.remove(tmp_name)
raise
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
    dictionary. If it can be, return an OrderedDict; otherwise raise a
    ValueError, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
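# Illustrative sketch (not part of requests): parameter names are
# lower-cased and surrounding quotes/spaces stripped from values.
# >>> _parse_content_type_header('text/html; Charset="UTF-8"')
# ('text/html', {'charset': 'UTF-8'})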
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = _parse_content_type_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
if 'application/json' in content_type:
# Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
return 'utf-8'
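# Illustrative sketch (not part of requests): the RFC 2616 fallback to
# ISO-8859-1 applies to text/* types without an explicit charset.
# >>> get_encoding_from_headers({'content-type': 'text/plain'})
# 'ISO-8859-1'
# >>> get_encoding_from_headers({'content-type': 'application/json'})
# 'utf-8'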
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
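# Illustrative sketch (not part of requests): the unquote/quote cycle is
# idempotent, so already-quoted input passes through unchanged.
# >>> requote_uri('http://example.com/a b')
# 'http://example.com/a%20b'
# >>> requote_uri('http://example.com/a%20b')
# 'http://example.com/a%20b'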
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
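# Illustrative sketch (not part of requests): the examples from the two
# docstrings above, in doctest form.
# >>> dotted_netmask(24)
# '255.255.255.0'
# >>> address_in_network('192.168.1.1', '192.168.1.0/24')
# True
# >>> address_in_network('192.168.1.1', '192.168.100.0/24')
# False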
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
    Very simple check of the CIDR format in the no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
@contextlib.contextmanager
def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing"""
value_changed = value is not None
if value_changed:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value_changed:
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value
def should_bypass_proxies(url, no_proxy):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
# Prioritize lowercase environment variables over uppercase
# to keep a consistent behaviour with other http projects (curl, wget).
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy_arg = no_proxy
if no_proxy is None:
no_proxy = get_proxy('no_proxy')
parsed = urlparse(url)
if parsed.hostname is None:
# URLs don't always have hostnames, e.g. file:/// urls.
return True
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the hostname, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
if is_ipv4_address(parsed.hostname):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(parsed.hostname, proxy_ip):
return True
elif parsed.hostname == proxy_ip:
                    # If the no_proxy entry was given in plain IP notation
                    # instead of CIDR notation and matches the URL's host IP
return True
else:
host_with_port = parsed.hostname
if parsed.port:
host_with_port += ':{}'.format(parsed.port)
for host in no_proxy:
if parsed.hostname.endswith(host) or host_with_port.endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
with set_environ('no_proxy', no_proxy_arg):
# parsed.hostname can be `None` in cases such as a file URI.
try:
bypass = proxy_bypass(parsed.hostname)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
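# Illustrative sketch (not part of requests): an explicit no_proxy
# argument wins over the environment and matches on hostname suffix,
# with or without the port.
# >>> should_bypass_proxies('http://internal.example.com:8080/x',
# ...                       no_proxy='example.com')
# True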
def get_environ_proxies(url, no_proxy=None):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url, no_proxy=no_proxy):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url being for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get('all'))
proxy_keys = [
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
'all://' + urlparts.hostname,
'all',
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
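# --- Editor's example (hedged sketch, not part of the vendored source) ---
# select_proxy() prefers the most specific key: scheme://host wins over
# the bare scheme.
# >>> proxies = {'http://example.com': 'http://p1:8080', 'http': 'http://p2:8080'}
# >>> select_proxy('http://example.com/path', proxies)
# 'http://p1:8080'
# >>> select_proxy('http://other.org/', proxies)
# 'http://p2:8080'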
def resolve_proxies(request, proxies, trust_env=True):
"""This method takes proxy information from a request and configuration
input to resolve a mapping of target proxies. This will consider settings
    such as NO_PROXY to strip proxy configurations.
:param request: Request or PreparedRequest
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
:param trust_env: Boolean declaring whether to trust environment configs
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
url = request.url
scheme = urlparse(url).scheme
no_proxy = proxies.get('no_proxy')
new_proxies = proxies.copy()
if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
return new_proxies
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': DEFAULT_ACCEPT_ENCODING,
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
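# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Parsing a Link header value with two entries.
# >>> parse_header_links('<http://x/a>; rel=next,<http://x/b>; rel=prev')
# [{'url': 'http://x/a', 'rel': 'next'}, {'url': 'http://x/b', 'rel': 'prev'}]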
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
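# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Null-byte positions in the first four bytes identify BOM-less encodings.
# >>> guess_json_utf('{"a": 1}'.encode('utf-8'))
# 'utf-8'
# >>> guess_json_utf('{"a": 1}'.encode('utf-16-le'))
# 'utf-16-le'
# >>> guess_json_utf('{"a": 1}'.encode('utf-32-be'))
# 'utf-32-be'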
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
parsed = parse_url(url)
scheme, auth, host, port, path, query, fragment = parsed
# A defect in urlparse determines that there isn't a netloc present in some
# urls. We previously assumed parsing was overly cautious, and swapped the
# netloc and path. Due to a lack of tests on the original defect, this is
# maintained with parse_url for backwards compatibility.
netloc = parsed.netloc
if not netloc:
netloc, path = path, netloc
if auth:
# parse_url doesn't provide the netloc with auth
# so we'll add it ourselves.
netloc = '@'.join([auth, netloc])
if scheme is None:
scheme = new_scheme
if path is None:
path = ''
return urlunparse((scheme, netloc, path, '', query, fragment))
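# --- Editor's example (hedged sketch, not part of the vendored source) ---
# A bare host:port gains the given scheme; an existing scheme is kept.
# >>> prepend_scheme_if_needed('example.com:8080', 'http')
# 'http://example.com:8080'
# >>> prepend_scheme_if_needed('https://example.com', 'http')
# 'https://example.com'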
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
    (username, password).
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
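# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Credentials are percent-decoded; URLs without userinfo yield ('', '').
# >>> get_auth_from_url('http://user:p%40ss@example.com/')
# ('user', 'p@ss')
# >>> get_auth_from_url('http://example.com/')
# ('', '')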
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Value for header {%s: %s} must be of type str or "
"bytes, not %s" % (name, value, type(value)))
def urldefragauth(url):
"""
    Given a url, remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
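# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Both the userinfo and the fragment are stripped; the rest survives.
# >>> urldefragauth('http://user:pass@example.com/path#section')
# 'http://example.com/path'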
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
"""
body_seek = getattr(prepared_request.body, 'seek', None)
if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
try:
body_seek(prepared_request._body_position)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect.")
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
| 33,277 | Python | .py | 828 | 32.233092 | 128 | 0.623336 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,952 | status_codes.py | rembo10_headphones/lib/requests/status_codes.py |
# -*- coding: utf-8 -*-
r"""
The ``codes`` object defines a mapping from common names for HTTP statuses
to their numerical codes, accessible either as attributes or as dictionary
items.
Example::
>>> import requests
>>> requests.codes['temporary_redirect']
307
>>> requests.codes.teapot
418
>>> requests.codes['\o/']
200
Some codes have multiple names, and both upper- and lower-case versions of
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
``codes.okay`` all correspond to the HTTP status code 200.
"""
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('permanent_redirect',
'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
421: ('misdirected_request',),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
511: ('network_authentication_required', 'network_auth', 'network_authentication'),
}
codes = LookupDict(name='status_codes')
def _init():
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
if not title.startswith(('\\', '/')):
setattr(codes, title.upper(), code)
def doc(code):
names = ', '.join('``%s``' % n for n in _codes[code])
return '* %d: %s' % (code, names)
global __doc__
__doc__ = (__doc__ + '\n' +
'\n'.join(doc(code) for code in sorted(_codes))
if __doc__ is not None else None)
_init()
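# --- Editor's example (hedged sketch, not part of the vendored source) ---
# After _init() runs, codes are reachable by attribute, upper-case
# attribute, or item lookup.
# >>> codes.ok, codes.OK, codes['not_found']
# (200, 200, 404)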
| 4,188 | Python | .py | 108 | 33.564815 | 89 | 0.6033 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,953 | __init__.py | rembo10_headphones/lib/requests/__init__.py |
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP Library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings.
Basic GET usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> b'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('https://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key1": "value1",
"key2": "value2"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <https://requests.readthedocs.io>.
:copyright: (c) 2017 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
import urllib3
import warnings
from .exceptions import RequestsDependencyWarning
try:
from charset_normalizer import __version__ as charset_normalizer_version
except ImportError:
charset_normalizer_version = None
try:
from chardet import __version__ as chardet_version
except ImportError:
chardet_version = None
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
urllib3_version = urllib3_version.split('.')
assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git.
    # Sometimes, urllib3 only reports a two-component version, e.g. '1.26'.
if len(urllib3_version) == 2:
urllib3_version.append('0')
# Check urllib3 for compatibility.
major, minor, patch = urllib3_version # noqa: F811
major, minor, patch = int(major), int(minor), int(patch)
# urllib3 >= 1.21.1, <= 1.26
assert major == 1
assert minor >= 21
assert minor <= 26
# Check charset_normalizer for compatibility.
if chardet_version:
major, minor, patch = chardet_version.split('.')[:3]
major, minor, patch = int(major), int(minor), int(patch)
# chardet_version >= 3.0.2, < 5.0.0
assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
elif charset_normalizer_version:
major, minor, patch = charset_normalizer_version.split('.')[:3]
major, minor, patch = int(major), int(minor), int(patch)
# charset_normalizer >= 2.0.0 < 3.0.0
assert (2, 0, 0) <= (major, minor, patch) < (3, 0, 0)
else:
raise Exception("You need either charset_normalizer or chardet installed")
def _check_cryptography(cryptography_version):
# cryptography < 1.3.4
try:
cryptography_version = list(map(int, cryptography_version.split('.')))
except ValueError:
return
if cryptography_version < [1, 3, 4]:
warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
warnings.warn(warning, RequestsDependencyWarning)
# Check imported dependencies for compatibility.
try:
check_compatibility(urllib3.__version__, chardet_version, charset_normalizer_version)
except (AssertionError, ValueError):
warnings.warn("urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
"version!".format(urllib3.__version__, chardet_version, charset_normalizer_version),
RequestsDependencyWarning)
# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
try:
import ssl
except ImportError:
ssl = None
if not getattr(ssl, "HAS_SNI", False):
from urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
# Check cryptography version
from cryptography import __version__ as cryptography_version
_check_cryptography(cryptography_version)
except ImportError:
pass
# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __build__, __author__, __author_email__, __license__
from .__version__ import __copyright__, __cake__
from . import utils
from . import packages
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout, JSONDecodeError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
| 4,924 | Python | .py | 125 | 34.984 | 102 | 0.685457 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,954 | packages.py | rembo10_headphones/lib/requests/packages.py |
import sys
try:
import chardet
except ImportError:
import charset_normalizer as chardet
import warnings
warnings.filterwarnings('ignore', 'Trying to detect', module='charset_normalizer')
# This code exists for backwards compatibility reasons.
# I don't like it either. Just look the other way. :)
for package in ('urllib3', 'idna'):
locals()[package] = __import__(package)
# This traversal is apparently necessary such that the identities are
# preserved (requests.packages.urllib3.* is urllib3.*)
for mod in list(sys.modules):
if mod == package or mod.startswith(package + '.'):
sys.modules['requests.packages.' + mod] = sys.modules[mod]
target = chardet.__name__
for mod in list(sys.modules):
if mod == target or mod.startswith(target + '.'):
sys.modules['requests.packages.' + target.replace(target, 'chardet')] = sys.modules[mod]
# Kinda cool, though, right?
| 932 | Python | .py | 21 | 40.095238 | 96 | 0.703091 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,955 | hooks.py | rembo10_headphones/lib/requests/hooks.py |
# -*- coding: utf-8 -*-
"""
requests.hooks
~~~~~~~~~~~~~~
This module provides the capabilities for the Requests hooks system.
Available hooks:
``response``:
The response generated from a Request.
"""
HOOKS = ['response']
def default_hooks():
return {event: [] for event in HOOKS}
# TODO: response is the only one
def dispatch_hook(key, hooks, hook_data, **kwargs):
"""Dispatches a hook dictionary on a given piece of data."""
hooks = hooks or {}
hooks = hooks.get(key)
if hooks:
if hasattr(hooks, '__call__'):
hooks = [hooks]
for hook in hooks:
_hook_data = hook(hook_data, **kwargs)
if _hook_data is not None:
hook_data = _hook_data
return hook_data
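# --- Editor's example (hedged sketch, not part of the vendored source) ---
# A bare callable is wrapped in a list, and a non-None return value
# replaces the data passed through the hook.
# >>> dispatch_hook('response', {'response': lambda data: data + 1}, 41)
# 42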
| 757 | Python | .py | 25 | 25.08 | 68 | 0.616874 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,956 | compat.py | rembo10_headphones/lib/requests/compat.py |
# -*- coding: utf-8 -*-
"""
requests.compat
~~~~~~~~~~~~~~~
This module handles import compatibility issues between Python 2 and
Python 3.
"""
try:
import chardet
except ImportError:
import charset_normalizer as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
has_simplejson = False
try:
import simplejson as json
has_simplejson = True
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import (
quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
proxy_bypass, proxy_bypass_environment, getproxies_environment)
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
# Keep OrderedDict for backwards compatibility.
from collections import Callable, Mapping, MutableMapping, OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
integer_types = (int, long)
JSONDecodeError = ValueError
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
# Keep OrderedDict for backwards compatibility.
from collections import OrderedDict
from collections.abc import Callable, Mapping, MutableMapping
if has_simplejson:
from simplejson import JSONDecodeError
else:
from json import JSONDecodeError
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
integer_types = (int,)
| 2,054 | Python | .py | 67 | 26.820896 | 132 | 0.717689 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,957 | exceptions.py | rembo10_headphones/lib/requests/exceptions.py |
# -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from urllib3.exceptions import HTTPError as BaseHTTPError
from .compat import JSONDecodeError as CompatJSONDecodeError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request.
"""
def __init__(self, *args, **kwargs):
"""Initialize RequestException with `request` and `response` objects."""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class InvalidJSONError(RequestException):
"""A JSON error occurred."""
class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
"""Couldn't decode the text into json"""
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL scheme (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""The URL scheme provided is either invalid or unsupported."""
class InvalidURL(RequestException, ValueError):
"""The URL provided was somehow invalid."""
class InvalidHeader(RequestException, ValueError):
"""The header value provided was somehow invalid."""
class InvalidProxyURL(InvalidURL):
"""The proxy URL provided is invalid."""
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content."""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed."""
class RetryError(RequestException):
"""Custom retries logic failed"""
class UnrewindableBodyError(RequestException):
"""Requests encountered an error when trying to rewind a body."""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""A file was opened in text mode, but Requests determined its binary length."""
class RequestsDependencyWarning(RequestsWarning):
"""An imported dependency doesn't match the expected version range."""
| 3,434 | Python | .py | 76 | 40.644737 | 84 | 0.741896 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,958 | certs.py | rembo10_headphones/lib/requests/certs.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle. There is
only one — the one from the certifi package.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
from certifi import where
if __name__ == '__main__':
print(where())
| 453 | Python | .py | 14 | 30.785714 | 76 | 0.71954 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,959 | adapters.py | rembo10_headphones/lib/requests/adapters.py |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from urllib3.poolmanager import PoolManager, proxy_from_url
from urllib3.response import HTTPResponse
from urllib3.util import parse_url
from urllib3.util import Timeout as TimeoutSauce
from urllib3.util.retry import Retry
from urllib3.exceptions import ClosedPoolError
from urllib3.exceptions import ConnectTimeoutError
from urllib3.exceptions import HTTPError as _HTTPError
from urllib3.exceptions import InvalidHeader as _InvalidHeader
from urllib3.exceptions import MaxRetryError
from urllib3.exceptions import NewConnectionError
from urllib3.exceptions import ProxyError as _ProxyError
from urllib3.exceptions import ProtocolError
from urllib3.exceptions import ReadTimeoutError
from urllib3.exceptions import SSLError as _SSLError
from urllib3.exceptions import ResponseError
from urllib3.exceptions import LocationValueError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
InvalidURL, InvalidHeader)
from .auth import _basic_auth_str
try:
from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
raise NotImplementedError
def close(self):
"""Cleans up adapter specific items."""
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
:rtype: urllib3.ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {}".format(conn.key_file))
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
:rtype: requests.Response
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
:rtype: urllib3.ConnectionPool
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL("Please check proxy URL. It is malformed"
" and could be missing the host.")
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
:rtype: str
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The url of the proxy being used for this request.
:rtype: dict
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
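    # --- Editor's example (hedged sketch, not part of the vendored source) ---
    # Given a proxy URL with credentials, proxy_headers() produces a
    # Proxy-Authorization header; the proxy address is hypothetical.
    # >>> HTTPAdapter().proxy_headers('http://u:p@proxy.local:3128')
    # {'Proxy-Authorization': 'Basic dTpw'}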
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple or urllib3 Timeout object
:param verify: (optional) Either a boolean, in which case it controls whether
we verify the server's TLS certificate, or a string, in which case it
must be a path to a CA bundle to use
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
:rtype: requests.Response
"""
try:
conn = self.get_connection(request.url, proxies)
except LocationValueError as e:
raise InvalidURL(e, request=request)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif isinstance(timeout, TimeoutSauce):
pass
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
skip_host = 'Host' in request.headers
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True,
skip_host=skip_host)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7, use buffering of HTTP responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 3.3+
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
if isinstance(e.reason, _SSLError):
# This branch is for urllib3 v1.22 and later.
raise SSLError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
# This branch is for urllib3 versions earlier than v1.22
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
elif isinstance(e, _InvalidHeader):
raise InvalidHeader(e, request=request)
else:
raise
return self.build_response(request, resp)
| 21,645 | Python | .py | 439 | 37.42369 | 108 | 0.618278 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,960 | _internal_utils.py | rembo10_headphones/lib/requests/_internal_utils.py |
# -*- coding: utf-8 -*-
"""
requests._internal_utils
~~~~~~~~~~~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
from .compat import is_py2, builtin_str, str
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
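# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Any code point outside ASCII makes unicode_is_ascii() return False.
# >>> unicode_is_ascii(u'plain')
# True
# >>> unicode_is_ascii(u'caf\xe9')
# False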
| 1,096 | Python | .py | 33 | 27.818182 | 77 | 0.679317 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,961 | structures.py | rembo10_headphones/lib/requests/structures.py |
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
from collections import OrderedDict
from .compat import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
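# --- Editor's example (hedged sketch, not part of the vendored source) ---
# CaseInsensitiveDict lookups ignore case while iteration preserves the
# spelling used at assignment.
# >>> cid = CaseInsensitiveDict()
# >>> cid['Accept'] = 'application/json'
# >>> cid['aCCEPT']
# 'application/json'
# >>> list(cid)
# ['Accept']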
| 3,005 | Python | .py | 77 | 32.051948 | 75 | 0.617931 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,962 | _compat.py | rembo10_headphones/lib/cheroot/_compat.py |
# pylint: disable=unused-import
"""Compatibility code for using Cheroot with various versions of Python."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import platform
import re
import six
try:
import selectors # lgtm [py/unused-import]
except ImportError:
import selectors2 as selectors # noqa: F401 # lgtm [py/unused-import]
try:
import ssl
IS_ABOVE_OPENSSL10 = ssl.OPENSSL_VERSION_INFO >= (1, 1)
del ssl
except ImportError:
IS_ABOVE_OPENSSL10 = None
# contextlib.suppress was added in Python 3.4
try:
from contextlib import suppress
except ImportError:
from contextlib import contextmanager
@contextmanager
def suppress(*exceptions):
"""Return a context manager that suppresses the `exceptions`."""
try:
yield
except exceptions:
pass
IS_CI = bool(os.getenv('CI'))
IS_GITHUB_ACTIONS_WORKFLOW = bool(os.getenv('GITHUB_WORKFLOW'))
IS_PYPY = platform.python_implementation() == 'PyPy'
SYS_PLATFORM = platform.system()
IS_WINDOWS = SYS_PLATFORM == 'Windows'
IS_LINUX = SYS_PLATFORM == 'Linux'
IS_MACOS = SYS_PLATFORM == 'Darwin'
PLATFORM_ARCH = platform.machine()
IS_PPC = PLATFORM_ARCH.startswith('ppc')
if not six.PY2:
def ntob(n, encoding='ISO-8859-1'):
"""Return the native string as bytes in the given encoding."""
assert_native(n)
# In Python 3, the native string type is unicode
return n.encode(encoding)
def ntou(n, encoding='ISO-8859-1'):
"""Return the native string as Unicode with the given encoding."""
assert_native(n)
# In Python 3, the native string type is unicode
return n
def bton(b, encoding='ISO-8859-1'):
"""Return the byte string as native string in the given encoding."""
return b.decode(encoding)
else:
# Python 2
def ntob(n, encoding='ISO-8859-1'):
"""Return the native string as bytes in the given encoding."""
assert_native(n)
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
def ntou(n, encoding='ISO-8859-1'):
"""Return the native string as Unicode with the given encoding."""
assert_native(n)
# In Python 2, the native string type is bytes.
# First, check for the special encoding 'escape'. The test suite uses
# this to signal that it wants to pass a string with embedded \uXXXX
# escapes, but without having to prefix it with u'' for Python 2,
# but no prefix for Python 3.
if encoding == 'escape':
return re.sub(
r'\\u([0-9a-zA-Z]{4})',
lambda m: six.unichr(int(m.group(1), 16)),
n.decode('ISO-8859-1'),
)
# Assume it's already in the given encoding, which for ISO-8859-1
# is almost always what was intended.
return n.decode(encoding)
def bton(b, encoding='ISO-8859-1'):
"""Return the byte string as native string in the given encoding."""
return b
def assert_native(n):
"""Check whether the input is of native :py:class:`str` type.
Raises:
TypeError: in case of failed check
"""
if not isinstance(n, str):
raise TypeError('n must be a native str (got %s)' % type(n).__name__)
if not six.PY2:
"""Python 3 has :py:class:`memoryview` builtin."""
# Python 2.7 has it backported, but socket.write() does
# str(memoryview(b'0' * 100)) -> <memory at 0x7fb6913a5588>
# instead of accessing it correctly.
memoryview = memoryview
else:
"""Link :py:class:`memoryview` to buffer under Python 2."""
memoryview = buffer # noqa: F821
def extract_bytes(mv):
r"""Retrieve bytes out of the given input buffer.
:param mv: input :py:func:`buffer`
:type mv: memoryview or bytes
:return: unwrapped bytes
:rtype: bytes
:raises ValueError: if the input is not one of \
:py:class:`memoryview`/:py:func:`buffer` \
or :py:class:`bytes`
"""
if isinstance(mv, memoryview):
return bytes(mv) if six.PY2 else mv.tobytes()
if isinstance(mv, bytes):
return mv
raise ValueError(
'extract_bytes() only accepts bytes and memoryview/buffer',
)
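# --- Editor's example (hedged sketch, not part of the vendored source) ---
# extract_bytes() normalises a memoryview back into plain bytes (Python 3).
# >>> extract_bytes(memoryview(b'abc'))
# b'abc'
# >>> extract_bytes(b'abc')
# b'abc'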
| 4,436 | Python | .py | 115 | 32.104348 | 77 | 0.650187 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,963 | wsgi.pyi | rembo10_headphones/lib/cheroot/wsgi.pyi |
from . import server
from typing import Any
class Server(server.HTTPServer):
wsgi_version: Any
wsgi_app: Any
request_queue_size: Any
timeout: Any
shutdown_timeout: Any
requests: Any
def __init__(self, bind_addr, wsgi_app, numthreads: int = ..., server_name: Any | None = ..., max: int = ..., request_queue_size: int = ..., timeout: int = ..., shutdown_timeout: int = ..., accepted_queue_size: int = ..., accepted_queue_timeout: int = ..., peercreds_enabled: bool = ..., peercreds_resolve_enabled: bool = ...) -> None: ...
@property
def numthreads(self): ...
@numthreads.setter
def numthreads(self, value) -> None: ...
class Gateway(server.Gateway):
started_response: bool
env: Any
remaining_bytes_out: Any
def __init__(self, req) -> None: ...
@classmethod
def gateway_map(cls): ...
def get_environ(self) -> None: ...
def respond(self) -> None: ...
def start_response(self, status, headers, exc_info: Any | None = ...): ...
def write(self, chunk) -> None: ...
class Gateway_10(Gateway):
version: Any
def get_environ(self): ...
class Gateway_u0(Gateway_10):
version: Any
def get_environ(self): ...
wsgi_gateways: Any
class PathInfoDispatcher:
apps: Any
def __init__(self, apps): ...
def __call__(self, environ, start_response): ...
| 1,348 | Python | .py | 36 | 33.166667 | 343 | 0.6317 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,964 | server.pyi | rembo10_headphones/lib/cheroot/server.pyi |
from typing import Any
class HeaderReader:
def __call__(self, rfile, hdict: Any | None = ...): ...
class DropUnderscoreHeaderReader(HeaderReader): ...
class SizeCheckWrapper:
rfile: Any
maxlen: Any
bytes_read: int
def __init__(self, rfile, maxlen) -> None: ...
def read(self, size: Any | None = ...): ...
def readline(self, size: Any | None = ...): ...
def readlines(self, sizehint: int = ...): ...
def close(self) -> None: ...
def __iter__(self): ...
def __next__(self): ...
next: Any
class KnownLengthRFile:
rfile: Any
remaining: Any
def __init__(self, rfile, content_length) -> None: ...
def read(self, size: Any | None = ...): ...
def readline(self, size: Any | None = ...): ...
def readlines(self, sizehint: int = ...): ...
def close(self) -> None: ...
def __iter__(self): ...
def __next__(self): ...
next: Any
class ChunkedRFile:
rfile: Any
maxlen: Any
bytes_read: int
buffer: Any
bufsize: Any
closed: bool
def __init__(self, rfile, maxlen, bufsize: int = ...) -> None: ...
def read(self, size: Any | None = ...): ...
def readline(self, size: Any | None = ...): ...
def readlines(self, sizehint: int = ...): ...
def read_trailer_lines(self) -> None: ...
def close(self) -> None: ...
class HTTPRequest:
server: Any
conn: Any
inheaders: Any
outheaders: Any
ready: bool
close_connection: bool
chunked_write: bool
header_reader: Any
started_request: bool
scheme: bytes
response_protocol: str
status: str
sent_headers: bool
chunked_read: bool
proxy_mode: Any
strict_mode: Any
def __init__(self, server, conn, proxy_mode: bool = ..., strict_mode: bool = ...) -> None: ...
rfile: Any
def parse_request(self) -> None: ...
uri: Any
method: Any
authority: Any
path: Any
qs: Any
request_protocol: Any
def read_request_line(self): ...
def read_request_headers(self): ...
def respond(self) -> None: ...
def simple_response(self, status, msg: str = ...) -> None: ...
def ensure_headers_sent(self) -> None: ...
def write(self, chunk) -> None: ...
def send_headers(self) -> None: ...
class HTTPConnection:
remote_addr: Any
remote_port: Any
ssl_env: Any
rbufsize: Any
wbufsize: Any
RequestHandlerClass: Any
peercreds_enabled: bool
peercreds_resolve_enabled: bool
last_used: Any
server: Any
socket: Any
rfile: Any
wfile: Any
requests_seen: int
def __init__(self, server, sock, makefile=...) -> None: ...
def communicate(self): ...
linger: bool
def close(self) -> None: ...
def get_peer_creds(self): ...
@property
def peer_pid(self): ...
@property
def peer_uid(self): ...
@property
def peer_gid(self): ...
def resolve_peer_creds(self): ...
@property
def peer_user(self): ...
@property
def peer_group(self): ...
class HTTPServer:
gateway: Any
minthreads: Any
maxthreads: Any
server_name: Any
protocol: str
request_queue_size: int
shutdown_timeout: int
timeout: int
expiration_interval: float
version: Any
software: Any
ready: bool
max_request_header_size: int
max_request_body_size: int
nodelay: bool
ConnectionClass: Any
ssl_adapter: Any
peercreds_enabled: bool
peercreds_resolve_enabled: bool
keep_alive_conn_limit: int
requests: Any
def __init__(self, bind_addr, gateway, minthreads: int = ..., maxthreads: int = ..., server_name: Any | None = ..., peercreds_enabled: bool = ..., peercreds_resolve_enabled: bool = ...) -> None: ...
stats: Any
def clear_stats(self): ...
def runtime(self): ...
@property
def bind_addr(self): ...
@bind_addr.setter
def bind_addr(self, value) -> None: ...
def safe_start(self) -> None: ...
socket: Any
def prepare(self) -> None: ...
def serve(self) -> None: ...
def start(self) -> None: ...
@property
def can_add_keepalive_connection(self): ...
def put_conn(self, conn) -> None: ...
def error_log(self, msg: str = ..., level: int = ..., traceback: bool = ...) -> None: ...
def bind(self, family, type, proto: int = ...): ...
def bind_unix_socket(self, bind_addr): ...
@staticmethod
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter): ...
@staticmethod
def bind_socket(socket_, bind_addr): ...
@staticmethod
def resolve_real_bind_addr(socket_): ...
def process_conn(self, conn) -> None: ...
@property
def interrupt(self): ...
@interrupt.setter
def interrupt(self, interrupt) -> None: ...
def stop(self) -> None: ...
class Gateway:
req: Any
def __init__(self, req) -> None: ...
def respond(self) -> None: ...
def get_ssl_adapter_class(name: str = ...): ...
| 4,910 | Python | .py | 162 | 25.518519 | 202 | 0.600886 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,965 | errors.py | rembo10_headphones/lib/cheroot/errors.py |
# -*- coding: utf-8 -*-
"""Collection of exceptions raised and/or processed by Cheroot."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import sys
class MaxSizeExceeded(Exception):
"""Exception raised when a client sends more data then acceptable within limit.
Depends on ``request.body.maxbytes`` config option if used within CherryPy
"""
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in ``errnames`` on this platform.
The :py:mod:`errno` module contains different global constants
depending on the specific platform (OS). This function will return
the list of numeric values for a given list of potential names.
"""
missing_attr = {None}
unique_nums = {getattr(errno, k, None) for k in errnames}
return list(unique_nums - missing_attr)
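# --- Editor's example (hedged sketch, not part of the vendored source) ---
# Names missing on the current platform are dropped, so only resolvable
# errnos come back; 'NO_SUCH_ERRNO' is a deliberately bogus name.
# >>> import errno
# >>> errno.EPIPE in plat_specific_errors('EPIPE', 'NO_SUCH_ERRNO')
# True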
socket_error_eintr = plat_specific_errors('EINTR', 'WSAEINTR')
socket_errors_to_ignore = plat_specific_errors(
'EPIPE',
'EBADF', 'WSAEBADF',
'ENOTSOCK', 'WSAENOTSOCK',
'ETIMEDOUT', 'WSAETIMEDOUT',
'ECONNREFUSED', 'WSAECONNREFUSED',
'ECONNRESET', 'WSAECONNRESET',
'ECONNABORTED', 'WSAECONNABORTED',
'ENETRESET', 'WSAENETRESET',
'EHOSTDOWN', 'EHOSTUNREACH',
)
socket_errors_to_ignore.append('timed out')
socket_errors_to_ignore.append('The read operation timed out')
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK',
)
if sys.platform == 'darwin':
socket_errors_to_ignore.extend(plat_specific_errors('EPROTOTYPE'))
socket_errors_nonblocking.extend(plat_specific_errors('EPROTOTYPE'))
acceptable_sock_shutdown_error_codes = {
errno.ENOTCONN,
errno.EPIPE, errno.ESHUTDOWN, # corresponds to BrokenPipeError in Python 3
errno.ECONNRESET, # corresponds to ConnectionResetError in Python 3
}
"""Errors that may happen during the connection close sequence.
* ENOTCONN — client is no longer connected
* EPIPE — write on a pipe while the other end has been closed
* ESHUTDOWN — write on a socket which has been shutdown for writing
* ECONNRESET — connection is reset by the peer, we received a TCP RST packet
Refs:
* https://github.com/cherrypy/cheroot/issues/341#issuecomment-735884889
* https://bugs.python.org/issue30319
* https://bugs.python.org/issue30329
* https://github.com/python/cpython/commit/83a2c28
* https://github.com/python/cpython/blob/c39b52f/Lib/poplib.py#L297-L302
* https://docs.microsoft.com/windows/win32/api/winsock/nf-winsock-shutdown
"""
try: # py3
acceptable_sock_shutdown_exceptions = (
BrokenPipeError, ConnectionResetError,
)
except NameError: # py2
acceptable_sock_shutdown_exceptions = ()
| 2,944 | Python | .py | 67 | 40.537313 | 83 | 0.749649 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,966 | __main__.py | rembo10_headphones/lib/cheroot/__main__.py |
"""Stub for accessing the Cheroot CLI tool."""
from .cli import main
if __name__ == '__main__':
main()
| 109
|
Python
|
.py
| 4
| 24.75
| 46
| 0.621359
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,967
|
makefile.pyi
|
rembo10_headphones/lib/cheroot/makefile.pyi
|
import io
SOCK_WRITE_BLOCKSIZE: int
class BufferedWriter(io.BufferedWriter):
def write(self, b): ...
class MakeFile_PY2:
bytes_read: int
bytes_written: int
def __init__(self, *args, **kwargs) -> None: ...
def write(self, data) -> None: ...
def send(self, data): ...
def flush(self) -> None: ...
def recv(self, size): ...
class FauxSocket: ...
def read(self, size: int = ...): ...
def readline(self, size: int = ...): ...
def has_data(self): ...
class StreamReader(io.BufferedReader):
bytes_read: int
def __init__(self, sock, mode: str = ..., bufsize=...) -> None: ...
def read(self, *args, **kwargs): ...
def has_data(self): ...
class StreamWriter(BufferedWriter):
bytes_written: int
def __init__(self, sock, mode: str = ..., bufsize=...) -> None: ...
def write(self, val, *args, **kwargs): ...
def MakeFile(sock, mode: str = ..., bufsize=...): ...
| 931
|
Python
|
.py
| 26
| 31.653846
| 71
| 0.585095
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,968
|
cli.py
|
rembo10_headphones/lib/cheroot/cli.py
|
"""Command line tool for starting a Cheroot WSGI/HTTP server instance.
Basic usage:
.. code-block:: shell-session
$ # Start a server on 127.0.0.1:8000 with the default settings
$ # for the WSGI app myapp/wsgi.py:application()
$ cheroot myapp.wsgi
$ # Start a server on 0.0.0.0:9000 with 8 threads
$ # for the WSGI app myapp/wsgi.py:main_app()
$ cheroot myapp.wsgi:main_app --bind 0.0.0.0:9000 --threads 8
$ # Start a server for the cheroot.server.Gateway subclass
$ # myapp/gateway.py:HTTPGateway
$ cheroot myapp.gateway:HTTPGateway
$ # Start a server on the UNIX socket /var/spool/myapp.sock
$ cheroot myapp.wsgi --bind /var/spool/myapp.sock
$ # Start a server on the abstract UNIX socket CherootServer
$ cheroot myapp.wsgi --bind @CherootServer
.. spelling::
cli
"""
import argparse
from importlib import import_module
import os
import sys
import six
from . import server
from . import wsgi
from ._compat import suppress
__metaclass__ = type
class BindLocation:
"""A class for storing the bind location for a Cheroot instance."""
class TCPSocket(BindLocation):
"""TCPSocket."""
def __init__(self, address, port):
"""Initialize.
Args:
address (str): Host name or IP address
port (int): TCP port number
"""
self.bind_addr = address, port
class UnixSocket(BindLocation):
"""UnixSocket."""
def __init__(self, path):
"""Initialize."""
self.bind_addr = path
class AbstractSocket(BindLocation):
"""AbstractSocket."""
def __init__(self, abstract_socket):
"""Initialize."""
self.bind_addr = '\x00{sock_path}'.format(sock_path=abstract_socket)
class Application:
"""Application."""
@classmethod
def resolve(cls, full_path):
"""Read WSGI app/Gateway path string and import application module."""
mod_path, _, app_path = full_path.partition(':')
app = getattr(import_module(mod_path), app_path or 'application')
# suppress the `TypeError` exception, just in case `app` is not a class
with suppress(TypeError):
if issubclass(app, server.Gateway):
return GatewayYo(app)
return cls(app)
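# Resolution sketch (module paths are hypothetical, for illustration only):
#
#     Application.resolve('myapp.wsgi')            # myapp/wsgi.py:application
#     Application.resolve('myapp.wsgi:main_app')   # myapp/wsgi.py:main_app
#     Application.resolve('myapp.gateway:HTTPGateway')  # wrapped in GatewayYo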
def __init__(self, wsgi_app):
"""Initialize."""
if not callable(wsgi_app):
raise TypeError(
'Application must be a callable object or '
'cheroot.server.Gateway subclass',
)
self.wsgi_app = wsgi_app
def server_args(self, parsed_args):
"""Return keyword args for Server class."""
args = {
arg: value
for arg, value in vars(parsed_args).items()
if not arg.startswith('_') and value is not None
}
args.update(vars(self))
return args
def server(self, parsed_args):
"""Server."""
return wsgi.Server(**self.server_args(parsed_args))
class GatewayYo:
"""Gateway."""
def __init__(self, gateway):
"""Init."""
self.gateway = gateway
def server(self, parsed_args):
"""Server."""
server_args = vars(self)
server_args['bind_addr'] = parsed_args.bind_addr # Namespace attribute, not a dict key
if parsed_args.max is not None:
server_args['maxthreads'] = parsed_args.max
if parsed_args.numthreads is not None:
server_args['minthreads'] = parsed_args.numthreads
return server.HTTPServer(**server_args)
def parse_wsgi_bind_location(bind_addr_string):
"""Convert bind address string to a BindLocation."""
# If the string begins with an @ symbol, use an abstract socket.
# This must be checked first; otherwise the urlparse validation below
# would treat //@<value> as a valid URL with hostname "<value>" and
# port None.
if bind_addr_string.startswith('@'):
return AbstractSocket(bind_addr_string[1:])
# try to match an IP/hostname and port
match = six.moves.urllib.parse.urlparse(
'//{addr}'.format(addr=bind_addr_string),
)
try:
addr = match.hostname
port = match.port
if addr is not None or port is not None:
return TCPSocket(addr, port)
except ValueError:
pass
# else, assume a UNIX socket path
return UnixSocket(path=bind_addr_string)
def parse_wsgi_bind_addr(bind_addr_string):
"""Convert bind address string to bind address parameter."""
return parse_wsgi_bind_location(bind_addr_string).bind_addr
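# Conversion sketch (addresses are illustrative):
#
#     parse_wsgi_bind_addr('@CherootServer')       # -> '\x00CherootServer'
#     parse_wsgi_bind_addr('0.0.0.0:9000')         # -> ('0.0.0.0', 9000)
#     parse_wsgi_bind_addr('/var/spool/app.sock')  # -> '/var/spool/app.sock'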
_arg_spec = {
'_wsgi_app': {
'metavar': 'APP_MODULE',
'type': Application.resolve,
'help': 'WSGI application callable or cheroot.server.Gateway subclass',
},
'--bind': {
'metavar': 'ADDRESS',
'dest': 'bind_addr',
'type': parse_wsgi_bind_addr,
'default': '[::1]:8000',
'help': 'Network interface to listen on (default: [::1]:8000)',
},
'--chdir': {
'metavar': 'PATH',
'type': os.chdir,
'help': 'Set the working directory',
},
'--server-name': {
'dest': 'server_name',
'type': str,
'help': 'Web server name to be advertised via Server HTTP header',
},
'--threads': {
'metavar': 'INT',
'dest': 'numthreads',
'type': int,
'help': 'Minimum number of worker threads',
},
'--max-threads': {
'metavar': 'INT',
'dest': 'max',
'type': int,
'help': 'Maximum number of worker threads',
},
'--timeout': {
'metavar': 'INT',
'dest': 'timeout',
'type': int,
'help': 'Timeout in seconds for accepted connections',
},
'--shutdown-timeout': {
'metavar': 'INT',
'dest': 'shutdown_timeout',
'type': int,
'help': 'Time in seconds to wait for worker threads to cleanly exit',
},
'--request-queue-size': {
'metavar': 'INT',
'dest': 'request_queue_size',
'type': int,
'help': 'Maximum number of queued connections',
},
'--accepted-queue-size': {
'metavar': 'INT',
'dest': 'accepted_queue_size',
'type': int,
'help': 'Maximum number of active requests in queue',
},
'--accepted-queue-timeout': {
'metavar': 'INT',
'dest': 'accepted_queue_timeout',
'type': int,
'help': 'Timeout in seconds for putting requests into queue',
},
}
def main():
"""Create a new Cheroot instance with arguments from the command line."""
parser = argparse.ArgumentParser(
description='Start an instance of the Cheroot WSGI/HTTP server.',
)
for arg, spec in _arg_spec.items():
parser.add_argument(arg, **spec)
raw_args = parser.parse_args()
# ensure cwd in sys.path
if '' not in sys.path:
sys.path.insert(0, '')
# create a server based on the arguments provided
raw_args._wsgi_app.server(raw_args).safe_start()
| 6,994
|
Python
|
.py
| 197
| 28.467005
| 79
| 0.609901
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,969
|
connections.py
|
rembo10_headphones/lib/cheroot/connections.py
|
"""Utilities to manage open connections."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import io
import os
import socket
import threading
import time
from . import errors
from ._compat import selectors
from ._compat import suppress
from ._compat import IS_WINDOWS
from .makefile import MakeFile
import six
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
import ctypes.wintypes
_SetHandleInformation = windll.kernel32.SetHandleInformation
_SetHandleInformation.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
]
_SetHandleInformation.restype = ctypes.wintypes.BOOL
except ImportError:
def prevent_socket_inheritance(sock):
"""Stub inheritance prevention.
Dummy function, since neither fcntl nor ctypes is available.
"""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
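# Verification sketch (POSIX only; illustrative, not part of the module):
# after the call, the FD_CLOEXEC bit should be set on the socket's fd.
#
#     import fcntl, socket
#     s = socket.socket()
#     prevent_socket_inheritance(s)
#     assert fcntl.fcntl(s.fileno(), fcntl.F_GETFD) & fcntl.FD_CLOEXEC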
class _ThreadsafeSelector:
"""Thread-safe wrapper around a DefaultSelector.
There are 2 thread contexts in which it may be accessed:
* the selector thread
* one of the worker threads in workers/threadpool.py
The expected read/write patterns are:
* :py:func:`~iter`: selector thread
* :py:meth:`register`: selector thread and threadpool,
via :py:meth:`~cheroot.workers.threadpool.ThreadPool.put`
* :py:meth:`unregister`: selector thread only
Notably, this means :py:class:`_ThreadsafeSelector` never needs to worry
that connections will be removed behind its back.
The lock is held when iterating or modifying the selector but is not
required when :py:meth:`select()ing <selectors.BaseSelector.select>` on it.
"""
def __init__(self):
self._selector = selectors.DefaultSelector()
self._lock = threading.Lock()
def __len__(self):
with self._lock:
return len(self._selector.get_map() or {})
@property
def connections(self):
"""Retrieve connections registered with the selector."""
with self._lock:
mapping = self._selector.get_map() or {}
for _, (_, sock_fd, _, conn) in mapping.items():
yield (sock_fd, conn)
def register(self, fileobj, events, data=None):
"""Register ``fileobj`` with the selector."""
with self._lock:
return self._selector.register(fileobj, events, data)
def unregister(self, fileobj):
"""Unregister ``fileobj`` from the selector."""
with self._lock:
return self._selector.unregister(fileobj)
def select(self, timeout=None):
"""Return socket fd and data pairs from selectors.select call.
Returns entries ready to read in the form:
(socket_file_descriptor, connection)
"""
return (
(key.fd, key.data)
for key, _ in self._selector.select(timeout=timeout)
)
def close(self):
"""Close the selector."""
with self._lock:
self._selector.close()
class ConnectionManager:
"""Class which manages HTTPConnection objects.
This is for connections which are being kept alive for follow-up requests.
"""
def __init__(self, server):
"""Initialize ConnectionManager object.
Args:
server (cheroot.server.HTTPServer): web server object
that uses this ConnectionManager instance.
"""
self._serving = False
self._stop_requested = False
self.server = server
self._selector = _ThreadsafeSelector()
self._selector.register(
server.socket.fileno(),
selectors.EVENT_READ, data=server,
)
def put(self, conn):
"""Put idle connection into the ConnectionManager to be managed.
:param conn: HTTP connection to be managed
:type conn: cheroot.server.HTTPConnection
"""
conn.last_used = time.time()
# If this conn still has data buffered, process it right away;
# otherwise register it with the selector until more data arrives.
if conn.rfile.has_data():
self.server.process_conn(conn)
else:
self._selector.register(
conn.socket.fileno(), selectors.EVENT_READ, data=conn,
)
def _expire(self, threshold):
r"""Expire least recently used connections.
:param threshold: Connections that have not been used within this \
duration (in seconds), are considered expired and \
are closed and removed.
:type threshold: float
This should be called periodically.
"""
# find any connections still registered with the selector
# that have not been active recently enough.
timed_out_connections = [
(sock_fd, conn)
for (sock_fd, conn) in self._selector.connections
if conn != self.server and conn.last_used < threshold
]
for sock_fd, conn in timed_out_connections:
self._selector.unregister(sock_fd)
conn.close()
def stop(self):
"""Stop the selector loop in run() synchronously.
May take up to half a second.
"""
self._stop_requested = True
while self._serving:
time.sleep(0.01)
def run(self, expiration_interval):
"""Run the connections selector indefinitely.
Args:
expiration_interval (float): Interval, in seconds, at which
connections will be checked for expiration.
Connections that are ready to process are submitted via
self.server.process_conn()
Connections submitted for processing must be `put()`
back if they should be examined again for another request.
Can be shut down by calling `stop()`.
"""
self._serving = True
try:
self._run(expiration_interval)
finally:
self._serving = False
def _run(self, expiration_interval):
r"""Run connection handler loop until stop was requested.
:param expiration_interval: Interval, in seconds, at which \
connections will be checked for \
expiration.
:type expiration_interval: float
Use ``expiration_interval`` as ``select()`` timeout
to assure expired connections are closed in time.
On Windows cap the timeout to 0.05 seconds
as ``select()`` does not return when a socket is ready.
"""
last_expiration_check = time.time()
if IS_WINDOWS:
# 0.05 seconds are used as an empirically obtained balance between
# max connection delay and idle system load. Benchmarks show a
# mean processing time per connection of ~0.03 seconds on Linux
# and with 0.01 seconds timeout on Windows:
# https://github.com/cherrypy/cheroot/pull/352
# While this highly depends on the system and hardware, a 0.05 second
# max delay should usually not significantly increase the mean
# time/delay per connection, yet it reduces idle system load by
# cutting the number of socket loops to a fifth of what a
# 0.01 second timeout would produce.
select_timeout = min(expiration_interval, 0.05)
else:
select_timeout = expiration_interval
while not self._stop_requested:
try:
active_list = self._selector.select(timeout=select_timeout)
except OSError:
self._remove_invalid_sockets()
continue
for (sock_fd, conn) in active_list:
if conn is self.server:
# New connection
new_conn = self._from_server_socket(self.server.socket)
if new_conn is not None:
self.server.process_conn(new_conn)
else:
# unregister connection from the selector until the server
# has read from it and returned it via put()
self._selector.unregister(sock_fd)
self.server.process_conn(conn)
now = time.time()
if (now - last_expiration_check) > expiration_interval:
self._expire(threshold=now - self.server.timeout)
last_expiration_check = now
def _remove_invalid_sockets(self):
"""Clean up the resources of any broken connections.
This method attempts to detect any connections in an invalid state,
unregisters them from the selector and closes the file descriptors of
the corresponding network sockets where possible.
"""
invalid_conns = []
for sock_fd, conn in self._selector.connections:
if conn is self.server:
continue
try:
os.fstat(sock_fd)
except OSError:
invalid_conns.append((sock_fd, conn))
for sock_fd, conn in invalid_conns:
self._selector.unregister(sock_fd)
# One of the reasons a socket could cause an error is that it is
# already closed; ignore the socket error if we try to close it at
# this point. socket.error is equivalent to OSError in Py3.
with suppress(socket.error):
conn.close()
def _from_server_socket(self, server_socket): # noqa: C901 # FIXME
try:
s, addr = server_socket.accept()
if self.server.stats['Enabled']:
self.server.stats['Accepts'] += 1
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.server.timeout)
mf = MakeFile
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.server.ssl_adapter is not None:
try:
s, ssl_env = self.server.ssl_adapter.wrap(s)
except errors.NoSSLError:
msg = (
'The client sent a plain HTTP request, but '
'this server only speaks HTTPS on this port.'
)
buf = [
'%s 400 Bad Request\r\n' % self.server.protocol,
'Content-Length: %s\r\n' % len(msg),
'Content-Type: text/plain\r\n\r\n',
msg,
]
sock_to_make = s if not six.PY2 else s._sock
wfile = mf(sock_to_make, 'wb', io.DEFAULT_BUFFER_SIZE)
try:
wfile.write(''.join(buf).encode('ISO-8859-1'))
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
return
if not s:
return
mf = self.server.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.server.timeout)
conn = self.server.ConnectionClass(self.server, s, mf)
if not isinstance(
self.server.bind_addr,
(six.text_type, six.binary_type),
):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
return conn
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error as ex:
if self.server.stats['Enabled']:
self.server.stats['Socket Errors'] += 1
if ex.args[0] in errors.socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See
# https://github.com/cherrypy/cherrypy/issues/707.
return
if ex.args[0] in errors.socket_errors_nonblocking:
# Just try again. See
# https://github.com/cherrypy/cherrypy/issues/479.
return
if ex.args[0] in errors.socket_errors_to_ignore:
# Our socket was closed.
# See https://github.com/cherrypy/cherrypy/issues/686.
return
raise
def close(self):
"""Close all monitored connections."""
for (_, conn) in self._selector.connections:
if conn is not self.server: # server closes its own socket
conn.close()
self._selector.close()
@property
def _num_connections(self):
"""Return the current number of connections.
Includes all connections registered with the selector,
minus one for the server socket, which is always registered
with the selector.
"""
return len(self._selector) - 1
@property
def can_add_keepalive_connection(self):
"""Flag whether it is allowed to add a new keep-alive connection."""
ka_limit = self.server.keep_alive_conn_limit
return ka_limit is None or self._num_connections < ka_limit
| 14,723
|
Python
|
.py
| 336
| 31.738095
| 79
| 0.580204
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,970
|
connections.pyi
|
rembo10_headphones/lib/cheroot/connections.pyi
|
from typing import Any, Iterator, Tuple
def prevent_socket_inheritance(sock) -> None: ...
class _ThreadsafeSelector:
def __init__(self) -> None: ...
def __len__(self): ...
@property
def connections(self) -> Iterator[Tuple[int, Any]]: ...
def register(self, fileobj, events, data: Any | None = ...): ...
def unregister(self, fileobj): ...
def select(self, timeout: Any | None = ...): ...
def close(self) -> None: ...
class ConnectionManager:
server: Any
def __init__(self, server) -> None: ...
def put(self, conn) -> None: ...
def stop(self) -> None: ...
def run(self, expiration_interval) -> None: ...
def close(self) -> None: ...
@property
def can_add_keepalive_connection(self): ...
| 714
|
Python
|
.py
| 20
| 31.35
| 68
| 0.59479
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,971
|
errors.pyi
|
rembo10_headphones/lib/cheroot/errors.pyi
|
from typing import Any, List, Set, Tuple, Type
class MaxSizeExceeded(Exception): ...
class NoSSLError(Exception): ...
class FatalSSLAlert(Exception): ...
def plat_specific_errors(*errnames: str) -> List[int]: ...
socket_error_eintr: List[int]
socket_errors_to_ignore: List[int]
socket_errors_nonblocking: List[int]
acceptable_sock_shutdown_error_codes: Set[int]
acceptable_sock_shutdown_exceptions: Tuple[Type[Exception], ...]
| 413
|
Python
|
.py
| 10
| 40
| 58
| 0.785
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,972
|
__init__.py
|
rembo10_headphones/lib/cheroot/__init__.py
|
"""High-performance, pure-Python HTTP server used by CherryPy."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
import pkg_resources
except ImportError:
pass
try:
__version__ = pkg_resources.get_distribution('cheroot').version
except Exception:
__version__ = 'unknown'
| 334
|
Python
|
.py
| 11
| 27.545455
| 67
| 0.736677
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,973
|
makefile.py
|
rembo10_headphones/lib/cheroot/makefile.py
|
"""Socket file object."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
try:
# prefer slower Python-based io module
import _pyio as io
except ImportError:
# Python 2.6
import io
import six
from . import errors
from ._compat import extract_bytes, memoryview
# Write only 16K at a time to sockets
SOCK_WRITE_BLOCKSIZE = 16384
class BufferedWriter(io.BufferedWriter):
"""Faux file object attached to a socket object."""
def write(self, b):
"""Write bytes to buffer."""
self._checkClosed()
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
self._write_buf.extend(b)
self._flush_unlocked()
return len(b)
def _flush_unlocked(self):
self._checkClosed('flush of closed file')
while self._write_buf:
try:
# ssl sockets only accept 'bytes', not bytearrays,
# so perhaps we should conditionally wrap this for perf?
n = self.raw.write(bytes(self._write_buf))
except io.BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
class MakeFile_PY2(getattr(socket, '_fileobject', object)):
"""Faux file object attached to a socket object."""
def __init__(self, *args, **kwargs):
"""Initialize faux file object."""
self.bytes_read = 0
self.bytes_written = 0
socket._fileobject.__init__(self, *args, **kwargs)
self._refcount = 0
def _reuse(self):
self._refcount += 1
def _drop(self):
if self._refcount < 0:
self.close()
else:
self._refcount -= 1
def write(self, data):
"""Send entire data contents for non-blocking sockets."""
bytes_sent = 0
data_mv = memoryview(data)
payload_size = len(data_mv)
while bytes_sent < payload_size:
try:
bytes_sent += self.send(
data_mv[bytes_sent:bytes_sent + SOCK_WRITE_BLOCKSIZE],
)
except socket.error as e:
if e.args[0] not in errors.socket_errors_nonblocking:
raise
def send(self, data):
"""Send some part of message to the socket."""
bytes_sent = self._sock.send(extract_bytes(data))
self.bytes_written += bytes_sent
return bytes_sent
def flush(self):
"""Write all data from buffer to socket and reset write buffer."""
if self._wbuf:
buffer = ''.join(self._wbuf)
self._wbuf = []
self.write(buffer)
def recv(self, size):
"""Receive message of a size from the socket."""
while True:
try:
data = self._sock.recv(size)
self.bytes_read += len(data)
return data
except socket.error as e:
what = (
e.args[0] not in errors.socket_errors_nonblocking
and e.args[0] not in errors.socket_error_eintr
)
if what:
raise
class FauxSocket:
"""Faux socket with the minimal interface required by pypy."""
def _reuse(self):
pass
_fileobject_uses_str_type = six.PY2 and isinstance(
socket._fileobject(FauxSocket())._rbuf, six.string_types,
)
# FauxSocket is no longer needed
del FauxSocket
if not _fileobject_uses_str_type: # noqa: C901 # FIXME
def read(self, size=-1):
"""Read data from the socket to buffer."""
# Use max() to disallow tiny reads in a loop, as they are very
# inefficient.
# We never leave read() with any leftover data from a new recv()
# call in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned
# by recv() minimizes memory usage and fragmentation that occurs
# when rbufsize is large compared to the typical return value of
# recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(rbufsize)
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and
# return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return rv
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
data = self.recv(left)
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, 'recv(%d) returned %d bytes' % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
# assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
"""Read line from the socket to buffer."""
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
data = None
recv = self.recv
while data != '\n':
data = recv(1)
if not data:
break
buffers.append(data)
return ''.join(buffers)
buf.seek(0, 2) # seek end
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(self._rbufsize)
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes
# first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = io.BytesIO()
self._rbuf.write(buf.read())
return rv
# reset _rbuf. we consume it via buf.
self._rbuf = io.BytesIO()
while True:
data = self.recv(self._rbufsize)
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when
# returning a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
# assert buf_len == buf.tell()
return buf.getvalue()
def has_data(self):
"""Return true if there is buffered data to read."""
return bool(self._rbuf.getvalue())
else:
def read(self, size=-1):
"""Read data from the socket to buffer."""
if size < 0:
# Read until EOF
buffers = [self._rbuf]
self._rbuf = ''
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
return ''.join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
data = self._rbuf
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ''
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return ''.join(buffers)
def readline(self, size=-1):
"""Read line from the socket to buffer."""
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ''
buffers = []
while data != '\n':
data = self.recv(1)
if not data:
break
buffers.append(data)
return ''.join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ''
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return ''.join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes
# first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ''
while True:
data = self.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return ''.join(buffers)
def has_data(self):
"""Return true if there is buffered data to read."""
return bool(self._rbuf)
if not six.PY2:
class StreamReader(io.BufferedReader):
"""Socket stream reader."""
def __init__(self, sock, mode='r', bufsize=io.DEFAULT_BUFFER_SIZE):
"""Initialize socket stream reader."""
super().__init__(socket.SocketIO(sock, mode), bufsize)
self.bytes_read = 0
def read(self, *args, **kwargs):
"""Capture bytes read."""
val = super().read(*args, **kwargs)
self.bytes_read += len(val)
return val
def has_data(self):
"""Return true if there is buffered data to read."""
return len(self._read_buf) > self._read_pos
class StreamWriter(BufferedWriter):
"""Socket stream writer."""
def __init__(self, sock, mode='w', bufsize=io.DEFAULT_BUFFER_SIZE):
"""Initialize socket stream writer."""
super().__init__(socket.SocketIO(sock, mode), bufsize)
self.bytes_written = 0
def write(self, val, *args, **kwargs):
"""Capture bytes written."""
res = super().write(val, *args, **kwargs)
self.bytes_written += len(val)
return res
def MakeFile(sock, mode='r', bufsize=io.DEFAULT_BUFFER_SIZE):
"""File object attached to a socket object."""
cls = StreamReader if 'r' in mode else StreamWriter
return cls(sock, mode, bufsize)
else:
StreamReader = StreamWriter = MakeFile = MakeFile_PY2
| 16,368
|
Python
|
.py
| 403
| 24.07196
| 78
| 0.443628
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,974
|
testing.pyi
|
rembo10_headphones/lib/cheroot/testing.pyi
|
from typing import Any, Iterator, Optional, TypeVar
from .server import HTTPServer
from .wsgi import Server
T = TypeVar('T', bound=HTTPServer)
EPHEMERAL_PORT: int
NO_INTERFACE: Optional[str]
ANY_INTERFACE_IPV4: str
ANY_INTERFACE_IPV6: str
config: dict
def cheroot_server(server_factory: T) -> Iterator[T]: ...
def wsgi_server() -> Iterator[Server]: ...
def native_server() -> Iterator[HTTPServer]: ...
def get_server_client(server) -> Any: ...
| 448
|
Python
|
.py
| 13
| 33.153846
| 57
| 0.75174
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,975
|
testing.py
|
rembo10_headphones/lib/cheroot/testing.py
|
"""Pytest fixtures and other helpers for doing testing by end-users."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from contextlib import closing
import errno
import socket
import threading
import time
import pytest
from six.moves import http_client
import cheroot.server
from cheroot.test import webtest
import cheroot.wsgi
EPHEMERAL_PORT = 0
NO_INTERFACE = None # Using this or '' will cause an exception
ANY_INTERFACE_IPV4 = '0.0.0.0'
ANY_INTERFACE_IPV6 = '::'
config = {
cheroot.wsgi.Server: {
'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT),
'wsgi_app': None,
},
cheroot.server.HTTPServer: {
'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT),
'gateway': cheroot.server.Gateway,
},
}
def cheroot_server(server_factory):
"""Set up and tear down a Cheroot server instance."""
conf = config[server_factory].copy()
bind_port = conf.pop('bind_addr')[-1]
for interface in ANY_INTERFACE_IPV6, ANY_INTERFACE_IPV4:
try:
actual_bind_addr = (interface, bind_port)
httpserver = server_factory( # create it
bind_addr=actual_bind_addr,
**conf
)
except OSError:
pass
else:
break
httpserver.shutdown_timeout = 0 # Speed-up tests teardown
threading.Thread(target=httpserver.safe_start).start() # spawn it
while not httpserver.ready: # wait until fully initialized and bound
time.sleep(0.1)
yield httpserver
httpserver.stop() # destroy it
@pytest.fixture
def wsgi_server():
"""Set up and tear down a Cheroot WSGI server instance."""
for srv in cheroot_server(cheroot.wsgi.Server):
yield srv
@pytest.fixture
def native_server():
"""Set up and tear down a Cheroot HTTP server instance."""
for srv in cheroot_server(cheroot.server.HTTPServer):
yield srv
class _TestClient:
def __init__(self, server):
self._interface, self._host, self._port = _get_conn_data(
server.bind_addr,
)
self.server_instance = server
self._http_connection = self.get_connection()
def get_connection(self):
name = '{interface}:{port}'.format(
interface=self._interface,
port=self._port,
)
conn_cls = (
http_client.HTTPConnection
if self.server_instance.ssl_adapter is None else
http_client.HTTPSConnection
)
return conn_cls(name)
def request(
self, uri, method='GET', headers=None, http_conn=None,
protocol='HTTP/1.1',
):
return webtest.openURL(
uri, method=method,
headers=headers,
host=self._host, port=self._port,
http_conn=http_conn or self._http_connection,
protocol=protocol,
)
def __getattr__(self, attr_name):
def _wrapper(uri, **kwargs):
http_method = attr_name.upper()
return self.request(uri, method=http_method, **kwargs)
return _wrapper
def _probe_ipv6_sock(interface):
# Alternate way is to check IPs on interfaces using glibc, like:
# github.com/Gautier/minifail/blob/master/minifail/getifaddrs.py
try:
with closing(socket.socket(family=socket.AF_INET6)) as sock:
sock.bind((interface, 0))
except (OSError, socket.error) as sock_err:
# In Python 3 socket.error is an alias for OSError
# In Python 2 socket.error is a subclass of IOError
if sock_err.errno != errno.EADDRNOTAVAIL:
raise
else:
return True
return False
def _get_conn_data(bind_addr):
if isinstance(bind_addr, tuple):
host, port = bind_addr
else:
host, port = bind_addr, 0
interface = webtest.interface(host)
if ':' in interface and not _probe_ipv6_sock(interface):
interface = '127.0.0.1'
if ':' in host:
host = interface
return interface, host, port
def get_server_client(server):
"""Create and return a test client for the given server."""
return _TestClient(server)
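# End-user usage sketch (assumes pytest picks up these fixtures; the app
# and names are hypothetical):
#
#     from cheroot.testing import get_server_client
#
#     def test_root(wsgi_server):
#         wsgi_server.wsgi_app = my_wsgi_app  # hypothetical WSGI callable
#         client = get_server_client(wsgi_server)
#         response = client.get('/')  # tuple shape follows webtest.openURL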
| 4,172
|
Python
|
.py
| 120
| 27.691667
| 73
| 0.641204
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,976
|
cli.pyi
|
rembo10_headphones/lib/cheroot/cli.pyi
|
from typing import Any
class BindLocation: ...
class TCPSocket(BindLocation):
bind_addr: Any
def __init__(self, address, port) -> None: ...
class UnixSocket(BindLocation):
bind_addr: Any
def __init__(self, path) -> None: ...
class AbstractSocket(BindLocation):
bind_addr: Any
def __init__(self, abstract_socket) -> None: ...
class Application:
@classmethod
def resolve(cls, full_path): ...
wsgi_app: Any
def __init__(self, wsgi_app) -> None: ...
def server_args(self, parsed_args): ...
def server(self, parsed_args): ...
class GatewayYo:
gateway: Any
def __init__(self, gateway) -> None: ...
def server(self, parsed_args): ...
def parse_wsgi_bind_location(bind_addr_string: str): ...
def parse_wsgi_bind_addr(bind_addr_string: str): ...
def main() -> None: ...
| 828
|
Python
|
.py
| 25
| 29.44
| 56
| 0.646985
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,977
|
wsgi.py
|
rembo10_headphones/lib/cheroot/wsgi.py
|
"""This class holds Cheroot WSGI server implementation.
Simplest example on how to use this server::
from cheroot import wsgi
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return [b'Hello world!']
addr = '0.0.0.0', 8070
server = wsgi.Server(addr, my_crazy_app)
server.start()
The Cheroot WSGI server can serve as many WSGI applications
as you want in one instance by using a PathInfoDispatcher::
path_map = {
'/': my_crazy_app,
'/blog': my_blog_app,
}
d = wsgi.PathInfoDispatcher(path_map)
server = wsgi.Server(addr, d)
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import sys
import six
from six.moves import filter
from . import server
from .workers import threadpool
from ._compat import ntob, bton
class Server(server.HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(
self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5,
accepted_queue_size=-1, accepted_queue_timeout=10,
peercreds_enabled=False, peercreds_resolve_enabled=False,
):
"""Initialize WSGI Server instance.
Args:
bind_addr (tuple): network interface to listen to
wsgi_app (callable): WSGI application callable
numthreads (int): number of threads for WSGI thread pool
server_name (str): web server name to be advertised via
Server HTTP header
max (int): maximum number of worker threads
request_queue_size (int): the 'backlog' arg to
socket.listen(); max queued connections
timeout (int): the timeout in seconds for accepted connections
shutdown_timeout (int): the total time, in seconds, to
wait for worker threads to cleanly exit
accepted_queue_size (int): maximum number of active
requests in queue
accepted_queue_timeout (int): timeout for putting request
into queue
"""
super(Server, self).__init__(
bind_addr,
gateway=wsgi_gateways[self.wsgi_version],
server_name=server_name,
peercreds_enabled=peercreds_enabled,
peercreds_resolve_enabled=peercreds_resolve_enabled,
)
self.wsgi_app = wsgi_app
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.requests = threadpool.ThreadPool(
self, min=numthreads or 1, max=max,
accepted_queue_size=accepted_queue_size,
accepted_queue_timeout=accepted_queue_timeout,
)
@property
def numthreads(self):
"""Set minimum number of threads."""
return self.requests.min
@numthreads.setter
def numthreads(self, value):
self.requests.min = value
class Gateway(server.Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
"""Initialize WSGI Gateway instance with request.
Args:
req (HTTPRequest): current HTTP request
"""
super(Gateway, self).__init__(req)
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
@classmethod
def gateway_map(cls):
"""Create a mapping of gateways and their versions.
Returns:
dict[tuple[int,int],class]: map of gateway version and
corresponding class
"""
return {gw.version: gw for gw in cls.__subclasses__()}
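# Note: only direct subclasses are enumerated, so with the classes in
# this module the map is {(1, 0): Gateway_10}; Gateway_u0 subclasses
# Gateway_10 rather than Gateway and is therefore not picked up here.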
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version."""
raise NotImplementedError # pragma: no cover
def respond(self):
"""Process the current request.
From :pep:`333`:
The start_response callable must not actually transmit
the response headers. Instead, it must store them for the
server or gateway to transmit only after the first
iteration of the application return value that yields
a NON-EMPTY string, or upon the application's first
invocation of the write() callable.
"""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in filter(None, response):
if not isinstance(chunk, six.binary_type):
raise ValueError('WSGI Applications must yield bytes')
self.write(chunk)
finally:
# Send headers if not already sent
self.req.ensure_headers_sent()
if hasattr(response, 'close'):
response.close()
def start_response(self, status, headers, exc_info=None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise RuntimeError(
'WSGI start_response called a second '
'time with no exc_info.',
)
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
six.reraise(*exc_info)
finally:
exc_info = None
self.req.status = self._encode_status(status)
for k, v in headers:
if not isinstance(k, str):
raise TypeError(
'WSGI response header key %r is not of type str.' % k,
)
if not isinstance(v, str):
raise TypeError(
'WSGI response header value %r is not of type str.' % v,
)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
out_header = ntob(k), ntob(v)
self.req.outheaders.append(out_header)
return self.write
@staticmethod
def _encode_status(status):
"""Cast status to bytes representation of current Python version.
According to :pep:`3333`, when using Python 3, the response status
and headers must be bytes masquerading as Unicode; that is, they
must be of type "str" but are restricted to code points in the
"Latin-1" set.
"""
if six.PY2:
return status
if not isinstance(status, str):
raise TypeError('WSGI response status is not of type str.')
return status.encode('ISO-8859-1')
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise RuntimeError('WSGI write called before start_response.')
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response(
'500 Internal Server Error',
'The requested resource returned more bytes than the '
'declared Content-Length.',
)
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
self.req.ensure_headers_sent()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
'Response body exceeds the declared Content-Length.',
)
class Gateway_10(Gateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
version = 1, 0
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version."""
req = self.req
req_conn = req.conn
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': bton(req.path),
'QUERY_STRING': bton(req.qs),
'REMOTE_ADDR': req_conn.remote_addr or '',
'REMOTE_PORT': str(req_conn.remote_port or ''),
'REQUEST_METHOD': bton(req.method),
'REQUEST_URI': bton(req.uri),
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': bton(req.request_protocol),
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.input_terminated': bool(req.chunked_read),
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': bton(req.scheme),
'wsgi.version': self.version,
}
if isinstance(req.server.bind_addr, six.string_types):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env['SERVER_PORT'] = ''
try:
env['X_REMOTE_PID'] = str(req_conn.peer_pid)
env['X_REMOTE_UID'] = str(req_conn.peer_uid)
env['X_REMOTE_GID'] = str(req_conn.peer_gid)
env['X_REMOTE_USER'] = str(req_conn.peer_user)
env['X_REMOTE_GROUP'] = str(req_conn.peer_group)
env['REMOTE_USER'] = env['X_REMOTE_USER']
except RuntimeError:
"""Unable to retrieve peer creds data.
Unsupported by current kernel or socket error happened, or
unsupported socket type, or disabled.
"""
else:
env['SERVER_PORT'] = str(req.server.bind_addr[1])
# Request headers
env.update(
(
'HTTP_{header_name!s}'.
format(header_name=bton(k).upper().replace('-', '_')),
bton(v),
)
for k, v in req.inheaders.items()
)
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop('HTTP_CONTENT_TYPE', None)
if ct is not None:
env['CONTENT_TYPE'] = ct
cl = env.pop('HTTP_CONTENT_LENGTH', None)
if cl is not None:
env['CONTENT_LENGTH'] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class Gateway_u0(Gateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses Unicode for keys
and values in both Python 2 and Python 3.
"""
version = 'u', 0
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version."""
req = self.req
env_10 = super(Gateway_u0, self).get_environ()
env = dict(map(self._decode_key, env_10.items()))
# Request-URI
enc = env.setdefault(six.u('wsgi.url_encoding'), six.u('utf-8'))
try:
env['PATH_INFO'] = req.path.decode(enc)
env['QUERY_STRING'] = req.qs.decode(enc)
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env['wsgi.url_encoding'] = 'ISO-8859-1'
env['PATH_INFO'] = env_10['PATH_INFO']
env['QUERY_STRING'] = env_10['QUERY_STRING']
env.update(map(self._decode_value, env.items()))
return env
@staticmethod
def _decode_key(item):
k, v = item
if six.PY2:
k = k.decode('ISO-8859-1')
return k, v
@staticmethod
def _decode_value(item):
k, v = item
skip_keys = 'REQUEST_URI', 'wsgi.input'
if not six.PY2 or not isinstance(v, bytes) or k in skip_keys:
return k, v
return k, v.decode('ISO-8859-1')
wsgi_gateways = Gateway.gateway_map()
class PathInfoDispatcher:
"""A WSGI dispatcher for dispatch based on the PATH_INFO."""
def __init__(self, apps):
"""Initialize path info WSGI app dispatcher.
Args:
apps (dict[str,object]|list[tuple[str,object]]): URI prefix
and WSGI app pairs
"""
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
def by_path_len(app):
return len(app[0])
apps.sort(key=by_path_len, reverse=True)
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip('/'), a) for p, a in apps]
def __call__(self, environ, start_response):
"""Process incoming WSGI request.
Ref: :pep:`3333`
Args:
environ (Mapping): a dict containing WSGI environment variables
start_response (callable): function, which sets response
status and headers
Returns:
list[bytes]: iterable containing bytes to be returned in
HTTP response body
"""
path = environ['PATH_INFO'] or '/'
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith('{path!s}/'.format(path=p)) or path == p:
environ = environ.copy()
environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + p
environ['PATH_INFO'] = path[len(p):]
return app(environ, start_response)
start_response(
'404 Not Found', [
('Content-Type', 'text/plain'),
('Content-Length', '0'),
],
)
return ['']
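# Dispatch sketch (paths are illustrative): with apps
# {'/': root_app, '/blog': blog_app}, a request for '/blog/post/1' is
# routed to blog_app with SCRIPT_NAME='/blog' and PATH_INFO='/post/1',
# while '/about' falls through to root_app with PATH_INFO='/about'.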
# compatibility aliases
globals().update(
WSGIServer=Server,
WSGIGateway=Gateway,
WSGIGateway_u0=Gateway_u0,
WSGIGateway_10=Gateway_10,
WSGIPathInfoDispatcher=PathInfoDispatcher,
)
| 14,870
|
Python
|
.py
| 356
| 31.154494
| 79
| 0.585313
|
rembo10/headphones
| 3,370
| 601
| 527
|
GPL-3.0
|
9/5/2024, 5:10:38 PM (Europe/Amsterdam)
|
8,978
|
server.py
|
rembo10_headphones/lib/cheroot/server.py
|
"""
A high-speed, production ready, thread pooled, generic HTTP server.
For those of you wanting to understand the internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = HTTPServer(...)
server.start()
-> serve()
while ready:
_connections.run()
while not stop_requested:
child = socket.accept() # blocks until a request comes in
conn = HTTPConnection(child, ...)
server.process_conn(conn) # adds conn to threadpool
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
For running a server you can invoke :func:`start() <HTTPServer.start()>` (it
will run the server forever) or invoke :func:`prepare()
<HTTPServer.prepare()>` and :func:`serve() <HTTPServer.serve()>` like this::
server = HTTPServer(...)
server.prepare()
try:
threading.Thread(target=server.serve).start()
# waiting/detecting some appropriate stop condition here
...
finally:
server.stop()
And now for a trivial doctest to exercise the test suite
.. testsetup::
from cheroot.server import HTTPServer
>>> 'HTTPServer' in globals()
True
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import io
import re
import email.utils
import socket
import sys
import time
import traceback as traceback_
import logging
import platform
import contextlib
import threading
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
import six
from six.moves import queue
from six.moves import urllib
from . import connections, errors, __version__
from ._compat import bton, ntou
from ._compat import IS_PPC
from .workers import threadpool
from .makefile import MakeFile, StreamWriter
__all__ = (
'HTTPRequest', 'HTTPConnection', 'HTTPServer',
'HeaderReader', 'DropUnderscoreHeaderReader',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'Gateway', 'get_ssl_adapter_class',
)
IS_WINDOWS = platform.system() == 'Windows'
"""Flag indicating whether the app is running under Windows."""
IS_GAE = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
"""Flag indicating whether the app is running in GAE env.
Ref:
https://cloud.google.com/appengine/docs/standard/python/tools
/using-local-server#detecting_application_runtime_environment
"""
IS_UID_GID_RESOLVABLE = not IS_WINDOWS and not IS_GAE
"""Indicates whether UID/GID resolution's available under current platform."""
if IS_UID_GID_RESOLVABLE:
try:
import grp
import pwd
except ImportError:
"""Unavailable in the current env.
This shouldn't be happening normally.
All of the known cases are excluded via the if clause.
"""
IS_UID_GID_RESOLVABLE = False
grp, pwd = None, None
import struct
if IS_WINDOWS and hasattr(socket, 'AF_INET6'):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
if not hasattr(socket, 'SO_PEERCRED'):
"""
NOTE: the value for SO_PEERCRED can be architecture specific, in
which case the getsockopt() will hopefully fail. The arch
specific value could be derived from platform.processor()
"""
socket.SO_PEERCRED = 21 if IS_PPC else 17
LF = b'\n'
CRLF = b'\r\n'
TAB = b'\t'
SPACE = b' '
COLON = b':'
SEMICOLON = b';'
EMPTY = b''
ASTERISK = b'*'
FORWARD_SLASH = b'/'
QUOTED_SLASH = b'%2F'
QUOTED_SLASH_REGEX = re.compile(b''.join((b'(?i)', QUOTED_SLASH)))
_STOPPING_FOR_INTERRUPT = object() # sentinel used during shutdown
comma_separated_headers = [
b'Accept', b'Accept-Charset', b'Accept-Encoding',
b'Accept-Language', b'Accept-Ranges', b'Allow', b'Cache-Control',
b'Connection', b'Content-Encoding', b'Content-Language', b'Expect',
b'If-Match', b'If-None-Match', b'Pragma', b'Proxy-Authenticate', b'TE',
b'Trailer', b'Transfer-Encoding', b'Upgrade', b'Vary', b'Via', b'Warning',
b'WWW-Authenticate',
]
if not hasattr(logging, 'statistics'):
logging.statistics = {}
class HeaderReader:
"""Object for reading headers from an HTTP request.
Interface and default implementation.
"""
def __call__(self, rfile, hdict=None): # noqa: C901 # FIXME
"""
Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP
spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
if line[:1] in (SPACE, TAB): # slice, so the check also works on Python 3 bytes
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError('Illegal header line.')
v = v.strip()
k = self._transform_key(k)
hname = k
if not self._allow_header(k):
continue
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = b', '.join((existing, v))
hdict[hname] = v
return hdict
def _allow_header(self, key_name):
return True
def _transform_key(self, key_name):
# TODO: what about TE and WWW-Authenticate?
return key_name.strip().title()
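# Reading sketch (request bytes are illustrative, not from the module):
#
#     import io
#     raw = io.BytesIO(b'Host: example.org\r\nAccept: a\r\nAccept: b\r\n\r\n')
#     HeaderReader()(raw)
#     # -> {b'Host': b'example.org', b'Accept': b'a, b'} (Accept is folded
#     #    with a comma, since it is listed in comma_separated_headers)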
class DropUnderscoreHeaderReader(HeaderReader):
"""Custom HeaderReader to exclude any headers with underscores in them."""
def _allow_header(self, key_name):
orig = super(DropUnderscoreHeaderReader, self)._allow_header(key_name)
return orig and '_' not in key_name
class SizeCheckWrapper:
"""Wraps a file-like object, raising MaxSizeExceeded if too large.
:param rfile: ``file`` of a limited size
:param int maxlen: maximum length of the file being read
"""
def __init__(self, rfile, maxlen):
"""Initialize SizeCheckWrapper instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded()
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param size: amount of data to read
:type size: int
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
:param size: minimum amount of data to read
:type size: int
:returns: one line from ``rfile``
:rtype: bytes
"""
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line!
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://github.com/cherrypy/cherrypy/issues/421
if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param sizehint: hint of minimum amount of data to read
:type sizehint: int
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
next = __next__
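# Limit sketch (sizes are illustrative):
#
#     import io
#     wrapped = SizeCheckWrapper(io.BytesIO(b'x' * 100), maxlen=10)
#     wrapped.read(8)   # fine, bytes_read == 8
#     wrapped.read(8)   # raises errors.MaxSizeExceeded (8 + 8 > 10)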
class KnownLengthRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
:param rfile: ``file`` of a known size
:param int content_length: length of the file being read
"""
def __init__(self, rfile, content_length):
"""Initialize KnownLengthRFile instance."""
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param size: amount of data to read
:type size: int
:rtype: bytes
:returns: chunk from ``rfile``, limited by size if specified
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
        :param size: maximum amount of data to read
:type size: int
:returns: one line from ``rfile``
:rtype: bytes
"""
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param sizehint: hint of minimum amount of data to read
:type sizehint: int
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
def __iter__(self):
"""Return file iterator."""
return self
def __next__(self):
"""Generate next file chunk."""
data = next(self.rfile)
self.remaining -= len(data)
return data
next = __next__
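# A minimal sketch (illustrative only) of the wrapper above: it serves at
# most ``content_length`` bytes and then returns empty bytes forever.
def _demo_known_length_rfile():  # pragma: no cover -- illustrative only
    import io
    rfile = KnownLengthRFile(io.BytesIO(b'abcdef-and-extra'), 6)
    assert rfile.read() == b'abcdef'  # exactly content_length bytes
    assert rfile.read() == b''  # exhausted: empty result, not an error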
class ChunkedRFile:
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
:param rfile: file encoded with the 'chunked' transfer encoding
:param int maxlen: maximum length of the file being read
:param int bufsize: size of the buffer used to read the file
"""
def __init__(self, rfile, maxlen, bufsize=8192):
"""Initialize ChunkedRFile instance."""
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise errors.MaxSizeExceeded(
'Request Entity Too Large', self.maxlen,
)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError(
'Bad chunked transfer size: {chunk_size!r}'.
format(chunk_size=chunk_size),
)
if chunk_size <= 0:
self.closed = True
return
# if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError('Request Entity Too Large')
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
'got ' + repr(crlf) + ')',
)
def read(self, size=None):
"""Read a chunk from ``rfile`` buffer and return it.
:param size: amount of data to read
:type size: int
:returns: chunk from ``rfile``, limited by size if specified
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
else:
data += self.buffer
self.buffer = EMPTY
def readline(self, size=None):
"""Read a single line from ``rfile`` buffer and return it.
        :param size: maximum amount of data to read
:type size: int
:returns: one line from ``rfile``
:rtype: bytes
"""
data = EMPTY
if size == 0:
return data
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
            newline_pos = self.buffer.find(LF)
            if size:
                if newline_pos == -1:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    # Consume up to and including the newline (capped by
                    # size); returning once the LF is consumed avoids
                    # re-scanning the same buffer position forever.
                    remaining = min(size - len(data), newline_pos + 1)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                    if data.endswith(LF):
                        return data
            else:
                if newline_pos == -1:
                    data += self.buffer
                    self.buffer = EMPTY
                else:
                    # Include the newline itself and return the full line;
                    # leaving it in the buffer would loop here forever.
                    newline_pos += 1
                    data += self.buffer[:newline_pos]
                    self.buffer = self.buffer[newline_pos:]
                    return data
def readlines(self, sizehint=0):
"""Read all lines from ``rfile`` buffer and return them.
:param sizehint: hint of minimum amount of data to read
:type sizehint: int
:returns: lines of bytes read from ``rfile``
:rtype: list[bytes]
"""
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
"""Read HTTP headers and yield them.
Returns:
Generator: yields CRLF separated lines.
"""
if not self.closed:
raise ValueError(
'Cannot read trailers until the request body has been read.',
)
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError('Illegal end of headers.')
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError('Request Entity Too Large')
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError('HTTP requires CRLF terminators')
yield line
def close(self):
"""Release resources allocated for ``rfile``."""
self.rfile.close()
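# A minimal decoding sketch (illustrative only): each chunk is a hex size
# line, CRLF, payload, CRLF; a zero-size chunk terminates the body.
def _demo_chunked_rfile():  # pragma: no cover -- illustrative only
    import io
    wire = b'5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n'
    rfile = ChunkedRFile(io.BytesIO(wire), maxlen=0)  # maxlen=0: no limit
    assert rfile.read() == b'hello world'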
class HTTPRequest:
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
header_reader = HeaderReader()
"""
A HeaderReader instance or compatible reader.
"""
def __init__(self, server, conn, proxy_mode=False, strict_mode=True):
"""Initialize HTTP request container instance.
Args:
server (HTTPServer): web server object receiving this request
conn (HTTPConnection): HTTP connection object for this request
proxy_mode (bool): whether this HTTPServer should behave as a PROXY
server for certain requests
strict_mode (bool): whether we should return a 400 Bad Request when
we encounter a request that a HTTP compliant client should not be
making
"""
self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = b'http'
if self.server.ssl_adapter is not None:
self.scheme = b'https'
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ''
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
self.proxy_mode = proxy_mode
self.strict_mode = strict_mode
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(
self.conn.rfile,
self.server.max_request_header_size,
)
try:
success = self.read_request_line()
except errors.MaxSizeExceeded:
self.simple_response(
'414 Request-URI Too Long',
'The Request-URI sent with the request exceeds the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
try:
success = self.read_request_headers()
except errors.MaxSizeExceeded:
self.simple_response(
'413 Request Entity Too Large',
'The headers sent with the request exceed the maximum '
'allowed bytes.',
)
return
else:
if not success:
return
self.ready = True
def read_request_line(self): # noqa: C901 # FIXME
"""Read and parse first line of the HTTP request.
Returns:
bool: True if the request line is valid or False if it's malformed.
"""
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response(
'400 Bad Request', 'HTTP requires CRLF terminators',
)
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
if not req_protocol.startswith(b'HTTP/'):
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad protocol',
)
return False
rp = req_protocol[5:].split(b'.', 1)
if len(rp) != 2:
self.simple_response(
'400 Bad Request', 'Malformed Request-Line: bad version',
)
return False
            rp = tuple(map(int, rp))  # Major.Minor must be treated as integers
if rp > (1, 1):
self.simple_response(
'505 HTTP Version Not Supported', 'Cannot fulfill request',
)
return False
except (ValueError, IndexError):
self.simple_response('400 Bad Request', 'Malformed Request-Line')
return False
self.uri = uri
self.method = method.upper()
if self.strict_mode and method != self.method:
resp = (
'Malformed method name: According to RFC 2616 '
'(section 5.1.1) and its successors '
'RFC 7230 (section 3.1.1) and RFC 7231 (section 4.1) '
'method names are case-sensitive and uppercase.'
)
self.simple_response('400 Bad Request', resp)
return False
try:
if six.PY2: # FIXME: Figure out better way to do this
# Ref: https://stackoverflow.com/a/196392/595220 (like this?)
"""This is a dummy check for unicode in URI."""
ntou(bton(uri, 'ascii'), 'ascii')
scheme, authority, path, qs, fragment = urllib.parse.urlsplit(uri)
except UnicodeError:
self.simple_response('400 Bad Request', 'Malformed Request-URI')
return False
uri_is_absolute_form = (scheme or authority)
if self.method == b'OPTIONS':
# TODO: cover this branch with tests
path = (
uri
# https://tools.ietf.org/html/rfc7230#section-5.3.4
if (self.proxy_mode and uri_is_absolute_form)
else path
)
elif self.method == b'CONNECT':
# TODO: cover this branch with tests
if not self.proxy_mode:
self.simple_response('405 Method Not Allowed')
return False
            # `urlsplit()` above parses "example.com:3128" as path part of URI.
            # This is a workaround that makes it detect the netloc correctly.
uri_split = urllib.parse.urlsplit(b''.join((b'//', uri)))
_scheme, _authority, _path, _qs, _fragment = uri_split
_port = EMPTY
try:
_port = uri_split.port
except ValueError:
pass
            # FIXME: use third-party validation to make checks against RFC;
            # the validation doesn't take into account that urllib parses
            # invalid URIs without raising errors
# https://tools.ietf.org/html/rfc7230#section-5.3.3
invalid_path = (
_authority != uri
or not _port
or any((_scheme, _path, _qs, _fragment))
)
if invalid_path:
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI: request-'
'target must match authority-form.',
)
return False
authority = path = _authority
scheme = qs = fragment = EMPTY
else:
disallowed_absolute = (
self.strict_mode
and not self.proxy_mode
and uri_is_absolute_form
)
if disallowed_absolute:
# https://tools.ietf.org/html/rfc7230#section-5.3.2
# (absolute form)
"""Absolute URI is only allowed within proxies."""
self.simple_response(
'400 Bad Request',
'Absolute URI not allowed if server is not a proxy.',
)
return False
invalid_path = (
self.strict_mode
and not uri.startswith(FORWARD_SLASH)
and not uri_is_absolute_form
)
if invalid_path:
# https://tools.ietf.org/html/rfc7230#section-5.3.1
# (origin_form) and
"""Path should start with a forward slash."""
resp = (
'Invalid path in Request-URI: request-target must contain '
'origin-form which starts with absolute-path (URI '
'starting with a slash "/").'
)
self.simple_response('400 Bad Request', resp)
return False
if fragment:
self.simple_response(
'400 Bad Request',
'Illegal #fragment in Request-URI.',
)
return False
if path is None:
# FIXME: It looks like this case cannot happen
self.simple_response(
'400 Bad Request',
'Invalid path in Request-URI.',
)
return False
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." https://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not
# "/this/path".
try:
# TODO: Figure out whether exception can really happen here.
# It looks like it's caught on urlsplit() call above.
atoms = [
urllib.parse.unquote_to_bytes(x)
for x in QUOTED_SLASH_REGEX.split(path)
]
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
path = QUOTED_SLASH.join(atoms)
if not path.startswith(FORWARD_SLASH):
path = FORWARD_SLASH + path
if scheme is not EMPTY:
self.scheme = scheme
self.authority = authority
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
sp = int(self.server.protocol[5]), int(self.server.protocol[7])
if sp[0] != rp[0]:
self.simple_response('505 HTTP Version Not Supported')
return False
self.request_protocol = req_protocol
self.response_protocol = 'HTTP/%s.%s' % min(rp, sp)
return True
def read_request_headers(self): # noqa: C901 # FIXME
"""Read ``self.rfile`` into ``self.inheaders``.
        Ref: :py:attr:`self.inheaders <HTTPRequest.inheaders>`.
:returns: success status
:rtype: bool
"""
# then all the http headers
try:
self.header_reader(self.rfile, self.inheaders)
except ValueError as ex:
self.simple_response('400 Bad Request', ex.args[0])
return False
mrbs = self.server.max_request_body_size
try:
cl = int(self.inheaders.get(b'Content-Length', 0))
except ValueError:
self.simple_response(
'400 Bad Request',
'Malformed Content-Length Header.',
)
return False
if mrbs and cl > mrbs:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the maximum '
'allowed bytes.',
)
return False
# Persistent connection support
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1
if self.inheaders.get(b'Connection', b'') == b'close':
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get(b'Connection', b'') != b'Keep-Alive':
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == 'HTTP/1.1':
te = self.inheaders.get(b'Transfer-Encoding')
if te:
te = [x.strip().lower() for x in te.split(b',') if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == b'chunked':
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response('501 Unimplemented')
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get(b'Expect', b'') == b'100-continue':
# Don't use simple_response here, because it emits headers
# we don't want. See
# https://github.com/cherrypy/cherrypy/issues/951
msg = b''.join((
self.server.protocol.encode('ascii'), SPACE, b'100 Continue',
CRLF, CRLF,
))
try:
self.conn.wfile.write(msg)
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
return True
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b'Content-Length', 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response(
'413 Request Entity Too Large',
'The entity sent with the request exceeds the '
'maximum allowed bytes.',
)
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
self.ready and self.ensure_headers_sent()
if self.chunked_write:
self.conn.wfile.write(b'0\r\n\r\n')
def simple_response(self, status, msg=''):
"""Write a simple response back to the client."""
status = str(status)
proto_status = '%s %s\r\n' % (self.server.protocol, status)
content_length = 'Content-Length: %s\r\n' % len(msg)
content_type = 'Content-Type: text/plain\r\n'
buf = [
proto_status.encode('ISO-8859-1'),
content_length.encode('ISO-8859-1'),
content_type.encode('ISO-8859-1'),
]
if status[:3] in ('413', '414'):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append(b'Connection: close\r\n')
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                status = '400 Bad Request'
                # Rebuild the already-encoded status line so the 400
                # actually reaches the wire.
                proto_status = '%s %s\r\n' % (self.server.protocol, status)
                buf[0] = proto_status.encode('ISO-8859-1')
buf.append(CRLF)
if msg:
if isinstance(msg, six.text_type):
msg = msg.encode('ISO-8859-1')
buf.append(msg)
try:
self.conn.wfile.write(EMPTY.join(buf))
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
raise
def ensure_headers_sent(self):
"""Ensure headers are sent to the client if not already sent."""
if not self.sent_headers:
self.sent_headers = True
self.send_headers()
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
chunk_size_hex = hex(len(chunk))[2:].encode('ascii')
buf = [chunk_size_hex, CRLF, chunk, CRLF]
self.conn.wfile.write(EMPTY.join(buf))
else:
self.conn.wfile.write(chunk)
def send_headers(self): # noqa: C901 # FIXME
"""Assert, process, and send the HTTP response message-headers.
You must set ``self.status``, and :py:attr:`self.outheaders
<HTTPRequest.outheaders>` before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b'content-length' not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
needs_chunked = (
self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'
)
if needs_chunked:
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b'Transfer-Encoding', b'chunked'))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
# Override the decision to not close the connection if the connection
# manager doesn't have space for it.
if not self.close_connection:
can_keep = self.server.can_add_keepalive_connection
self.close_connection = not can_keep
if b'connection' not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b'Connection', b'close'))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b'Connection', b'Keep-Alive'))
if (b'Connection', b'Keep-Alive') in self.outheaders:
self.outheaders.append((
b'Keep-Alive',
u'timeout={connection_timeout}'.
format(connection_timeout=self.server.timeout).
encode('ISO-8859-1'),
))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b'date' not in hkeys:
self.outheaders.append((
b'Date',
email.utils.formatdate(usegmt=True).encode('ISO-8859-1'),
))
if b'server' not in hkeys:
self.outheaders.append((
b'Server',
self.server.server_name.encode('ISO-8859-1'),
))
proto = self.server.protocol.encode('ascii')
buf = [proto + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
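# A minimal sketch (illustrative only) of the chunked framing produced by
# ``HTTPRequest.write()`` above: hex length, CRLF, payload, CRLF.
def _demo_chunk_framing():  # pragma: no cover -- illustrative only
    chunk = b'hello'
    chunk_size_hex = hex(len(chunk))[2:].encode('ascii')
    assert b''.join((chunk_size_hex, CRLF, chunk, CRLF)) == b'5\r\nhello\r\n'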
class HTTPConnection:
"""An HTTP connection (active socket)."""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = io.DEFAULT_BUFFER_SIZE
wbufsize = io.DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
peercreds_enabled = False
peercreds_resolve_enabled = False
# Fields set by ConnectionManager.
last_used = None
def __init__(self, server, sock, makefile=MakeFile):
"""Initialize HTTPConnection instance.
Args:
server (HTTPServer): web server object receiving this request
sock (socket._socketobject): the raw socket object (usually
TCP) for this connection
makefile (file): a fileobject class for reading from the socket
"""
self.server = server
self.socket = sock
self.rfile = makefile(sock, 'rb', self.rbufsize)
self.wfile = makefile(sock, 'wb', self.wbufsize)
self.requests_seen = 0
self.peercreds_enabled = self.server.peercreds_enabled
self.peercreds_resolve_enabled = self.server.peercreds_resolve_enabled
# LRU cached methods:
# Ref: https://stackoverflow.com/a/14946506/595220
self.resolve_peer_creds = (
lru_cache(maxsize=1)(self.resolve_peer_creds)
)
self.get_peer_creds = (
lru_cache(maxsize=1)(self.get_peer_creds)
)
def communicate(self): # noqa: C901 # FIXME
"""Read each request and respond appropriately.
Returns true if the connection should be kept open.
"""
request_seen = False
try:
req = self.RequestHandlerClass(self.server, self)
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return False
request_seen = True
req.respond()
if not req.close_connection:
return True
except socket.error as ex:
errnum = ex.args[0]
            # sadly SSL sockets return a different (longer) timeout string
timeout_errs = 'timed out', 'The read operation timed out'
if errnum in timeout_errs:
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://github.com/cherrypy/cherrypy/issues/853
if (not request_seen) or (req and req.started_request):
self._conditional_error(req, '408 Request Timeout')
elif errnum not in errors.socket_errors_to_ignore:
self.server.error_log(
'socket.error %s' % repr(errnum),
level=logging.WARNING, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
except (KeyboardInterrupt, SystemExit):
raise
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
except Exception as ex:
self.server.error_log(
repr(ex), level=logging.ERROR, traceback=True,
)
self._conditional_error(req, '500 Internal Server Error')
return False
linger = False
def _handle_no_ssl(self, req):
if not req or req.sent_headers:
return
# Unwrap wfile
try:
resp_sock = self.socket._sock
except AttributeError:
# self.socket is of OpenSSL.SSL.Connection type
resp_sock = self.socket._socket
self.wfile = StreamWriter(resp_sock, 'wb', self.wbufsize)
msg = (
'The client sent a plain HTTP request, but '
'this server only speaks HTTPS on this port.'
)
req.simple_response('400 Bad Request', msg)
self.linger = True
def _conditional_error(self, req, response):
"""Respond with an error.
Don't bother writing if a response
has already started being written.
"""
if not req or req.sent_headers:
return
try:
req.simple_response(response)
except errors.FatalSSLAlert:
pass
except errors.NoSSLError:
self._handle_no_ssl(req)
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
self._close_kernel_socket()
# close the socket file descriptor
# (will be closed in the OS if there is no
# other reference to the underlying socket)
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
def get_peer_creds(self): # LRU cached on per-instance basis, see __init__
"""Return the PID/UID/GID tuple of the peer socket for UNIX sockets.
This function uses SO_PEERCRED to query the UNIX PID, UID, GID
of the peer, which is only available if the bind address is
a UNIX domain socket.
Raises:
NotImplementedError: in case of unsupported socket type
RuntimeError: in case of SO_PEERCRED lookup unsupported or disabled
"""
PEERCRED_STRUCT_DEF = '3i'
if IS_WINDOWS or self.socket.family != socket.AF_UNIX:
raise NotImplementedError(
'SO_PEERCRED is only supported in Linux kernel and WSL',
)
elif not self.peercreds_enabled:
raise RuntimeError(
'Peer creds lookup is disabled within this server',
)
try:
peer_creds = self.socket.getsockopt(
# FIXME: Use LOCAL_CREDS for BSD-like OSs
# Ref: https://gist.github.com/LucaFilipozzi/e4f1e118202aff27af6aadebda1b5d91 # noqa
socket.SOL_SOCKET, socket.SO_PEERCRED,
struct.calcsize(PEERCRED_STRUCT_DEF),
)
except socket.error as socket_err:
"""Non-Linux kernels don't support SO_PEERCRED.
Refs:
http://welz.org.za/notes/on-peer-cred.html
https://github.com/daveti/tcpSockHack
msdn.microsoft.com/en-us/commandline/wsl/release_notes#build-15025
"""
six.raise_from( # 3.6+: raise RuntimeError from socket_err
RuntimeError,
socket_err,
)
else:
pid, uid, gid = struct.unpack(PEERCRED_STRUCT_DEF, peer_creds)
return pid, uid, gid
@property
def peer_pid(self):
"""Return the id of the connected peer process."""
pid, _, _ = self.get_peer_creds()
return pid
@property
def peer_uid(self):
"""Return the user id of the connected peer process."""
_, uid, _ = self.get_peer_creds()
return uid
@property
def peer_gid(self):
"""Return the group id of the connected peer process."""
_, _, gid = self.get_peer_creds()
return gid
def resolve_peer_creds(self): # LRU cached on per-instance basis
"""Look up the username and group tuple of the ``PEERCREDS``.
:returns: the username and group tuple of the ``PEERCREDS``
:raises NotImplementedError: if the OS is unsupported
:raises RuntimeError: if UID/GID lookup is unsupported or disabled
"""
if not IS_UID_GID_RESOLVABLE:
raise NotImplementedError(
'UID/GID lookup is unavailable under current platform. '
'It can only be done under UNIX-like OS '
'but not under the Google App Engine',
)
elif not self.peercreds_resolve_enabled:
raise RuntimeError(
'UID/GID lookup is disabled within this server',
)
user = pwd.getpwuid(self.peer_uid).pw_name # [0]
group = grp.getgrgid(self.peer_gid).gr_name # [0]
return user, group
@property
def peer_user(self):
"""Return the username of the connected peer process."""
user, _ = self.resolve_peer_creds()
return user
@property
def peer_group(self):
"""Return the group of the connected peer process."""
_, group = self.resolve_peer_creds()
return group
def _close_kernel_socket(self):
"""Terminate the connection at the transport level."""
# Honor ``sock_shutdown`` for PyOpenSSL connections.
shutdown = getattr(
self.socket, 'sock_shutdown',
self.socket.shutdown,
)
try:
shutdown(socket.SHUT_RDWR) # actually send a TCP FIN
except errors.acceptable_sock_shutdown_exceptions:
pass
except socket.error as e:
if e.errno not in errors.acceptable_sock_shutdown_error_codes:
raise
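# A minimal sketch (illustrative only, assuming the module-level
# ``lru_cache`` import above) of the per-instance LRU caching idiom used in
# ``HTTPConnection.__init__``: wrapping the *bound* method gives every
# instance its own one-slot cache.
def _demo_per_instance_lru_cache():  # pragma: no cover -- illustrative only
    class _Peer:
        def __init__(self):
            # Rebind the bound method through lru_cache on this instance.
            self.creds = lru_cache(maxsize=1)(self.creds)

        def creds(self):
            return object()  # stands in for an expensive getsockopt() call

    peer = _Peer()
    assert peer.creds() is peer.creds()  # second call served from the cache
    assert _Peer().creds() is not peer.creds()  # caches are per-instance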
class HTTPServer:
"""An HTTP server."""
_bind_addr = '127.0.0.1'
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create.
(default -1 = no limit)"""
server_name = None
"""The name of the server; defaults to ``self.version``."""
protocol = 'HTTP/1.1'
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections.
(default 5)."""
shutdown_timeout = 5
"""The total time to wait for worker threads to cleanly exit.
Specified in seconds."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
expiration_interval = 0.5
"""The interval, in seconds, at which the server checks for
expired connections (default 0.5).
"""
version = 'Cheroot/{version!s}'.format(version=__version__)
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``.
"""
ready = False
"""Internal flag which indicating the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of ``ssl.Adapter`` (or a subclass).
Ref: :py:class:`ssl.Adapter <cheroot.ssl.Adapter>`.
You must have the corresponding TLS driver library installed.
"""
peercreds_enabled = False
"""
If :py:data:`True`, peer creds will be looked up via UNIX domain socket.
"""
peercreds_resolve_enabled = False
"""
If :py:data:`True`, username/group will be looked up in the OS from
``PEERCREDS``-provided IDs.
"""
keep_alive_conn_limit = 10
"""The maximum number of waiting keep-alive connections that will be kept open.
Default is 10. Set to None to have unlimited connections."""
def __init__(
self, bind_addr, gateway,
minthreads=10, maxthreads=-1, server_name=None,
peercreds_enabled=False, peercreds_resolve_enabled=False,
):
"""Initialize HTTPServer instance.
Args:
bind_addr (tuple): network interface to listen to
gateway (Gateway): gateway for processing HTTP requests
minthreads (int): minimum number of threads for HTTP thread pool
maxthreads (int): maximum number of threads for HTTP thread pool
server_name (str): web server name to be advertised via Server
HTTP header
"""
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = threadpool.ThreadPool(
self, min=minthreads or 1, max=maxthreads,
)
if not server_name:
server_name = self.version
self.server_name = server_name
self.peercreds_enabled = peercreds_enabled
self.peercreds_resolve_enabled = (
peercreds_resolve_enabled and peercreds_enabled
)
self.clear_stats()
def clear_stats(self):
"""Reset server stat counters.."""
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, 'qsize', None),
'Threads': lambda s: len(getattr(self.requests, '_threads', [])),
'Threads Idle': lambda s: getattr(self.requests, 'idle', None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum(
(w['Requests'](w) for w in s['Worker Threads'].values()), 0,
),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum(
(w['Bytes Read'](w) for w in s['Worker Threads'].values()), 0,
),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum(
(w['Bytes Written'](w) for w in s['Worker Threads'].values()),
0,
),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum(
(w['Work Time'](w) for w in s['Worker Threads'].values()), 0,
),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
(
w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()
), 0,
),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
(
w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()
), 0,
),
'Worker Threads': {},
}
logging.statistics['Cheroot HTTPServer %d' % id(self)] = self.stats
def runtime(self):
"""Return server uptime."""
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
"""Render Server instance representing bind address."""
return '%s.%s(%r)' % (
self.__module__, self.__class__.__name__,
self.bind_addr,
)
@property
def bind_addr(self):
"""Return the interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any
:term:`IPv4` or :term:`IPv6` address, or any valid hostname.
The string 'localhost' is a synonym for '127.0.0.1' (or '::1',
if your hosts file prefers :term:`IPv6`).
The string '0.0.0.0' is a special :term:`IPv4` entry meaning
"any active interface" (INADDR_ANY), and '::' is the similar
IN6ADDR_ANY for :term:`IPv6`.
The empty string or :py:data:`None` are not allowed.
For UNIX sockets, supply the file name as a string.
        Systemd socket activation is automatic and doesn't require tampering
with this variable.
.. glossary::
:abbr:`IPv4 (Internet Protocol version 4)`
Internet Protocol version 4
:abbr:`IPv6 (Internet Protocol version 6)`
Internet Protocol version 6
"""
return self._bind_addr
@bind_addr.setter
def bind_addr(self, value):
"""Set the interface on which to listen for connections."""
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError(
"Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
'to listen on all active interfaces.',
)
self._bind_addr = value
def safe_start(self):
"""Run the server forever, and stop it cleanly on exit."""
try:
self.start()
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.error_log('Keyboard Interrupt: shutting down')
self.stop()
raise
except SystemExit:
self.error_log('SystemExit raised: shutting down')
self.stop()
raise
def prepare(self): # noqa: C901 # FIXME
"""Prepare server to serving requests.
It binds a socket's port, setups the socket to ``listen()`` and does
other preparing things.
"""
self._interrupt = None
if self.software is None:
self.software = '%s Server' % self.version
# Select the appropriate socket
self.socket = None
msg = 'No socket could be created'
if os.getenv('LISTEN_PID', None):
# systemd socket activation
self.socket = socket.fromfd(3, socket.AF_INET, socket.SOCK_STREAM)
elif isinstance(self.bind_addr, (six.text_type, six.binary_type)):
# AF_UNIX socket
try:
self.bind_unix_socket(self.bind_addr)
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, self.bind_addr, serr)
six.raise_from(socket.error(msg), serr)
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6
# addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE,
)
except socket.gaierror:
sock_type = socket.AF_INET
bind_addr = self.bind_addr
if ':' in host:
sock_type = socket.AF_INET6
bind_addr = bind_addr + (0, 0)
info = [(sock_type, socket.SOCK_STREAM, 0, '', bind_addr)]
for res in info:
af, socktype, proto, _canonname, sa = res
try:
self.bind(af, socktype, proto)
break
except socket.error as serr:
msg = '%s -- (%s: %s)' % (msg, sa, serr)
if self.socket:
self.socket.close()
self.socket = None
if not self.socket:
raise socket.error(msg)
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# must not be accessed once stop() has been called
self._connections = connections.ConnectionManager(self)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
def serve(self):
"""Serve requests, after invoking :func:`prepare()`."""
while self.ready and not self.interrupt:
try:
self._connections.run(self.expiration_interval)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.error_log(
'Error in HTTPServer.serve', level=logging.ERROR,
traceback=True,
)
# raise exceptions reported by any worker threads,
# such that the exception is raised from the serve() thread.
if self.interrupt:
while self._stopping_for_interrupt:
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def start(self):
"""Run the server forever.
        It is a shortcut for invoking :func:`prepare()` then :func:`serve()`.
"""
# We don't have to trap KeyboardInterrupt or SystemExit here,
# because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self.prepare()
self.serve()
@contextlib.contextmanager
def _run_in_thread(self):
"""Context manager for running this server in a thread."""
self.prepare()
thread = threading.Thread(target=self.serve)
thread.daemon = True
thread.start()
try:
yield thread
finally:
self.stop()
@property
def can_add_keepalive_connection(self):
"""Flag whether it is allowed to add a new keep-alive connection."""
return self.ready and self._connections.can_add_keepalive_connection
def put_conn(self, conn):
"""Put an idle connection back into the ConnectionManager."""
if self.ready:
self._connections.put(conn)
else:
# server is shutting down, just close it
conn.close()
def error_log(self, msg='', level=20, traceback=False):
"""Write error message to log.
Args:
msg (str): error message
level (int): logging level
traceback (bool): add traceback to output or not
"""
# Override this in subclasses as desired
sys.stderr.write('{msg!s}\n'.format(msg=msg))
sys.stderr.flush()
if traceback:
tblines = traceback_.format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
sock = self.prepare_socket(
self.bind_addr,
family, type, proto,
self.nodelay, self.ssl_adapter,
)
sock = self.socket = self.bind_socket(sock, self.bind_addr)
self.bind_addr = self.resolve_real_bind_addr(sock)
return sock
def bind_unix_socket(self, bind_addr): # noqa: C901 # FIXME
"""Create (or recreate) a UNIX socket object."""
if IS_WINDOWS:
"""
Trying to access socket.AF_UNIX under Windows
causes an AttributeError.
"""
raise ValueError( # or RuntimeError?
'AF_UNIX sockets are not supported under Windows.',
)
fs_permissions = 0o777 # TODO: allow changing mode
try:
            # Make it possible to reuse the socket...
os.unlink(self.bind_addr)
except OSError:
"""
File does not exist, which is the primary goal anyway.
"""
except TypeError as typ_err:
err_msg = str(typ_err)
if (
'remove() argument 1 must be encoded '
'string without null bytes, not unicode'
not in err_msg
and 'embedded NUL character' not in err_msg # py34
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy2
):
raise
except ValueError as val_err:
err_msg = str(val_err)
if (
'unlink: embedded null '
'character in path' not in err_msg
and 'embedded null byte' not in err_msg
and 'argument must be a '
'string without NUL characters' not in err_msg # pypy3
):
raise
sock = self.prepare_socket(
bind_addr=bind_addr,
family=socket.AF_UNIX, type=socket.SOCK_STREAM, proto=0,
nodelay=self.nodelay, ssl_adapter=self.ssl_adapter,
)
try:
"""Linux way of pre-populating fs mode permissions."""
            # Allow everyone to access the socket...
os.fchmod(sock.fileno(), fs_permissions)
FS_PERMS_SET = True
except OSError:
FS_PERMS_SET = False
try:
sock = self.bind_socket(sock, bind_addr)
except socket.error:
sock.close()
raise
bind_addr = self.resolve_real_bind_addr(sock)
try:
"""FreeBSD/macOS pre-populating fs mode permissions."""
if not FS_PERMS_SET:
try:
os.lchmod(bind_addr, fs_permissions)
except AttributeError:
os.chmod(bind_addr, fs_permissions, follow_symlinks=False)
FS_PERMS_SET = True
except OSError:
pass
if not FS_PERMS_SET:
self.error_log(
'Failed to set socket fs mode permissions',
level=logging.WARNING,
)
self.bind_addr = bind_addr
self.socket = sock
return sock
@staticmethod
def prepare_socket(bind_addr, family, type, proto, nodelay, ssl_adapter):
"""Create and prepare the socket object."""
sock = socket.socket(family, type, proto)
connections.prevent_socket_inheritance(sock)
host, port = bind_addr[:2]
IS_EPHEMERAL_PORT = port == 0
if not (IS_WINDOWS or IS_EPHEMERAL_PORT):
"""Enable SO_REUSEADDR for the current socket.
Skip for Windows (has different semantics)
or ephemeral ports (can steal ports from others).
Refs:
* https://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
* https://github.com/cherrypy/cheroot/issues/114
* https://gavv.github.io/blog/ephemeral-port-reuse/
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if nodelay and not isinstance(
bind_addr,
(six.text_type, six.binary_type),
):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if ssl_adapter is not None:
sock = ssl_adapter.bind(sock)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See
# https://github.com/cherrypy/cherrypy/issues/871.
listening_ipv6 = (
hasattr(socket, 'AF_INET6')
and family == socket.AF_INET6
and host in ('::', '::0', '::0.0.0.0')
)
if listening_ipv6:
try:
sock.setsockopt(
socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0,
)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
return sock
@staticmethod
def bind_socket(socket_, bind_addr):
"""Bind the socket to given interface."""
socket_.bind(bind_addr)
return socket_
@staticmethod
def resolve_real_bind_addr(socket_):
"""Retrieve actual bind address from bound socket."""
        # FIXME: keep requested bind_addr separate from the real bound_addr
        # (the port is different in case of ephemeral port 0)
bind_addr = socket_.getsockname()
if socket_.family in (
# Windows doesn't have socket.AF_UNIX, so not using it in check
socket.AF_INET,
socket.AF_INET6,
):
"""UNIX domain sockets are strings or bytes.
In case of bytes with a leading null-byte it's an abstract socket.
"""
return bind_addr[:2]
if isinstance(bind_addr, six.binary_type):
bind_addr = bton(bind_addr)
return bind_addr
def process_conn(self, conn):
"""Process an incoming HTTPConnection."""
try:
self.requests.put(conn)
except queue.Full:
# Just drop the conn. TODO: write 503 back?
conn.close()
@property
def interrupt(self):
"""Flag interrupt of the server."""
return self._interrupt
@property
def _stopping_for_interrupt(self):
"""Return whether the server is responding to an interrupt."""
return self._interrupt is _STOPPING_FOR_INTERRUPT
@interrupt.setter
def interrupt(self, interrupt):
"""Perform the shutdown of this server and save the exception.
Typically invoked by a worker thread in
:py:mod:`~cheroot.workers.threadpool`, the exception is raised
from the thread running :py:meth:`serve` once :py:meth:`stop`
has completed.
"""
self._interrupt = _STOPPING_FOR_INTERRUPT
self.stop()
self._interrupt = interrupt
def stop(self): # noqa: C901 # FIXME
"""Gracefully shutdown a server that is serving forever."""
if not self.ready:
return # already stopped
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
self._connections.stop()
sock = getattr(self, 'socket', None)
if sock:
if not isinstance(
self.bind_addr,
(six.text_type, six.binary_type),
):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error as ex:
if ex.args[0] not in errors.socket_errors_to_ignore:
# Changed to use error code and not message
# See
# https://github.com/cherrypy/cherrypy/issues/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(
host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM,
):
af, socktype, proto, _canonname, _sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See
# https://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, 'close'):
sock.close()
self.socket = None
self._connections.close()
self.requests.stop(self.shutdown_timeout)
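# A minimal sketch (illustrative only) of the lazily-evaluated stats
# pattern used by ``clear_stats()`` above: values may be callables taking
# the stats dict itself and are resolved only when the stats are rendered.
def _demo_lazy_stats():  # pragma: no cover -- illustrative only
    stats = {'Enabled': True, 'Accepts': 4}
    stats['Accepts/sec'] = lambda s: s['Accepts'] / 2.0  # 2.0: made-up run time
    rendered = {
        key: value(stats) if callable(value) else value
        for key, value in stats.items()
    }
    assert rendered['Accepts/sec'] == 2.0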
class Gateway:
"""Base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
"""Initialize Gateway instance with request.
Args:
req (HTTPRequest): current HTTP request
"""
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
raise NotImplementedError # pragma: no cover
# These may either be ssl.Adapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cheroot.ssl.builtin.BuiltinSSLAdapter',
'pyopenssl': 'cheroot.ssl.pyopenssl.pyOpenSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, six.string_types):
last_dot = adapter.rfind('.')
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError(
"'%s' object has no attribute '%s'"
% (mod_path, attr_name),
)
return adapter
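# A minimal sketch (illustrative only) of the name-splitting step above:
# the dotted adapter path is split at its last dot into the module path
# and the attribute to fetch from it.
def _demo_adapter_name_split():  # pragma: no cover -- illustrative only
    adapter = 'cheroot.ssl.builtin.BuiltinSSLAdapter'
    last_dot = adapter.rfind('.')
    assert adapter[:last_dot] == 'cheroot.ssl.builtin'
    assert adapter[last_dot + 1:] == 'BuiltinSSLAdapter'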
| 76,777 | Python | .py | 1,841 | 30.348724 | 101 | 0.566385 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,979 | conftest.py | rembo10_headphones/lib/cheroot/test/conftest.py |
"""Pytest configuration module.
Contains fixtures that are tightly bound to the Cheroot framework
itself and useless for end-users' app testing.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type # pylint: disable=invalid-name
import threading
import time
import pytest
from ..server import Gateway, HTTPServer
from ..testing import ( # noqa: F401 # pylint: disable=unused-import
native_server, wsgi_server,
)
from ..testing import get_server_client
@pytest.fixture
# pylint: disable=redefined-outer-name
def wsgi_server_client(wsgi_server): # noqa: F811
"""Create a test client out of given WSGI server."""
return get_server_client(wsgi_server)
@pytest.fixture
# pylint: disable=redefined-outer-name
def native_server_client(native_server): # noqa: F811
"""Create a test client out of given HTTP server."""
return get_server_client(native_server)
@pytest.fixture
def http_server():
"""Provision a server creator as a fixture."""
def start_srv():
bind_addr = yield
if bind_addr is None:
return
httpserver = make_http_server(bind_addr)
yield httpserver
yield httpserver
srv_creator = iter(start_srv())
next(srv_creator) # pylint: disable=stop-iteration-return
yield srv_creator
try:
while True:
httpserver = next(srv_creator)
if httpserver is not None:
httpserver.stop()
except StopIteration:
pass
def make_http_server(bind_addr):
"""Create and start an HTTP server bound to ``bind_addr``."""
httpserver = HTTPServer(
bind_addr=bind_addr,
gateway=Gateway,
)
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
return httpserver
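# A minimal sketch (illustrative only) of the primed-generator protocol the
# ``http_server`` fixture relies on: ``next()`` advances the generator to
# its first bare ``yield`` so the test can hand in a bind address via
# ``send()``.
def _demo_primed_generator():  # pragma: no cover -- illustrative only
    def creator():
        bind_addr = yield  # filled in by send()
        yield 'bound to %r' % (bind_addr,)

    gen = creator()
    next(gen)  # prime the generator up to the bare ``yield``
    assert gen.send(('127.0.0.1', 0)) == "bound to ('127.0.0.1', 0)"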
| 1,839 | Python | .py | 54 | 28.962963 | 70 | 0.699661 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,980 | _pytest_plugin.py | rembo10_headphones/lib/cheroot/test/_pytest_plugin.py |
"""Local pytest plugin.
Contains hooks that are tightly bound to the Cheroot framework
itself and useless for end-users' app testing.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
pytest_version = tuple(map(int, pytest.__version__.split('.')))
def pytest_load_initial_conftests(early_config, parser, args):
"""Drop unfilterable warning ignores."""
if pytest_version < (6, 2, 0):
return
# pytest>=6.2.0 under Python 3.8:
# Refs:
# * https://docs.pytest.org/en/stable/usage.html#unraisable
# * https://github.com/pytest-dev/pytest/issues/5299
early_config._inicache['filterwarnings'].extend((
'ignore:Exception in thread CP Server Thread-:'
'pytest.PytestUnhandledThreadExceptionWarning:_pytest.threadexception',
'ignore:Exception in thread Thread-:'
'pytest.PytestUnhandledThreadExceptionWarning:_pytest.threadexception',
'ignore:Exception ignored in. '
'<socket.socket fd=-1, family=AddressFamily.AF_INET, '
'type=SocketKind.SOCK_STREAM, proto=.:'
'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
'ignore:Exception ignored in. '
'<socket.socket fd=-1, family=AddressFamily.AF_INET6, '
'type=SocketKind.SOCK_STREAM, proto=.:'
'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
'ignore:Exception ignored in. '
'<socket.socket fd=-1, family=AF_INET, '
'type=SocketKind.SOCK_STREAM, proto=.:'
'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
'ignore:Exception ignored in. '
'<socket.socket fd=-1, family=AF_INET6, '
'type=SocketKind.SOCK_STREAM, proto=.:'
'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
'ignore:Exception ignored in. '
'<ssl.SSLSocket fd=-1, family=AddressFamily.AF_UNIX, '
'type=SocketKind.SOCK_STREAM, proto=.:'
'pytest.PytestUnraisableExceptionWarning:_pytest.unraisableexception',
))
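# Note: ``tuple(map(int, ...))`` above assumes purely numeric version
# segments. A hedged sketch (illustrative only) that also tolerates
# suffixed segments such as '7.0.0rc1':
def _demo_tolerant_version_parse():  # pragma: no cover -- illustrative only
    import re

    def parse(version):
        # Keep only the leading digits of each dot-separated segment.
        matches = (re.match(r'\d+', seg) for seg in version.split('.'))
        return tuple(int(m.group()) for m in matches if m)

    assert parse('7.0.0rc1') == (7, 0, 0)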
| 2,085 | Python | .py | 42 | 42.928571 | 79 | 0.700737 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,981 | test_makefile.py | rembo10_headphones/lib/cheroot/test/test_makefile.py |
"""Tests for :py:mod:`cheroot.makefile`."""
from cheroot import makefile
__metaclass__ = type
class MockSocket:
"""A mock socket."""
def __init__(self):
"""Initialize :py:class:`MockSocket`."""
self.messages = []
def recv_into(self, buf):
"""Simulate ``recv_into`` for Python 3."""
if not self.messages:
return 0
msg = self.messages.pop(0)
for index, byte in enumerate(msg):
buf[index] = byte
return len(msg)
def recv(self, size):
"""Simulate ``recv`` for Python 2."""
try:
return self.messages.pop(0)
except IndexError:
return ''
def send(self, val):
"""Simulate a send."""
return len(val)
def test_bytes_read():
"""Reader should capture bytes read."""
sock = MockSocket()
sock.messages.append(b'foo')
rfile = makefile.MakeFile(sock, 'r')
rfile.read()
assert rfile.bytes_read == 3
def test_bytes_written():
"""Writer should capture bytes written."""
sock = MockSocket()
sock.messages.append(b'foo')
wfile = makefile.MakeFile(sock, 'w')
wfile.write(b'bar')
assert wfile.bytes_written == 3
| 1,214 | Python | .py | 39 | 24.358974 | 50 | 0.592083 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,982 | test_cli.py | rembo10_headphones/lib/cheroot/test/test_cli.py |
"""Tests to verify the command line interface.
.. spelling::
cli
"""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
import sys
import six
import pytest
from cheroot.cli import (
Application,
parse_wsgi_bind_addr,
)
@pytest.mark.parametrize(
('raw_bind_addr', 'expected_bind_addr'),
(
# tcp/ip
('192.168.1.1:80', ('192.168.1.1', 80)),
        # IPv6 IPs have to be enclosed in brackets when specified in URL form
('[::1]:8000', ('::1', 8000)),
('localhost:5000', ('localhost', 5000)),
        # this is a valid input, but foo gets discarded
('foo@bar:5000', ('bar', 5000)),
('foo', ('foo', None)),
('123456789', ('123456789', None)),
# unix sockets
('/tmp/cheroot.sock', '/tmp/cheroot.sock'),
('/tmp/some-random-file-name', '/tmp/some-random-file-name'),
# abstract sockets
('@cheroot', '\x00cheroot'),
),
)
def test_parse_wsgi_bind_addr(raw_bind_addr, expected_bind_addr):
"""Check the parsing of the --bind option.
Verify some of the supported addresses and the expected return value.
"""
assert parse_wsgi_bind_addr(raw_bind_addr) == expected_bind_addr
@pytest.fixture
def wsgi_app(monkeypatch):
"""Return a WSGI app stub."""
class WSGIAppMock:
"""Mock of a wsgi module."""
def application(self):
"""Empty application method.
Default method to be called when no specific callable
is defined in the wsgi application identifier.
            It has an empty body because we only need to verify that
            the same method is returned, not the result of executing it.
"""
def main(self):
"""Empty custom method (callable) inside the mocked WSGI app.
            It has an empty body because we only need to verify that
            the same method is returned, not the result of executing it.
"""
app = WSGIAppMock()
    # patch sys.modules, to include an instance of WSGIAppMock
# under a specific namespace
if six.PY2:
        # python2 requires the previous namespaces to be part of sys.modules
        # (e.g. for 'a.b.c' we need to insert 'a', 'a.b' and 'a.b.c'),
        # otherwise it fails. We're setting the same instance on each level;
        # we don't really care about those, just the last one.
monkeypatch.setitem(sys.modules, 'mypkg', app)
monkeypatch.setitem(sys.modules, 'mypkg.wsgi', app)
return app
@pytest.mark.parametrize(
('app_name', 'app_method'),
(
(None, 'application'),
('application', 'application'),
('main', 'main'),
),
)
# pylint: disable=invalid-name
def test_Application_resolve(app_name, app_method, wsgi_app):
"""Check the wsgi application name conversion."""
if app_name is None:
wsgi_app_spec = 'mypkg.wsgi'
else:
wsgi_app_spec = 'mypkg.wsgi:{app_name}'.format(**locals())
expected_app = getattr(wsgi_app, app_method)
assert Application.resolve(wsgi_app_spec).wsgi_app == expected_app
| 3,091 | Python | .py | 82 | 31.146341 | 76 | 0.626796 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,983 | test_wsgi.py | rembo10_headphones/lib/cheroot/test/test_wsgi.py |
"""Test wsgi."""
from concurrent.futures.thread import ThreadPoolExecutor
from traceback import print_tb
import pytest
import portend
import requests
from requests_toolbelt.sessions import BaseUrlSession as Session
from jaraco.context import ExceptionTrap
from cheroot import wsgi
from cheroot._compat import IS_MACOS, IS_WINDOWS
IS_SLOW_ENV = IS_MACOS or IS_WINDOWS
@pytest.fixture
def simple_wsgi_server():
"""Fucking simple wsgi server fixture (duh)."""
port = portend.find_available_local_port()
def app(_environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello world!']
host = '::'
addr = host, port
server = wsgi.Server(addr, app, timeout=600 if IS_SLOW_ENV else 20)
# pylint: disable=possibly-unused-variable
url = 'http://localhost:{port}/'.format(**locals())
# pylint: disable=possibly-unused-variable
with server._run_in_thread() as thread:
yield locals()
def test_connection_keepalive(simple_wsgi_server):
"""Test the connection keepalive works (duh)."""
session = Session(base_url=simple_wsgi_server['url'])
pooled = requests.adapters.HTTPAdapter(
pool_connections=1, pool_maxsize=1000,
)
session.mount('http://', pooled)
def do_request():
with ExceptionTrap(requests.exceptions.ConnectionError) as trap:
resp = session.get('info')
resp.raise_for_status()
print_tb(trap.tb)
return bool(trap)
with ThreadPoolExecutor(max_workers=10 if IS_SLOW_ENV else 50) as pool:
tasks = [
pool.submit(do_request)
for n in range(250 if IS_SLOW_ENV else 1000)
]
failures = sum(task.result() for task in tasks)
assert not failures
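# Quick note on ``ExceptionTrap`` semantics (illustrative sketch): the
# context manager swallows only the named exception type and evaluates
# truthy when one was caught, which is why ``do_request`` above returns
# ``bool(trap)`` as a failure flag.
def _example_exception_trap():
    """Show ExceptionTrap catching and reporting a matching error."""
    with ExceptionTrap(ZeroDivisionError) as trap:
        1 / 0
    assert bool(trap)  # an exception of the named type was trapped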
def test_gateway_start_response_called_twice(monkeypatch):
"""Verify that repeat calls of ``Gateway.start_response()`` fail."""
monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {})
wsgi_gateway = wsgi.Gateway(None)
wsgi_gateway.started_response = True
err_msg = '^WSGI start_response called a second time with no exc_info.$'
with pytest.raises(RuntimeError, match=err_msg):
wsgi_gateway.start_response('200', (), None)
def test_gateway_write_needs_start_response_called_before(monkeypatch):
"""Check that calling ``Gateway.write()`` needs started response."""
monkeypatch.setattr(wsgi.Gateway, 'get_environ', lambda self: {})
wsgi_gateway = wsgi.Gateway(None)
err_msg = '^WSGI write called before start_response.$'
with pytest.raises(RuntimeError, match=err_msg):
wsgi_gateway.write(None) # The actual arg value is unimportant
| 2,758 | Python | .py | 63 | 38.079365 | 76 | 0.698318 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,984 | test_dispatch.py | rembo10_headphones/lib/cheroot/test/test_dispatch.py |
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division, print_function
from cheroot.wsgi import PathInfoDispatcher
def wsgi_invoke(app, environ):
"""Serve 1 request from a WSGI application."""
response = {}
def start_response(status, headers):
response.update({
'status': status,
'headers': headers,
})
response['body'] = b''.join(
app(environ, start_response),
)
return response
def test_dispatch_no_script_name():
"""Dispatch despite lack of ``SCRIPT_NAME`` in environ."""
# Bare bones WSGI hello world app (from PEP 333).
def app(environ, start_response):
start_response(
'200 OK', [
('Content-Type', 'text/plain; charset=utf-8'),
],
)
return [u'Hello, world!'.encode('utf-8')]
# Build a dispatch table.
d = PathInfoDispatcher([
('/', app),
])
# Dispatch a request without `SCRIPT_NAME`.
response = wsgi_invoke(
d, {
'PATH_INFO': '/foo',
},
)
assert response == {
'status': '200 OK',
'headers': [
('Content-Type', 'text/plain; charset=utf-8'),
],
'body': b'Hello, world!',
}
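# Hedged sketch (not from the original suite): the dispatcher picks the
# longest matching mount prefix and shifts it from ``PATH_INFO`` into
# ``SCRIPT_NAME``, so an app mounted under '/api' sees only the
# remainder. The handler bodies below are illustrative assumptions.
def _example_prefix_dispatch():
    """Show prefix routing between two mounted apps."""
    def api_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        # After dispatch, '/api' has moved into SCRIPT_NAME.
        return [environ['PATH_INFO'].encode('utf-8')]
    def root_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'root']
    d = PathInfoDispatcher([('/api', api_app), ('/', root_app)])
    # Expected body: b'/users' (the prefix was consumed by the mount).
    return wsgi_invoke(d, {'PATH_INFO': '/api/users'})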
| 1,332 | Python | .py | 44 | 23.113636 | 64 | 0.561472 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,985 | test_ssl.py | rembo10_headphones/lib/cheroot/test/test_ssl.py |
"""Tests for TLS support."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import functools
import json
import os
import ssl
import subprocess
import sys
import threading
import time
import traceback
import OpenSSL.SSL
import pytest
import requests
import six
import trustme
from .._compat import bton, ntob, ntou
from .._compat import IS_ABOVE_OPENSSL10, IS_CI, IS_PYPY
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS
from ..server import HTTPServer, get_ssl_adapter_class
from ..testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
# get_server_client,
_get_conn_data,
_probe_ipv6_sock,
)
from ..wsgi import Gateway_10
IS_GITHUB_ACTIONS_WORKFLOW = bool(os.getenv('GITHUB_WORKFLOW'))
IS_WIN2016 = (
IS_WINDOWS
# pylint: disable=unsupported-membership-test
and b'Microsoft Windows Server 2016 Datacenter' in subprocess.check_output(
('systeminfo',),
)
)
IS_LIBRESSL_BACKEND = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_PYOPENSSL_SSL_VERSION_1_0 = (
OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION).
startswith(b'OpenSSL 1.0.')
)
PY27 = sys.version_info[:2] == (2, 7)
PY34 = sys.version_info[:2] == (3, 4)
PY3 = not six.PY2
PY310_PLUS = sys.version_info[:2] >= (3, 10)
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED:
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
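# Illustrative sketch (an assumption mirroring the test bodies below):
# the mapping above translates a stdlib ``ssl`` verify mode into the
# flag set accepted by ``OpenSSL.SSL.Context.set_verify``.
def _example_apply_verify_mode(openssl_context, stdlib_verify_mode):
    """Configure a pyOpenSSL context from a stdlib verify mode."""
    openssl_context.set_verify(
        _stdlib_to_openssl_verify[stdlib_verify_mode],
        # Defer to whatever OpenSSL itself decided during verification.
        lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
    )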
fails_under_py3 = pytest.mark.xfail(
not six.PY2,
reason='Fails under Python 3+',
)
fails_under_py3_in_pypy = pytest.mark.xfail(
not six.PY2 and IS_PYPY,
reason='Fails under PyPy3',
)
missing_ipv6 = pytest.mark.skipif(
not _probe_ipv6_sock('::1'),
reason=''
'IPv6 is disabled '
'(for example, under Travis CI '
'which runs under GCE supporting only IPv4)',
)
class HelloWorldGateway(Gateway_10):
"""Gateway responding with Hello World to root URI."""
def respond(self):
"""Respond with dummy content via HTTP."""
req = self.req
req_uri = bton(req.uri)
if req_uri == '/':
req.status = b'200 OK'
req.ensure_headers_sent()
req.write(b'Hello world!')
return
if req_uri == '/env':
req.status = b'200 OK'
req.ensure_headers_sent()
env = self.get_environ()
# drop files so that it can be json dumped
env.pop('wsgi.errors')
env.pop('wsgi.input')
print(env)
req.write(json.dumps(env).encode('utf-8'))
return
return super(HelloWorldGateway, self).respond()
def make_tls_http_server(bind_addr, ssl_adapter, request):
"""Create and start an HTTP server bound to ``bind_addr``."""
httpserver = HTTPServer(
bind_addr=bind_addr,
gateway=HelloWorldGateway,
)
# httpserver.gateway = HelloWorldGateway
httpserver.ssl_adapter = ssl_adapter
threading.Thread(target=httpserver.safe_start).start()
while not httpserver.ready:
time.sleep(0.1)
request.addfinalizer(httpserver.stop)
return httpserver
@pytest.fixture
def tls_http_server(request):
"""Provision a server creator as a fixture."""
return functools.partial(make_tls_http_server, request=request)
@pytest.fixture
def ca():
"""Provide a certificate authority via fixture."""
return trustme.CA()
@pytest.fixture
def tls_ca_certificate_pem_path(ca):
"""Provide a certificate authority certificate file via fixture."""
with ca.cert_pem.tempfile() as ca_cert_pem:
yield ca_cert_pem
@pytest.fixture
def tls_certificate(ca):
"""Provide a leaf certificate via fixture."""
interface, _host, _port = _get_conn_data(ANY_INTERFACE_IPV4)
return ca.issue_cert(ntou(interface))
@pytest.fixture
def tls_certificate_chain_pem_path(tls_certificate):
"""Provide a certificate chain PEM file path via fixture."""
with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def tls_certificate_private_key_pem_path(tls_certificate):
"""Provide a certificate private key PEM file path via fixture."""
with tls_certificate.private_key_pem.tempfile() as cert_key_pem:
yield cert_key_pem
def _thread_except_hook(exceptions, args):
"""Append uncaught exception ``args`` in threads to ``exceptions``."""
if issubclass(args.exc_type, SystemExit):
return
# cannot store the exception, it references the thread's stack
exceptions.append((
args.exc_type,
str(args.exc_value),
''.join(
traceback.format_exception(
args.exc_type, args.exc_value, args.exc_traceback,
),
),
))
@pytest.fixture
def thread_exceptions():
"""Provide a list of uncaught exceptions from threads via a fixture.
Only catches exceptions on Python 3.8+.
The list contains: ``(type, str(value), str(traceback))``
"""
exceptions = []
# Python 3.8+
orig_hook = getattr(threading, 'excepthook', None)
if orig_hook is not None:
threading.excepthook = functools.partial(
_thread_except_hook, exceptions,
)
try:
yield exceptions
finally:
if orig_hook is not None:
threading.excepthook = orig_hook
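# Hedged usage sketch (hypothetical test, not collected by pytest): a
# thread raising an uncaught exception should be recorded by the
# ``thread_exceptions`` fixture on Python 3.8+.
def _example_thread_exceptions_usage(thread_exceptions):
    """Demonstrate that uncaught thread exceptions get captured."""
    def boom():
        raise RuntimeError('uncaught in thread')
    worker = threading.Thread(target=boom)
    worker.start()
    worker.join()
    # The list now holds ``(type, str(value), formatted traceback)``.
    assert thread_exceptions[0][0] is RuntimeError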
@pytest.mark.parametrize(
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
def test_ssl_adapters(
tls_http_server, adapter_type,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
):
"""Test ability to connect to server via HTTPS using adapters."""
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_certificate.configure_cert(tls_adapter.context)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
# testclient = get_server_client(tlshttpserver)
# testclient.get('/')
interface, _host, port = _get_conn_data(
tlshttpserver.bind_addr,
)
resp = requests.get(
'https://{host!s}:{port!s}/'.format(host=interface, port=port),
verify=tls_ca_certificate_pem_path,
)
assert resp.status_code == 200
assert resp.text == 'Hello world!'
@pytest.mark.parametrize( # noqa: C901 # FIXME
'adapter_type',
(
'builtin',
'pyopenssl',
),
)
@pytest.mark.parametrize(
('is_trusted_cert', 'tls_client_identity'),
(
(True, 'localhost'), (True, '127.0.0.1'),
(True, '*.localhost'), (True, 'not_localhost'),
(False, 'localhost'),
),
)
@pytest.mark.parametrize(
'tls_verify_mode',
(
ssl.CERT_NONE, # server shouldn't validate client cert
ssl.CERT_OPTIONAL, # same as CERT_REQUIRED in client mode, don't use
ssl.CERT_REQUIRED, # server should validate if client cert CA is OK
),
)
@pytest.mark.xfail(
IS_PYPY and IS_CI,
reason='Fails under PyPy in CI for unknown reason',
strict=False,
)
def test_tls_client_auth( # noqa: C901 # FIXME
# FIXME: remove twisted logic, separate tests
mocker,
tls_http_server, adapter_type,
ca,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
is_trusted_cert, tls_client_identity,
tls_verify_mode,
):
"""Verify that client TLS certificate auth works correctly."""
test_cert_rejection = (
tls_verify_mode != ssl.CERT_NONE
and not is_trusted_cert
)
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
client_cert_root_ca = ca if is_trusted_cert else trustme.CA()
with mocker.mock_module.patch(
'idna.core.ulabel',
return_value=ntob(tls_client_identity),
):
client_cert = client_cert_root_ca.issue_cert(
ntou(tls_client_identity),
)
del client_cert_root_ca
with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_adapter.context.set_verify(
_stdlib_to_openssl_verify[tls_verify_mode],
lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
)
else:
tls_adapter.context.verify_mode = tls_verify_mode
ca.configure_trust(tls_adapter.context)
tls_certificate.configure_cert(tls_adapter.context)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
interface, _host, port = _get_conn_data(tlshttpserver.bind_addr)
make_https_request = functools.partial(
requests.get,
'https://{host!s}:{port!s}/'.format(host=interface, port=port),
# Server TLS certificate verification:
verify=tls_ca_certificate_pem_path,
# Client TLS certificate verification:
cert=cl_pem,
)
if not test_cert_rejection:
resp = make_https_request()
is_req_successful = resp.status_code == 200
if (
not is_req_successful
and IS_PYOPENSSL_SSL_VERSION_1_0
and adapter_type == 'builtin'
and tls_verify_mode == ssl.CERT_REQUIRED
and tls_client_identity == 'localhost'
and is_trusted_cert
) or PY34:
pytest.xfail(
'OpenSSL 1.0 has problems with verifying client certs',
)
assert is_req_successful
assert resp.text == 'Hello world!'
return
# xfail some flaky tests
# https://github.com/cherrypy/cheroot/issues/237
issue_237 = (
IS_MACOS
and adapter_type == 'builtin'
and tls_verify_mode != ssl.CERT_NONE
)
if issue_237:
pytest.xfail('Test sometimes fails')
expected_ssl_errors = (
requests.exceptions.SSLError,
OpenSSL.SSL.Error,
) if PY34 else (
requests.exceptions.SSLError,
)
if IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
expected_ssl_errors += requests.exceptions.ConnectionError,
with pytest.raises(expected_ssl_errors) as ssl_err:
make_https_request()
if PY34 and isinstance(ssl_err, OpenSSL.SSL.Error):
pytest.xfail(
'OpenSSL behaves weirdly under Python 3.4 '
'because of an outdated urllib3',
)
try:
err_text = ssl_err.value.args[0].reason.args[0].args[0]
except AttributeError:
if PY34:
pytest.xfail('OpenSSL behaves weirdly under Python 3.4')
elif IS_WINDOWS or IS_GITHUB_ACTIONS_WORKFLOW:
err_text = str(ssl_err.value)
else:
raise
if isinstance(err_text, int):
err_text = str(ssl_err.value)
expected_substrings = (
'sslv3 alert bad certificate' if IS_LIBRESSL_BACKEND
else 'tlsv1 alert unknown ca',
)
if not six.PY2:
if IS_MACOS and IS_PYPY and adapter_type == 'pyopenssl':
expected_substrings = ('tlsv1 alert unknown ca',)
if (
tls_verify_mode in (
ssl.CERT_REQUIRED,
ssl.CERT_OPTIONAL,
)
and not is_trusted_cert
and tls_client_identity == 'localhost'
):
expected_substrings += (
'bad handshake: '
"SysCallError(10054, 'WSAECONNRESET')",
"('Connection aborted.', "
'OSError("(10054, \'WSAECONNRESET\')"))',
"('Connection aborted.', "
'OSError("(10054, \'WSAECONNRESET\')",))',
"('Connection aborted.', "
'error("(10054, \'WSAECONNRESET\')",))',
"('Connection aborted.', "
'ConnectionResetError(10054, '
"'An existing connection was forcibly closed "
"by the remote host', None, 10054, None))",
"('Connection aborted.', "
'error(10054, '
"'An existing connection was forcibly closed "
"by the remote host'))",
) if IS_WINDOWS else (
"('Connection aborted.', "
'OSError("(104, \'ECONNRESET\')"))',
"('Connection aborted.', "
'OSError("(104, \'ECONNRESET\')",))',
"('Connection aborted.', "
'error("(104, \'ECONNRESET\')",))',
"('Connection aborted.', "
"ConnectionResetError(104, 'Connection reset by peer'))",
"('Connection aborted.', "
"error(104, 'Connection reset by peer'))",
) if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_LINUX
) else (
"('Connection aborted.', "
"BrokenPipeError(32, 'Broken pipe'))",
)
if PY310_PLUS:
# FIXME: Figure out what's happening and correct the problem
expected_substrings += (
'SSLError(SSLEOFError(8, '
"'EOF occurred in violation of protocol (_ssl.c:",
)
if IS_GITHUB_ACTIONS_WORKFLOW and IS_WINDOWS and PY310_PLUS:
expected_substrings += (
"('Connection aborted.', "
'RemoteDisconnected('
"'Remote end closed connection without response'))",
)
assert any(e in err_text for e in expected_substrings)
@pytest.mark.parametrize( # noqa: C901 # FIXME
'adapter_type',
(
pytest.param(
'builtin',
marks=pytest.mark.xfail(
IS_GITHUB_ACTIONS_WORKFLOW and IS_MACOS and PY310_PLUS,
reason='Unclosed TLS resource warnings happen on macOS '
'under Python 3.10',
strict=False,
),
),
'pyopenssl',
),
)
@pytest.mark.parametrize(
('tls_verify_mode', 'use_client_cert'),
(
(ssl.CERT_NONE, False),
(ssl.CERT_NONE, True),
(ssl.CERT_OPTIONAL, False),
(ssl.CERT_OPTIONAL, True),
(ssl.CERT_REQUIRED, True),
),
)
def test_ssl_env( # noqa: C901 # FIXME
thread_exceptions,
recwarn,
mocker,
tls_http_server, adapter_type,
ca, tls_verify_mode, tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
tls_ca_certificate_pem_path,
use_client_cert,
):
"""Test the SSL environment generated by the SSL adapters."""
interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)
with mocker.mock_module.patch(
'idna.core.ulabel',
return_value=ntob('127.0.0.1'),
):
client_cert = ca.issue_cert(ntou('127.0.0.1'))
with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_adapter.context.set_verify(
_stdlib_to_openssl_verify[tls_verify_mode],
lambda conn, cert, errno, depth, preverify_ok: preverify_ok,
)
else:
tls_adapter.context.verify_mode = tls_verify_mode
ca.configure_trust(tls_adapter.context)
tls_certificate.configure_cert(tls_adapter.context)
tlswsgiserver = tls_http_server((interface, port), tls_adapter)
interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)
resp = requests.get(
'https://' + interface + ':' + str(port) + '/env',
verify=tls_ca_certificate_pem_path,
cert=cl_pem if use_client_cert else None,
)
if PY34 and resp.status_code != 200:
pytest.xfail(
'Python 3.4 has problems with verifying client certs',
)
env = json.loads(resp.content.decode('utf-8'))
# hard coded env
assert env['wsgi.url_scheme'] == 'https'
assert env['HTTPS'] == 'on'
# ensure these are present
for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:
assert key in env
# pyOpenSSL generates the env before the handshake completes
if adapter_type == 'pyopenssl':
return
for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:
assert key in env
# client certificate env
if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:
assert env['SSL_CLIENT_VERIFY'] == 'NONE'
else:
assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'
with open(cl_pem, 'rt') as f:
assert env['SSL_CLIENT_CERT'] in f.read()
for key in {
'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',
'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',
}:
assert key in env
# builtin ssl environment generation may use a loopback socket
# ensure no ResourceWarning was raised during the test
# NOTE: python 2.7 does not emit ResourceWarning for ssl sockets
if IS_PYPY:
# NOTE: PyPy doesn't have ResourceWarning
# Ref: https://doc.pypy.org/en/latest/cpython_differences.html
return
for warn in recwarn:
if not issubclass(warn.category, ResourceWarning):
continue
# the tests can sporadically generate resource warnings
# due to timing issues
# all of these sporadic warnings appear to be about socket.socket
# and have been observed to come from requests connection pool
msg = str(warn.message)
if 'socket.socket' in msg:
pytest.xfail(
'\n'.join((
'Sometimes this test fails due to '
'a socket.socket ResourceWarning:',
msg,
)),
)
pytest.fail(msg)
# to perform the ssl handshake over that loopback socket,
# the builtin ssl environment generation uses a thread
for _, _, trace in thread_exceptions:
print(trace, file=sys.stderr)
assert not thread_exceptions, ': '.join((
thread_exceptions[0][0].__name__,
thread_exceptions[0][1],
))
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
),
)
def test_https_over_http_error(http_server, ip_addr):
"""Ensure that connecting over HTTPS to HTTP port is handled."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
interface, _host, port = _get_conn_data(httpserver.bind_addr)
with pytest.raises(ssl.SSLError) as ssl_err:
six.moves.http_client.HTTPSConnection(
'{interface}:{port}'.format(
interface=interface,
port=port,
),
).request('GET', '/')
expected_substring = (
'wrong version number' if IS_ABOVE_OPENSSL10
else 'unknown protocol'
)
assert expected_substring in ssl_err.value.args[-1]
http_over_https_error_builtin_marks = []
if IS_WINDOWS and six.PY2:
http_over_https_error_builtin_marks.append(
pytest.mark.flaky(reruns=5, reruns_delay=2),
)
@pytest.mark.parametrize(
'adapter_type',
(
pytest.param(
'builtin',
marks=http_over_https_error_builtin_marks,
),
'pyopenssl',
),
)
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
pytest.param(ANY_INTERFACE_IPV6, marks=missing_ipv6),
),
)
def test_http_over_https_error(
tls_http_server, adapter_type,
ca, ip_addr,
tls_certificate,
tls_certificate_chain_pem_path,
tls_certificate_private_key_pem_path,
):
"""Ensure that connecting over HTTP to HTTPS port is handled."""
# disable some flaky tests
# https://github.com/cherrypy/cheroot/issues/225
issue_225 = (
IS_MACOS
and adapter_type == 'builtin'
)
if issue_225:
pytest.xfail('Test fails in Travis-CI')
tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)
tls_adapter = tls_adapter_cls(
tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,
)
if adapter_type == 'pyopenssl':
tls_adapter.context = tls_adapter.get_context()
tls_certificate.configure_cert(tls_adapter.context)
interface, _host, port = _get_conn_data(ip_addr)
tlshttpserver = tls_http_server((interface, port), tls_adapter)
interface, _host, port = _get_conn_data(
tlshttpserver.bind_addr,
)
fqdn = interface
if ip_addr is ANY_INTERFACE_IPV6:
fqdn = '[{fqdn}]'.format(**locals())
expect_fallback_response_over_plain_http = (
(
adapter_type == 'pyopenssl'
and (IS_ABOVE_OPENSSL10 or not six.PY2)
)
or PY27
) or (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and not IS_WIN2016
)
if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and IS_WIN2016
and adapter_type == 'builtin'
and ip_addr is ANY_INTERFACE_IPV6
):
expect_fallback_response_over_plain_http = True
if (
IS_GITHUB_ACTIONS_WORKFLOW
and IS_WINDOWS
and six.PY2
and not IS_WIN2016
and adapter_type == 'builtin'
and ip_addr is not ANY_INTERFACE_IPV6
):
expect_fallback_response_over_plain_http = False
if expect_fallback_response_over_plain_http:
resp = requests.get(
'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),
)
assert resp.status_code == 400
assert resp.text == (
'The client sent a plain HTTP request, '
'but this server only speaks HTTPS on this port.'
)
return
with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:
requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL
'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),
)
if IS_LINUX:
expected_error_code, expected_error_text = (
104, 'Connection reset by peer',
)
if IS_MACOS:
expected_error_code, expected_error_text = (
54, 'Connection reset by peer',
)
if IS_WINDOWS:
expected_error_code, expected_error_text = (
10054,
'An existing connection was forcibly closed by the remote host',
)
underlying_error = ssl_err.value.args[0].args[-1]
err_text = str(underlying_error)
assert underlying_error.errno == expected_error_code, (
'The underlying error is {underlying_error!r}'.
format(**locals())
)
assert expected_error_text in err_text
| 23,991 | Python | .py | 662 | 27.667674 | 79 | 0.603281 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,986 | test__compat.py | rembo10_headphones/lib/cheroot/test/test__compat.py |
# -*- coding: utf-8 -*-
"""Test suite for cross-python compatibility helpers."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
import six
from cheroot._compat import extract_bytes, memoryview, ntob, ntou, bton
@pytest.mark.parametrize(
('func', 'inp', 'out'),
(
(ntob, 'bar', b'bar'),
(ntou, 'bar', u'bar'),
(bton, b'bar', 'bar'),
),
)
def test_compat_functions_positive(func, inp, out):
"""Check that compatibility functions work with correct input."""
assert func(inp, encoding='utf-8') == out
@pytest.mark.parametrize(
'func',
(
ntob,
ntou,
),
)
def test_compat_functions_negative_nonnative(func):
"""Check that compatibility functions fail loudly for incorrect input."""
non_native_test_str = u'bar' if six.PY2 else b'bar'
with pytest.raises(TypeError):
func(non_native_test_str, encoding='utf-8')
def test_ntou_escape():
"""Check that ``ntou`` supports escape-encoding under Python 2."""
expected = u'hišřії'
actual = ntou('hi\u0161\u0159\u0456\u0457', encoding='escape')
assert actual == expected
@pytest.mark.parametrize(
('input_argument', 'expected_result'),
(
(b'qwerty', b'qwerty'),
(memoryview(b'asdfgh'), b'asdfgh'),
),
)
def test_extract_bytes(input_argument, expected_result):
"""Check that legitimate inputs produce bytes."""
assert extract_bytes(input_argument) == expected_result
def test_extract_bytes_invalid():
"""Ensure that invalid input causes exception to be raised."""
with pytest.raises(
ValueError,
match=r'^extract_bytes\(\) only accepts bytes '
'and memoryview/buffer$',
):
extract_bytes(u'some юнікод їїї')
| 1,818 | Python | .py | 53 | 29.037736 | 77 | 0.656699 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,987 | webtest.py | rembo10_headphones/lib/cheroot/test/webtest.py |
"""Extensions to unittest for web frameworks.
Use the :py:meth:`WebCase.getPage` method to request a page
from your HTTP server.
Framework Integration
=====================
If you have control over your server process, you can handle errors
in the server-side of the HTTP conversation a bit better. You must run
both the client (your :py:class:`WebCase` tests) and the server in the
same process (but in separate threads, obviously).
When an error occurs in the framework, call server_error. It will print
the traceback to stdout, and keep any assertions you have from running
(the assumption is that, if the server errors, the page output will not
be of further significance to your tests).
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pprint
import re
import socket
import sys
import time
import traceback
import os
import json
import unittest # pylint: disable=deprecated-module,preferred-module
import warnings
import functools
from six.moves import http_client, map, urllib_parse
import six
from more_itertools.more import always_iterable
import jaraco.functools
def interface(host):
"""Return an IP address for a client connection given the server host.
If the server is listening on '0.0.0.0' (INADDR_ANY)
or '::' (IN6ADDR_ANY), this will return the proper localhost.
"""
if host == '0.0.0.0':
# INADDR_ANY, which should respond on localhost.
return '127.0.0.1'
if host == '::':
# IN6ADDR_ANY, which should respond on localhost.
return '::1'
return host
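# Minimal spot-check of the mapping above (illustrative only; the
# helper name is hypothetical).
def _example_interface_mapping():
    """Verify wildcard addresses map to their loopback equivalents."""
    assert interface('0.0.0.0') == '127.0.0.1'
    assert interface('::') == '::1'
    assert interface('10.0.0.5') == '10.0.0.5'  # concrete hosts pass through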
try:
# Jython support
if sys.platform[:4] == 'java':
def getchar():
"""Get a key press."""
# Hopefully this is enough
return sys.stdin.read(1)
else:
# On Windows, msvcrt.getch reads a single char without output.
import msvcrt
def getchar():
"""Get a key press."""
return msvcrt.getch()
except ImportError:
# Unix getchar
import tty
import termios
def getchar():
"""Get a key press."""
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
# from jaraco.properties
class NonDataProperty:
"""Non-data property decorator."""
def __init__(self, fget):
"""Initialize a non-data property."""
assert fget is not None, 'fget cannot be None'
assert callable(fget), 'fget must be callable'
self.fget = fget
def __get__(self, obj, objtype=None):
"""Return a class property."""
if obj is None:
return self
return self.fget(obj)
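# Hedged sketch (hypothetical class, for illustration only): unlike a
# regular ``property``, a non-data descriptor is computed on instance
# access, is reachable as the descriptor itself on class access, and
# can be shadowed by a plain instance attribute assignment.
class _ExampleConfig:
    @NonDataProperty
    def retries(self):
        """Computed default value."""
        return 3
# _ExampleConfig().retries == 3 via the descriptor, while assigning
# ``obj.retries = 5`` shadows it in the instance dict, something a
# data property (one defining ``__set__``) would intercept instead.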
class WebCase(unittest.TestCase):
"""Helper web test suite base."""
HOST = '127.0.0.1'
PORT = 8000
HTTP_CONN = http_client.HTTPConnection
PROTOCOL = 'HTTP/1.1'
scheme = 'http'
url = None
ssl_context = None
status = None
headers = None
body = None
encoding = 'utf-8'
time = None
@property
def _Conn(self):
"""Return HTTPConnection or HTTPSConnection based on self.scheme.
* from :py:mod:`python:http.client`.
"""
cls_name = '{scheme}Connection'.format(scheme=self.scheme.upper())
return getattr(http_client, cls_name)
def get_conn(self, auto_open=False):
"""Return a connection to our HTTP server."""
conn = self._Conn(self.interface(), self.PORT)
# Automatically re-connect?
conn.auto_open = auto_open
conn.connect()
return conn
def set_persistent(self, on=True, auto_open=False):
"""Make our HTTP_CONN persistent (or not).
If the 'on' argument is True (the default), then self.HTTP_CONN
will be set to an instance of HTTP(S)?Connection
to persist across requests.
As this class only allows for a single open connection, if
self already has an open connection, it will be closed.
"""
try:
self.HTTP_CONN.close()
except (TypeError, AttributeError):
pass
self.HTTP_CONN = (
self.get_conn(auto_open=auto_open)
if on
else self._Conn
)
@property
def persistent(self):
"""Presence of the persistent HTTP connection."""
return hasattr(self.HTTP_CONN, '__class__')
@persistent.setter
def persistent(self, on):
self.set_persistent(on)
def interface(self):
"""Return an IP address for a client connection.
If the server is listening on '0.0.0.0' (INADDR_ANY)
or '::' (IN6ADDR_ANY), this will return the proper localhost.
"""
return interface(self.HOST)
def getPage(
self, url, headers=None, method='GET', body=None,
protocol=None, raise_subcls=(),
):
"""Open the url with debugging support.
Return status, headers, body.
url should be the identifier passed to the server, typically a
server-absolute path and query string (sent between method and
protocol), and should only be an absolute URI if proxy support is
enabled in the server.
If the application under test generates absolute URIs, be sure
to wrap them first with :py:func:`strip_netloc`::
>>> class MyAppWebCase(WebCase):
... def getPage(url, *args, **kwargs):
... super(MyAppWebCase, self).getPage(
... cheroot.test.webtest.strip_netloc(url),
... *args, **kwargs
... )
``raise_subcls`` is passed through to :py:func:`openURL`.
"""
ServerError.on = False
if isinstance(url, six.text_type):
url = url.encode('utf-8')
if isinstance(body, six.text_type):
body = body.encode('utf-8')
# for compatibility, support raise_subcls being None
raise_subcls = raise_subcls or ()
self.url = url
self.time = None
start = time.time()
result = openURL(
url, headers, method, body, self.HOST, self.PORT,
self.HTTP_CONN, protocol or self.PROTOCOL,
raise_subcls=raise_subcls,
ssl_context=self.ssl_context,
)
self.time = time.time() - start
self.status, self.headers, self.body = result
# Build a list of request cookies from the previous response cookies.
self.cookies = [
('Cookie', v) for k, v in self.headers
if k.lower() == 'set-cookie'
]
if ServerError.on:
raise ServerError()
return result
@NonDataProperty
def interactive(self):
"""Determine whether tests are run in interactive mode.
Load interactivity setting from environment, where
the value can be numeric or a string like true or
False or 1 or 0.
"""
env_str = os.environ.get('WEBTEST_INTERACTIVE', 'True')
is_interactive = bool(json.loads(env_str.lower()))
if is_interactive:
warnings.warn(
'Interactive test failure interceptor support via '
'WEBTEST_INTERACTIVE environment variable is deprecated.',
DeprecationWarning,
)
return is_interactive
console_height = 30
def _handlewebError(self, msg): # noqa: C901 # FIXME
print('')
print(' ERROR: %s' % msg)
if not self.interactive:
raise self.failureException(msg)
p = (
' Show: '
'[B]ody [H]eaders [S]tatus [U]RL; '
'[I]gnore, [R]aise, or sys.e[X]it >> '
)
sys.stdout.write(p)
sys.stdout.flush()
while True:
i = getchar().upper()
if not isinstance(i, type('')):
i = i.decode('ascii')
if i not in 'BHSUIRX':
continue
print(i.upper()) # Also prints new line
if i == 'B':
for x, line in enumerate(self.body.splitlines()):
if (x + 1) % self.console_height == 0:
# The \r and comma should make the next line overwrite
sys.stdout.write('<-- More -->\r')
m = getchar().lower()
# Erase our "More" prompt
sys.stdout.write(' \r')
if m == 'q':
break
print(line)
elif i == 'H':
pprint.pprint(self.headers)
elif i == 'S':
print(self.status)
elif i == 'U':
print(self.url)
elif i == 'I':
# return without raising the normal exception
return
elif i == 'R':
raise self.failureException(msg)
elif i == 'X':
sys.exit()
sys.stdout.write(p)
sys.stdout.flush()
@property
def status_code(self): # noqa: D401; irrelevant for properties
"""Integer HTTP status code."""
return int(self.status[:3])
def status_matches(self, expected):
"""Check whether actual status matches expected."""
actual = (
self.status_code
if isinstance(expected, int) else
self.status
)
return expected == actual
def assertStatus(self, status, msg=None):
"""Fail if self.status != status.
status may be integer code, exact string status, or
iterable of allowed possibilities.
"""
if any(map(self.status_matches, always_iterable(status))):
return
tmpl = 'Status {self.status} does not match {status}'
msg = msg or tmpl.format(**locals())
self._handlewebError(msg)
def assertHeader(self, key, value=None, msg=None):
"""Fail if (key, [value]) not in self.headers."""
lowkey = key.lower()
for k, v in self.headers:
if k.lower() == lowkey:
if value is None or str(value) == v:
return v
if msg is None:
if value is None:
msg = '%r not in headers' % key
else:
msg = '%r:%r not in headers' % (key, value)
self._handlewebError(msg)
def assertHeaderIn(self, key, values, msg=None):
"""Fail if header indicated by key doesn't have one of the values."""
lowkey = key.lower()
for k, v in self.headers:
if k.lower() == lowkey:
matches = [value for value in values if str(value) == v]
if matches:
return matches
if msg is None:
msg = '%(key)r not in %(values)r' % vars()
self._handlewebError(msg)
def assertHeaderItemValue(self, key, value, msg=None):
"""Fail if the header does not contain the specified value."""
actual_value = self.assertHeader(key, msg=msg)
# Materialize so membership testing and message formatting both work.
header_values = list(map(str.strip, actual_value.split(',')))
if value in header_values:
return value
if msg is None:
msg = '%r not in %r' % (value, header_values)
self._handlewebError(msg)
def assertNoHeader(self, key, msg=None):
"""Fail if key in self.headers."""
lowkey = key.lower()
matches = [k for k, v in self.headers if k.lower() == lowkey]
if matches:
if msg is None:
msg = '%r in headers' % key
self._handlewebError(msg)
def assertNoHeaderItemValue(self, key, value, msg=None):
"""Fail if the header contains the specified value."""
lowkey = key.lower()
hdrs = self.headers
matches = [k for k, v in hdrs if k.lower() == lowkey and v == value]
if matches:
if msg is None:
msg = '%r:%r in %r' % (key, value, hdrs)
self._handlewebError(msg)
def assertBody(self, value, msg=None):
"""Fail if value != self.body."""
if isinstance(value, six.text_type):
value = value.encode(self.encoding)
if value != self.body:
if msg is None:
msg = 'expected body:\n%r\n\nactual body:\n%r' % (
value, self.body,
)
self._handlewebError(msg)
def assertInBody(self, value, msg=None):
"""Fail if value not in self.body."""
if isinstance(value, six.text_type):
value = value.encode(self.encoding)
if value not in self.body:
if msg is None:
msg = '%r not in body: %s' % (value, self.body)
self._handlewebError(msg)
def assertNotInBody(self, value, msg=None):
"""Fail if value in self.body."""
if isinstance(value, six.text_type):
value = value.encode(self.encoding)
if value in self.body:
if msg is None:
msg = '%r found in body' % value
self._handlewebError(msg)
def assertMatchesBody(self, pattern, msg=None, flags=0):
"""Fail if value (a regex pattern) is not in self.body."""
if isinstance(pattern, six.text_type):
pattern = pattern.encode(self.encoding)
if re.search(pattern, self.body, flags) is None:
if msg is None:
msg = 'No match for %r in body' % pattern
self._handlewebError(msg)
methods_with_bodies = ('POST', 'PUT', 'PATCH')
def cleanHeaders(headers, method, body, host, port):
"""Return request headers, with required headers added (if missing)."""
if headers is None:
headers = []
# Add the required Host request header if not present.
# [This specifies the host:port of the server, not the client.]
found = False
for k, _v in headers:
if k.lower() == 'host':
found = True
break
if not found:
if port == 80:
headers.append(('Host', host))
else:
headers.append(('Host', '%s:%s' % (host, port)))
if method in methods_with_bodies:
# Stick in default type and length headers if not present
found = False
for k, v in headers:
if k.lower() == 'content-type':
found = True
break
if not found:
headers.append(
('Content-Type', 'application/x-www-form-urlencoded'),
)
headers.append(('Content-Length', str(len(body or ''))))
return headers
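# Illustrative expectation (an assumption based on the logic above): a
# bare POST gets a Host header plus a default Content-Type and a
# Content-Length computed from the body.
def _example_clean_headers():
    """Show the defaults added for a header-less POST request."""
    headers = cleanHeaders(None, 'POST', 'a=1', 'localhost', 8080)
    assert ('Host', 'localhost:8080') in headers
    assert (
        'Content-Type', 'application/x-www-form-urlencoded',
    ) in headers
    assert ('Content-Length', '3') in headers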
def shb(response):
"""Return status, headers, body the way we like from a response."""
resp_status_line = '%s %s' % (response.status, response.reason)
if not six.PY2:
return resp_status_line, response.getheaders(), response.read()
h = []
key, value = None, None
for line in response.msg.headers:
if line:
if line[0] in ' \t':
value += line.strip()
else:
if key and value:
h.append((key, value))
key, value = line.split(':', 1)
key = key.strip()
value = value.strip()
if key and value:
h.append((key, value))
return resp_status_line, h, response.read()
# def openURL(*args, raise_subcls=(), **kwargs):
# py27 compatible signature:
def openURL(*args, **kwargs):
"""
Open a URL, retrying when it fails.
Specify ``raise_subcls`` (class or tuple of classes) to exclude
those socket.error subclasses from being suppressed and retried.
"""
raise_subcls = kwargs.pop('raise_subcls', ())
opener = functools.partial(_open_url_once, *args, **kwargs)
def on_exception():
exc = sys.exc_info()[1]
if isinstance(exc, raise_subcls):
raise exc
time.sleep(0.5)
# Try up to 10 times
return jaraco.functools.retry_call(
opener,
retries=9,
cleanup=on_exception,
trap=socket.error,
)
def _open_url_once(
url, headers=None, method='GET', body=None,
host='127.0.0.1', port=8000, http_conn=http_client.HTTPConnection,
protocol='HTTP/1.1', ssl_context=None,
):
"""Open the given HTTP resource and return status, headers, and body."""
headers = cleanHeaders(headers, method, body, host, port)
# Allow http_conn to be a class or an instance
if hasattr(http_conn, 'host'):
conn = http_conn
else:
kw = {}
if ssl_context:
kw['context'] = ssl_context
conn = http_conn(interface(host), port, **kw)
conn._http_vsn_str = protocol
conn._http_vsn = int(''.join([x for x in protocol if x.isdigit()]))
if not six.PY2 and isinstance(url, bytes):
url = url.decode()
conn.putrequest(
method.upper(), url, skip_host=True,
skip_accept_encoding=True,
)
for key, value in headers:
conn.putheader(key, value.encode('Latin-1'))
conn.endheaders()
if body is not None:
conn.send(body)
# Handle response
response = conn.getresponse()
s, h, b = shb(response)
if not hasattr(http_conn, 'host'):
# We made our own conn instance. Close it.
conn.close()
return s, h, b
def strip_netloc(url):
"""Return absolute-URI path from URL.
Strip the scheme and host from the URL, returning the
server-absolute portion.
Useful for wrapping an absolute-URI for which only the
path is expected (such as in calls to :py:meth:`WebCase.getPage`).
.. testsetup::
from cheroot.test.webtest import strip_netloc
>>> strip_netloc('https://google.com/foo/bar?bing#baz')
'/foo/bar?bing'
>>> strip_netloc('//google.com/foo/bar?bing#baz')
'/foo/bar?bing'
>>> strip_netloc('/foo/bar?bing#baz')
'/foo/bar?bing'
"""
parsed = urllib_parse.urlparse(url)
_scheme, _netloc, path, params, query, _fragment = parsed
stripped = '', '', path, params, query, ''
return urllib_parse.urlunparse(stripped)
# Add any exceptions which your web framework handles
# normally (that you don't want server_error to trap).
ignored_exceptions = []
# You'll want to set this to True when you can't guarantee
# that each response will immediately follow each request;
# for example, when handling requests via multiple threads.
ignore_all = False
class ServerError(Exception):
"""Exception for signalling server error."""
on = False
def server_error(exc=None):
"""Server debug hook.
Return True if exception handled, False if ignored.
You probably want to wrap this, so you can still handle an error using
your framework when it's ignored.
"""
if exc is None:
exc = sys.exc_info()
if ignore_all or exc[0] in ignored_exceptions:
return False
else:
ServerError.on = True
print('')
print(''.join(traceback.format_exception(*exc)))
return True
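# Hedged integration sketch (hypothetical app, following the module
# docstring): call ``server_error()`` from the server-side exception
# handler so a failing request also fails the in-flight assertions.
def _example_wsgi_app(environ, start_response):
    """Minimal WSGI app wired into the server_error debug hook."""
    try:
        raise RuntimeError('framework blew up')
    except Exception:
        if server_error():  # records the traceback, sets ServerError.on
            start_response(
                '500 Internal Server Error',
                [('Content-Type', 'text/plain')],
            )
            return [b'server error']
        raise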
| 19,205 | Python | .py | 506 | 29.092885 | 78 | 0.587672 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,988 | test_core.py | rembo10_headphones/lib/cheroot/test/test_core.py |
"""Tests for managing HTTP issues (malformed requests, etc)."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import socket
import pytest
import six
from six.moves import urllib
from cheroot.test import helper
HTTP_BAD_REQUEST = 400
HTTP_LENGTH_REQUIRED = 411
HTTP_NOT_FOUND = 404
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_OK = 200
HTTP_VERSION_NOT_SUPPORTED = 505
class HelloController(helper.Controller):
"""Controller for serving WSGI apps."""
def hello(req, resp):
"""Render Hello world."""
return 'Hello world!'
def body_required(req, resp):
"""Render Hello world or set 411."""
if req.environ.get('Content-Length', None) is None:
resp.status = '411 Length Required'
return
return 'Hello world!'
def query_string(req, resp):
"""Render QUERY_STRING value."""
return req.environ.get('QUERY_STRING', '')
def asterisk(req, resp):
"""Render request method value."""
# pylint: disable=possibly-unused-variable
method = req.environ.get('REQUEST_METHOD', 'NO METHOD FOUND')
tmpl = 'Got asterisk URI path with {method} method'
return tmpl.format(**locals())
def _munge(string):
"""Encode PATH_INFO correctly depending on Python version.
WSGI 1.0 is a mess around unicode. Create endpoints
that match the PATH_INFO that it produces.
"""
if six.PY2:
return string
return string.encode('utf-8').decode('latin-1')
handlers = {
'/hello': hello,
'/no_body': hello,
'/body_required': body_required,
'/query_string': query_string,
_munge('/привіт'): hello,
_munge('/Юххууу'): hello,
'/\xa0Ðblah key 0 900 4 data': hello,
'/*': asterisk,
}
def _get_http_response(connection, method='GET'):
c = connection
kwargs = {'strict': c.strict} if hasattr(c, 'strict') else {}
# Python 3.2 removed the 'strict' feature, saying:
# "http.client now always assumes HTTP/1.x compliant servers."
return c.response_class(c.sock, method=method, **kwargs)
@pytest.fixture
def testing_server(wsgi_server_client):
"""Attach a WSGI app to the given server and preconfigure it."""
wsgi_server = wsgi_server_client.server_instance
wsgi_server.wsgi_app = HelloController()
wsgi_server.max_request_body_size = 30000000
wsgi_server.server_client = wsgi_server_client
return wsgi_server
@pytest.fixture
def test_client(testing_server):
"""Get and return a test client out of the given server."""
return testing_server.server_client
@pytest.fixture
def testing_server_with_defaults(wsgi_server_client):
"""Attach a WSGI app to the given server and preconfigure it."""
wsgi_server = wsgi_server_client.server_instance
wsgi_server.wsgi_app = HelloController()
wsgi_server.server_client = wsgi_server_client
return wsgi_server
@pytest.fixture
def test_client_with_defaults(testing_server_with_defaults):
"""Get and return a test client out of the given server."""
return testing_server_with_defaults.server_client
def test_http_connect_request(test_client):
"""Check that CONNECT query results in Method Not Allowed status."""
status_line = test_client.connect('/anything')[0]
actual_status = int(status_line[:3])
assert actual_status == 405
def test_normal_request(test_client):
"""Check that normal GET query succeeds."""
status_line, _, actual_resp_body = test_client.get('/hello')
actual_status = int(status_line[:3])
assert actual_status == HTTP_OK
assert actual_resp_body == b'Hello world!'
def test_query_string_request(test_client):
"""Check that GET param is parsed well."""
status_line, _, actual_resp_body = test_client.get(
'/query_string?test=True',
)
actual_status = int(status_line[:3])
assert actual_status == HTTP_OK
assert actual_resp_body == b'test=True'
@pytest.mark.parametrize(
'uri',
(
'/hello', # plain
'/query_string?test=True', # query
'/{0}?{1}={2}'.format( # quoted unicode
*map(urllib.parse.quote, ('Юххууу', 'ї', 'йо'))
),
),
)
def test_parse_acceptable_uri(test_client, uri):
"""Check that server responds with OK to valid GET queries."""
status_line = test_client.get(uri)[0]
actual_status = int(status_line[:3])
assert actual_status == HTTP_OK
@pytest.mark.xfail(six.PY2, reason='Fails on Python 2')
def test_parse_uri_unsafe_uri(test_client):
"""Test that malicious URI does not allow HTTP injection.
This effectively checks that sending GET request with URL
/%A0%D0blah%20key%200%20900%204%20data
is not converted into
GET /
blah key 0 900 4 data
HTTP/1.1
which would be a security issue otherwise.
"""
c = test_client.get_connection()
resource = '/\xa0Ðblah key 0 900 4 data'.encode('latin-1')
quoted = urllib.parse.quote(resource)
assert quoted == '/%A0%D0blah%20key%200%20900%204%20data'
request = 'GET {quoted} HTTP/1.1'.format(**locals())
c._output(request.encode('utf-8'))
c._send_output()
response = _get_http_response(c, method='GET')
response.begin()
assert response.status == HTTP_OK
assert response.read(12) == b'Hello world!'
c.close()
def test_parse_uri_invalid_uri(test_client):
"""Check that server responds with Bad Request to invalid GET queries.
Invalid request line test case: it should only contain US-ASCII.
"""
c = test_client.get_connection()
c._output(u'GET /йопта! HTTP/1.1'.encode('utf-8'))
c._send_output()
response = _get_http_response(c, method='GET')
response.begin()
assert response.status == HTTP_BAD_REQUEST
assert response.read(21) == b'Malformed Request-URI'
c.close()
@pytest.mark.parametrize(
'uri',
(
'hello', # ascii
'привіт', # non-ascii
),
)
def test_parse_no_leading_slash_invalid(test_client, uri):
"""Check that server responds with Bad Request to invalid GET queries.
Invalid request line test case: it should have a leading slash (i.e. be absolute).
"""
status_line, _, actual_resp_body = test_client.get(
urllib.parse.quote(uri),
)
actual_status = int(status_line[:3])
assert actual_status == HTTP_BAD_REQUEST
assert b'starting with a slash' in actual_resp_body
def test_parse_uri_absolute_uri(test_client):
"""Check that server responds with Bad Request to Absolute URI.
Only proxy servers should allow this.
"""
status_line, _, actual_resp_body = test_client.get('http://google.com/')
actual_status = int(status_line[:3])
assert actual_status == HTTP_BAD_REQUEST
expected_body = b'Absolute URI not allowed if server is not a proxy.'
assert actual_resp_body == expected_body
def test_parse_uri_asterisk_uri(test_client):
"""Check that server responds with OK to OPTIONS with "*" Absolute URI."""
status_line, _, actual_resp_body = test_client.options('*')
actual_status = int(status_line[:3])
assert actual_status == HTTP_OK
expected_body = b'Got asterisk URI path with OPTIONS method'
assert actual_resp_body == expected_body
def test_parse_uri_fragment_uri(test_client):
"""Check that server responds with Bad Request to URI with fragment."""
status_line, _, actual_resp_body = test_client.get(
'/hello?test=something#fake',
)
actual_status = int(status_line[:3])
assert actual_status == HTTP_BAD_REQUEST
expected_body = b'Illegal #fragment in Request-URI.'
assert actual_resp_body == expected_body
def test_no_content_length(test_client):
"""Test POST query with an empty body being successful."""
# "The presence of a message-body in a request is signaled by the
# inclusion of a Content-Length or Transfer-Encoding header field in
# the request's message-headers."
#
# Send a message with neither header and no body.
c = test_client.get_connection()
c.request('POST', '/no_body')
response = c.getresponse()
actual_resp_body = response.read()
actual_status = response.status
assert actual_status == HTTP_OK
assert actual_resp_body == b'Hello world!'
def test_content_length_required(test_client):
"""Test POST query with body failing because of missing Content-Length."""
# Now send a message that has no Content-Length, but does send a body.
# Verify that the server times out the socket and responds
# with 411 Length Required.
c = test_client.get_connection()
c.request('POST', '/body_required')
response = c.getresponse()
response.read()
actual_status = response.status
assert actual_status == HTTP_LENGTH_REQUIRED
@pytest.mark.xfail(
reason='https://github.com/cherrypy/cheroot/issues/106',
strict=False, # sometimes it passes
)
def test_large_request(test_client_with_defaults):
"""Test GET query with maliciously large Content-Length."""
# If the server's max_request_body_size is not set (i.e. is set to 0)
# then this will result in an `OverflowError: Python int too large to
# convert to C ssize_t` in the server.
# We expect that this should instead return that the request is too
# large.
c = test_client_with_defaults.get_connection()
c.putrequest('GET', '/hello')
c.putheader('Content-Length', str(2**64))
c.endheaders()
response = c.getresponse()
actual_status = response.status
assert actual_status == HTTP_REQUEST_ENTITY_TOO_LARGE
@pytest.mark.parametrize(
('request_line', 'status_code', 'expected_body'),
(
(
b'GET /', # missing proto
HTTP_BAD_REQUEST, b'Malformed Request-Line',
),
(
b'GET / HTTPS/1.1', # invalid proto
HTTP_BAD_REQUEST, b'Malformed Request-Line: bad protocol',
),
(
b'GET / HTTP/1', # invalid version
HTTP_BAD_REQUEST, b'Malformed Request-Line: bad version',
),
(
b'GET / HTTP/2.15', # invalid ver
HTTP_VERSION_NOT_SUPPORTED, b'Cannot fulfill request',
),
),
)
def test_malformed_request_line(
test_client, request_line,
status_code, expected_body,
):
"""Test missing or invalid HTTP version in Request-Line."""
c = test_client.get_connection()
c._output(request_line)
c._send_output()
response = _get_http_response(c, method='GET')
response.begin()
assert response.status == status_code
assert response.read(len(expected_body)) == expected_body
c.close()
def test_malformed_http_method(test_client):
"""Test non-uppercase HTTP method."""
c = test_client.get_connection()
c.putrequest('GeT', '/malformed_method_case')
c.putheader('Content-Type', 'text/plain')
c.endheaders()
response = c.getresponse()
actual_status = response.status
assert actual_status == HTTP_BAD_REQUEST
actual_resp_body = response.read(21)
assert actual_resp_body == b'Malformed method name'
def test_malformed_header(test_client):
"""Check that broken HTTP header results in Bad Request."""
c = test_client.get_connection()
c.putrequest('GET', '/')
c.putheader('Content-Type', 'text/plain')
# See https://www.bitbucket.org/cherrypy/cherrypy/issue/941
c._output(b'Re, 1.2.3.4#015#012')
c.endheaders()
response = c.getresponse()
actual_status = response.status
assert actual_status == HTTP_BAD_REQUEST
actual_resp_body = response.read(20)
assert actual_resp_body == b'Illegal header line.'
def test_request_line_split_issue_1220(test_client):
"""Check that HTTP request line of exactly 256 chars length is OK."""
Request_URI = (
'/hello?'
'intervenant-entreprise-evenement_classaction='
'evenement-mailremerciements'
'&_path=intervenant-entreprise-evenement'
'&intervenant-entreprise-evenement_action-id=19404'
'&intervenant-entreprise-evenement_id=19404'
'&intervenant-entreprise_id=28092'
)
assert len('GET %s HTTP/1.1\r\n' % Request_URI) == 256
actual_resp_body = test_client.get(Request_URI)[2]
assert actual_resp_body == b'Hello world!'
def test_garbage_in(test_client):
"""Test that server sends an error for garbage received over TCP."""
# Connect without SSL regardless of server.scheme
c = test_client.get_connection()
c._output(b'gjkgjklsgjklsgjkljklsg')
c._send_output()
response = c.response_class(c.sock, method='GET')
try:
response.begin()
actual_status = response.status
assert actual_status == HTTP_BAD_REQUEST
actual_resp_body = response.read(22)
assert actual_resp_body == b'Malformed Request-Line'
c.close()
except socket.error as ex:
# "Connection reset by peer" is also acceptable.
if ex.errno != errno.ECONNRESET:
raise
class CloseController:
"""Controller for testing the close callback."""
def __call__(self, environ, start_response):
"""Get the req to know header sent status."""
self.req = start_response.__self__.req
resp = CloseResponse(self.close)
start_response(resp.status, resp.headers.items())
return resp
def close(self):
"""Close, writing hello."""
self.req.write(b'hello')
class CloseResponse:
"""Dummy empty response to trigger the no body status."""
def __init__(self, close):
"""Use some defaults to ensure we have a header."""
self.status = '200 OK'
self.headers = {'Content-Type': 'text/html'}
self.close = close
def __getitem__(self, index):
"""Ensure we don't have a body."""
raise IndexError()
def output(self):
"""Return self to hook the close method."""
return self
@pytest.fixture
def testing_server_close(wsgi_server_client):
"""Attach a WSGI app to the given server and preconfigure it."""
wsgi_server = wsgi_server_client.server_instance
wsgi_server.wsgi_app = CloseController()
wsgi_server.max_request_body_size = 30000000
wsgi_server.server_client = wsgi_server_client
return wsgi_server
def test_send_header_before_closing(testing_server_close):
"""Test we are actually sending the headers before calling 'close'."""
_, _, resp_body = testing_server_close.server_client.get('/')
assert resp_body == b'hello'
| 14,652 | Python | .py | 364 | 34.483516 | 79 | 0.672621 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,989 | test_conn.py | rembo10_headphones/lib/cheroot/test/test_conn.py |
"""Tests for TCP connection handling, including proper and timely close."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import socket
import time
import logging
import traceback as traceback_
from collections import namedtuple
from six.moves import range, http_client, urllib
import six
import pytest
from jaraco.text import trim, unwrap
from cheroot.test import helper, webtest
from cheroot._compat import IS_CI, IS_MACOS, IS_PYPY, IS_WINDOWS
import cheroot.server
IS_SLOW_ENV = IS_MACOS or IS_WINDOWS
timeout = 1
pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN'
class Controller(helper.Controller):
"""Controller for serving WSGI apps."""
def hello(req, resp):
"""Render Hello world."""
return 'Hello, world!'
def pov(req, resp):
"""Render ``pov`` value."""
return pov
def stream(req, resp):
"""Render streaming response."""
if 'set_cl' in req.environ['QUERY_STRING']:
resp.headers['Content-Length'] = str(10)
def content():
for x in range(10):
yield str(x)
return content()
def upload(req, resp):
"""Process file upload and render thank."""
if not req.environ['REQUEST_METHOD'] == 'POST':
raise AssertionError(
"'POST' != request.method %r" %
req.environ['REQUEST_METHOD'],
)
return "thanks for '%s'" % req.environ['wsgi.input'].read()
def custom_204(req, resp):
"""Render response with status 204."""
resp.status = '204'
return 'Code = 204'
def custom_304(req, resp):
"""Render response with status 304."""
resp.status = '304'
return 'Code = 304'
def err_before_read(req, resp):
"""Render response with status 500."""
resp.status = '500 Internal Server Error'
return 'ok'
def one_megabyte_of_a(req, resp):
"""Render 1MB response."""
return ['a' * 1024] * 1024
def wrong_cl_buffered(req, resp):
"""Render buffered response with invalid length value."""
resp.headers['Content-Length'] = '5'
return 'I have too many bytes'
def wrong_cl_unbuffered(req, resp):
"""Render unbuffered response with invalid length value."""
resp.headers['Content-Length'] = '5'
return ['I too', ' have too many bytes']
def _munge(string):
"""Encode PATH_INFO correctly depending on Python version.
WSGI 1.0 is a mess around unicode. Create endpoints
that match the PATH_INFO that it produces.
"""
if six.PY2:
return string
return string.encode('utf-8').decode('latin-1')
handlers = {
'/hello': hello,
'/pov': pov,
'/page1': pov,
'/page2': pov,
'/page3': pov,
'/stream': stream,
'/upload': upload,
'/custom/204': custom_204,
'/custom/304': custom_304,
'/err_before_read': err_before_read,
'/one_megabyte_of_a': one_megabyte_of_a,
'/wrong_cl_buffered': wrong_cl_buffered,
'/wrong_cl_unbuffered': wrong_cl_unbuffered,
}
class ErrorLogMonitor:
"""Mock class to access the server error_log calls made by the server."""
ErrorLogCall = namedtuple('ErrorLogCall', ['msg', 'level', 'traceback'])
def __init__(self):
"""Initialize the server error log monitor/interceptor.
If you need to ignore a particular error message use the property
``ignored_msgs`` by appending to the list the expected error messages.
"""
self.calls = []
# to be used in the teardown validation
self.ignored_msgs = []
def __call__(self, msg='', level=logging.INFO, traceback=False):
"""Intercept the call to the server error_log method."""
if traceback:
tblines = traceback_.format_exc()
else:
tblines = ''
self.calls.append(ErrorLogMonitor.ErrorLogCall(msg, level, tblines))
@pytest.fixture
def raw_testing_server(wsgi_server_client):
"""Attach a WSGI app to the given server and preconfigure it."""
app = Controller()
def _timeout(req, resp):
return str(wsgi_server.timeout)
app.handlers['/timeout'] = _timeout
wsgi_server = wsgi_server_client.server_instance
wsgi_server.wsgi_app = app
wsgi_server.max_request_body_size = 1001
wsgi_server.timeout = timeout
wsgi_server.server_client = wsgi_server_client
wsgi_server.keep_alive_conn_limit = 2
return wsgi_server
@pytest.fixture
def testing_server(raw_testing_server, monkeypatch):
"""Modify the "raw" base server to monitor the error_log messages.
If you need to ignore a particular error message use the property
``testing_server.error_log.ignored_msgs`` by appending to the list
the expected error messages.
"""
# patch the error_log calls of the server instance
monkeypatch.setattr(raw_testing_server, 'error_log', ErrorLogMonitor())
yield raw_testing_server
    # Teardown verification, in case the server logged an error
    # that was never surfaced to the client, or we just made a mistake.
# pylint: disable=possibly-unused-variable
for c_msg, c_level, c_traceback in raw_testing_server.error_log.calls:
if c_level <= logging.WARNING:
continue
assert c_msg in raw_testing_server.error_log.ignored_msgs, (
'Found error in the error log: '
"message = '{c_msg}', level = '{c_level}'\n"
'{c_traceback}'.format(**locals()),
)
@pytest.fixture
def test_client(testing_server):
"""Get and return a test client out of the given server."""
return testing_server.server_client
def header_exists(header_name, headers):
"""Check that a header is present."""
return header_name.lower() in (k.lower() for (k, _) in headers)
def header_has_value(header_name, header_value, headers):
"""Check that a header with a given value is present."""
return header_name.lower() in (
k.lower() for (k, v) in headers
if v == header_value
)
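# A minimal usage sketch (not part of the original suite) for the two header
# helpers above; ``webtest`` returns headers as ``(name, value)`` tuples:
#
#     hdrs = [('Content-Type', 'text/plain'), ('Connection', 'close')]
#     assert header_exists('connection', hdrs)  # case-insensitive
#     assert header_has_value('Connection', 'close', hdrs)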
def test_HTTP11_persistent_connections(test_client):
"""Test persistent HTTP/1.1 connections."""
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
# Make the first request and assert there's no "Connection: close".
status_line, actual_headers, actual_resp_body = test_client.get(
'/pov', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
# Make another request on the same connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/page1', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
# Test client-side close.
status_line, actual_headers, actual_resp_body = test_client.get(
'/page2', http_conn=http_connection,
headers=[('Connection', 'close')],
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert header_has_value('Connection', 'close', actual_headers)
# Make another request on the same connection, which should error.
with pytest.raises(http_client.NotConnected):
test_client.get('/pov', http_conn=http_connection)
@pytest.mark.parametrize(
'set_cl',
(
False, # Without Content-Length
True, # With Content-Length
),
)
def test_streaming_11(test_client, set_cl):
"""Test serving of streaming responses with HTTP/1.1 protocol."""
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
# Make the first request and assert there's no "Connection: close".
status_line, actual_headers, actual_resp_body = test_client.get(
'/pov', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should stream
# without closing the connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/stream?set_cl=Yes', http_conn=http_connection,
)
assert header_exists('Content-Length', actual_headers)
assert not header_has_value('Connection', 'close', actual_headers)
assert not header_exists('Transfer-Encoding', actual_headers)
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == b'0123456789'
else:
# When no Content-Length response header is provided,
# streamed output will either close the connection, or use
# chunked encoding, to determine transfer-length.
status_line, actual_headers, actual_resp_body = test_client.get(
'/stream', http_conn=http_connection,
)
assert not header_exists('Content-Length', actual_headers)
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == b'0123456789'
chunked_response = False
for k, v in actual_headers:
if k.lower() == 'transfer-encoding':
if str(v) == 'chunked':
chunked_response = True
if chunked_response:
assert not header_has_value('Connection', 'close', actual_headers)
else:
assert header_has_value('Connection', 'close', actual_headers)
# Make another request on the same connection, which should
# error.
with pytest.raises(http_client.NotConnected):
test_client.get('/pov', http_conn=http_connection)
# Try HEAD.
    # See https://www.bitbucket.org/cherrypy/cherrypy/issue/864.
    # TODO: figure out how this can be possible on a closed connection
    # (chunked_response case).
status_line, actual_headers, actual_resp_body = test_client.head(
'/stream', http_conn=http_connection,
)
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == b''
assert not header_exists('Transfer-Encoding', actual_headers)
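# Background for the branches above (summarizing RFC 7230, section 3.3.3):
# a response's message length is determined by, in order of precedence,
# ``Transfer-Encoding: chunked``, then ``Content-Length``, and otherwise by
# the server closing the connection. That is why the no-Content-Length branch
# accepts either a chunked reply (connection may stay open) or a
# close-delimited one (``Connection: close`` must then be present).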
@pytest.mark.parametrize(
'set_cl',
(
False, # Without Content-Length
True, # With Content-Length
),
)
def test_streaming_10(test_client, set_cl):
"""Test serving of streaming responses with HTTP/1.0 protocol."""
original_server_protocol = test_client.server_instance.protocol
test_client.server_instance.protocol = 'HTTP/1.0'
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
# Make the first request and assert Keep-Alive.
status_line, actual_headers, actual_resp_body = test_client.get(
'/pov', http_conn=http_connection,
headers=[('Connection', 'Keep-Alive')],
protocol='HTTP/1.0',
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert header_has_value('Connection', 'Keep-Alive', actual_headers)
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should
# stream without closing the connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/stream?set_cl=Yes', http_conn=http_connection,
headers=[('Connection', 'Keep-Alive')],
protocol='HTTP/1.0',
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == b'0123456789'
assert header_exists('Content-Length', actual_headers)
assert header_has_value('Connection', 'Keep-Alive', actual_headers)
assert not header_exists('Transfer-Encoding', actual_headers)
else:
# When a Content-Length is not provided,
# the server should close the connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/stream', http_conn=http_connection,
headers=[('Connection', 'Keep-Alive')],
protocol='HTTP/1.0',
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == b'0123456789'
assert not header_exists('Content-Length', actual_headers)
assert not header_has_value('Connection', 'Keep-Alive', actual_headers)
assert not header_exists('Transfer-Encoding', actual_headers)
# Make another request on the same connection, which should error.
with pytest.raises(http_client.NotConnected):
test_client.get(
'/pov', http_conn=http_connection,
protocol='HTTP/1.0',
)
test_client.server_instance.protocol = original_server_protocol
@pytest.mark.parametrize(
'http_server_protocol',
(
'HTTP/1.0',
pytest.param(
'HTTP/1.1',
marks=pytest.mark.xfail(
IS_PYPY and IS_CI,
reason='Fails under PyPy in CI for unknown reason',
strict=False,
),
),
),
)
def test_keepalive(test_client, http_server_protocol):
"""Test Keep-Alive enabled connections."""
original_server_protocol = test_client.server_instance.protocol
test_client.server_instance.protocol = http_server_protocol
http_client_protocol = 'HTTP/1.0'
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
# Test a normal HTTP/1.0 request.
status_line, actual_headers, actual_resp_body = test_client.get(
'/page2',
protocol=http_client_protocol,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
# Test a keep-alive HTTP/1.0 request.
status_line, actual_headers, actual_resp_body = test_client.get(
'/page3', headers=[('Connection', 'Keep-Alive')],
http_conn=http_connection, protocol=http_client_protocol,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert header_has_value('Connection', 'Keep-Alive', actual_headers)
assert header_has_value(
'Keep-Alive',
'timeout={test_client.server_instance.timeout}'.format(**locals()),
actual_headers,
)
# Remove the keep-alive header again.
status_line, actual_headers, actual_resp_body = test_client.get(
'/page3', http_conn=http_connection,
protocol=http_client_protocol,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
assert not header_exists('Keep-Alive', actual_headers)
test_client.server_instance.protocol = original_server_protocol
def test_keepalive_conn_management(test_client):
"""Test management of Keep-Alive connections."""
test_client.server_instance.timeout = 2
def connection():
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
return http_connection
def request(conn, keepalive=True):
status_line, actual_headers, actual_resp_body = test_client.get(
'/page3', headers=[('Connection', 'Keep-Alive')],
http_conn=conn, protocol='HTTP/1.0',
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
if keepalive:
assert header_has_value('Connection', 'Keep-Alive', actual_headers)
assert header_has_value(
'Keep-Alive',
'timeout={test_client.server_instance.timeout}'.
format(**locals()),
actual_headers,
)
else:
assert not header_exists('Connection', actual_headers)
assert not header_exists('Keep-Alive', actual_headers)
def check_server_idle_conn_count(count, timeout=1.0):
deadline = time.time() + timeout
while True:
n = test_client.server_instance._connections._num_connections
if n == count:
return
assert time.time() <= deadline, (
'idle conn count mismatch, wanted {count}, got {n}'.
format(**locals()),
)
disconnect_errors = (
http_client.BadStatusLine,
http_client.CannotSendRequest,
http_client.NotConnected,
)
# Make a new connection.
c1 = connection()
request(c1)
check_server_idle_conn_count(1)
# Make a second one.
c2 = connection()
request(c2)
check_server_idle_conn_count(2)
# Reusing the first connection should still work.
request(c1)
check_server_idle_conn_count(2)
# Creating a new connection should still work, but we should
# have run out of available connections to keep alive, so the
# server should tell us to close.
c3 = connection()
request(c3, keepalive=False)
check_server_idle_conn_count(2)
# Show that the third connection was closed.
with pytest.raises(disconnect_errors):
request(c3)
check_server_idle_conn_count(2)
# Wait for some of our timeout.
time.sleep(1.2)
# Refresh the second connection.
request(c2)
check_server_idle_conn_count(2)
# Wait for the remainder of our timeout, plus one tick.
time.sleep(1.2)
check_server_idle_conn_count(1)
# First connection should now be expired.
with pytest.raises(disconnect_errors):
request(c1)
check_server_idle_conn_count(1)
# But the second one should still be valid.
request(c2)
check_server_idle_conn_count(1)
# Restore original timeout.
test_client.server_instance.timeout = timeout
@pytest.mark.parametrize(
('simulated_exception', 'error_number', 'exception_leaks'),
(
pytest.param(
socket.error, errno.ECONNRESET, False,
id='socket.error(ECONNRESET)',
),
pytest.param(
socket.error, errno.EPIPE, False,
id='socket.error(EPIPE)',
),
pytest.param(
socket.error, errno.ENOTCONN, False,
id='simulated socket.error(ENOTCONN)',
),
pytest.param(
None, # <-- don't raise an artificial exception
errno.ENOTCONN, False,
id='real socket.error(ENOTCONN)',
marks=pytest.mark.xfail(
IS_WINDOWS,
                reason='Not reproducible this way on Windows',
),
),
pytest.param(
socket.error, errno.ESHUTDOWN, False,
id='socket.error(ESHUTDOWN)',
),
pytest.param(RuntimeError, 666, True, id='RuntimeError(666)'),
pytest.param(socket.error, -1, True, id='socket.error(-1)'),
) + (
() if six.PY2 else (
pytest.param(
ConnectionResetError, errno.ECONNRESET, False,
id='ConnectionResetError(ECONNRESET)',
),
pytest.param(
BrokenPipeError, errno.EPIPE, False,
id='BrokenPipeError(EPIPE)',
),
pytest.param(
BrokenPipeError, errno.ESHUTDOWN, False,
id='BrokenPipeError(ESHUTDOWN)',
),
)
),
)
def test_broken_connection_during_tcp_fin(
error_number, exception_leaks,
mocker, monkeypatch,
simulated_exception, test_client,
):
"""Test there's no traceback on broken connection during close.
It artificially causes :py:data:`~errno.ECONNRESET` /
:py:data:`~errno.EPIPE` / :py:data:`~errno.ESHUTDOWN` /
:py:data:`~errno.ENOTCONN` as well as unrelated :py:exc:`RuntimeError`
and :py:exc:`socket.error(-1) <socket.error>` on the server socket when
:py:meth:`socket.shutdown() <socket.socket.shutdown>` is called. It's
triggered by closing the client socket before the server had a chance
to respond.
The expectation is that only :py:exc:`RuntimeError` and a
:py:exc:`socket.error` with an unusual error code would leak.
With the :py:data:`None`-parameter, a real non-simulated
:py:exc:`OSError(107, 'Transport endpoint is not connected')
<OSError>` happens.
"""
exc_instance = (
None if simulated_exception is None
else simulated_exception(error_number, 'Simulated socket error')
)
old_close_kernel_socket = (
test_client.server_instance.
ConnectionClass._close_kernel_socket
)
def _close_kernel_socket(self):
monkeypatch.setattr( # `socket.shutdown` is read-only otherwise
self, 'socket',
mocker.mock_module.Mock(wraps=self.socket),
)
if exc_instance is not None:
monkeypatch.setattr(
self.socket, 'shutdown',
mocker.mock_module.Mock(side_effect=exc_instance),
)
_close_kernel_socket.fin_spy = mocker.spy(self.socket, 'shutdown')
try:
old_close_kernel_socket(self)
except simulated_exception:
_close_kernel_socket.exception_leaked = True
else:
_close_kernel_socket.exception_leaked = False
monkeypatch.setattr(
test_client.server_instance.ConnectionClass,
'_close_kernel_socket',
_close_kernel_socket,
)
conn = test_client.get_connection()
conn.auto_open = False
conn.connect()
conn.send(b'GET /hello HTTP/1.1')
conn.send(('Host: %s' % conn.host).encode('ascii'))
conn.close()
# Let the server attempt TCP shutdown:
for _ in range(10 * (2 if IS_SLOW_ENV else 1)):
time.sleep(0.1)
if hasattr(_close_kernel_socket, 'exception_leaked'):
break
if exc_instance is not None: # simulated by us
assert _close_kernel_socket.fin_spy.spy_exception is exc_instance
else: # real
assert isinstance(
_close_kernel_socket.fin_spy.spy_exception, socket.error,
)
assert _close_kernel_socket.fin_spy.spy_exception.errno == error_number
assert _close_kernel_socket.exception_leaked is exception_leaks
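# Technique note: the test above wraps the connection's socket in a
# ``Mock(wraps=...)`` so that ``mocker.spy`` can record the ``shutdown()``
# call (the "FIN spy"), while ``monkeypatch`` swaps in a ``side_effect`` to
# simulate the desired errno. Whether ``_close_kernel_socket`` re-raised is
# recorded on the function object itself and polled for in the sleep loop,
# since the close happens on a server thread, not in the test's own context.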
@pytest.mark.parametrize(
'timeout_before_headers',
(
True,
False,
),
)
def test_HTTP11_Timeout(test_client, timeout_before_headers):
"""Check timeout without sending any data.
The server will close the connection with a 408.
"""
conn = test_client.get_connection()
conn.auto_open = False
conn.connect()
if not timeout_before_headers:
# Connect but send half the headers only.
conn.send(b'GET /hello HTTP/1.1')
conn.send(('Host: %s' % conn.host).encode('ascii'))
# else: Connect but send nothing.
# Wait for our socket timeout
time.sleep(timeout * 2)
# The request should have returned 408 already.
response = conn.response_class(conn.sock, method='GET')
response.begin()
assert response.status == 408
conn.close()
def test_HTTP11_Timeout_after_request(test_client):
"""Check timeout after at least one request has succeeded.
The server should close the connection without 408.
"""
fail_msg = "Writing to timed out socket didn't fail as it should have: %s"
# Make an initial request
conn = test_client.get_connection()
conn.putrequest('GET', '/timeout?t=%s' % timeout, skip_host=True)
conn.putheader('Host', conn.host)
conn.endheaders()
response = conn.response_class(conn.sock, method='GET')
response.begin()
assert response.status == 200
actual_body = response.read()
expected_body = str(timeout).encode()
assert actual_body == expected_body
# Make a second request on the same socket
conn._output(b'GET /hello HTTP/1.1')
conn._output(('Host: %s' % conn.host).encode('ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method='GET')
response.begin()
assert response.status == 200
actual_body = response.read()
expected_body = b'Hello, world!'
assert actual_body == expected_body
# Wait for our socket timeout
time.sleep(timeout * 2)
# Make another request on the same socket, which should error
conn._output(b'GET /hello HTTP/1.1')
conn._output(('Host: %s' % conn.host).encode('ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method='GET')
try:
response.begin()
except (socket.error, http_client.BadStatusLine):
pass
except Exception as ex:
pytest.fail(fail_msg % ex)
else:
if response.status != 408:
pytest.fail(fail_msg % response.read())
conn.close()
# Make another request on a new socket, which should work
conn = test_client.get_connection()
conn.putrequest('GET', '/pov', skip_host=True)
conn.putheader('Host', conn.host)
conn.endheaders()
response = conn.response_class(conn.sock, method='GET')
response.begin()
assert response.status == 200
actual_body = response.read()
expected_body = pov.encode()
assert actual_body == expected_body
# Make another request on the same socket,
# but timeout on the headers
conn.send(b'GET /hello HTTP/1.1')
# Wait for our socket timeout
time.sleep(timeout * 2)
response = conn.response_class(conn.sock, method='GET')
try:
response.begin()
except (socket.error, http_client.BadStatusLine):
pass
except Exception as ex:
pytest.fail(fail_msg % ex)
else:
if response.status != 408:
pytest.fail(fail_msg % response.read())
conn.close()
# Retry the request on a new connection, which should work
conn = test_client.get_connection()
conn.putrequest('GET', '/pov', skip_host=True)
conn.putheader('Host', conn.host)
conn.endheaders()
response = conn.response_class(conn.sock, method='GET')
response.begin()
assert response.status == 200
actual_body = response.read()
expected_body = pov.encode()
assert actual_body == expected_body
conn.close()
def test_HTTP11_pipelining(test_client):
"""Test HTTP/1.1 pipelining.
:py:mod:`http.client` doesn't support this directly.
"""
conn = test_client.get_connection()
# Put request 1
conn.putrequest('GET', '/hello', skip_host=True)
conn.putheader('Host', conn.host)
conn.endheaders()
for trial in range(5):
# Put next request
conn._output(
('GET /hello?%s HTTP/1.1' % trial).encode('iso-8859-1'),
)
conn._output(('Host: %s' % conn.host).encode('ascii'))
conn._send_output()
# Retrieve previous response
response = conn.response_class(conn.sock, method='GET')
        # There is a bug in Python 3 regarding the buffering of
        # ``conn.sock``. Until that bug gets fixed, we
        # monkey-patch the ``response`` instance.
        # https://bugs.python.org/issue23377
if not six.PY2:
response.fp = conn.sock.makefile('rb', 0)
response.begin()
body = response.read(13)
assert response.status == 200
assert body == b'Hello, world!'
# Retrieve final response
response = conn.response_class(conn.sock, method='GET')
response.begin()
body = response.read()
assert response.status == 200
assert body == b'Hello, world!'
conn.close()
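# How the pipelining trick above works: ``http.client`` refuses to send a
# second request before the first response is read, so the test bypasses the
# public API and writes raw request lines with the private ``_output()`` /
# ``_send_output()`` buffer methods. Each iteration therefore has one request
# "in flight" while the previous response is parsed straight off
# ``conn.sock`` via ``conn.response_class``.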
def test_100_Continue(test_client):
"""Test 100-continue header processing."""
conn = test_client.get_connection()
# Try a page without an Expect request header first.
# Note that http.client's response.begin automatically ignores
# 100 Continue responses, so we must manually check for it.
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '4')
conn.endheaders()
conn.send(b"d'oh")
response = conn.response_class(conn.sock, method='POST')
_version, status, _reason = response._read_status()
assert status != 100
conn.close()
# Now try a page with an Expect header...
conn.connect()
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '17')
conn.putheader('Expect', '100-continue')
conn.endheaders()
response = conn.response_class(conn.sock, method='POST')
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
assert status == 100
while True:
line = response.fp.readline().strip()
if line:
pytest.fail(
'100 Continue should not output any headers. Got %r' %
line,
)
else:
break
# ...send the body
body = b'I am a small file'
conn.send(body)
# ...get the final response
response.begin()
status_line, _actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 200
expected_resp_body = ("thanks for '%s'" % body).encode()
assert actual_resp_body == expected_resp_body
conn.close()
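# Recap of the Expect/100-continue handshake exercised above: the client
# sends the request headers with ``Expect: 100-continue`` and waits; the
# server answers with an interim ``HTTP/1.1 100 Continue`` status line (no
# headers, no body), after which the client transmits the body and reads the
# final response. Clients that skip the Expect header (first half of the
# test) must never see a 100 status at all.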
@pytest.mark.parametrize(
'max_request_body_size',
(
0,
1001,
),
)
def test_readall_or_close(test_client, max_request_body_size):
"""Test a max_request_body_size of 0 (the default) and 1001."""
old_max = test_client.server_instance.max_request_body_size
test_client.server_instance.max_request_body_size = max_request_body_size
conn = test_client.get_connection()
# Get a POST page with an error
conn.putrequest('POST', '/err_before_read', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '1000')
conn.putheader('Expect', '100-continue')
conn.endheaders()
response = conn.response_class(conn.sock, method='POST')
# ...assert and then skip the 100 response
_version, status, _reason = response._read_status()
assert status == 100
skip = True
while skip:
skip = response.fp.readline().strip()
# ...send the body
conn.send(b'x' * 1000)
# ...get the final response
response.begin()
status_line, _actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 500
# Now try a working page with an Expect header...
conn._output(b'POST /upload HTTP/1.1')
conn._output(('Host: %s' % conn.host).encode('ascii'))
conn._output(b'Content-Type: text/plain')
conn._output(b'Content-Length: 17')
conn._output(b'Expect: 100-continue')
conn._send_output()
response = conn.response_class(conn.sock, method='POST')
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
assert status == 100
skip = True
while skip:
skip = response.fp.readline().strip()
# ...send the body
body = b'I am a small file'
conn.send(body)
# ...get the final response
response.begin()
status_line, actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 200
expected_resp_body = ("thanks for '%s'" % body).encode()
assert actual_resp_body == expected_resp_body
conn.close()
test_client.server_instance.max_request_body_size = old_max
def test_No_Message_Body(test_client):
"""Test HTTP queries with an empty response body."""
# Initialize a persistent HTTP connection
http_connection = test_client.get_connection()
http_connection.auto_open = False
http_connection.connect()
# Make the first request and assert there's no "Connection: close".
status_line, actual_headers, actual_resp_body = test_client.get(
'/pov', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
assert actual_resp_body == pov.encode()
assert not header_exists('Connection', actual_headers)
# Make a 204 request on the same connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/custom/204', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 204
assert not header_exists('Content-Length', actual_headers)
assert actual_resp_body == b''
assert not header_exists('Connection', actual_headers)
# Make a 304 request on the same connection.
status_line, actual_headers, actual_resp_body = test_client.get(
'/custom/304', http_conn=http_connection,
)
actual_status = int(status_line[:3])
assert actual_status == 304
assert not header_exists('Content-Length', actual_headers)
assert actual_resp_body == b''
assert not header_exists('Connection', actual_headers)
@pytest.mark.xfail(
reason=unwrap(
trim("""
Headers from earlier request leak into the request
line for a subsequent request, resulting in 400
instead of 413. See cherrypy/cheroot#69 for details.
"""),
),
)
def test_Chunked_Encoding(test_client):
"""Test HTTP uploads with chunked transfer-encoding."""
# Initialize a persistent HTTP connection
conn = test_client.get_connection()
# Try a normal chunked request (with extensions)
body = (
b'8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n'
b'Content-Type: application/json\r\n'
b'\r\n'
)
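    # Anatomy of the chunked body above: ``8;key=value`` is a hex chunk size
    # plus a chunk extension, followed by 8 bytes of data (``xx\r\nxxxx`` --
    # the embedded CRLF counts toward the size), then a 5-byte chunk, the
    # ``0`` last-chunk marker, and a ``Content-Type`` trailer announced by
    # the ``Trailer`` request header.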
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Transfer-Encoding', 'chunked')
conn.putheader('Trailer', 'Content-Type')
# Note that this is somewhat malformed:
# we shouldn't be sending Content-Length.
# RFC 2616 says the server should ignore it.
conn.putheader('Content-Length', '3')
conn.endheaders()
conn.send(body)
response = conn.getresponse()
status_line, _actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 200
assert status_line[4:] == 'OK'
expected_resp_body = ("thanks for '%s'" % b'xx\r\nxxxxyyyyy').encode()
assert actual_resp_body == expected_resp_body
# Try a chunked request that exceeds server.max_request_body_size.
# Note that the delimiters and trailer are included.
body = b'\r\n'.join((b'3e3', b'x' * 995, b'0', b'', b''))
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Transfer-Encoding', 'chunked')
conn.putheader('Content-Type', 'text/plain')
# Chunked requests don't need a content-length
# conn.putheader("Content-Length", len(body))
conn.endheaders()
conn.send(body)
response = conn.getresponse()
status_line, actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 413
conn.close()
def test_Content_Length_in(test_client):
"""Try a non-chunked request where Content-Length exceeds limit.
(server.max_request_body_size).
Assert error before body send.
"""
# Initialize a persistent HTTP connection
conn = test_client.get_connection()
conn.putrequest('POST', '/upload', skip_host=True)
conn.putheader('Host', conn.host)
conn.putheader('Content-Type', 'text/plain')
conn.putheader('Content-Length', '9999')
conn.endheaders()
response = conn.getresponse()
status_line, _actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == 413
expected_resp_body = (
b'The entity sent with the request exceeds '
b'the maximum allowed bytes.'
)
assert actual_resp_body == expected_resp_body
conn.close()
def test_Content_Length_not_int(test_client):
"""Test that malicious Content-Length header returns 400."""
status_line, _actual_headers, actual_resp_body = test_client.post(
'/upload',
headers=[
('Content-Type', 'text/plain'),
('Content-Length', 'not-an-integer'),
],
)
actual_status = int(status_line[:3])
assert actual_status == 400
assert actual_resp_body == b'Malformed Content-Length Header.'
@pytest.mark.parametrize(
('uri', 'expected_resp_status', 'expected_resp_body'),
(
(
'/wrong_cl_buffered', 500,
(
b'The requested resource returned more bytes than the '
b'declared Content-Length.'
),
),
('/wrong_cl_unbuffered', 200, b'I too'),
),
)
def test_Content_Length_out(
test_client,
uri, expected_resp_status, expected_resp_body,
):
"""Test response with Content-Length less than the response body.
(non-chunked response)
"""
conn = test_client.get_connection()
conn.putrequest('GET', uri, skip_host=True)
conn.putheader('Host', conn.host)
conn.endheaders()
response = conn.getresponse()
status_line, _actual_headers, actual_resp_body = webtest.shb(response)
actual_status = int(status_line[:3])
assert actual_status == expected_resp_status
assert actual_resp_body == expected_resp_body
conn.close()
    # The server logs the exception we have just verified from the
    # client's perspective. Tell the error_log verification that
    # it can ignore that message.
test_client.server_instance.error_log.ignored_msgs.extend((
# Python 3.7+:
"ValueError('Response body exceeds the declared Content-Length.')",
# Python 2.7-3.6 (macOS?):
"ValueError('Response body exceeds the declared Content-Length.',)",
))
@pytest.mark.xfail(
reason='Sometimes this test fails due to low timeout. '
'Ref: https://github.com/cherrypy/cherrypy/issues/598',
)
def test_598(test_client):
"""Test serving large file with a read timeout in place."""
# Initialize a persistent HTTP connection
conn = test_client.get_connection()
remote_data_conn = urllib.request.urlopen(
'%s://%s:%s/one_megabyte_of_a'
% ('http', conn.host, conn.port),
)
buf = remote_data_conn.read(512)
time.sleep(timeout * 0.6)
remaining = (1024 * 1024) - 512
while remaining:
data = remote_data_conn.read(remaining)
if not data:
break
buf += data
remaining -= len(data)
assert len(buf) == 1024 * 1024
assert buf == b'a' * 1024 * 1024
assert remaining == 0
remote_data_conn.close()
@pytest.mark.parametrize(
'invalid_terminator',
(
b'\n\n',
b'\r\n\n',
),
)
def test_No_CRLF(test_client, invalid_terminator):
"""Test HTTP queries with no valid CRLF terminators."""
# Initialize a persistent HTTP connection
conn = test_client.get_connection()
# (b'%s' % b'') is not supported in Python 3.4, so just use bytes.join()
conn.send(b''.join((b'GET /hello HTTP/1.1', invalid_terminator)))
response = conn.response_class(conn.sock, method='GET')
response.begin()
actual_resp_body = response.read()
expected_resp_body = b'HTTP requires CRLF terminators'
assert actual_resp_body == expected_resp_body
conn.close()
class FaultySelect:
"""Mock class to insert errors in the selector.select method."""
def __init__(self, original_select):
"""Initilize helper class to wrap the selector.select method."""
self.original_select = original_select
self.request_served = False
self.os_error_triggered = False
def __call__(self, timeout):
"""Intercept the calls to selector.select."""
if self.request_served:
self.os_error_triggered = True
raise OSError('Error while selecting the client socket.')
return self.original_select(timeout)
class FaultyGetMap:
"""Mock class to insert errors in the selector.get_map method."""
def __init__(self, original_get_map):
"""Initilize helper class to wrap the selector.get_map method."""
self.original_get_map = original_get_map
self.sabotage_conn = False
self.conn_closed = False
def __call__(self):
"""Intercept the calls to selector.get_map."""
sabotage_targets = (
conn for _, (_, _, _, conn) in self.original_get_map().items()
if isinstance(conn, cheroot.server.HTTPConnection)
) if self.sabotage_conn and not self.conn_closed else ()
for conn in sabotage_targets:
# close the socket to cause OSError
conn.close()
self.conn_closed = True
return self.original_get_map()
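# How the two fault injectors interact (editorial note): once
# ``sabotage_conn`` is set, FaultyGetMap closes the real client socket behind
# the selector's back, and FaultySelect raises OSError after the request has
# been served, so the connection manager's error-handling path runs against a
# stale descriptor.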
def test_invalid_selected_connection(test_client, monkeypatch):
"""Test the error handling segment of HTTP connection selection.
See :py:meth:`cheroot.connections.ConnectionManager.get_conn`.
"""
# patch the select method
faux_select = FaultySelect(
test_client.server_instance._connections._selector.select,
)
monkeypatch.setattr(
test_client.server_instance._connections._selector,
'select',
faux_select,
)
# patch the get_map method
faux_get_map = FaultyGetMap(
test_client.server_instance._connections._selector._selector.get_map,
)
monkeypatch.setattr(
test_client.server_instance._connections._selector._selector,
'get_map',
faux_get_map,
)
# request a page with connection keep-alive to make sure
# we'll have a connection to be modified.
resp_status, _resp_headers, _resp_body = test_client.request(
'/page1', headers=[('Connection', 'Keep-Alive')],
)
assert resp_status == '200 OK'
# trigger the internal errors
faux_get_map.sabotage_conn = faux_select.request_served = True
# give time to make sure the error gets handled
time.sleep(test_client.server_instance.expiration_interval * 2)
assert faux_select.os_error_triggered
assert faux_get_map.conn_closed
| 43,739 | Python | .py | 1,100 | 32.753636 | 79 | 0.648977 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,990 | helper.py | rembo10_headphones/lib/cheroot/test/helper.py |
"""A library of helper functions for the Cheroot test suite."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import logging
import os
import sys
import time
import threading
import types
from six.moves import http_client
import six
import cheroot.server
import cheroot.wsgi
from cheroot.test import webtest
log = logging.getLogger(__name__)
thisdir = os.path.abspath(os.path.dirname(__file__))
config = {
'bind_addr': ('127.0.0.1', 54583),
'server': 'wsgi',
'wsgi_app': None,
}
class CherootWebCase(webtest.WebCase):
"""Helper class for a web app test suite."""
script_name = ''
scheme = 'http'
available_servers = {
'wsgi': cheroot.wsgi.Server,
'native': cheroot.server.HTTPServer,
}
@classmethod
def setup_class(cls):
"""Create and run one HTTP server per class."""
conf = config.copy()
conf.update(getattr(cls, 'config', {}))
s_class = conf.pop('server', 'wsgi')
server_factory = cls.available_servers.get(s_class)
if server_factory is None:
            # NOTE: the 'server' key was popped above, so report s_class
            raise RuntimeError('Unknown server in config: %s' % s_class)
cls.httpserver = server_factory(**conf)
cls.HOST, cls.PORT = cls.httpserver.bind_addr
if cls.httpserver.ssl_adapter is None:
ssl = ''
cls.scheme = 'http'
else:
ssl = ' (ssl)'
cls.HTTP_CONN = http_client.HTTPSConnection
cls.scheme = 'https'
v = sys.version.split()[0]
log.info('Python version used to run this test script: %s' % v)
log.info('Cheroot version: %s' % cheroot.__version__)
log.info('HTTP server version: %s%s' % (cls.httpserver.protocol, ssl))
log.info('PID: %s' % os.getpid())
if hasattr(cls, 'setup_server'):
# Clear the wsgi server so that
# it can be updated with the new root
cls.setup_server()
cls.start()
@classmethod
def teardown_class(cls):
"""Cleanup HTTP server."""
if hasattr(cls, 'setup_server'):
cls.stop()
@classmethod
def start(cls):
"""Load and start the HTTP server."""
threading.Thread(target=cls.httpserver.safe_start).start()
while not cls.httpserver.ready:
time.sleep(0.1)
@classmethod
def stop(cls):
"""Terminate HTTP server."""
cls.httpserver.stop()
td = getattr(cls, 'teardown', None)
if td:
td()
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert ``abs(dt1 - dt2)`` is within ``Y`` seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError(
'%r and %r are not within %r seconds.' %
(dt1, dt2, seconds),
)
class Request:
"""HTTP request container."""
def __init__(self, environ):
"""Initialize HTTP request."""
self.environ = environ
class Response:
"""HTTP response container."""
def __init__(self):
"""Initialize HTTP response."""
self.status = '200 OK'
self.headers = {'Content-Type': 'text/html'}
self.body = None
def output(self):
"""Generate iterable response body object."""
if self.body is None:
return []
elif isinstance(self.body, six.text_type):
return [self.body.encode('iso-8859-1')]
elif isinstance(self.body, six.binary_type):
return [self.body]
else:
return [x.encode('iso-8859-1') for x in self.body]
class Controller:
"""WSGI app for tests."""
def __call__(self, environ, start_response):
"""WSGI request handler."""
req, resp = Request(environ), Response()
try:
# Python 3 supports unicode attribute names
# Python 2 encodes them
handler = self.handlers[environ['PATH_INFO']]
except KeyError:
resp.status = '404 Not Found'
else:
output = handler(req, resp)
if (
output is not None
and not any(
resp.status.startswith(status_code)
for status_code in ('204', '304')
)
):
resp.body = output
try:
resp.headers.setdefault('Content-Length', str(len(output)))
except TypeError:
if not isinstance(output, types.GeneratorType):
raise
start_response(resp.status, resp.headers.items())
return resp.output()
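# Note on the try/except in ``__call__`` above: ``len(output)`` raises
# TypeError for generator bodies (e.g. a streaming endpoint), in which case
# no Content-Length default is set and the server has to fall back to
# chunked or close-delimited framing; any other un-sized body type re-raises.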
| 4,896 | Python | .py | 140 | 26.014286 | 79 | 0.573698 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,991 | test_server.py | rembo10_headphones/lib/cheroot/test/test_server.py |
"""Tests for the HTTP server."""
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import socket
import tempfile
import threading
import uuid
import pytest
import requests
import requests_unixsocket
import six
from pypytools.gc.custom import DefaultGc
from six.moves import queue, urllib
from .._compat import bton, ntob
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS, SYS_PLATFORM
from ..server import IS_UID_GID_RESOLVABLE, Gateway, HTTPServer
from ..testing import (
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
EPHEMERAL_PORT,
)
IS_SLOW_ENV = IS_MACOS or IS_WINDOWS
unix_only_sock_test = pytest.mark.skipif(
not hasattr(socket, 'AF_UNIX'),
reason='UNIX domain sockets are only available under UNIX-based OS',
)
non_macos_sock_test = pytest.mark.skipif(
IS_MACOS,
reason='Peercreds lookup does not work under macOS/BSD currently.',
)
@pytest.fixture(params=('abstract', 'file'))
def unix_sock_file(request):
"""Check that bound UNIX socket address is stored in server."""
name = 'unix_{request.param}_sock'.format(**locals())
return request.getfixturevalue(name)
@pytest.fixture
def unix_abstract_sock():
"""Return an abstract UNIX socket address."""
if not IS_LINUX:
pytest.skip(
'{os} does not support an abstract '
'socket namespace'.format(os=SYS_PLATFORM),
)
return b''.join((
b'\x00cheroot-test-socket',
ntob(str(uuid.uuid4())),
)).decode()
@pytest.fixture
def unix_file_sock():
"""Yield a unix file socket."""
tmp_sock_fh, tmp_sock_fname = tempfile.mkstemp()
yield tmp_sock_fname
os.close(tmp_sock_fh)
os.unlink(tmp_sock_fname)
def test_prepare_makes_server_ready():
"""Check that prepare() makes the server ready, and stop() clears it."""
httpserver = HTTPServer(
bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT),
gateway=Gateway,
)
assert not httpserver.ready
assert not httpserver.requests._threads
httpserver.prepare()
assert httpserver.ready
assert httpserver.requests._threads
for thr in httpserver.requests._threads:
assert thr.ready
httpserver.stop()
assert not httpserver.requests._threads
assert not httpserver.ready
def test_stop_interrupts_serve():
"""Check that stop() interrupts running of serve()."""
httpserver = HTTPServer(
bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT),
gateway=Gateway,
)
httpserver.prepare()
serve_thread = threading.Thread(target=httpserver.serve)
serve_thread.start()
serve_thread.join(0.5)
assert serve_thread.is_alive()
httpserver.stop()
serve_thread.join(0.5)
assert not serve_thread.is_alive()
@pytest.mark.parametrize(
'exc_cls',
(
IOError,
KeyboardInterrupt,
OSError,
RuntimeError,
),
)
def test_server_interrupt(exc_cls):
"""Check that assigning interrupt stops the server."""
interrupt_msg = 'should catch {uuid!s}'.format(uuid=uuid.uuid4())
raise_marker_sentinel = object()
httpserver = HTTPServer(
bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT),
gateway=Gateway,
)
result_q = queue.Queue()
def serve_thread():
# ensure we catch the exception on the serve() thread
try:
httpserver.serve()
except exc_cls as e:
if str(e) == interrupt_msg:
result_q.put(raise_marker_sentinel)
httpserver.prepare()
serve_thread = threading.Thread(target=serve_thread)
serve_thread.start()
serve_thread.join(0.5)
assert serve_thread.is_alive()
# this exception is raised on the serve() thread,
# not in the calling context.
httpserver.interrupt = exc_cls(interrupt_msg)
serve_thread.join(0.5)
assert not serve_thread.is_alive()
assert result_q.get_nowait() is raise_marker_sentinel
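# The mechanism under test: ``HTTPServer.interrupt`` is a property; assigning
# an exception instance to it makes the serve() loop re-raise that exact
# instance on its own thread, which is why the sentinel is pushed through a
# queue rather than asserted in the calling context.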
def test_serving_is_false_and_stop_returns_after_ctrlc():
"""Check that stop() interrupts running of serve()."""
httpserver = HTTPServer(
bind_addr=(ANY_INTERFACE_IPV4, EPHEMERAL_PORT),
gateway=Gateway,
)
httpserver.prepare()
# Simulate a Ctrl-C on the first call to `run`.
def raise_keyboard_interrupt(*args, **kwargs):
raise KeyboardInterrupt()
httpserver._connections._selector.select = raise_keyboard_interrupt
serve_thread = threading.Thread(target=httpserver.serve)
serve_thread.start()
# The thread should exit right away due to the interrupt.
serve_thread.join(
httpserver.expiration_interval * (4 if IS_SLOW_ENV else 2),
)
assert not serve_thread.is_alive()
assert not httpserver._connections._serving
httpserver.stop()
@pytest.mark.parametrize(
'ip_addr',
(
ANY_INTERFACE_IPV4,
ANY_INTERFACE_IPV6,
),
)
def test_bind_addr_inet(http_server, ip_addr):
"""Check that bound IP address is stored in server."""
httpserver = http_server.send((ip_addr, EPHEMERAL_PORT))
assert httpserver.bind_addr[0] == ip_addr
assert httpserver.bind_addr[1] != EPHEMERAL_PORT
@unix_only_sock_test
def test_bind_addr_unix(http_server, unix_sock_file):
"""Check that bound UNIX socket address is stored in server."""
httpserver = http_server.send(unix_sock_file)
assert httpserver.bind_addr == unix_sock_file
@unix_only_sock_test
def test_bind_addr_unix_abstract(http_server, unix_abstract_sock):
"""Check that bound UNIX abstract socket address is stored in server."""
httpserver = http_server.send(unix_abstract_sock)
assert httpserver.bind_addr == unix_abstract_sock
PEERCRED_IDS_URI = '/peer_creds/ids'
PEERCRED_TEXTS_URI = '/peer_creds/texts'
class _TestGateway(Gateway):
def respond(self):
req = self.req
conn = req.conn
req_uri = bton(req.uri)
if req_uri == PEERCRED_IDS_URI:
peer_creds = conn.peer_pid, conn.peer_uid, conn.peer_gid
self.send_payload('|'.join(map(str, peer_creds)))
return
elif req_uri == PEERCRED_TEXTS_URI:
self.send_payload('!'.join((conn.peer_user, conn.peer_group)))
return
return super(_TestGateway, self).respond()
def send_payload(self, payload):
req = self.req
req.status = b'200 OK'
req.ensure_headers_sent()
req.write(ntob(payload))
@pytest.fixture
def peercreds_enabled_server(http_server, unix_sock_file):
"""Construct a test server with ``peercreds_enabled``."""
httpserver = http_server.send(unix_sock_file)
httpserver.gateway = _TestGateway
httpserver.peercreds_enabled = True
return httpserver
@unix_only_sock_test
@non_macos_sock_test
def test_peercreds_unix_sock(peercreds_enabled_server):
"""Check that ``PEERCRED`` lookup works when enabled."""
httpserver = peercreds_enabled_server
bind_addr = httpserver.bind_addr
if isinstance(bind_addr, six.binary_type):
bind_addr = bind_addr.decode()
# pylint: disable=possibly-unused-variable
quoted = urllib.parse.quote(bind_addr, safe='')
unix_base_uri = 'http+unix://{quoted}'.format(**locals())
expected_peercreds = os.getpid(), os.getuid(), os.getgid()
expected_peercreds = '|'.join(map(str, expected_peercreds))
with requests_unixsocket.monkeypatch():
peercreds_resp = requests.get(unix_base_uri + PEERCRED_IDS_URI)
peercreds_resp.raise_for_status()
assert peercreds_resp.text == expected_peercreds
peercreds_text_resp = requests.get(unix_base_uri + PEERCRED_TEXTS_URI)
assert peercreds_text_resp.status_code == 500
@pytest.mark.skipif(
not IS_UID_GID_RESOLVABLE,
reason='Modules `grp` and `pwd` are not available '
'under the current platform',
)
@unix_only_sock_test
@non_macos_sock_test
def test_peercreds_unix_sock_with_lookup(peercreds_enabled_server):
"""Check that ``PEERCRED`` resolution works when enabled."""
httpserver = peercreds_enabled_server
httpserver.peercreds_resolve_enabled = True
bind_addr = httpserver.bind_addr
if isinstance(bind_addr, six.binary_type):
bind_addr = bind_addr.decode()
# pylint: disable=possibly-unused-variable
quoted = urllib.parse.quote(bind_addr, safe='')
unix_base_uri = 'http+unix://{quoted}'.format(**locals())
import grp
import pwd
expected_textcreds = (
pwd.getpwuid(os.getuid()).pw_name,
grp.getgrgid(os.getgid()).gr_name,
)
expected_textcreds = '!'.join(map(str, expected_textcreds))
with requests_unixsocket.monkeypatch():
peercreds_text_resp = requests.get(unix_base_uri + PEERCRED_TEXTS_URI)
peercreds_text_resp.raise_for_status()
assert peercreds_text_resp.text == expected_textcreds
@pytest.mark.skipif(
IS_WINDOWS,
reason='This regression test is for a Linux bug, '
'and the resource module is not available on Windows',
)
@pytest.mark.parametrize(
'resource_limit',
(
1024,
2048,
),
indirect=('resource_limit',),
)
@pytest.mark.usefixtures('many_open_sockets')
def test_high_number_of_file_descriptors(native_server_client, resource_limit):
"""Test the server does not crash with a high file-descriptor value.
This test shouldn't cause a server crash when trying to access
file-descriptor higher than 1024.
The earlier implementation used to rely on ``select()`` syscall that
doesn't support file descriptors with numbers higher than 1024.
"""
# We want to force the server to use a file-descriptor with
# a number above resource_limit
# Patch the method that processes
_old_process_conn = native_server_client.server_instance.process_conn
def native_process_conn(conn):
native_process_conn.filenos.add(conn.socket.fileno())
return _old_process_conn(conn)
native_process_conn.filenos = set()
native_server_client.server_instance.process_conn = native_process_conn
# Trigger a crash if select() is used in the implementation
native_server_client.connect('/')
# Ensure that at least one connection got accepted, otherwise the
# follow-up check wouldn't make sense
assert len(native_process_conn.filenos) > 0
# Check at least one of the sockets created are above the target number
assert any(fn >= resource_limit for fn in native_process_conn.filenos)
if not IS_WINDOWS:
test_high_number_of_file_descriptors = pytest.mark.forked(
test_high_number_of_file_descriptors,
)
@pytest.fixture
def _garbage_bin():
"""Disable garbage collection when this fixture is in use."""
with DefaultGc().nogc():
yield
@pytest.fixture
def resource_limit(request):
"""Set the resource limit two times bigger then requested."""
resource = pytest.importorskip(
'resource',
reason='The "resource" module is Unix-specific',
)
# Get current resource limits to restore them later
soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
# We have to increase the nofile limit above 1024
# Otherwise we see a 'Too many files open' error, instead of
# an error due to the file descriptor number being too high
resource.setrlimit(
resource.RLIMIT_NOFILE,
(request.param * 2, hard_limit),
)
try: # noqa: WPS501
yield request.param
finally:
# Reset the resource limit back to the original soft limit
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
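# Why the limit is doubled: the fixture raises RLIMIT_NOFILE to twice the
# requested value so that ``many_open_sockets`` can burn file descriptors up
# to ``resource_limit`` while the server still has headroom to accept a
# connection whose fd number lands above that threshold without hitting
# EMFILE ("Too many open files").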
@pytest.fixture
def many_open_sockets(request, resource_limit):
"""Allocate a lot of file descriptors by opening dummy sockets."""
# NOTE: `@pytest.mark.usefixtures` doesn't work on fixtures which
# NOTE: forces us to invoke this one dynamically to avoid having an
# NOTE: unused argument.
request.getfixturevalue('_garbage_bin')
# Hoard a lot of file descriptors by opening and storing a lot of sockets
test_sockets = []
# Open a lot of file descriptors, so the next one the server
# opens is a high number
try:
for _ in range(resource_limit):
sock = socket.socket()
test_sockets.append(sock)
# If we reach a high enough number, we don't need to open more
if sock.fileno() >= resource_limit:
break
# Check we opened enough descriptors to reach a high number
the_highest_fileno = test_sockets[-1].fileno()
assert the_highest_fileno >= resource_limit
yield the_highest_fileno
finally:
# Close our open resources
for test_socket in test_sockets:
test_socket.close()
| 12,813 | Python | .py | 332 | 32.924699 | 79 | 0.692215 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,992 | test_errors.py | rembo10_headphones/lib/cheroot/test/test_errors.py |
"""Test suite for ``cheroot.errors``."""
import pytest
from cheroot import errors
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS # noqa: WPS130
@pytest.mark.parametrize(
('err_names', 'err_nums'),
(
(('', 'some-nonsense-name'), []),
(
(
'EPROTOTYPE', 'EAGAIN', 'EWOULDBLOCK',
'WSAEWOULDBLOCK', 'EPIPE',
),
(91, 11, 32) if IS_LINUX else
(32, 35, 41) if IS_MACOS else
(32, 10041, 11, 10035) if IS_WINDOWS else
(),
),
),
)
def test_plat_specific_errors(err_names, err_nums):
"""Test that ``plat_specific_errors`` gets correct error numbers list."""
actual_err_nums = errors.plat_specific_errors(*err_names)
assert len(actual_err_nums) == len(err_nums)
assert sorted(actual_err_nums) == sorted(err_nums)
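# Usage sketch (an assumption about the API surface, based only on the test
# above): ``errors.plat_specific_errors('EPIPE', 'EAGAIN')`` returns the
# numeric codes of the names that exist under the current platform, e.g.
# ``[32, 11]`` on Linux, so callers can build errno sets without per-OS
# conditionals; unknown names are silently skipped.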
| 868 | Python | .py | 25 | 27.28 | 77 | 0.575179 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,993 | pyopenssl.pyi | rembo10_headphones/lib/cheroot/ssl/pyopenssl.pyi |
from . import Adapter
from ..makefile import StreamReader, StreamWriter
from OpenSSL import SSL
from typing import Any
ssl_conn_type: SSL.Connection
class SSLFileobjectMixin:
ssl_timeout: int
ssl_retry: float
def recv(self, size): ...
def readline(self, size: int = ...): ...
def sendall(self, *args, **kwargs): ...
def send(self, *args, **kwargs): ...
class SSLFileobjectStreamReader(SSLFileobjectMixin, StreamReader): ... # type:ignore
class SSLFileobjectStreamWriter(SSLFileobjectMixin, StreamWriter): ... # type:ignore
class SSLConnectionProxyMeta:
def __new__(mcl, name, bases, nmspc): ...
class SSLConnection():
def __init__(self, *args) -> None: ...
class pyOpenSSLAdapter(Adapter):
def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...) -> None: ...
def bind(self, sock): ...
def wrap(self, sock): ...
def get_environ(self): ...
def makefile(self, sock, mode: str = ..., bufsize: int = ...): ...
| 1,018 | Python | .py | 24 | 39 | 125 | 0.672065 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
| 8,994 | pyopenssl.py | rembo10_headphones/lib/cheroot/ssl/pyopenssl.py |
"""
A library for integrating :doc:`pyOpenSSL <pyopenssl:index>` with Cheroot.
The :py:mod:`OpenSSL <pyopenssl:OpenSSL>` module must be importable
for SSL/TLS/HTTPS functionality.
You can obtain it from `here <https://github.com/pyca/pyopenssl>`_.
To use this module, set :py:attr:`HTTPServer.ssl_adapter
<cheroot.server.HTTPServer.ssl_adapter>` to an instance of
:py:class:`ssl.Adapter <cheroot.ssl.Adapter>`.
There are two ways to use :abbr:`TLS (Transport-Level Security)`:
Method One
----------
* :py:attr:`ssl_adapter.context
<cheroot.ssl.pyopenssl.pyOpenSSLAdapter.context>`: an instance of
:py:class:`SSL.Context <pyopenssl:OpenSSL.SSL.Context>`.
If this is not None, it is assumed to be an :py:class:`SSL.Context
<pyopenssl:OpenSSL.SSL.Context>` instance, and will be passed to
:py:class:`SSL.Connection <pyopenssl:OpenSSL.SSL.Connection>` on bind().
The developer is responsible for forming a valid :py:class:`Context
<pyopenssl:OpenSSL.SSL.Context>` object. This
approach is to be preferred for more flexibility, e.g. if the cert and
key are streams instead of files, or need decryption, or
:py:data:`SSL.SSLv3_METHOD <pyopenssl:OpenSSL.SSL.SSLv3_METHOD>`
is desired instead of the default :py:data:`SSL.SSLv23_METHOD
<pyopenssl:OpenSSL.SSL.SSLv3_METHOD>`, etc. Consult
the :doc:`pyOpenSSL <pyopenssl:api/ssl>` documentation for
complete options.
Method Two (shortcut)
---------------------
* :py:attr:`ssl_adapter.certificate
<cheroot.ssl.pyopenssl.pyOpenSSLAdapter.certificate>`: the file name
of the server's TLS certificate.
* :py:attr:`ssl_adapter.private_key
<cheroot.ssl.pyopenssl.pyOpenSSLAdapter.private_key>`: the file name
of the server's private key file.
Both are :py:data:`None` by default. If :py:attr:`ssl_adapter.context
<cheroot.ssl.pyopenssl.pyOpenSSLAdapter.context>` is :py:data:`None`,
but ``.private_key`` and ``.certificate`` are both given and valid, they
will be read, and the context will be automatically created from them.
.. spelling::
pyopenssl
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
import sys
import threading
import time
import six
try:
import OpenSSL.version
from OpenSSL import SSL
from OpenSSL import crypto
try:
ssl_conn_type = SSL.Connection
except AttributeError:
ssl_conn_type = SSL.ConnectionType
except ImportError:
SSL = None
from . import Adapter
from .. import errors, server as cheroot_server
from ..makefile import StreamReader, StreamWriter
class SSLFileobjectMixin:
"""Base mixin for a TLS socket stream."""
ssl_timeout = 3
ssl_retry = .01
# FIXME:
def _safe_call(self, is_reader, call, *args, **kwargs): # noqa: C901
"""Wrap the given call with TLS error-trapping.
is_reader: if False EOF errors will be raised. If True, EOF errors
will return "" (to emulate normal sockets).
"""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except SSL.WantReadError:
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
# Ref: https://stackoverflow.com/a/5133568/595220
time.sleep(self.ssl_retry)
except SSL.WantWriteError:
time.sleep(self.ssl_retry)
except SSL.SysCallError as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return b''
errnum = e.args[0]
if is_reader and errnum in errors.socket_errors_to_ignore:
return b''
raise socket.error(errnum)
except SSL.Error as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return b''
thirdarg = None
try:
thirdarg = e.args[0][0][2]
except IndexError:
pass
if thirdarg == 'http request':
# The client is talking HTTP to an HTTPS server.
raise errors.NoSSLError()
raise errors.FatalSSLAlert(*e.args)
if time.time() - start > self.ssl_timeout:
raise socket.timeout('timed out')
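    # Sketch of the loop above: WantReadError/WantWriteError from the
    # non-blocking TLS layer are treated as "try again after ssl_retry
    # seconds" until ssl_timeout elapses; SysCallError/Error are mapped to
    # plain socket errors (or b'' on EOF for readers) so callers can keep
    # using the ordinary socket-file protocol.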
def recv(self, size):
"""Receive message of a size from the socket."""
return self._safe_call(
True,
super(SSLFileobjectMixin, self).recv,
size,
)
def readline(self, size=-1):
"""Receive message of a size from the socket.
Matches the following interface:
https://docs.python.org/3/library/io.html#io.IOBase.readline
"""
return self._safe_call(
True,
super(SSLFileobjectMixin, self).readline,
size,
)
def sendall(self, *args, **kwargs):
"""Send whole message to the socket."""
return self._safe_call(
False,
super(SSLFileobjectMixin, self).sendall,
*args, **kwargs
)
def send(self, *args, **kwargs):
"""Send some part of message to the socket."""
return self._safe_call(
False,
super(SSLFileobjectMixin, self).send,
*args, **kwargs
)
class SSLFileobjectStreamReader(SSLFileobjectMixin, StreamReader):
"""SSL file object attached to a socket object."""
class SSLFileobjectStreamWriter(SSLFileobjectMixin, StreamWriter):
"""SSL file object attached to a socket object."""
class SSLConnectionProxyMeta:
"""Metaclass for generating a bunch of proxy methods."""
def __new__(mcl, name, bases, nmspc):
"""Attach a list of proxy methods to a new class."""
proxy_methods = (
'get_context', 'pending', 'send', 'write', 'recv', 'read',
'renegotiate', 'bind', 'listen', 'connect', 'accept',
'setblocking', 'fileno', 'close', 'get_cipher_list',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data', 'state_string',
'sock_shutdown', 'get_peer_certificate', 'want_read',
'want_write', 'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall', 'settimeout', 'gettimeout',
'shutdown',
)
proxy_methods_no_args = (
'shutdown',
)
proxy_props = (
'family',
)
def lock_decorator(method):
"""Create a proxy method for a new class."""
def proxy_wrapper(self, *args):
self._lock.acquire()
try:
new_args = (
args[:] if method not in proxy_methods_no_args else []
)
return getattr(self._ssl_conn, method)(*new_args)
finally:
self._lock.release()
return proxy_wrapper
for m in proxy_methods:
nmspc[m] = lock_decorator(m)
nmspc[m].__name__ = m
def make_property(property_):
"""Create a proxy method for a new class."""
def proxy_prop_wrapper(self):
return getattr(self._ssl_conn, property_)
proxy_prop_wrapper.__name__ = property_
return property(proxy_prop_wrapper)
for p in proxy_props:
nmspc[p] = make_property(p)
# Doesn't work via super() for some reason.
# Falling back to type() instead:
return type(name, bases, nmspc)
@six.add_metaclass(SSLConnectionProxyMeta)
class SSLConnection:
r"""A thread-safe wrapper for an ``SSL.Connection``.
:param tuple args: the arguments to create the wrapped \
:py:class:`SSL.Connection(*args) \
<pyopenssl:OpenSSL.SSL.Connection>`
"""
def __init__(self, *args):
"""Initialize SSLConnection instance."""
self._ssl_conn = SSL.Connection(*args)
self._lock = threading.RLock()
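

# Hedged sketch, not upstream code: every method listed in
# ``proxy_methods`` above is generated by ``lock_decorator`` and therefore
# acquires the connection's RLock before delegating, so reader and writer
# threads serialize access to the single underlying ``SSL.Connection``.
# ``ctx`` is a hypothetical ``SSL.Context`` and ``raw_sock`` a plain,
# already-bound socket.
def _demo_thread_safe_conn(ctx, raw_sock):
    conn = SSLConnection(ctx, raw_sock)
    conn.set_accept_state()  # proxied: runs under conn._lock
    return conn.state_string()  # proxied as well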
class pyOpenSSLAdapter(Adapter):
"""A wrapper for integrating pyOpenSSL with Cheroot."""
certificate = None
"""The file name of the server's TLS certificate."""
private_key = None
"""The file name of the server's private key file."""
certificate_chain = None
"""Optional. The file name of CA's intermediate certificate bundle.
This is needed for cheaper "chained root" TLS certificates,
and should be left as :py:data:`None` if not required."""
context = None
"""
An instance of :py:class:`SSL.Context <pyopenssl:OpenSSL.SSL.Context>`.
"""
ciphers = None
"""The ciphers list of TLS."""
def __init__(
self, certificate, private_key, certificate_chain=None,
ciphers=None,
):
"""Initialize OpenSSL Adapter instance."""
if SSL is None:
raise ImportError('You must install pyOpenSSL to use HTTPS.')
super(pyOpenSSLAdapter, self).__init__(
certificate, private_key, certificate_chain, ciphers,
)
self._environ = None
def bind(self, sock):
"""Wrap and return the given socket."""
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
        # pyOpenSSL doesn't perform the handshake until the first
        # read/write; forcing the handshake to complete early tends to
        # close the connection, so we can't reliably expose the protocol
        # or the client certificate in the environ here.
return sock, self._environ.copy()
def get_context(self):
"""Return an ``SSL.Context`` from self attributes.
Ref: :py:class:`SSL.Context <pyopenssl:OpenSSL.SSL.Context>`
"""
# See https://code.activestate.com/recipes/442473/
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
ssl_environ = {
'wsgi.url_scheme': 'https',
'HTTPS': 'on',
'SSL_VERSION_INTERFACE': '%s %s/%s Python/%s' % (
cheroot_server.HTTPServer.version,
OpenSSL.version.__title__, OpenSSL.version.__version__,
sys.version,
),
'SSL_VERSION_LIBRARY': SSL.SSLeay_version(
SSL.SSLEAY_VERSION,
).decode(),
}
if self.certificate:
# Server certificate attributes
with open(self.certificate, 'rb') as cert_file:
cert = crypto.load_certificate(
crypto.FILETYPE_PEM, cert_file.read(),
)
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
# 'SSL_SERVER_V_START':
# Validity of server's certificate (start time),
# 'SSL_SERVER_V_END':
# Validity of server's certificate (end time),
})
for prefix, dn in [
('I', cert.get_issuer()),
('S', cert.get_subject()),
]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind('=')
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind('/')
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
return ssl_environ
def makefile(self, sock, mode='r', bufsize=-1):
"""Return socket file object."""
cls = (
SSLFileobjectStreamReader
if 'r' in mode else
SSLFileobjectStreamWriter
)
if SSL and isinstance(sock, ssl_conn_type):
wrapped_socket = cls(sock, mode, bufsize)
wrapped_socket.ssl_timeout = sock.gettimeout()
return wrapped_socket
        # This is a leftover from the past.
        # TODO: figure out what it's meant for.
else:
return cheroot_server.CP_fileobject(sock, mode, bufsize)
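

# Hedged, self-contained illustration, not upstream code: the DN-splitting
# loop from ``get_environ`` above in isolation. It scans ``/k1=v1/k2=v2``
# from the right so that values containing ``/`` (e.g. URLs) survive
# intact. The sample DN is made up.
def _demo_split_dn(dnstr='/C=US/ST=CA/O=Acme/CN=www.example.com'):
    parts = {}
    while dnstr:
        pos = dnstr.rfind('=')
        dnstr, value = dnstr[:pos], dnstr[pos + 1:]
        pos = dnstr.rfind('/')
        dnstr, key = dnstr[:pos], dnstr[pos + 1:]
        if key and value:
            parts[key] = value
    # -> {'C': 'US', 'ST': 'CA', 'O': 'Acme', 'CN': 'www.example.com'}
    return parts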
| 13,339 | Python | .py | 313 | 32.527157 | 79 | 0.593116 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,995 | builtin.pyi | rembo10_headphones/lib/cheroot/ssl/builtin.pyi |
from typing import Any
from . import Adapter
generic_socket_error: OSError
DEFAULT_BUFFER_SIZE: int
class BuiltinSSLAdapter(Adapter):
CERT_KEY_TO_ENV: Any
CERT_KEY_TO_LDAP_CODE: Any
def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...) -> None: ...
@property
def context(self): ...
@context.setter
def context(self, context) -> None: ...
def bind(self, sock): ...
def wrap(self, sock): ...
def get_environ(self): ...
def makefile(self, sock, mode: str = ..., bufsize: int = ...): ...
| 585 | Python | .py | 16 | 32.6875 | 125 | 0.636684 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,996 | __init__.pyi | rembo10_headphones/lib/cheroot/ssl/__init__.pyi |
from abc import abstractmethod
from typing import Any
class Adapter():
certificate: Any
private_key: Any
certificate_chain: Any
ciphers: Any
context: Any
@abstractmethod
def __init__(self, certificate, private_key, certificate_chain: Any | None = ..., ciphers: Any | None = ...): ...
@abstractmethod
def bind(self, sock): ...
@abstractmethod
def wrap(self, sock): ...
@abstractmethod
def get_environ(self): ...
@abstractmethod
def makefile(self, sock, mode: str = ..., bufsize: int = ...): ...
| 555 | Python | .py | 18 | 26.444444 | 117 | 0.645522 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,997 | __init__.py | rembo10_headphones/lib/cheroot/ssl/__init__.py |
"""Implementation of the SSL adapter base interface."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from six import add_metaclass
@add_metaclass(ABCMeta)
class Adapter:
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) ->
socket file object``
"""
@abstractmethod
def __init__(
self, certificate, private_key, certificate_chain=None,
ciphers=None,
):
"""Set up certificates, private key ciphers and reset context."""
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
self.ciphers = ciphers
self.context = None
@abstractmethod
def bind(self, sock):
"""Wrap and return the given socket."""
return sock
@abstractmethod
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
raise NotImplementedError # pragma: no cover
@abstractmethod
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
raise NotImplementedError # pragma: no cover
@abstractmethod
def makefile(self, sock, mode='r', bufsize=-1):
"""Return socket file object."""
raise NotImplementedError # pragma: no cover
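

# Hedged sketch, not part of upstream cheroot: the smallest concrete
# subclass this ABC permits. ``_NullAdapter`` is hypothetical and performs
# no TLS at all; it only illustrates which methods a driver adapter must
# implement.
class _NullAdapter(Adapter):
    def __init__(
        self, certificate, private_key, certificate_chain=None,
        ciphers=None,
    ):
        super(_NullAdapter, self).__init__(
            certificate, private_key, certificate_chain, ciphers,
        )

    def bind(self, sock):
        return sock  # no wrapping at bind time

    def wrap(self, sock):
        return sock, {}  # no TLS, so no extra environ entries

    def get_environ(self):
        return {}

    def makefile(self, sock, mode='r', bufsize=-1):
        return sock.makefile(mode, bufsize)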
| 1,539 | Python | .py | 40 | 31.725 | 74 | 0.663753 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,998 | builtin.py | rembo10_headphones/lib/cheroot/ssl/builtin.py |
"""
A library for integrating Python's builtin :py:mod:`ssl` library with Cheroot.
The :py:mod:`ssl` module must be importable for SSL functionality.
To use this module, set ``HTTPServer.ssl_adapter`` to an instance of
``BuiltinSSLAdapter``.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
import sys
import threading
try:
import ssl
except ImportError:
ssl = None
try:
from _pyio import DEFAULT_BUFFER_SIZE
except ImportError:
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = -1
import six
from . import Adapter
from .. import errors
from .._compat import IS_ABOVE_OPENSSL10, suppress
from ..makefile import StreamReader, StreamWriter
from ..server import HTTPServer
if six.PY2:
generic_socket_error = socket.error
else:
generic_socket_error = OSError
def _assert_ssl_exc_contains(exc, *msgs):
"""Check whether SSL exception contains either of messages provided."""
if len(msgs) < 1:
raise TypeError(
'_assert_ssl_exc_contains() requires '
'at least one message to be passed.',
)
err_msg_lower = str(exc).lower()
return any(m.lower() in err_msg_lower for m in msgs)
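

# Hedged illustration, not upstream code: how the helper above classifies
# handshake failures by their message text, case-insensitively. It assumes
# the ``ssl`` module imported above is available; the error string is a
# fabricated example of what builtin ssl may raise when a client speaks
# plain HTTP to an HTTPS port.
def _demo_classify_ssl_error():
    exc = ssl.SSLError(1, '[SSL: HTTP_REQUEST] http request (_ssl.c:997)')
    return _assert_ssl_exc_contains(exc, 'http request')  # -> True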
def _loopback_for_cert_thread(context, server):
"""Wrap a socket in ssl and perform the server-side handshake."""
# As we only care about parsing the certificate, the failure of
# which will cause an exception in ``_loopback_for_cert``,
# we can safely ignore connection and ssl related exceptions. Ref:
# https://github.com/cherrypy/cheroot/issues/302#issuecomment-662592030
with suppress(ssl.SSLError, OSError):
with context.wrap_socket(
server, do_handshake_on_connect=True, server_side=True,
) as ssl_sock:
# in TLS 1.3 (Python 3.7+, OpenSSL 1.1.1+), the server
# sends the client session tickets that can be used to
# resume the TLS session on a new connection without
            # performing the full handshake again. Session tickets are
# sent as a post-handshake message at some _unspecified_
# time and thus a successful connection may be closed
# without the client having received the tickets.
# Unfortunately, on Windows (Python 3.8+), this is treated
# as an incomplete handshake on the server side and a
# ``ConnectionAbortedError`` is raised.
# TLS 1.3 support is still incomplete in Python 3.8;
# there is no way for the client to wait for tickets.
# While not necessary for retrieving the parsed certificate,
# we send a tiny bit of data over the connection in an
# attempt to give the server a chance to send the session
# tickets and close the connection cleanly.
# Note that, as this is essentially a race condition,
            # the error may still occur occasionally.
ssl_sock.send(b'0000')
def _loopback_for_cert(certificate, private_key, certificate_chain):
"""Create a loopback connection to parse a cert with a private key."""
context = ssl.create_default_context(cafile=certificate_chain)
context.load_cert_chain(certificate, private_key)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
# Python 3+ Unix, Python 3.5+ Windows
client, server = socket.socketpair()
try:
# `wrap_socket` will block until the ssl handshake is complete.
# it must be called on both ends at the same time -> thread
# openssl will cache the peer's cert during a successful handshake
# and return it via `getpeercert` even after the socket is closed.
# when `close` is called, the SSL shutdown notice will be sent
# and then python will wait to receive the corollary shutdown.
thread = threading.Thread(
target=_loopback_for_cert_thread, args=(context, server),
)
try:
thread.start()
with context.wrap_socket(
client, do_handshake_on_connect=True,
server_side=False,
) as ssl_sock:
ssl_sock.recv(4)
return ssl_sock.getpeercert()
finally:
thread.join()
finally:
client.close()
server.close()
def _parse_cert(certificate, private_key, certificate_chain):
"""Parse a certificate."""
# loopback_for_cert uses socket.socketpair which was only
# introduced in Python 3.0 for *nix and 3.5 for Windows
# and requires OS support (AttributeError, OSError)
# it also requires a private key either in its own file
# or combined with the cert (SSLError)
with suppress(AttributeError, ssl.SSLError, OSError):
return _loopback_for_cert(certificate, private_key, certificate_chain)
# KLUDGE: using an undocumented, private, test method to parse a cert
# unfortunately, it is the only built-in way without a connection
# as a private, undocumented method, it may change at any time
# so be tolerant of *any* possible errors it may raise
with suppress(Exception):
return ssl._ssl._test_decode_cert(certificate)
return {}
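

# Hedged sketch, not upstream code: ``_parse_cert`` yields the same mapping
# shape as ``ssl.SSLSocket.getpeercert`` -- keys such as 'subject',
# 'issuer', 'notBefore' and 'notAfter' -- or ``{}`` when both the loopback
# handshake and the private ``_test_decode_cert`` kludge fail. The file
# paths below are hypothetical.
def _demo_parse_cert():
    parsed = _parse_cert('server.pem', 'server.key', None)
    # e.g. ((('commonName', 'www.example.com'),),) for a typical subject
    return parsed.get('subject', ())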
def _sni_callback(sock, sni, context):
"""Handle the SNI callback to tag the socket with the SNI."""
sock.sni = sni
# return None to allow the TLS negotiation to continue
class BuiltinSSLAdapter(Adapter):
"""Wrapper for integrating Python's builtin :py:mod:`ssl` with Cheroot."""
certificate = None
"""The file name of the server SSL certificate."""
private_key = None
"""The file name of the server's private key file."""
certificate_chain = None
"""The file name of the certificate chain file."""
ciphers = None
"""The ciphers list of SSL."""
# from mod_ssl/pkg.sslmod/ssl_engine_vars.c ssl_var_lookup_ssl_cert
CERT_KEY_TO_ENV = {
'version': 'M_VERSION',
'serialNumber': 'M_SERIAL',
'notBefore': 'V_START',
'notAfter': 'V_END',
'subject': 'S_DN',
'issuer': 'I_DN',
'subjectAltName': 'SAN',
# not parsed by the Python standard library
# - A_SIG
# - A_KEY
# not provided by mod_ssl
# - OCSP
# - caIssuers
# - crlDistributionPoints
}
# from mod_ssl/pkg.sslmod/ssl_engine_vars.c ssl_var_lookup_ssl_cert_dn_rec
CERT_KEY_TO_LDAP_CODE = {
'countryName': 'C',
'stateOrProvinceName': 'ST',
# NOTE: mod_ssl also provides 'stateOrProvinceName' as 'SP'
# for compatibility with SSLeay
'localityName': 'L',
'organizationName': 'O',
'organizationalUnitName': 'OU',
'commonName': 'CN',
'title': 'T',
'initials': 'I',
'givenName': 'G',
'surname': 'S',
'description': 'D',
'userid': 'UID',
'emailAddress': 'Email',
# not provided by mod_ssl
# - dnQualifier: DNQ
# - domainComponent: DC
# - postalCode: PC
# - streetAddress: STREET
# - serialNumber
# - generationQualifier
# - pseudonym
# - jurisdictionCountryName
# - jurisdictionLocalityName
# - jurisdictionStateOrProvince
# - businessCategory
}
def __init__(
self, certificate, private_key, certificate_chain=None,
ciphers=None,
):
"""Set up context in addition to base class properties if available."""
if ssl is None:
raise ImportError('You must install the ssl module to use HTTPS.')
super(BuiltinSSLAdapter, self).__init__(
certificate, private_key, certificate_chain, ciphers,
)
self.context = ssl.create_default_context(
purpose=ssl.Purpose.CLIENT_AUTH,
cafile=certificate_chain,
)
self.context.load_cert_chain(certificate, private_key)
if self.ciphers is not None:
self.context.set_ciphers(ciphers)
self._server_env = self._make_env_cert_dict(
'SSL_SERVER',
_parse_cert(certificate, private_key, self.certificate_chain),
)
if not self._server_env:
return
cert = None
with open(certificate, mode='rt') as f:
cert = f.read()
# strip off any keys by only taking the first certificate
cert_start = cert.find(ssl.PEM_HEADER)
if cert_start == -1:
return
cert_end = cert.find(ssl.PEM_FOOTER, cert_start)
if cert_end == -1:
return
cert_end += len(ssl.PEM_FOOTER)
self._server_env['SSL_SERVER_CERT'] = cert[cert_start:cert_end]
@property
def context(self):
""":py:class:`~ssl.SSLContext` that will be used to wrap sockets."""
return self._context
@context.setter
def context(self, context):
"""Set the ssl ``context`` to use."""
self._context = context
# Python 3.7+
# if a context is provided via `cherrypy.config.update` then
# `self.context` will be set after `__init__`
# use a property to intercept it to add an SNI callback
# but don't override the user's callback
# TODO: chain callbacks
with suppress(AttributeError):
if ssl.HAS_SNI and context.sni_callback is None:
context.sni_callback = _sni_callback
def bind(self, sock):
"""Wrap and return the given socket."""
return super(BuiltinSSLAdapter, self).bind(sock)
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
EMPTY_RESULT = None, {}
try:
s = self.context.wrap_socket(
sock, do_handshake_on_connect=True, server_side=True,
)
except ssl.SSLError as ex:
if ex.errno == ssl.SSL_ERROR_EOF:
# This is almost certainly due to the cherrypy engine
# 'pinging' the socket to assert it's connectable;
# the 'ping' isn't SSL.
return EMPTY_RESULT
elif ex.errno == ssl.SSL_ERROR_SSL:
if _assert_ssl_exc_contains(ex, 'http request'):
# The client is speaking HTTP to an HTTPS server.
raise errors.NoSSLError
# Check if it's one of the known errors
# Errors that are caught by PyOpenSSL, but thrown by
# built-in ssl
_block_errors = (
'unknown protocol', 'unknown ca', 'unknown_ca',
'unknown error',
'https proxy request', 'inappropriate fallback',
'wrong version number',
'no shared cipher', 'certificate unknown',
'ccs received early',
'certificate verify failed', # client cert w/o trusted CA
'version too low', # caused by SSL3 connections
'unsupported protocol', # caused by TLS1 connections
)
if _assert_ssl_exc_contains(ex, *_block_errors):
# Accepted error, let's pass
return EMPTY_RESULT
elif _assert_ssl_exc_contains(ex, 'handshake operation timed out'):
# This error is thrown by builtin SSL after a timeout
# when client is speaking HTTP to an HTTPS server.
# The connection can safely be dropped.
return EMPTY_RESULT
raise
except generic_socket_error as exc:
"""It is unclear why exactly this happens.
It's reproducible only with openssl>1.0 and stdlib
:py:mod:`ssl` wrapper.
In CherryPy it's triggered by Checker plugin, which connects
to the app listening to the socket port in TLS mode via plain
HTTP during startup (from the same process).
Ref: https://github.com/cherrypy/cherrypy/issues/1618
"""
is_error0 = exc.args == (0, 'Error')
if is_error0 and IS_ABOVE_OPENSSL10:
return EMPTY_RESULT
raise
return s, self.get_environ(s)
def get_environ(self, sock):
"""Create WSGI environ entries to be merged into each request."""
cipher = sock.cipher()
ssl_environ = {
'wsgi.url_scheme': 'https',
'HTTPS': 'on',
'SSL_PROTOCOL': cipher[1],
'SSL_CIPHER': cipher[0],
'SSL_CIPHER_EXPORT': '',
'SSL_CIPHER_USEKEYSIZE': cipher[2],
'SSL_VERSION_INTERFACE': '%s Python/%s' % (
HTTPServer.version, sys.version,
),
'SSL_VERSION_LIBRARY': ssl.OPENSSL_VERSION,
'SSL_CLIENT_VERIFY': 'NONE',
            # 'NONE' - client did not provide a cert (overridden below)
}
# Python 3.3+
with suppress(AttributeError):
compression = sock.compression()
if compression is not None:
ssl_environ['SSL_COMPRESS_METHOD'] = compression
# Python 3.6+
with suppress(AttributeError):
ssl_environ['SSL_SESSION_ID'] = sock.session.id.hex()
with suppress(AttributeError):
target_cipher = cipher[:2]
for cip in sock.context.get_ciphers():
if target_cipher == (cip['name'], cip['protocol']):
ssl_environ['SSL_CIPHER_ALGKEYSIZE'] = cip['alg_bits']
break
# Python 3.7+ sni_callback
with suppress(AttributeError):
ssl_environ['SSL_TLS_SNI'] = sock.sni
if self.context and self.context.verify_mode != ssl.CERT_NONE:
client_cert = sock.getpeercert()
if client_cert:
# builtin ssl **ALWAYS** validates client certificates
# and terminates the connection on failure
ssl_environ['SSL_CLIENT_VERIFY'] = 'SUCCESS'
ssl_environ.update(
self._make_env_cert_dict('SSL_CLIENT', client_cert),
)
ssl_environ['SSL_CLIENT_CERT'] = ssl.DER_cert_to_PEM_cert(
sock.getpeercert(binary_form=True),
).strip()
ssl_environ.update(self._server_env)
# not supplied by the Python standard library (as of 3.8)
# - SSL_SESSION_RESUMED
# - SSL_SECURE_RENEG
# - SSL_CLIENT_CERT_CHAIN_n
# - SRP_USER
# - SRP_USERINFO
return ssl_environ
def _make_env_cert_dict(self, env_prefix, parsed_cert):
"""Return a dict of WSGI environment variables for a certificate.
E.g. SSL_CLIENT_M_VERSION, SSL_CLIENT_M_SERIAL, etc.
See https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#envvars.
"""
if not parsed_cert:
return {}
env = {}
for cert_key, env_var in self.CERT_KEY_TO_ENV.items():
key = '%s_%s' % (env_prefix, env_var)
value = parsed_cert.get(cert_key)
if env_var == 'SAN':
env.update(self._make_env_san_dict(key, value))
elif env_var.endswith('_DN'):
env.update(self._make_env_dn_dict(key, value))
else:
env[key] = str(value)
# mod_ssl 2.1+; Python 3.2+
# number of days until the certificate expires
if 'notBefore' in parsed_cert:
remain = ssl.cert_time_to_seconds(parsed_cert['notAfter'])
remain -= ssl.cert_time_to_seconds(parsed_cert['notBefore'])
remain /= 60 * 60 * 24
env['%s_V_REMAIN' % (env_prefix,)] = str(int(remain))
return env
def _make_env_san_dict(self, env_prefix, cert_value):
"""Return a dict of WSGI environment variables for a certificate DN.
E.g. SSL_CLIENT_SAN_Email_0, SSL_CLIENT_SAN_DNS_0, etc.
See SSL_CLIENT_SAN_* at
https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#envvars.
"""
if not cert_value:
return {}
env = {}
dns_count = 0
email_count = 0
for attr_name, val in cert_value:
if attr_name == 'DNS':
env['%s_DNS_%i' % (env_prefix, dns_count)] = val
dns_count += 1
elif attr_name == 'Email':
env['%s_Email_%i' % (env_prefix, email_count)] = val
email_count += 1
# other mod_ssl SAN vars:
# - SAN_OTHER_msUPN_n
return env
def _make_env_dn_dict(self, env_prefix, cert_value):
"""Return a dict of WSGI environment variables for a certificate DN.
E.g. SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_C, etc.
See SSL_CLIENT_S_DN_x509 at
https://httpd.apache.org/docs/2.4/mod/mod_ssl.html#envvars.
"""
if not cert_value:
return {}
dn = []
dn_attrs = {}
for rdn in cert_value:
for attr_name, val in rdn:
attr_code = self.CERT_KEY_TO_LDAP_CODE.get(attr_name)
dn.append('%s=%s' % (attr_code or attr_name, val))
if not attr_code:
continue
dn_attrs.setdefault(attr_code, [])
dn_attrs[attr_code].append(val)
env = {
env_prefix: ','.join(dn),
}
for attr_code, values in dn_attrs.items():
env['%s_%s' % (env_prefix, attr_code)] = ','.join(values)
if len(values) == 1:
continue
for i, val in enumerate(values):
env['%s_%s_%i' % (env_prefix, attr_code, i)] = val
return env
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
"""Return socket file object."""
cls = StreamReader if 'r' in mode else StreamWriter
return cls(sock, mode, bufsize)
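

# Hedged usage sketch, not upstream code: attaching the adapter to a
# cheroot WSGI server. The certificate and key paths are hypothetical;
# assigning to ``ssl_adapter`` is how cheroot servers enable TLS.
def _demo_serve(app):
    from cheroot import wsgi
    server = wsgi.Server(('0.0.0.0', 8443), app)
    server.ssl_adapter = BuiltinSSLAdapter('server.pem', 'server.key')
    return server  # the caller invokes server.start()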
| 18,045 | Python | .py | 419 | 32.792363 | 79 | 0.591059 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |
8,999 | threadpool.pyi | rembo10_headphones/lib/cheroot/workers/threadpool.pyi |
import threading
from typing import Any
class TrueyZero:
def __add__(self, other): ...
def __radd__(self, other): ...
trueyzero: TrueyZero
class WorkerThread(threading.Thread):
conn: Any
server: Any
ready: bool
requests_seen: int
bytes_read: int
bytes_written: int
start_time: Any
work_time: int
stats: Any
def __init__(self, server): ...
def run(self) -> None: ...
class ThreadPool:
server: Any
min: Any
max: Any
get: Any
def __init__(self, server, min: int = ..., max: int = ..., accepted_queue_size: int = ..., accepted_queue_timeout: int = ...) -> None: ...
def start(self) -> None: ...
@property
def idle(self): ...
def put(self, obj) -> None: ...
def grow(self, amount) -> None: ...
def shrink(self, amount) -> None: ...
def stop(self, timeout: int = ...) -> None: ...
@property
def qsize(self) -> int: ...
| 925 | Python | .py | 33 | 23.636364 | 142 | 0.585586 | rembo10/headphones | 3,370 | 601 | 527 | GPL-3.0 | 9/5/2024, 5:10:38 PM (Europe/Amsterdam) |