repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
gevent | gevent-master/src/gevent/tests/test__monkey_futures_thread.py | # -*- coding: utf-8 -*-
"""
Tests that on Python 2, if the futures backport of 'thread' is already
imported before we monkey-patch, it gets patched too.
"""
import unittest
try:
import thread
import _thread
HAS_BOTH = True
except ImportError:
HAS_BOTH = False
class TestMonkey(unittest.TestCase):
@unittest.skipUnless(HAS_BOTH, "Python 2, needs future backport installed")
def test_patches_both(self):
thread_lt = thread.LockType
_thread_lt = _thread.LockType
self.assertIs(thread_lt, _thread_lt)
from gevent.thread import LockType as gLockType
self.assertIsNot(thread_lt, gLockType)
import gevent.monkey
gevent.monkey.patch_all()
thread_lt2 = thread.LockType
_thread_lt2 = _thread.LockType
self.assertIs(thread_lt2, gLockType)
self.assertIs(_thread_lt2, gLockType)
self.assertIs(thread_lt2, _thread_lt2)
self.assertIsNot(thread_lt2, thread_lt)
# Retrieving the original on the old name still works
orig_locktype = gevent.monkey.get_original('thread', 'LockType')
self.assertIs(orig_locktype, thread_lt)
# And the new name
orig__locktype = gevent.monkey.get_original('_thread', 'LockType')
self.assertIs(orig__locktype, thread_lt)
if __name__ == '__main__':
unittest.main()
| 1,367 | 26.36 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__core_fork.py | from __future__ import print_function
from gevent import monkey
monkey.patch_all()
import os
import unittest
import multiprocessing
import gevent
hub = gevent.get_hub()
pid = os.getpid()
newpid = None
def on_fork():
global newpid
newpid = os.getpid()
fork_watcher = hub.loop.fork(ref=False)
fork_watcher.start(on_fork)
def in_child(q):
# libev only calls fork callbacks at the beginning of
# the loop; we use callbacks extensively so it takes *two*
# calls to sleep (with a timer) to actually get wrapped
# around to the beginning of the loop.
gevent.sleep(0.001)
gevent.sleep(0.001)
q.put(newpid)
class Test(unittest.TestCase):
def test(self):
self.assertEqual(hub.threadpool.size, 0)
# Use a thread to make us multi-threaded
hub.threadpool.apply(lambda: None)
self.assertEqual(hub.threadpool.size, 1)
# Not all platforms use fork by default, so we want to force it,
# where possible. The test is still useful even if we can't
# fork though.
try:
fork_ctx = multiprocessing.get_context('fork')
except (AttributeError, ValueError):
# ValueError if fork isn't supported.
# AttributeError on Python 2, which doesn't have get_context
fork_ctx = multiprocessing
# If the Queue is global, q.get() hangs on Windows; must pass as
# an argument.
q = fork_ctx.Queue()
p = fork_ctx.Process(target=in_child, args=(q,))
p.start()
p.join()
p_val = q.get()
self.assertIsNone(
newpid,
"The fork watcher ran in the parent for some reason."
)
self.assertIsNotNone(
p_val,
"The child process returned nothing, meaning the fork watcher didn't run in the child."
)
self.assertNotEqual(p_val, pid)
assert p_val != pid
if __name__ == '__main__':
# Must call for Windows to fork properly; the fork can't be in the top-level
multiprocessing.freeze_support()
# fork watchers weren't firing in multi-threading processes.
# This test is designed to prove that they are.
# However, it fails on Windows: The fork watcher never runs!
# This makes perfect sense: on Windows, our patches to os.fork()
# that call gevent.hub.reinit() don't get used; os.fork doesn't
# exist and multiprocessing.Process uses the windows-specific _subprocess.CreateProcess()
# to create a whole new process that has no relation to the current process;
# that process then calls multiprocessing.forking.main() to do its work.
# Since no state is shared, a fork watcher cannot exist in that process.
unittest.main()
| 2,738 | 31.223529 | 99 | py |
gevent | gevent-master/src/gevent/tests/test__exc_info.py | import gevent
import sys
import gevent.testing as greentest
from gevent.testing import six
from gevent.testing import ExpectedException as ExpectedError
if six.PY2:
sys.exc_clear()
class RawException(Exception):
pass
def hello(err):
assert sys.exc_info() == (None, None, None), sys.exc_info()
raise err
def hello2():
try:
hello(ExpectedError('expected exception in hello'))
except ExpectedError:
pass
class Test(greentest.TestCase):
def test1(self):
error = RawException('hello')
expected_error = ExpectedError('expected exception in hello')
try:
raise error
except RawException:
self.expect_one_error()
g = gevent.spawn(hello, expected_error)
g.join()
self.assert_error(ExpectedError, expected_error)
self.assertIsInstance(g.exception, ExpectedError)
try:
raise
except: # pylint:disable=bare-except
ex = sys.exc_info()[1]
self.assertIs(ex, error)
def test2(self):
timer = gevent.get_hub().loop.timer(0)
timer.start(hello2)
try:
gevent.sleep(0.1)
self.assertEqual(sys.exc_info(), (None, None, None))
finally:
timer.close()
if __name__ == '__main__':
greentest.main()
| 1,377 | 22.355932 | 69 | py |
gevent | gevent-master/src/gevent/tests/test__greenletset.py | from __future__ import print_function, division, absolute_import
import time
import gevent.testing as greentest
from gevent.testing import timing
import gevent
from gevent import pool
from gevent.timeout import Timeout
DELAY = timing.LARGE_TICK
class SpecialError(Exception):
pass
class Undead(object):
def __init__(self):
self.shot_count = 0
def __call__(self):
while True:
try:
gevent.sleep(1)
except SpecialError:
break
except: # pylint:disable=bare-except
self.shot_count += 1
class Test(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def test_basic(self):
s = pool.Group()
s.spawn(gevent.sleep, timing.LARGE_TICK)
self.assertEqual(len(s), 1, s)
s.spawn(gevent.sleep, timing.LARGE_TICK * 5)
self.assertEqual(len(s), 2, s)
gevent.sleep()
gevent.sleep(timing.LARGE_TICK * 2 + timing.LARGE_TICK_MIN_ADJ)
self.assertEqual(len(s), 1, s)
gevent.sleep(timing.LARGE_TICK * 5 + timing.LARGE_TICK_MIN_ADJ)
self.assertFalse(s)
def test_waitall(self):
s = pool.Group()
s.spawn(gevent.sleep, DELAY)
s.spawn(gevent.sleep, DELAY * 2)
assert len(s) == 2, s
start = time.time()
s.join(raise_error=True)
delta = time.time() - start
self.assertFalse(s)
self.assertEqual(len(s), 0)
self.assertTimeWithinRange(delta, DELAY * 1.9, DELAY * 2.5)
def test_kill_block(self):
s = pool.Group()
s.spawn(gevent.sleep, DELAY)
s.spawn(gevent.sleep, DELAY * 2)
assert len(s) == 2, s
start = time.time()
s.kill()
self.assertFalse(s)
self.assertEqual(len(s), 0)
delta = time.time() - start
assert delta < DELAY * 0.8, delta
def test_kill_noblock(self):
s = pool.Group()
s.spawn(gevent.sleep, DELAY)
s.spawn(gevent.sleep, DELAY * 2)
assert len(s) == 2, s
s.kill(block=False)
assert len(s) == 2, s
gevent.sleep(0.0001)
self.assertFalse(s)
self.assertEqual(len(s), 0)
def test_kill_fires_once(self):
u1 = Undead()
u2 = Undead()
p1 = gevent.spawn(u1)
p2 = gevent.spawn(u2)
def check(count1, count2):
self.assertTrue(p1)
self.assertTrue(p2)
self.assertFalse(p1.dead, p1)
self.assertFalse(p2.dead, p2)
self.assertEqual(u1.shot_count, count1)
self.assertEqual(u2.shot_count, count2)
gevent.sleep(0.01)
s = pool.Group([p1, p2])
self.assertEqual(len(s), 2, s)
check(0, 0)
s.killone(p1, block=False)
check(0, 0)
gevent.sleep(0)
check(1, 0)
s.killone(p1)
check(1, 0)
s.killone(p1)
check(1, 0)
s.kill(block=False)
s.kill(block=False)
s.kill(block=False)
check(1, 0)
gevent.sleep(DELAY)
check(1, 1)
X = object()
kill_result = gevent.with_timeout(DELAY, s.kill, block=True, timeout_value=X)
assert kill_result is X, repr(kill_result)
assert len(s) == 2, s
check(1, 1)
p1.kill(SpecialError)
p2.kill(SpecialError)
def test_killall_subclass(self):
p1 = GreenletSubclass.spawn(lambda: 1 / 0)
p2 = GreenletSubclass.spawn(lambda: gevent.sleep(10))
s = pool.Group([p1, p2])
s.kill()
def test_killall_iterable_argument_non_block(self):
p1 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5))
p2 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5))
s = set()
s.add(p1)
s.add(p2)
gevent.killall(s, block=False)
gevent.sleep(0.5)
for g in s:
assert g.dead
def test_killall_iterable_argument_timeout_not_started(self):
def f():
try:
gevent.sleep(1.5)
except: # pylint:disable=bare-except
gevent.sleep(1)
p1 = GreenletSubclass.spawn(f)
p2 = GreenletSubclass.spawn(f)
s = set()
s.add(p1)
s.add(p2)
gevent.killall(s, timeout=0.5)
for g in s:
self.assertTrue(g.dead, g)
def test_killall_iterable_argument_timeout_started(self):
def f():
try:
gevent.sleep(1.5)
except: # pylint:disable=bare-except
gevent.sleep(1)
p1 = GreenletSubclass.spawn(f)
p2 = GreenletSubclass.spawn(f)
s = set()
s.add(p1)
s.add(p2)
# Get them both running.
gevent.sleep(timing.SMALLEST_RELIABLE_DELAY)
with self.assertRaises(Timeout):
gevent.killall(s, timeout=0.5)
for g in s:
self.assertFalse(g.dead, g)
class GreenletSubclass(gevent.Greenlet):
pass
if __name__ == '__main__':
greentest.main()
| 5,032 | 26.353261 | 85 | py |
gevent | gevent-master/src/gevent/tests/test__import_blocking_in_greenlet.py | #!/usr/bin/python
# See https://github.com/gevent/gevent/issues/108
import gevent
from gevent import monkey
monkey.patch_all()
import_errors = []
def some_func():
try:
from _blocks_at_top_level import x
assert x == 'done'
except ImportError as e:
import_errors.append(e)
raise
gs = [gevent.spawn(some_func) for i in range(2)]
gevent.joinall(gs)
assert not import_errors, import_errors
| 431 | 17.782609 | 49 | py |
gevent | gevent-master/src/gevent/tests/test__core.py |
from __future__ import absolute_import, print_function, division
# Important: This file should have no dependencies that are part of the
# ``test`` extra, because it is sometimes run for quick checks without those
# installed.
import unittest
import sys
import gevent.testing as greentest
from gevent._config import Loop
available_loops = Loop().get_options()
available_loops.pop('libuv', None)
def not_available(name):
return isinstance(available_loops[name], ImportError)
class WatcherTestMixin(object):
kind = None
def _makeOne(self):
return self.kind(default=False) # pylint:disable=not-callable
def destroyOne(self, loop):
loop.destroy()
def setUp(self):
self.loop = self._makeOne()
self.core = sys.modules[self.kind.__module__]
def tearDown(self):
self.destroyOne(self.loop)
del self.loop
def test_get_version(self):
version = self.core.get_version() # pylint: disable=no-member
self.assertIsInstance(version, str)
self.assertTrue(version)
header_version = self.core.get_header_version() # pylint: disable=no-member
self.assertIsInstance(header_version, str)
self.assertTrue(header_version)
self.assertEqual(version, header_version)
def test_events_conversion(self):
self.assertEqual(self.core._events_to_str(self.core.READ | self.core.WRITE), # pylint: disable=no-member
'READ|WRITE')
def test_EVENTS(self):
self.assertEqual(str(self.core.EVENTS), # pylint: disable=no-member
'gevent.core.EVENTS')
self.assertEqual(repr(self.core.EVENTS), # pylint: disable=no-member
'gevent.core.EVENTS')
def test_io(self):
if greentest.WIN:
# libev raises IOError, libuv raises ValueError
Error = (IOError, ValueError)
else:
Error = ValueError
with self.assertRaises(Error):
self.loop.io(-1, 1)
if hasattr(self.core, 'TIMER'):
# libev
with self.assertRaises(ValueError):
self.loop.io(1, self.core.TIMER) # pylint:disable=no-member
# Test we can set events and io before it's started
if not greentest.WIN:
# We can't do this with arbitrary FDs on windows;
# see libev_vfd.h
io = self.loop.io(1, self.core.READ) # pylint:disable=no-member
io.fd = 2
self.assertEqual(io.fd, 2)
io.events = self.core.WRITE # pylint:disable=no-member
if not hasattr(self.core, 'libuv'):
# libev
# pylint:disable=no-member
self.assertEqual(self.core._events_to_str(io.events), 'WRITE|_IOFDSET')
else:
self.assertEqual(self.core._events_to_str(io.events), # pylint:disable=no-member
'WRITE')
io.start(lambda: None)
io.close()
def test_timer_constructor(self):
with self.assertRaises(ValueError):
self.loop.timer(1, -1)
def test_signal_constructor(self):
with self.assertRaises(ValueError):
self.loop.signal(1000)
class LibevTestMixin(WatcherTestMixin):
def test_flags_conversion(self):
# pylint: disable=no-member
core = self.core
if not greentest.WIN:
self.assertEqual(core.loop(2, default=False).backend_int, 2)
self.assertEqual(core.loop('select', default=False).backend, 'select')
self.assertEqual(core._flags_to_int(None), 0)
self.assertEqual(core._flags_to_int(['kqueue', 'SELECT']), core.BACKEND_KQUEUE | core.BACKEND_SELECT)
self.assertEqual(core._flags_to_list(core.BACKEND_PORT | core.BACKEND_POLL), ['port', 'poll'])
self.assertRaises(ValueError, core.loop, ['port', 'blabla'])
self.assertRaises(TypeError, core.loop, object())
@unittest.skipIf(not_available('libev-cext'), "Needs libev-cext")
class TestLibevCext(LibevTestMixin, unittest.TestCase):
kind = available_loops['libev-cext']
@unittest.skipIf(not_available('libev-cffi'), "Needs libev-cffi")
class TestLibevCffi(LibevTestMixin, unittest.TestCase):
kind = available_loops['libev-cffi']
@unittest.skipIf(not_available('libuv-cffi'), "Needs libuv-cffi")
class TestLibuvCffi(WatcherTestMixin, unittest.TestCase):
kind = available_loops['libuv-cffi']
@greentest.skipOnLibev("libuv-specific")
@greentest.skipOnWindows("Destroying the loop somehow fails")
def test_io_multiplex_events(self):
# pylint:disable=no-member
import socket
sock = socket.socket()
fd = sock.fileno()
core = self.core
read = self.loop.io(fd, core.READ)
write = self.loop.io(fd, core.WRITE)
try:
real_watcher = read._watcher_ref
read.start(lambda: None)
self.assertEqual(real_watcher.events, core.READ)
write.start(lambda: None)
self.assertEqual(real_watcher.events, core.READ | core.WRITE)
write.stop()
self.assertEqual(real_watcher.events, core.READ)
write.start(lambda: None)
self.assertEqual(real_watcher.events, core.READ | core.WRITE)
read.stop()
self.assertEqual(real_watcher.events, core.WRITE)
write.stop()
self.assertEqual(real_watcher.events, 0)
finally:
read.close()
write.close()
sock.close()
if __name__ == '__main__':
greentest.main()
| 5,618 | 33.054545 | 112 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_module_run.py | """
Tests for running ``gevent.monkey`` as a module to launch a
patched script.
Uses files in the ``monkey_package/`` directory.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import os.path
import sys
from gevent import testing as greentest
from gevent.testing.util import absolute_pythonpath
from gevent.testing.util import run
class TestRun(greentest.TestCase):
maxDiff = None
def setUp(self):
self.abs_pythonpath = absolute_pythonpath() # before we cd
self.cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
def tearDown(self):
os.chdir(self.cwd)
def _run(self, script, module=False):
env = os.environ.copy()
env['PYTHONWARNINGS'] = 'ignore'
if self.abs_pythonpath:
env['PYTHONPATH'] = self.abs_pythonpath
run_kwargs = dict(
buffer_output=True,
quiet=True,
nested=True,
env=env,
timeout=10,
)
args = [sys.executable, '-m', 'gevent.monkey']
if module:
args.append('--module')
args += [script, 'patched']
monkey_result = run(
args,
**run_kwargs
)
self.assertTrue(monkey_result)
if module:
args = [sys.executable, "-m", script, 'stdlib']
else:
args = [sys.executable, script, 'stdlib']
std_result = run(
args,
**run_kwargs
)
self.assertTrue(std_result)
monkey_out_lines = monkey_result.output_lines
std_out_lines = std_result.output_lines
self.assertEqual(monkey_out_lines, std_out_lines)
self.assertEqual(monkey_result.error, std_result.error)
return monkey_out_lines
def test_run_simple(self):
self._run(os.path.join('monkey_package', 'script.py'))
def _run_package(self, module):
lines = self._run('monkey_package', module=module)
self.assertTrue(lines[0].endswith(u'__main__.py'), lines[0])
self.assertEqual(lines[1].strip(), u'__main__')
def test_run_package(self):
# Run a __main__ inside a package, even without specifying -m
self._run_package(module=False)
def test_run_module(self):
# Run a __main__ inside a package, when specifying -m
self._run_package(module=True)
def test_issue_302(self):
monkey_lines = self._run(os.path.join('monkey_package', 'issue302monkey.py'))
self.assertEqual(monkey_lines[0].strip(), u'True')
monkey_lines[1] = monkey_lines[1].replace(u'\\', u'/') # windows path
self.assertTrue(monkey_lines[1].strip().endswith(u'monkey_package/issue302monkey.py'))
self.assertEqual(monkey_lines[2].strip(), u'True', monkey_lines)
# These three tests all sometimes fail on Py2 on CI, writing
# to stderr:
# Unhandled exception in thread started by \n
# sys.excepthook is missing\n
# lost sys.stderr\n
# Fatal Python error: PyImport_GetModuleDict: no module dictionary!\n'
# I haven't been able to produce this locally on macOS or Linux.
# The last line seems new with 2.7.17?
# Also, occasionally, they get '3' instead of '2' for the number of threads.
# That could have something to do with...? Most commonly that's PyPy, but
# sometimes CPython. Again, haven't reproduced.
# Not relevant since Py2 has been dropped.
def test_threadpool_in_patched_after_patch(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run(os.path.join('monkey_package', 'threadpool_monkey_patches.py'))
self.assertEqual(out, ['False', '2'])
def test_threadpool_in_patched_after_patch_module(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run('monkey_package.threadpool_monkey_patches', module=True)
self.assertEqual(out, ['False', '2'])
def test_threadpool_not_patched_after_patch_module(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run('monkey_package.threadpool_no_monkey', module=True)
self.assertEqual(out, ['False', 'False', '2'])
if __name__ == '__main__':
greentest.main()
| 4,368 | 33.132813 | 94 | py |
gevent | gevent-master/src/gevent/tests/test__util.py | # -*- coding: utf-8 -*-
# Copyright 2018 gevent contributes
# See LICENSE for details.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import unittest
import gevent.testing as greentest
import gevent
from gevent import util
from gevent import local
from greenlet import getcurrent
from gevent._compat import NativeStrIO
class MyLocal(local.local):
# pylint:disable=disallowed-name
def __init__(self, foo):
self.foo = foo
@greentest.skipOnPyPy("5.10.x is *very* slow formatting stacks")
class TestFormat(greentest.TestCase):
def test_basic(self):
lines = util.format_run_info()
value = '\n'.join(lines)
self.assertIn('Threads', value)
self.assertIn('Greenlets', value)
# because it's a raw greenlet, we have no data for it.
self.assertNotIn("Spawned at", value)
self.assertNotIn("Parent greenlet", value)
self.assertNotIn("Spawn Tree Locals", value)
def test_with_Greenlet(self):
rl = local.local()
rl.some_attr = 1
def root():
l = MyLocal(42)
assert l
# And an empty local.
l2 = local.local()
assert l2
gevent.getcurrent().spawn_tree_locals['a value'] = 42
io = NativeStrIO()
g = gevent.spawn(util.print_run_info, file=io)
g.join()
return io.getvalue()
g = gevent.spawn(root)
g.name = 'Printer'
g.join()
value = g.value
self.assertIn("Spawned at", value)
self.assertIn("Parent:", value)
self.assertIn("Spawn Tree Locals", value)
self.assertIn("Greenlet Locals:", value)
self.assertIn('MyLocal', value)
self.assertIn("Printer", value) # The name is printed
# Empty locals should not be printed
self.assertNotIn('{}', value)
@greentest.skipOnPyPy("See TestFormat")
class TestTree(greentest.TestCase):
def setUp(self):
super(TestTree, self).setUp()
self.track_greenlet_tree = gevent.config.track_greenlet_tree
gevent.config.track_greenlet_tree = True
self.maxDiff = None
def tearDown(self):
gevent.config.track_greenlet_tree = self.track_greenlet_tree
super(TestTree, self).tearDown()
def _build_tree(self):
# pylint:disable=too-many-locals
# Python 2.7 on Travis seems to show unexpected greenlet objects
# so perhaps we need a GC?
for _ in range(3):
gc.collect()
gevent.get_hub().resolver = None # Reset resolver, don't need to see it
gevent.get_hub().threadpool = None # ditto the pool
glets = []
l = MyLocal(42)
assert l
def s(f):
str(getcurrent())
g = gevent.spawn(f)
# Access this in spawning order for consistent sorting
# at print time in the test case.
getattr(g, 'minimal_ident')
str(g)
return g
def t1():
raise greentest.ExpectedException()
def t2():
l = MyLocal(16)
assert l
g = s(t1)
g.name = 'CustomName-' + str(g.minimal_ident)
return g
s1 = s(t2)
#self.assertEqual(0, s1.minimal_ident)
s1.join()
glets.append(s(t2))
def t3():
return s(t2)
s3 = s(t3)
if s3.spawn_tree_locals is not None:
# Can only do this if we're tracking spawn trees
s3.spawn_tree_locals['stl'] = 'STL'
s3.join()
s4 = s(util.GreenletTree.current_tree)
s4.join()
tree = s4.value
return tree, str(tree), tree.format(details={'running_stacks': False,
'spawning_stacks': False})
def _normalize_tree_format(self, value):
import re
hexobj = re.compile('0x[0123456789abcdef]+L?', re.I)
hub_repr = repr(gevent.get_hub())
value = value.replace(hub_repr, "<HUB>")
value = hexobj.sub('X', value)
value = value.replace('epoll', 'select')
value = value.replace('select', 'default')
value = value.replace('test__util', '__main__')
value = re.compile(' fileno=.').sub('', value)
value = value.replace('ref=-1', 'ref=0')
value = value.replace("type.current_tree", 'GreenletTree.current_tree')
value = value.replace('gevent.tests.__main__.MyLocal', '__main__.MyLocal')
# The repr in CPython greenlet 1.0a1 added extra info
value = value.replace('(otid=X) ', '')
value = value.replace(' dead>', '>')
value = value.replace(' current active started main>', '>')
return value
@greentest.ignores_leakcheck
def test_tree(self):
with gevent.get_hub().ignoring_expected_test_error():
tree, str_tree, tree_format = self._build_tree()
self.assertTrue(tree.root)
self.assertNotIn('Parent', str_tree) # Simple output
value = self._normalize_tree_format(tree_format)
expected = """\
<greenlet.greenlet object at X>
: Parent: None
: Greenlet Locals:
: Local <class '__main__.MyLocal'> at X
: {'foo': 42}
+--- <HUB>
: Parent: <greenlet.greenlet object at X>
+--- <Greenlet "Greenlet-1" at X: t2>; finished with value <Greenlet "CustomName-0" at 0x
: Parent: <HUB>
| +--- <Greenlet "CustomName-0" at X: t1>; finished with exception ExpectedException()
: Parent: <HUB>
+--- <Greenlet "Greenlet-2" at X: t2>; finished with value <Greenlet "CustomName-4" at 0x
: Parent: <HUB>
| +--- <Greenlet "CustomName-4" at X: t1>; finished with exception ExpectedException()
: Parent: <HUB>
+--- <Greenlet "Greenlet-3" at X: t3>; finished with value <Greenlet "Greenlet-5" at X
: Parent: <HUB>
: Spawn Tree Locals
: {'stl': 'STL'}
| +--- <Greenlet "Greenlet-5" at X: t2>; finished with value <Greenlet "CustomName-6" at 0x
: Parent: <HUB>
| +--- <Greenlet "CustomName-6" at X: t1>; finished with exception ExpectedException()
: Parent: <HUB>
+--- <Greenlet "Greenlet-7" at X: <bound method GreenletTree.current_tree of <class 'gevent.util.GreenletTree'>>>; finished with value <gevent.util.GreenletTree obje
Parent: <HUB>
""".strip()
self.assertEqual(expected, value)
@greentest.ignores_leakcheck
def test_tree_no_track(self):
gevent.config.track_greenlet_tree = False
with gevent.get_hub().ignoring_expected_test_error():
self._build_tree()
@greentest.ignores_leakcheck
def test_forest_fake_parent(self):
from greenlet import greenlet as RawGreenlet
def t4():
# Ignore this one, make the child the parent,
# and don't be a child of the hub.
c = RawGreenlet(util.GreenletTree.current_tree)
c.parent.greenlet_tree_is_ignored = True
c.greenlet_tree_is_root = True
return c.switch()
g = RawGreenlet(t4)
tree = g.switch()
tree_format = tree.format(details={'running_stacks': False,
'spawning_stacks': False})
value = self._normalize_tree_format(tree_format)
expected = """\
<greenlet.greenlet object at X>; not running
: Parent: <greenlet.greenlet object at X>
""".strip()
self.assertEqual(expected, value)
class TestAssertSwitches(unittest.TestCase):
def test_time_sleep(self):
# A real blocking function
from time import sleep
# No time given, we detect the failure to switch immediately
with self.assertRaises(util._FailedToSwitch) as exc:
with util.assert_switches():
sleep(0.001)
message = str(exc.exception)
self.assertIn('To any greenlet in', message)
# Supply a max blocking allowed and exceed it
with self.assertRaises(util._FailedToSwitch):
with util.assert_switches(0.001):
sleep(0.1)
# Supply a max blocking allowed, and exit before that happens,
# but don't switch to the hub as requested
with self.assertRaises(util._FailedToSwitch) as exc:
with util.assert_switches(0.001, hub_only=True):
sleep(0)
message = str(exc.exception)
self.assertIn('To the hub in', message)
self.assertIn('(max allowed 0.0010 seconds)', message)
# Supply a max blocking allowed, and exit before that happens,
# and allow any switch (or no switch).
# Note that we need to use a relatively long duration;
# sleep(0) on Windows can actually take a substantial amount of time
# sometimes (more than 0.001s)
with util.assert_switches(1.0, hub_only=False):
sleep(0)
def test_no_switches_no_function(self):
# No blocking time given, no switch performed: exception
with self.assertRaises(util._FailedToSwitch):
with util.assert_switches():
pass
# blocking time given, for all greenlets, no switch performed: nothing
with util.assert_switches(max_blocking_time=1, hub_only=False):
pass
def test_exception_not_supressed(self):
with self.assertRaises(NameError):
with util.assert_switches():
raise NameError()
def test_nested(self):
from greenlet import gettrace
with util.assert_switches() as outer:
self.assertEqual(gettrace(), outer.tracer)
self.assertIsNotNone(outer.tracer.active_greenlet)
with util.assert_switches() as inner:
self.assertEqual(gettrace(), inner.tracer)
self.assertEqual(inner.tracer.previous_trace_function, outer.tracer)
inner.tracer('switch', (self, self))
self.assertIs(self, inner.tracer.active_greenlet)
self.assertIs(self, outer.tracer.active_greenlet)
self.assertEqual(gettrace(), outer.tracer)
if __name__ == '__main__':
greentest.main()
| 10,320 | 32.839344 | 166 | py |
gevent | gevent-master/src/gevent/tests/test__core_async.py | from __future__ import print_function
import gevent
import gevent.core
import time
try:
import thread
except ImportError:
import _thread as thread
from gevent import testing as greentest
class Test(greentest.TestCase):
def test(self):
hub = gevent.get_hub()
watcher = hub.loop.async_()
# BWC for <3.7: This should still be an attribute
assert hasattr(hub.loop, 'async')
gevent.spawn_later(0.1, thread.start_new_thread, watcher.send, ())
start = time.time()
with gevent.Timeout(1.0): # Large timeout for appveyor
hub.wait(watcher)
print('Watcher %r reacted after %.6f seconds' % (watcher, time.time() - start - 0.1))
if __name__ == '__main__':
greentest.main()
| 761 | 22.8125 | 93 | py |
gevent | gevent-master/src/gevent/tests/test__monkey.py | from gevent import monkey
monkey.patch_all()
import sys
import unittest
from gevent.testing.testcase import SubscriberCleanupMixin
class TestMonkey(SubscriberCleanupMixin, unittest.TestCase):
maxDiff = None
def setUp(self):
super(TestMonkey, self).setUp()
self.all_events = []
self.addSubscriber(self.all_events.append)
self.orig_saved = orig_saved = {}
for k, v in monkey.saved.items():
orig_saved[k] = v.copy()
def tearDown(self):
monkey.saved = self.orig_saved
del self.orig_saved
del self.all_events
super(TestMonkey, self).tearDown()
def test_time(self):
import time
from gevent import time as gtime
self.assertIs(time.sleep, gtime.sleep)
def test_thread(self):
try:
import thread
except ImportError:
import _thread as thread
import threading
from gevent import thread as gthread
self.assertIs(thread.start_new_thread, gthread.start_new_thread)
self.assertIs(threading._start_new_thread, gthread.start_new_thread)
# Event patched by default
self.assertTrue(monkey.is_object_patched('threading', 'Event'))
if sys.version_info[0] == 2:
from gevent import threading as gthreading
from gevent.event import Event as GEvent
self.assertIs(threading._sleep, gthreading._sleep)
self.assertTrue(monkey.is_object_patched('threading', '_Event'))
self.assertIs(threading._Event, GEvent)
def test_socket(self):
import socket
from gevent import socket as gevent_socket
self.assertIs(socket.create_connection, gevent_socket.create_connection)
def test_os(self):
import os
import types
from gevent import os as gos
for name in ('fork', 'forkpty'):
if hasattr(os, name):
attr = getattr(os, name)
self.assertNotIn('built-in', repr(attr))
self.assertNotIsInstance(attr, types.BuiltinFunctionType)
self.assertIsInstance(attr, types.FunctionType)
self.assertIs(attr, getattr(gos, name))
def test_saved(self):
self.assertTrue(monkey.saved)
for modname, objects in monkey.saved.items():
self.assertTrue(monkey.is_module_patched(modname))
for objname in objects:
self.assertTrue(monkey.is_object_patched(modname, objname))
def test_patch_subprocess_twice(self):
Popen = monkey.get_original('subprocess', 'Popen')
self.assertNotIn('gevent', repr(Popen))
self.assertIs(Popen, monkey.get_original('subprocess', 'Popen'))
monkey.patch_subprocess()
self.assertIs(Popen, monkey.get_original('subprocess', 'Popen'))
def test_patch_twice_warnings_events(self):
import warnings
all_events = self.all_events
with warnings.catch_warnings(record=True) as issued_warnings:
# Patch again, triggering just one warning, for
# a different set of arguments. Because we're going to False instead of
# turning something on, nothing is actually done, no events are issued.
monkey.patch_all(os=False, extra_kwarg=42)
self.assertEqual(len(issued_warnings), 1)
self.assertIn('more than once', str(issued_warnings[0].message))
self.assertEqual(all_events, [])
# Same warning again, but still nothing is done.
del issued_warnings[:]
monkey.patch_all(os=False)
self.assertEqual(len(issued_warnings), 1)
self.assertIn('more than once', str(issued_warnings[0].message))
self.assertEqual(all_events, [])
self.orig_saved['_gevent_saved_patch_all_module_settings'] = monkey.saved[
'_gevent_saved_patch_all_module_settings']
# Make sure that re-patching did not change the monkey.saved
# attribute, overwriting the original functions.
if 'logging' in monkey.saved and 'logging' not in self.orig_saved:
# some part of the warning or unittest machinery imports logging
self.orig_saved['logging'] = monkey.saved['logging']
self.assertEqual(self.orig_saved, monkey.saved)
# Make sure some problematic attributes stayed correct.
# NOTE: This was only a problem if threading was not previously imported.
for k, v in monkey.saved['threading'].items():
self.assertNotIn('gevent', str(v), (k, v))
def test_patch_events(self):
from gevent import events
from gevent.testing import verify
all_events = self.all_events
def veto(event):
if isinstance(event, events.GeventWillPatchModuleEvent) and event.module_name == 'ssl':
raise events.DoNotPatch
self.addSubscriber(veto)
monkey.saved = {} # Reset
monkey.patch_all(thread=False, select=False, extra_kwarg=42) # Go again
self.assertIsInstance(all_events[0], events.GeventWillPatchAllEvent)
self.assertEqual({'extra_kwarg': 42}, all_events[0].patch_all_kwargs)
verify.verifyObject(events.IGeventWillPatchAllEvent, all_events[0])
self.assertIsInstance(all_events[1], events.GeventWillPatchModuleEvent)
verify.verifyObject(events.IGeventWillPatchModuleEvent, all_events[1])
self.assertIsInstance(all_events[2], events.GeventDidPatchModuleEvent)
verify.verifyObject(events.IGeventWillPatchModuleEvent, all_events[1])
self.assertIsInstance(all_events[-2], events.GeventDidPatchBuiltinModulesEvent)
verify.verifyObject(events.IGeventDidPatchBuiltinModulesEvent, all_events[-2])
self.assertIsInstance(all_events[-1], events.GeventDidPatchAllEvent)
verify.verifyObject(events.IGeventDidPatchAllEvent, all_events[-1])
for e in all_events:
self.assertFalse(isinstance(e, events.GeventDidPatchModuleEvent)
and e.module_name == 'ssl')
def test_patch_queue(self):
    """After patch_all(), queue.SimpleQueue must be the pure-Python class.

    The C-accelerated ``_queue.SimpleQueue`` does not cooperate with
    gevent, so patching replaces it with ``_PySimpleQueue``.
    """
    queue = None
    try:
        import queue
    except ImportError:
        # Python 2 called this Queue. Note that having
        # python-future installed gives us a queue module on
        # Python 2 as well.
        pass
    if not hasattr(queue, 'SimpleQueue'):
        raise unittest.SkipTest("Needs SimpleQueue")
    # pylint:disable=no-member
    self.assertIs(queue.SimpleQueue, queue._PySimpleQueue)
if __name__ == '__main__':
unittest.main()
| 6,641 | 38.301775 | 99 | py |
gevent | gevent-master/src/gevent/tests/test__socketpair.py | from gevent import monkey; monkey.patch_all()
import socket
import unittest
class TestSocketpair(unittest.TestCase):
    """Exercise socket.socketpair() under the (monkey-patched) socket module."""

    def test_makefile(self):
        # Data sent on one end must be fully readable through a file
        # object wrapping the other end once the sender closes.
        msg = b'hello world'
        left, right = socket.socketpair()
        left.sendall(msg)
        left.close()
        with right.makefile('rb') as reader:
            received = reader.read()
        self.assertEqual(msg, received)
        right.close()

    @unittest.skipUnless(hasattr(socket, 'fromfd'),
                         'Needs socket.fromfd')
    def test_fromfd(self):
        # Sockets rebuilt from raw file descriptors must still transfer data.
        msg = b'hello world'
        left, right = socket.socketpair()
        dup_left = socket.fromfd(left.fileno(), left.family, socket.SOCK_STREAM)
        left.close()
        dup_right = socket.fromfd(right.fileno(), right.family, socket.SOCK_STREAM)
        right.close()
        dup_left.sendall(msg)
        dup_left.close()
        with dup_right.makefile('rb') as reader:
            received = reader.read()
        self.assertEqual(msg, received)
        dup_right.close()
if __name__ == '__main__':
unittest.main()
| 951 | 24.052632 | 68 | py |
gevent | gevent-master/src/gevent/tests/test__threading_no_monkey.py | # -*- coding: utf-8 -*-
"""
Tests for ``gevent.threading`` that DO NOT monkey patch. This
allows easy comparison with the standard module.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from gevent import threading as gthreading
from gevent import testing
class TestDummyThread(testing.TestCase):
    """Compare gevent's _DummyThread with the stdlib's, without monkey-patching."""

    def test_name(self):
        # The generated names must match the stdlib convention.
        # https://github.com/gevent/gevent/issues/1659
        dummies = (threading._DummyThread(), gthreading._DummyThread())
        # Distinct classes, because no patching took place...
        self.assertIsNot(type(dummies[0]), type(dummies[1]))
        # ...but both follow the 'Dummy-N' naming scheme.
        for dummy in dummies:
            self.assertStartsWith(dummy.name, 'Dummy-')
if __name__ == '__main__':
testing.main()
| 806 | 25.032258 | 61 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_selectors.py |
try:
# Do this before the patch to be sure we clean
# things up properly if the order is wrong.
import selectors
except ImportError:
import selectors2 as selectors
from gevent.monkey import patch_all
import gevent.testing as greentest
patch_all()
from gevent.selectors import DefaultSelector
from gevent.selectors import GeventSelector
from gevent.tests.test__selectors import SelectorTestMixin
class TestSelectors(SelectorTestMixin, greentest.TestCase):
    """Selector tests run with patch_all() already applied at import time."""

    @greentest.skipOnWindows(
        "SelectSelector._select is a normal function on Windows"
    )
    def test_selectors_select_is_patched(self):
        # The patched _select carries a '_gevent_monkey' marker attribute.
        # https://github.com/gevent/gevent/issues/835
        _select = selectors.SelectSelector._select
        self.assertIn('_gevent_monkey', dir(_select))

    def test_default(self):
        # Depending on the order of imports, gevent.select.poll may be defined but
        # selectors.PollSelector may not be defined.
        # https://github.com/gevent/gevent/issues/1466
        self.assertIs(DefaultSelector, GeventSelector)
        self.assertIs(selectors.DefaultSelector, GeventSelector)

    def test_import_selectors(self):
        # selectors can always be imported once monkey-patched. On Python 2,
        # this is an alias for gevent.selectors.
        __import__('selectors')

    # The rest of the class body runs at class-definition time and
    # generates one 'test_selector_<Name>' method per implementation.

    def _make_test(name, kind): # pylint:disable=no-self-argument
        # Build a test method: a skip when the implementation is absent on
        # this platform, otherwise a run of _check_selector (from
        # SelectorTestMixin) against a fresh instance.
        if kind is None:
            def m(self):
                self.skipTest(name + ' is not defined')
        else:
            def m(self, k=kind):
                with k() as sel:
                    self._check_selector(sel)
        m.__name__ = 'test_selector_' + name
        return m

    SelKind = SelKindName = None
    for SelKindName in (
            # The subclass hierarchy changes between versions, and is
            # complex (e.g, BaseSelector <- BaseSelectorImpl <-
            # _PollLikSelector <- PollSelector) so its easier to check against
            # names.
            'KqueueSelector',
            'EpollSelector',
            'DevpollSelector',
            'PollSelector',
            'SelectSelector',
            GeventSelector,
    ):
        if not isinstance(SelKindName, type):
            # A name: resolve it against the selectors module (may be absent).
            SelKind = getattr(selectors, SelKindName, None)
        else:
            # An actual class (GeventSelector): derive its name.
            SelKind = SelKindName
            SelKindName = SelKind.__name__
        m = _make_test(SelKindName, SelKind) # pylint:disable=too-many-function-args
        # Install the generated method into the class namespace.
        locals()[m.__name__] = m

    # Remove the generation machinery so it doesn't become class attributes.
    del SelKind
    del SelKindName
    del _make_test
if __name__ == '__main__':
greentest.main()
| 2,580 | 31.2625 | 84 | py |
gevent | gevent-master/src/gevent/tests/test__events.py | # -*- coding: utf-8 -*-
# Copyright 2018 gevent. See LICENSE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from gevent import events
try:
from zope.interface import verify
except ImportError:
verify = None
try:
from zope import event
except ImportError:
event = None
@unittest.skipIf(verify is None, "Needs zope.interface")
class TestImplements(unittest.TestCase):
    """The concrete event classes must implement their declared interfaces."""

    def test_event_loop_blocked(self):
        verify.verifyClass(events.IEventLoopBlocked, events.EventLoopBlocked)

    def test_mem_threshold(self):
        iface = events.IMemoryUsageThresholdExceeded
        verify.verifyClass(iface, events.MemoryUsageThresholdExceeded)
        verify.verifyObject(iface, events.MemoryUsageThresholdExceeded(0, 0, 0))

    def test_mem_decreased(self):
        iface = events.IMemoryUsageUnderThreshold
        verify.verifyClass(iface, events.MemoryUsageUnderThreshold)
        verify.verifyObject(iface, events.MemoryUsageUnderThreshold(0, 0, 0, 0))
@unittest.skipIf(event is None, "Needs zope.event")
class TestEvents(unittest.TestCase):
    """gevent.events must re-export zope.event's subscription machinery."""

    def test_is_zope(self):
        for gevent_obj, zope_obj in ((events.subscribers, event.subscribers),
                                     (events.notify, event.notify)):
            self.assertIs(gevent_obj, zope_obj)
if __name__ == '__main__':
unittest.main()
| 1,465 | 27.745098 | 77 | py |
gevent | gevent-master/src/gevent/tests/test__destroy.py | from __future__ import absolute_import, print_function
import gevent
import unittest
class TestDestroyHub(unittest.TestCase):
    """Destroying and recreating the hub, with and without its default loop."""

    def test_destroy_hub(self):
        # Loop of initial Hub is default loop.
        hub = gevent.get_hub()
        self.assertTrue(hub.loop.default)

        # Save `gevent.core.loop` object for later comparison.
        initloop = hub.loop

        # Increase test complexity via threadpool creation.
        # Implicitly creates fork watcher connected to the current event loop.
        tp = hub.threadpool
        self.assertIsNotNone(tp)

        # Destroy hub. Does not destroy libev default loop if not explicitly told to.
        hub.destroy()

        # Create new hub. Must re-use existing libev default loop.
        hub = gevent.get_hub()
        self.assertTrue(hub.loop.default)

        # Ensure that loop object is identical to the initial one.
        self.assertIs(hub.loop, initloop)

        # Destroy hub including default loop.
        hub.destroy(destroy_loop=True)

        # Create new hub and explicitly request creation of a new default loop.
        # (using default=True, but that's no longer possible.)
        hub = gevent.get_hub()
        self.assertTrue(hub.loop.default)

        # `gevent.core.loop` objects as well as libev loop pointers must differ.
        self.assertIsNot(hub.loop, initloop)
        self.assertIsNot(hub.loop.ptr, initloop.ptr)
        self.assertNotEqual(hub.loop.ptr, initloop.ptr)

        # Destroy hub including default loop. The default loop regenerates.
        hub.destroy(destroy_loop=True)
        hub = gevent.get_hub()
        self.assertTrue(hub.loop.default)
        hub.destroy()
if __name__ == '__main__':
unittest.main() # pragma: testrunner-no-combine
| 1,765 | 32.320755 | 85 | py |
gevent | gevent-master/src/gevent/tests/test__environ.py | import os
import sys
import gevent
import gevent.core
import subprocess
# Self-re-exec pattern: the parent run (no argv) sets GEVENT_BACKEND and
# re-launches this same file as a child; the child (argv present) then
# verifies the environment variable actually selected the backend.
if not sys.argv[1:]:
    os.environ['GEVENT_BACKEND'] = 'select'
    # (not in Py2) pylint:disable=consider-using-with
    popen = subprocess.Popen([sys.executable, __file__, '1'])
    assert popen.wait() == 0, popen.poll()
else: # pragma: no cover
    # Child process: the hub must have honored GEVENT_BACKEND.
    hub = gevent.get_hub()
    if 'select' in gevent.core.supported_backends():
        assert hub.loop.backend == 'select', hub.loop.backend
    else:
        # libuv isn't configurable
        assert hub.loop.backend == 'default', hub.loop.backend
| 572 | 29.157895 | 62 | py |
gevent | gevent-master/src/gevent/tests/test__import_wait.py | # https://github.com/gevent/gevent/issues/652 and 651
from gevent import monkey
monkey.patch_all()
import _import_wait # pylint:disable=import-error
assert _import_wait.x
| 173 | 20.75 | 53 | py |
gevent | gevent-master/src/gevent/tests/test__core_loop_run.py | from __future__ import print_function
import sys
from gevent import core
from gevent import signal_handler as signal
# A non-default loop with only an unref'd (via sys.stderr.write handler)
# signal watcher must not keep run() alive.
loop = core.loop(default=False)
signal = signal(2, sys.stderr.write, 'INTERRUPT!')
print('must exit immediately...')
loop.run() # must exit immediately
print('...and once more...')
loop.run() # repeating does not fail
print('..done')

# With an active timer, run() blocks until the timer fires.
print('must exit after 0.5 seconds.')
timer = loop.timer(0.5)
timer.start(lambda: None)
loop.run()
timer.close()
# Explicitly tear the loop down to release its resources.
loop.destroy()
del loop
| 494 | 20.521739 | 50 | py |
gevent | gevent-master/src/gevent/tests/test__issue600.py | # Make sure that libev child watchers, implicitly installed through the use
# of subprocess, do not cause waitpid() to fail to poll for processes.
# NOTE: This was only reproducible under python 2.
from __future__ import print_function
import gevent
from gevent import monkey
monkey.patch_all()
import sys
from multiprocessing import Process
from subprocess import Popen, PIPE
from gevent import testing as greentest
def f(sleep_sec):
    # Child-process target: cooperatively sleep for `sleep_sec` seconds,
    # then exit cleanly so the parent can reap it.
    gevent.sleep(sleep_sec)
class TestIssue600(greentest.TestCase):
    """Regression test: libev child watchers (installed implicitly by
    subprocess use) must not break waitpid()-style polling for processes.
    NOTE (from module header): only reproducible under Python 2."""

    __timeout__ = greentest.LARGE_TIMEOUT

    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_invoke(self):
        # Run a subprocess through Popen to make sure
        # libev is handling SIGCHLD. This could *probably* be simplified to use
        # just hub.loop.install_sigchld
        # (no __enter__/__exit__ on Py2) pylint:disable=consider-using-with
        p = Popen([sys.executable, '-V'], stdout=PIPE, stderr=PIPE)
        gevent.sleep(0)
        p.communicate()
        gevent.sleep(0)

    def test_process(self):
        # Launch
        p = Process(target=f, args=(0.5,))
        p.start()
        with gevent.Timeout(3):
            # Poll for up to 10 seconds. If the bug exists,
            # this will timeout because our subprocess should
            # be long gone by now
            p.join(10)
if __name__ == '__main__':
greentest.main()
| 1,386 | 27.306122 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__threadpool.py | from __future__ import print_function
from time import time, sleep
import contextlib
import random
import weakref
import gc
import gevent.threadpool
from gevent.threadpool import ThreadPool
import gevent
from gevent.exceptions import InvalidThreadUseError
import gevent.testing as greentest
from gevent.testing import ExpectedException
from gevent.testing import PYPY
# pylint:disable=too-many-ancestors
@contextlib.contextmanager
def disabled_gc():
    """Context manager: disable the cyclic GC for the duration of the block.

    On exit, the collector is re-enabled only if it was enabled on entry.
    """
    # Capture the restore action before touching collector state.
    restore = gc.enable if gc.isenabled() else (lambda: None)
    gc.disable()
    try:
        yield
    finally:
        restore()
class TestCase(greentest.TestCase):
    """Base class for the threadpool tests: tracks every pool created so
    cleanup() can kill them all, and optionally pre-spawns worker threads
    to reduce leak-check false positives."""

    # These generally need more time
    __timeout__ = greentest.LARGE_TIMEOUT
    pool = None            # the most recently created pool
    _all_pools = ()        # every pool created by _makeOne, for cleanup

    # The pool class to instantiate; subclasses may override (e.g. TestTPE).
    ClassUnderTest = ThreadPool

    def _FUT(self):
        # "Factory Under Test": indirection so subclasses can substitute
        # a different pool class via ClassUnderTest.
        return self.ClassUnderTest

    def _makeOne(self, maxsize, create_all_worker_threads=greentest.RUN_LEAKCHECKS):
        self.pool = pool = self._FUT()(maxsize)
        self._all_pools += (pool,)
        if create_all_worker_threads:
            # Max size to help eliminate false positives
            self.pool.size = maxsize
        return pool

    def cleanup(self):
        self.pool = None
        all_pools, self._all_pools = self._all_pools, ()
        for pool in all_pools:
            # ThreadPool has kill(); executor-style pools have shutdown().
            kill = getattr(pool, 'kill', None) or getattr(pool, 'shutdown')
            kill()
            del kill

        if greentest.RUN_LEAKCHECKS:
            # Each worker thread created a greenlet object and switched to it.
            # It's a custom subclass, but even if it's not, it appears that
            # the root greenlet for the new thread sticks around until there's a
            # gc. Simply calling 'getcurrent()' is enough to "leak" a greenlet.greenlet
            # and a weakref.
            for _ in range(3):
                gc.collect()
class PoolBasicTests(TestCase):
    """Basic spawn/apply semantics of a ThreadPool."""

    def test_execute_async(self):
        pool = self._makeOne(2)
        r = []
        first = pool.spawn(r.append, 1)
        first.get()
        self.assertEqual(r, [1])
        gevent.sleep(0)

        # apply_async only queues work; nothing runs until we yield.
        pool.apply_async(r.append, (2, ))
        self.assertEqual(r, [1])

        pool.apply_async(r.append, (3, ))
        self.assertEqual(r, [1])

        pool.apply_async(r.append, (4, ))
        self.assertEqual(r, [1])
        # Yielding lets the queued tasks run (order is not guaranteed).
        gevent.sleep(0.01)
        self.assertEqualFlakyRaceCondition(sorted(r), [1, 2, 3, 4])

    def test_apply(self):
        # apply() is synchronous: it returns the task's result directly.
        pool = self._makeOne(1)
        result = pool.apply(lambda a: ('foo', a), (1, ))
        self.assertEqual(result, ('foo', 1))

    def test_apply_raises(self):
        # An exception in the task propagates out of apply().
        pool = self._makeOne(1)

        def raiser():
            raise ExpectedException()

        with self.assertRaises(ExpectedException):
            pool.apply(raiser)
    # Don't let the metaclass automatically force any error
    # that reaches the hub from a spawned greenlet to become
    # fatal; that defeats the point of the test.
    test_apply_raises.error_fatal = False

    def test_init_valueerror(self):
        # A negative maxsize is rejected at construction time.
        self.switch_expected = False
        with self.assertRaises(ValueError):
            self._makeOne(-1)
#
# tests from standard library test/test_multiprocessing.py
class TimingWrapper(object):
    """Callable proxy that times each invocation of the wrapped function.

    After every call (even one that raises), ``self.elapsed`` holds the
    wall-clock duration in seconds; the wrapped callable is ``self.func``.
    """

    def __init__(self, the_func):
        self.func = the_func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        started = time()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the duration whether the call returned or raised.
            self.elapsed = time() - started
def sqr(x, wait=0.0):
    """Return ``x * x`` after sleeping ``wait`` seconds (default: no delay)."""
    sleep(wait)
    return x * x


def sqr_random_sleep(x):
    """Return ``x * x`` after sleeping a random interval below 0.1 seconds."""
    delay = random.random() * 0.1
    sleep(delay)
    return x * x
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.082, 0.035, 0.14
class _AbstractPoolTest(TestCase):
    """Shared map() test reused by both ThreadPool and executor subclasses."""

    size = 1            # pool size created in setUp

    # Subclasses set True when pool.map returns a generator instead of a list.
    MAP_IS_GEN = False

    def setUp(self):
        greentest.TestCase.setUp(self)
        self._makeOne(self.size)

    @greentest.ignores_leakcheck
    def test_map(self):
        pmap = self.pool.map
        if self.MAP_IS_GEN:
            # Normalize generator-returning map to a list for comparison.
            pmap = lambda f, i: list(self.pool.map(f, i))
        self.assertEqual(pmap(sqr, range(10)), list(map(sqr, range(10))))
        self.assertEqual(pmap(sqr, range(100)), list(map(sqr, range(100))))

        self.pool.kill()
        del self.pool
        del pmap
SMALL_RANGE = 10
LARGE_RANGE = 1000
if (greentest.PYPY and (greentest.WIN or greentest.RUN_COVERAGE)) or greentest.RUN_LEAKCHECKS:
# PyPy 5.10 is *really* slow at spawning or switching between
# threads (especially on Windows or when coverage is enabled) Tests that happen
# instantaneously on other platforms time out due to the overhead.
# Leakchecks also take much longer due to all the calls into the GC,
# most especially on Python 3
LARGE_RANGE = 50
class TestPool(_AbstractPoolTest):
    """apply/async/imap behavior of ThreadPool (size 1; subclasses vary size).
    Several tests are ported from the stdlib's test_multiprocessing."""

    def test_greenlet_class(self):
        # Work runs on a _WorkerGreenlet whose repr and the global run-info
        # dump both identify it as a thread pool worker.
        from greenlet import getcurrent
        from gevent.threadpool import _WorkerGreenlet
        worker_greenlet = self.pool.apply(getcurrent)

        self.assertIsInstance(worker_greenlet, _WorkerGreenlet)
        r = repr(worker_greenlet)
        self.assertIn('ThreadPoolWorker', r)
        self.assertIn('thread_ident', r)
        self.assertIn('hub=', r)

        from gevent.util import format_run_info

        info = '\n'.join(format_run_info())
        self.assertIn("<ThreadPoolWorker", info)

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3))

    def test_async(self):
        # apply_async returns a result object; get() blocks about TIMEOUT1.
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)

    def test_async_callback(self):
        result = []
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,), callback=result.append)
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)
        gevent.sleep(0)  # lets the callback run
        self.assertEqual(result, [49])

    def test_async_timeout(self):
        # get(timeout=...) raises gevent.Timeout if the task is slower.
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
        get = TimingWrapper(res.get)
        self.assertRaises(gevent.Timeout, get, timeout=TIMEOUT2)
        self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT2, 1)
        self.pool.join()

    def test_imap_list_small(self):
        it = self.pool.imap(sqr, range(SMALL_RANGE))
        self.assertEqual(list(it), list(map(sqr, range(SMALL_RANGE))))

    def test_imap_it_small(self):
        # imap preserves input order when consumed item by item.
        it = self.pool.imap(sqr, range(SMALL_RANGE))
        for i in range(SMALL_RANGE):
            self.assertEqual(next(it), i * i)
        self.assertRaises(StopIteration, next, it)

    def test_imap_it_large(self):
        it = self.pool.imap(sqr, range(LARGE_RANGE))
        for i in range(LARGE_RANGE):
            self.assertEqual(next(it), i * i)
        self.assertRaises(StopIteration, next, it)

    def test_imap_gc(self):
        # Interleaving gc.collect() with consumption must not break imap.
        it = self.pool.imap(sqr, range(SMALL_RANGE))
        for i in range(SMALL_RANGE):
            self.assertEqual(next(it), i * i)
            gc.collect()
        self.assertRaises(StopIteration, next, it)

    def test_imap_unordered_gc(self):
        it = self.pool.imap_unordered(sqr, range(SMALL_RANGE))
        result = []
        for _ in range(SMALL_RANGE):
            result.append(next(it))
            gc.collect()
        with self.assertRaises(StopIteration):
            next(it)
        self.assertEqual(sorted(result), [x * x for x in range(SMALL_RANGE)])

    def test_imap_random(self):
        # Order is preserved even when task durations are randomized.
        it = self.pool.imap(sqr_random_sleep, range(SMALL_RANGE))
        self.assertEqual(list(it), list(map(sqr, range(SMALL_RANGE))))

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
        self.assertEqual(sorted(it), list(map(sqr, range(LARGE_RANGE))))

        it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
        self.assertEqual(sorted(it), list(map(sqr, range(LARGE_RANGE))))

    def test_imap_unordered_random(self):
        it = self.pool.imap_unordered(sqr_random_sleep, range(SMALL_RANGE))
        self.assertEqual(sorted(it), list(map(sqr, range(SMALL_RANGE))))

    def test_terminate(self):
        # kill() must complete quickly even with sleeping tasks queued.
        size = self.size or 10
        result = self.pool.map_async(sleep, [0.1] * (size * 2))
        gevent.sleep(0.1)
        try:
            with self.runs_in_given_time(0.1 * self.size + 0.5, min_time=0):
                self.pool.kill()
        finally:
            result.join()

    def sleep(self, x):
        # Helper task: sleep x/10 seconds and echo the input as a string.
        sleep(float(x) / 10.0)
        return str(x)

    def test_imap_unordered_sleep(self):
        # testing that imap_unordered returns items in competion order
        result = list(self.pool.imap_unordered(self.sleep, [10, 1, 2]))
        if self.pool.size == 1:
            # Only one worker: tasks complete in submission order.
            expected = ['10', '1', '2']
        else:
            expected = ['1', '2', '10']
        self.assertEqual(result, expected)
class TestPool2(TestPool):
    # Same suite with two workers, plus a re-entrancy check.
    size = 2

    @greentest.ignores_leakcheck # Asking for the hub in the new thread shows up as a "leak"
    def test_recursive_apply(self):
        # A task may itself apply() another task to the same pool.
        p = self.pool

        def a():
            return p.apply(b)

        def b():
            # make sure we can do both types of callbacks
            # (loop iteration and end-of-loop) in the recursive
            # call
            gevent.sleep()
            gevent.sleep(0.001)
            return "B"

        result = p.apply(a)
        self.assertEqual(result, "B")
# Larger pool sizes; leak checks are skipped as the extra worker threads'
# greenlets/weakrefs would register as false positives.
@greentest.ignores_leakcheck
class TestPool3(TestPool):
    size = 3

@greentest.ignores_leakcheck
class TestPool10(TestPool):
    size = 10
# class TestJoinSleep(greentest.GenericGetTestCase):
#
# def wait(self, timeout):
# pool = ThreadPool(1)
# pool.spawn(gevent.sleep, 10)
# pool.join(timeout=timeout)
#
#
# class TestJoinSleep_raise_error(greentest.GenericWaitTestCase):
#
# def wait(self, timeout):
# pool = ThreadPool(1)
# g = pool.spawn(gevent.sleep, 10)
# pool.join(timeout=timeout, raise_error=True)
class TestJoinEmpty(TestCase):
    """join() on a pool with no outstanding work returns without switching."""

    switch_expected = False

    @greentest.skipIf(greentest.PYPY and greentest.LIBUV and greentest.RUNNING_ON_TRAVIS,
                      "This sometimes appears to crash in PyPy2 5.9.0, "
                      "but never crashes on macOS or local Ubunto with same PyPy version")
    # Running this test standalone doesn't crash PyPy, only when it's run
    # as part of this whole file. Removing it does solve the crash though.
    def test(self):
        pool = self._makeOne(1)
        pool.join()
class TestSpawn(TestCase):
    """spawn() queueing semantics and cross-thread-use protection."""

    switch_expected = True

    @greentest.ignores_leakcheck
    def test_basics(self):
        pool = self._makeOne(1)
        self.assertEqual(len(pool), 0)
        log = []
        # Task: sleep, then record the item (list expr runs both effects).
        sleep_n_log = lambda item, seconds: [sleep(seconds), log.append(item)]
        pool.spawn(sleep_n_log, 'a', 0.1)
        self.assertEqual(len(pool), 1)
        pool.spawn(sleep_n_log, 'b', 0.1)
        # even though the pool is of size 1, it can contain 2 items
        # since we allow +1 for better throughput
        self.assertEqual(len(pool), 2)
        gevent.sleep(0.15)
        self.assertEqual(log, ['a'])
        self.assertEqual(len(pool), 1)
        gevent.sleep(0.15)
        self.assertEqual(log, ['a', 'b'])
        self.assertEqual(len(pool), 0)

    @greentest.ignores_leakcheck
    def test_cannot_spawn_from_other_thread(self):
        # Only the thread that owns a threadpool can spawn to it;
        # this is because the threadpool uses the creating thread's hub,
        # which is not threadsafe.
        pool1 = self._makeOne(1)
        pool2 = self._makeOne(2)

        def func():
            # Runs on pool1's worker thread; touching pool2 is forbidden.
            pool2.spawn(lambda: "Hi")

        res = pool1.spawn(func)
        with self.assertRaises(InvalidThreadUseError):
            res.get()
def error_iter():
    """Yield 1 and 2, then raise ExpectedException.

    Used to verify that pool.map/imap_unordered propagate exceptions
    raised while iterating their input.
    """
    for value in (1, 2):
        yield value
    raise greentest.ExpectedException
class TestErrorInIterator(TestCase):
    """An exception raised by the *input* iterable must surface to the caller."""

    # The error reaches the hub from a spawned greenlet; don't make it fatal.
    error_fatal = False

    def test(self):
        self.pool = self._makeOne(3)
        self.assertRaises(greentest.ExpectedException, self.pool.map, lambda x: None, error_iter())
        gevent.sleep(0.001)

    def test_unordered(self):
        self.pool = self._makeOne(3)

        def unordered():
            return list(self.pool.imap_unordered(lambda x: None, error_iter()))

        self.assertRaises(greentest.ExpectedException, unordered)
        gevent.sleep(0.001)
class TestMaxsize(TestCase):
    """Changing maxsize at runtime starts or retires worker threads."""

    def test_inc(self):
        # With maxsize 0, queued tasks cannot run until maxsize is raised.
        self.pool = self._makeOne(0)
        done = []
        # Try to be careful not to tick over the libuv timer.
        # See libuv/loop.py:_start_callback_timer
        gevent.spawn(self.pool.spawn, done.append, 1)
        gevent.spawn_later(0.01, self.pool.spawn, done.append, 2)
        gevent.sleep(0.02)
        self.assertEqual(done, [])
        self.pool.maxsize = 1
        gevent.sleep(0.02)
        self.assertEqualFlakyRaceCondition(done, [1, 2])

    @greentest.ignores_leakcheck
    def test_setzero(self):
        # Dropping maxsize to 0 eventually retires all worker threads.
        pool = self.pool = self._makeOne(3)
        pool.spawn(sleep, 0.1)
        pool.spawn(sleep, 0.2)
        pool.spawn(sleep, 0.3)
        gevent.sleep(0.2)
        self.assertGreaterEqual(pool.size, 2)
        pool.maxsize = 0
        gevent.sleep(0.2)
        self.assertEqualFlakyRaceCondition(pool.size, 0)
class TestSize(TestCase):
    """The ``size`` property directly controls the worker thread count,
    bounded by 0 and the pool's maxsize."""

    @greentest.reraises_flaky_race_condition()
    def test(self):
        pool = self.pool = self._makeOne(2, create_all_worker_threads=False)
        self.assertEqual(pool.size, 0)
        pool.size = 1
        self.assertEqual(pool.size, 1)
        pool.size = 2
        self.assertEqual(pool.size, 2)
        pool.size = 1
        self.assertEqual(pool.size, 1)

        # Out-of-range values (negative, or above maxsize=2) are rejected.
        with self.assertRaises(ValueError):
            pool.size = -1
        with self.assertRaises(ValueError):
            pool.size = 3

        pool.size = 0
        self.assertEqual(pool.size, 0)
        pool.size = 2
        self.assertEqual(pool.size, 2)
class TestRef(TestCase):
    """The pool must not leak references to the function, its arguments,
    or its result after a task completes."""

    def test(self):
        pool = self.pool = self._makeOne(2)
        refs = []
        obj = SomeClass()
        obj.refs = refs
        func = obj.func
        del obj

        # GC disabled so that only true reference release (not a cycle
        # collection) can clear the weakrefs we check below.
        with disabled_gc():
            # we do this:
            # result = func(Object(), kwarg1=Object())
            # but in a thread pool and see that arguments', result's and func's references are not leaked
            result = pool.apply(func, (Object(), ), {'kwarg1': Object()})
            self.assertIsInstance(result, Object)
            gevent.sleep(0.1)  # XXX should not be needed
            refs.append(weakref.ref(func))
            del func, result
            if PYPY:
                # PyPy's deferred refcounting needs explicit collections.
                gc.collect()
                gc.collect()
            # arg, kwarg, result and func must all be dead now.
            for r in refs:
                self.assertIsNone(r())
            self.assertEqual(4, len(refs))
class Object(object):
    """Trivial instance type used as an argument/result marker in leak tests."""


class SomeClass(object):
    """Records weakrefs to a call's argument, keyword and result in ``refs``."""

    refs = None  # assigned per-instance by the test before calling func

    def func(self, arg1, kwarg1=None):
        result = Object()
        for tracked in (arg1, kwarg1, result):
            self.refs.append(weakref.ref(tracked))
        return result


def noop():
    """Do nothing; a minimal task for reference-count tests."""
class TestRefCount(TestCase):
    """Smoke test: spawn a no-op, let it run, and kill the pool cleanly."""

    def test(self):
        pool = self._makeOne(1)
        pool.spawn(noop)
        gevent.sleep(0)
        pool.kill()
from gevent import monkey
@greentest.skipUnless(
    hasattr(gevent.threadpool, 'ThreadPoolExecutor'),
    "Requires ThreadPoolExecutor")
class TestTPE(_AbstractPoolTest):
    """gevent.threadpool.ThreadPoolExecutor and its Future integration,
    both with and without monkey-patching (see MONKEY_PATCHED)."""

    size = 1

    # Executor.map returns a generator, not a list.
    MAP_IS_GEN = True

    @property
    def ClassUnderTest(self):
        return gevent.threadpool.ThreadPoolExecutor

    # Overridden to True by the monkey-patched variant of this suite.
    MONKEY_PATCHED = False

    # Late-bound properties so concurrent.futures is imported only on use.
    @property
    def FutureTimeoutError(self):
        from concurrent.futures import TimeoutError as FutureTimeoutError
        return FutureTimeoutError

    @property
    def cf_wait(self):
        from concurrent.futures import wait as cf_wait
        return cf_wait

    @property
    def cf_as_completed(self):
        from concurrent.futures import as_completed as cf_as_completed
        return cf_as_completed

    @greentest.ignores_leakcheck
    def test_future(self):
        self.assertEqual(monkey.is_module_patched('threading'),
                         self.MONKEY_PATCHED)
        pool = self.pool

        calledback = []

        def fn():
            gevent.sleep(0.5)
            return 42

        def callback(future):
            # Counts invocations; the raised exception must be swallowed
            # by the future machinery, not kill anything.
            future.calledback += 1
            raise greentest.ExpectedException("Expected, ignored")

        future = pool.submit(fn) # pylint:disable=no-member
        future.calledback = 0
        future.add_done_callback(callback)
        # Result not ready yet: a tiny timeout must raise.
        self.assertRaises(self.FutureTimeoutError, future.result, timeout=0.001)

        def spawned():
            return 2016

        spawned_greenlet = gevent.spawn(spawned)

        # Whether or not we are monkey patched, the background
        # greenlet we spawned got to run while we waited.
        self.assertEqual(future.result(), 42)
        self.assertTrue(future.done())
        self.assertFalse(future.cancelled())
        # Make sure the notifier has a chance to run so the call back
        # gets called
        gevent.sleep()
        self.assertEqual(future.calledback, 1)

        self.assertTrue(spawned_greenlet.ready())
        self.assertEqual(spawned_greenlet.value, 2016)

        # Adding the callback again runs immediately
        future.add_done_callback(lambda f: calledback.append(True))
        self.assertEqual(calledback, [True])

        # We can wait on the finished future
        done, _not_done = self.cf_wait((future,))
        self.assertEqual(list(done), [future])

        self.assertEqual(list(self.cf_as_completed((future,))), [future])
        # Doing so does not call the callback again
        self.assertEqual(future.calledback, 1)
        # even after a trip around the event loop
        gevent.sleep()
        self.assertEqual(future.calledback, 1)

        pool.kill()
        del future
        del pool
        del self.pool

    @greentest.ignores_leakcheck
    def test_future_wait_module_function(self):
        # Instead of waiting on the result, we can wait
        # on the future using the module functions
        self.assertEqual(monkey.is_module_patched('threading'),
                         self.MONKEY_PATCHED)
        pool = self.pool

        def fn():
            gevent.sleep(0.5)
            return 42

        future = pool.submit(fn) # pylint:disable=no-member
        if self.MONKEY_PATCHED:
            # Things work as expected when monkey-patched
            _done, not_done = self.cf_wait((future,), timeout=0.001)
            self.assertEqual(list(not_done), [future])

            def spawned():
                return 2016

            spawned_greenlet = gevent.spawn(spawned)

            done, _not_done = self.cf_wait((future,))
            self.assertEqual(list(done), [future])
            self.assertTrue(spawned_greenlet.ready())
            self.assertEqual(spawned_greenlet.value, 2016)
        else:
            # When not monkey-patched, raises an AttributeError
            self.assertRaises(AttributeError, self.cf_wait, (future,))

        pool.kill()
        del future
        del pool
        del self.pool

    @greentest.ignores_leakcheck
    def test_future_wait_gevent_function(self):
        # The future object can be waited on with gevent functions.
        self.assertEqual(monkey.is_module_patched('threading'),
                         self.MONKEY_PATCHED)
        pool = self.pool

        def fn():
            gevent.sleep(0.5)
            return 42

        future = pool.submit(fn) # pylint:disable=no-member

        def spawned():
            return 2016

        spawned_greenlet = gevent.spawn(spawned)

        done = gevent.wait((future,))
        self.assertEqual(list(done), [future])
        self.assertTrue(spawned_greenlet.ready())
        self.assertEqual(spawned_greenlet.value, 2016)

        pool.kill()
        del future
        del pool
        del self.pool
class TestThreadResult(greentest.TestCase):
    """Regression tests for gevent.threadpool.ThreadResult."""

    def test_exception_in_on_async_doesnt_crash(self):
        # Issue 1482. An FFI-based loop could crash the whole process
        # by dereferencing a handle after it was closed.
        called = []

        class MyException(Exception):
            pass

        def bad_when_ready():
            # The ready callback itself raises; this must be handled,
            # not crash the loop.
            called.append(1)
            raise MyException

        tr = gevent.threadpool.ThreadResult(None, gevent.get_hub(), bad_when_ready)

        def wake():
            called.append(1)
            tr.set(42)

        gevent.spawn(wake).get()

        # Spin the loop a few times to make sure we run the callbacks.
        # If we neglect to spin, we don't trigger the bug.
        # If error handling is correct, the exception raised from the callback
        # will be surfaced in the main greenlet. On windows, it can sometimes take
        # more than one spin for some reason; if we don't catch it here, then
        # some other test is likely to die unexpectedly with MyException.
        with self.assertRaises(MyException):
            for _ in range(5):
                gevent.sleep(0.001)

        self.assertEqual(called, [1, 1])
        # But value was cleared in a finally block
        self.assertIsNone(tr.value)
        self.assertIsNotNone(tr.receiver)
class TestWorkerProfileAndTrace(TestCase):
    # Worker threads should execute the test and trace functions.
    # (When running the user code.)
    # https://github.com/gevent/gevent/issues/1670

    # Saved threading profile/trace hooks, restored in tearDown.
    old_profile = None
    old_trace = None

    def setUp(self):
        super(TestWorkerProfileAndTrace, self).setUp()
        self.old_profile = gevent.threadpool._get_thread_profile()
        self.old_trace = gevent.threadpool._get_thread_trace()

    def tearDown(self):
        import threading
        threading.setprofile(self.old_profile)
        threading.settrace(self.old_trace)
        super(TestWorkerProfileAndTrace, self).tearDown()

    def test_get_profile(self):
        # _get_thread_profile reflects whatever threading.setprofile set.
        import threading
        threading.setprofile(self)
        self.assertIs(gevent.threadpool._get_thread_profile(), self)

    def test_get_trace(self):
        import threading
        threading.settrace(self)
        self.assertIs(gevent.threadpool._get_thread_trace(), self)

    def _test_func_called_in_task(self, func):
        # Parameterized over 'profile'/'trace': install a hook via
        # threading.set<func> and verify the worker activates it only while
        # running the user task (checked through sys.get<func>).
        import threading
        import sys
        setter = getattr(threading, 'set' + func)
        getter = getattr(sys, 'get' + func)

        called = [0]

        def callback(*_args):
            called[0] += 1

        def task():
            test.assertIsNotNone(getter)
            return 1701

        before_task = []
        after_task = []
        test = self

        # Subclass hooks record the active profile/trace function around
        # the base class's before/after task processing.
        class Pool(ThreadPool):
            class _WorkerGreenlet(ThreadPool._WorkerGreenlet):
                # pylint:disable=signature-differs
                def _before_run_task(self, func, *args):
                    before_task.append(func)
                    before_task.append(getter())
                    ThreadPool._WorkerGreenlet._before_run_task(self, func, *args)
                    before_task.append(getter())

                def _after_run_task(self, func, *args):
                    after_task.append(func)
                    after_task.append(getter())
                    ThreadPool._WorkerGreenlet._after_run_task(self, func, *args)
                    after_task.append(getter())

        self.ClassUnderTest = Pool
        pool = self._makeOne(1, create_all_worker_threads=True)
        assert isinstance(pool, Pool)
        # Do this after creating the pool and its thread to verify we don't
        # capture the function at thread creation time.
        setter(callback)

        res = pool.apply(task)
        self.assertEqual(res, 1701)
        self.assertGreaterEqual(called[0], 1)

        # Shutdown the pool. PyPy2.7-7.3.1 on Windows/Appveyor was
        # properly seeing the before_task value, but after_task was empty.
        # That suggested a memory consistency type issue, where the updates
        # written by the other thread weren't fully visible to this thread
        # yet. Try to kill it to see if that helps. (Couldn't reproduce
        # on macOS).
        #
        # https://ci.appveyor.com/project/jamadden/gevent/build/job/wo9likk85cduui7n#L867
        pool.kill()

        # The function is active only for the scope of the function
        self.assertEqual(before_task, [task, None, callback])
        self.assertEqual(after_task, [task, callback, None])

    def test_profile_called_in_task(self):
        self._test_func_called_in_task('profile')

    def test_trace_called_in_task(self):
        self._test_func_called_in_task('trace')
if __name__ == '__main__':
greentest.main()
| 24,825 | 29.05569 | 105 | py |
gevent | gevent-master/src/gevent/tests/test___monkey_patching.py | import sys
import os
import glob
import atexit
# subprocess: include in subprocess tests
from gevent.testing import util
from gevent.testing import sysinfo
from gevent.testing.support import is_resource_enabled
TIMEOUT = 120
# XXX: Generalize this so other packages can use it.
def get_absolute_pythonpath():
    """Return PYTHONPATH with every entry converted to an absolute path.

    Entries are split on ``os.pathsep`` and joined back the same way.
    Empty segments (including an unset or empty PYTHONPATH) are skipped:
    previously ``''.split(os.pathsep)`` produced ``['']`` and
    ``os.path.abspath('')`` silently turned that into the current working
    directory, injecting a spurious CWD entry into the child environment.
    """
    raw = os.environ.get('PYTHONPATH', '')
    paths = [os.path.abspath(p) for p in raw.split(os.pathsep) if p]
    return os.pathsep.join(paths)
def TESTRUNNER(tests=None):
    """Yield ``(command, options)`` pairs that run each stdlib test file
    through gevent.testing.monkey_test, or return early (yielding nothing)
    when the resource is disabled or the test directories are missing.

    :param tests: optional explicit list of test file paths; when empty,
        test_*.py files are discovered in the stdlib test directory.
    """
    if not is_resource_enabled('gevent_monkey'):
        util.log('WARNING: Testing monkey-patched stdlib has been disabled',
                 color="suboptimal-behaviour")
        return
    try:
        test_dir, version_test_dir = util.find_stdlib_tests()
    except util.NoSetupPyFound as e:
        util.log("WARNING: No setup.py and src/greentest found: %r", e,
                 color="suboptimal-behaviour")
        return

    if not os.path.exists(test_dir):
        util.log('WARNING: No test directory found at %s', test_dir,
                 color="suboptimal-behaviour")
        return

    # Warn (but continue) when the bundled tests were written for a
    # different Python version than the one running.
    # pylint:disable=unspecified-encoding
    with open(os.path.join(test_dir, 'version')) as f:
        preferred_version = f.read().strip()

    running_version = sysinfo.get_python_version()
    if preferred_version != running_version:
        util.log('WARNING: The tests in %s/ are from version %s and your Python is %s',
                 test_dir, preferred_version, running_version,
                 color="suboptimal-behaviour")

    version_tests = glob.glob('%s/test_*.py' % version_test_dir)
    version_tests = sorted(version_tests)
    if not tests:
        tests = glob.glob('%s/test_*.py' % test_dir)
        tests = sorted(tests)

    PYTHONPATH = (os.getcwd() + os.pathsep + get_absolute_pythonpath()).rstrip(':')

    # Deduplicate by basename; version-specific files shadow generic ones.
    tests = sorted(set(os.path.basename(x) for x in tests))
    version_tests = sorted(set(os.path.basename(x) for x in version_tests))

    util.log("Discovered %d tests in %s", len(tests), test_dir)
    util.log("Discovered %d version-specific tests in %s", len(version_tests), version_test_dir)

    options = {
        'cwd': test_dir,
        'timeout': TIMEOUT,
        'setenv': {
            'PYTHONPATH': PYTHONPATH,
            # debug produces resource tracking warnings for the
            # CFFI backends. On Python 2, many of the stdlib tests
            # rely on refcounting to close sockets so they produce
            # lots of noise. Python 3 is not completely immune;
            # test_ftplib.py tends to produce warnings---and the Python 3
            # test framework turns those into test failures!
            'GEVENT_DEBUG': 'error',
        }
    }

    if tests and not sys.platform.startswith("win"):
        # Clean up the temp files the stdlib tests leave behind.
        atexit.register(os.system, 'rm -f */@test*')

    basic_args = [sys.executable, '-u', '-W', 'ignore', '-m', 'gevent.testing.monkey_test']
    for filename in tests:
        if filename in version_tests:
            util.log("Overriding %s from %s with file from %s", filename, test_dir, version_test_dir)
            continue
        yield basic_args + [filename], options.copy()

    # Version-specific tests run from their own directory.
    options['cwd'] = version_test_dir
    for filename in version_tests:
        yield basic_args + [filename], options.copy()
def main():
    """Discover the stdlib monkey-patched tests and run them with the shared runner."""
    from gevent.testing import testrunner
    tests = list(TESTRUNNER(sys.argv[1:]))
    runner = testrunner.Runner(tests, quiet=None)
    return runner()
if __name__ == '__main__':
    main()
| 3,502 | 33.009709 | 101 | py |
gevent | gevent-master/src/gevent/tests/test__lock.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gevent import lock
import gevent.testing as greentest
from gevent.tests import test__semaphore
class TestRLockMultiThread(test__semaphore.TestSemaphoreMultiThread):
    """Runs the multi-threaded semaphore tests against ``lock.RLock``."""

    def _makeOne(self):
        # Deliberately create the RLock *without* binding a hub first.
        # A careless implementation has a race here: if the *background*
        # thread is the one that ends up capturing the hub, it will ask
        # that hub to switch back to itself and then switch into the hub,
        # which raises LoopExit (the background thread has nothing else to
        # do). The correct behaviour is for the background thread to
        # realize it is the background thread, start an async watcher, and
        # only then switch to the hub. Leaving the hub unset exercises
        # that path.
        return lock.RLock()

    def assertOneHasNoHub(self, sem):
        # RLock delegates to an internal semaphore stored at ``_block``.
        self.assertIsNone(sem._block.hub)
if __name__ == '__main__':
    greentest.main()
| 1,100 | 31.382353 | 76 | py |
gevent | gevent-master/src/gevent/tests/test__ssl.py | from __future__ import print_function, division, absolute_import
from gevent import monkey
monkey.patch_all()
import os
import socket
import gevent.testing as greentest
# Be careful not to have TestTCP as a bare attribute in this module,
# even aliased, to avoid running duplicate tests
from gevent.tests import test__socket
import ssl
def ssl_listener(private_key, certificate):
    """Return ``(tls_sock, raw_sock)``: a listening socket wrapped for server-side TLS.

    The unwrapped socket is returned as well so the caller can close it too.
    """
    plain_sock = socket.socket()
    greentest.bind_and_listen(plain_sock)
    # pylint:disable=deprecated-method
    wrapped = wrap_socket(
        plain_sock, keyfile=private_key, certfile=certificate, server_side=True)
    return wrapped, plain_sock
def wrap_socket(sock, *, keyfile=None, certfile=None, server_side=False):
    """Wrap *sock* in TLS using a permissive context (no certificate checks).

    A key/cert pair is loaded only when at least one of *keyfile* /
    *certfile* is supplied; the system default CA certs are always loaded.
    """
    ctx = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)
    # check_hostname must be off before verify_mode can be CERT_NONE.
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    ctx.load_default_certs()
    if not (keyfile is None and certfile is None):
        ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
    return ctx.wrap_socket(sock, server_side=server_side)
class TestSSL(test__socket.TestTCP):
    """Runs the generic TCP tests from ``test__socket`` over TLS-wrapped sockets."""
    # To generate:
    # openssl req -x509 -newkey rsa:4096 -keyout test_server.key -out test_server.crt -days 36500 -nodes -subj '/CN=localhost'
    certfile = os.path.join(os.path.dirname(__file__), 'test_server.crt')
    privfile = os.path.join(os.path.dirname(__file__), 'test_server.key')
    # Python 2.x has socket.sslerror (which is an alias for
    # ssl.SSLError); That's gone in Py3 though. In Python 2, most timeouts are raised
    # as SSLError, but Python 3 raises the normal socket.timeout instead. So this has
    # the effect of making TIMEOUT_ERROR be SSLError on Py2 and socket.timeout on Py3
    # See https://bugs.python.org/issue10272.
    # PyPy3 7.2 has a bug, though: it shares much of the SSL implementation with Python 2,
    # and it unconditionally does `socket.sslerror = SSLError` when ssl is imported.
    # So we can't rely on getattr/hasattr tests, we must be explicit.
    TIMEOUT_ERROR = socket.timeout # pylint:disable=no-member
    def _setup_listener(self):
        # Wrap the plain listening socket in server-side TLS; register the
        # raw socket for closing at teardown since callers only see the wrapper.
        listener, raw_listener = ssl_listener(self.privfile, self.certfile)
        self._close_on_teardown(raw_listener)
        return listener
    def create_connection(self, *args, **kwargs): # pylint:disable=signature-differs
        # Client connections are the base class's plain sockets, TLS-wrapped
        # and scheduled for closing at teardown.
        return self._close_on_teardown(
            # pylint:disable=deprecated-method
            wrap_socket(super(TestSSL, self).create_connection(*args, **kwargs)))
    # The SSL library can take a long time to buffer the large amount of data we're trying
    # to send, so we can't compare to the timeout values
    _test_sendall_timeout_check_time = False
    # The SSL layer has extra buffering, so test_sendall needs
    # to send a very large amount to make it timeout
    _test_sendall_data = data_sent = b'hello' * 100000000
    test_sendall_array = greentest.skipOnMacOnCI("Sometimes misses data")(
        greentest.skipOnManylinux("Sometimes misses data")(
            test__socket.TestTCP.test_sendall_array
        )
    )
    test_sendall_str = greentest.skipOnMacOnCI("Sometimes misses data")(
        greentest.skipOnManylinux("Sometimes misses data")(
            test__socket.TestTCP.test_sendall_str
        )
    )
    @greentest.skipOnWindows("Not clear why we're skipping")
    def test_ssl_sendall_timeout0(self):
        # Issue #317: SSL_WRITE_PENDING in some corner cases
        server_sock = []
        acceptor = test__socket.Thread(target=lambda: server_sock.append(
            # pylint:disable=no-member
            self.listener.accept()))
        client = self.create_connection()
        client.setblocking(False)
        try:
            # Python 3 raises ssl.SSLWantWriteError; Python 2 simply *hangs*
            # on non-blocking sockets because it's a simple loop around
            # send(). Python 2.6 doesn't have SSLWantWriteError
            expected = getattr(ssl, 'SSLWantWriteError', ssl.SSLError)
            with self.assertRaises(expected):
                client.sendall(self._test_sendall_data)
        finally:
            acceptor.join()
            client.close()
            server_sock[0][0].close()
    # def test_fullduplex(self):
    #     try:
    #         super(TestSSL, self).test_fullduplex()
    #     except LoopExit:
    #         if greentest.LIBUV and greentest.WIN:
    #             # XXX: Unable to duplicate locally
    #             raise greentest.SkipTest("libuv on Windows sometimes raises LoopExit")
    #         raise
    @greentest.ignores_leakcheck
    @greentest.skipOnPy310("No longer raises SSLError")
    def test_empty_send(self):
        # Issue 719
        # Sending empty bytes with the 'send' method raises
        # ssl.SSLEOFError in the stdlib. PyPy 4.0 and CPython 2.6
        # both just raise the superclass, ssl.SSLError.
        # Ignored during leakchecks because the third or fourth iteration of the
        # test hangs on CPython 2/posix for some reason, likely due to
        # the use of _close_on_teardown keeping something alive longer than intended.
        # cf test__makefile_ref
        with self.assertRaises(ssl.SSLError):
            super(TestSSL, self).test_empty_send()
    @greentest.ignores_leakcheck
    def test_sendall_nonblocking(self):
        # Override; doesn't work with SSL sockets.
        pass
    @greentest.ignores_leakcheck
    def test_connect_with_type_flags_ignored(self):
        # Override; doesn't work with SSL sockets.
        pass
if __name__ == '__main__':
    greentest.main()
| 5,597 | 39.273381 | 126 | py |
gevent | gevent-master/src/gevent/tests/known_failures.py | # This is a list of known failures (=bugs).
# The tests listed there must fail (or testrunner.py will report error) unless they are prefixed with FLAKY
# in which cases the result of them is simply ignored
from __future__ import print_function
import sys
import struct
from gevent.testing import sysinfo
class Condition(object):
    """Abstract truth value that can be composed with ``&`` and ``|``.

    Composition is lazy: the operands are only evaluated when the
    composite condition is converted to bool. Subclasses implement
    ``__bool__``.
    """
    __slots__ = ()
    def __and__(self, other):
        return AndCondition(self, other)
    def __or__(self, other):
        return OrCondition(self, other)
    def __bool__(self):
        raise NotImplementedError
class AbstractBinaryCondition(Condition): # pylint:disable=abstract-method
    """Common plumbing for conditions that combine two operands."""
    __slots__ = (
        'lhs',
        'rhs',
    )
    # Operator symbol used by __repr__; each concrete subclass overrides this.
    OP = None

    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "(%r %s %r)" % (self.lhs, self.OP, self.rhs)


class OrCondition(AbstractBinaryCondition):
    """True when either operand evaluates true."""
    __slots__ = ()
    OP = '|'

    def __bool__(self):
        return bool(self.lhs) or bool(self.rhs)


class AndCondition(AbstractBinaryCondition):
    """True only when both operands evaluate true."""
    __slots__ = ()
    OP = '&'

    def __bool__(self):
        return bool(self.lhs) and bool(self.rhs)
class ConstantCondition(Condition):
    """A condition with a fixed truth value and a printable name."""
    __slots__ = (
        'value',
        '__name__',
    )

    def __init__(self, value, name=None):
        self.value = bool(value)
        self.__name__ = name if name else str(value)

    def __bool__(self):
        return self.value

    def __repr__(self):
        return self.__name__
# The two trivial conditions; used as argument defaults below.
ALWAYS = ConstantCondition(True)
NEVER = ConstantCondition(False)
class _AttrCondition(ConstantCondition):
    # A constant condition whose value (and repr name) come from the
    # attribute of gevent.testing.sysinfo with the given name, sampled
    # once at import time.
    __slots__ = (
    )
    def __init__(self, name):
        ConstantCondition.__init__(self, getattr(sysinfo, name), name)
# Interpreter / platform / CI facts mirrored from sysinfo.
PYPY = _AttrCondition('PYPY')
PYPY3 = _AttrCondition('PYPY3')
PY3 = _AttrCondition('PY3')
PY2 = _AttrCondition('PY2')
OSX = _AttrCondition('OSX')
LIBUV = _AttrCondition('LIBUV')
WIN = _AttrCondition('WIN')
APPVEYOR = _AttrCondition('RUNNING_ON_APPVEYOR')
TRAVIS = _AttrCondition('RUNNING_ON_TRAVIS')
CI = _AttrCondition('RUNNING_ON_CI')
LEAKTEST = _AttrCondition('RUN_LEAKCHECKS')
COVERAGE = _AttrCondition('RUN_COVERAGE')
RESOLVER_NOT_SYSTEM = _AttrCondition('RESOLVER_NOT_SYSTEM')
# struct.calcsize('P') is the pointer size: 8 bytes on a 64-bit build.
BIT_64 = ConstantCondition(struct.calcsize('P') * 8 == 64, 'BIT_64')
PY380_EXACTLY = ConstantCondition(sys.version_info[:3] == (3, 8, 0), 'PY380_EXACTLY')
PY312B3_EXACTLY = ConstantCondition(sys.version_info == (3, 12, 0, 'beta', 3))
PY312B4_EXACTLY = ConstantCondition(sys.version_info == (3, 12, 0, 'beta', 4))
class _Definition(object):
    """Declares how one test file is to be treated by the runner.

    Instances are created as class attributes of ``Definitions``; the
    attribute name (recorded via ``__set_name__``) identifies the test
    module.
    """
    __slots__ = (
        '__name__',
        # When does the class of this condition apply?
        'when',
        # When should this test be run alone, if it's run?
        'run_alone',
        # Should this test be ignored during coverage measurement?
        'ignore_coverage',
        # {name: (Condition, value)}
        'options',
    )
    def __init__(self, when, run_alone, ignore_coverage, options):
        assert isinstance(when, Condition)
        assert isinstance(run_alone, Condition)
        assert isinstance(ignore_coverage, Condition)
        self.when = when
        self.__name__ = None # pylint:disable=non-str-assignment-to-dunder-name
        self.run_alone = run_alone
        self.ignore_coverage = ignore_coverage
        if options:
            # Each option value must be a (guard Condition, value) pair.
            for v in options.values():
                assert isinstance(v, tuple) and len(v) == 2
                assert isinstance(v[0], Condition)
        self.options = options
    def __set_name__(self, owner, name):
        # Invoked by class creation; captures the defining attribute name.
        self.__name__ = name
    def __repr__(self):
        return '<%s for %s when=%r=%s run_alone=%r=%s>' % (
            type(self).__name__,
            self.__name__,
            self.when, bool(self.when),
            self.run_alone, bool(self.run_alone)
        )
class _Action(_Definition):
    """A definition with a human-readable reason attached."""
    __slots__ = (
        'reason',
    )
    def __init__(self, reason='', when=ALWAYS, run_alone=NEVER, ignore_coverage=NEVER,
                 options=None):
        _Definition.__init__(self, when, run_alone, ignore_coverage, options)
        self.reason = reason
class RunAlone(_Action):
    """Run the test with nothing else in parallel; *when* maps onto run_alone."""
    __slots__ = ()
    def __init__(self, reason='', when=ALWAYS, ignore_coverage=NEVER):
        _Action.__init__(self, reason, run_alone=when, ignore_coverage=ignore_coverage)
class Failing(_Action):
    # Goes into FAILING_TESTS: per the module header, the test *must* fail
    # or the runner reports an error.
    __slots__ = ()
class Flaky(Failing):
    # Goes into FAILING_TESTS with a 'FLAKY ' prefix: the result is ignored.
    __slots__ = ()
class Ignored(_Action):
    # Goes into IGNORED_TESTS (presumably skipped entirely by the runner).
    __slots__ = ()
class Multi(object):
    """Builder that lets one test carry several actions (e.g. flaky AND ignored)."""
    def __init__(self):
        self._conds = []
    def flaky(self, reason='', when=True, ignore_coverage=NEVER, run_alone=NEVER):
        # Append a Flaky action; returns self so calls can be chained.
        self._conds.append(
            Flaky(
                reason, when=when,
                ignore_coverage=ignore_coverage,
                run_alone=run_alone,
            )
        )
        return self
    def ignored(self, reason='', when=True):
        # Append an Ignored action; returns self so calls can be chained.
        self._conds.append(Ignored(reason, when=when))
        return self
    def __set_name__(self, owner, name):
        # Forward the defining attribute name to every contained action.
        for c in self._conds:
            c.__set_name__(owner, name)
class DefinitionsMeta(type):
    # a metaclass on Python 3 that makes sure we only set attributes once. pylint doesn't
    # warn about that.
    @classmethod
    def __prepare__(mcs, name, bases): # pylint:disable=unused-argument,bad-dunder-name
        # Use a write-once mapping as the class namespace, so defining the
        # same test twice in Definitions raises at class-creation time.
        return SetOnceMapping()
class SetOnceMapping(dict):
    """A dict whose keys can be assigned only once.

    Assigning to an existing key raises ``AttributeError`` naming the key.
    Used as a class namespace, so a duplicated attribute fails loudly.
    """

    def __setitem__(self, name, value):
        if name not in self:
            dict.__setitem__(self, name, value)
        else:
            raise AttributeError(name)
# Import-time self-test: prove SetOnceMapping really rejects re-assignment
# before we rely on it as the Definitions class namespace.
som = SetOnceMapping()
som[1] = 1
try:
    som[1] = 2
except AttributeError:
    del som
else:
    raise AssertionError("SetOnceMapping is broken")
# Base class built by calling the metaclass directly (the three-argument
# type call avoids the Py2/Py3 `metaclass` syntax difference).
DefinitionsBase = DefinitionsMeta('DefinitionsBase', (object,), {})
class Definitions(DefinitionsBase):
    """Declarative catalog of per-test-file treatment.

    Each attribute names a test module (``<attr>.py``); its value is an
    ``_Action`` (or a ``Multi`` chain of them) saying whether the test is
    expected to fail, is flaky, is ignored, must run alone, etc., and under
    which platform ``Condition``. ``populate()`` flattens this class into
    the module-level lists.
    """
    test__util = RunAlone(
        """
        If we have extra greenlets hanging around due to changes in GC, we won't
        match the expected output.
        So far, this is only seen on one version, in CI environment.
        """,
        when=(CI & (PY312B3_EXACTLY | PY312B4_EXACTLY))
    )
    test__issue6 = Flaky(
        """test__issue6 (see comments in test file) is really flaky on both Travis and Appveyor;
        on Travis we could just run the test again (but that gets old fast), but on appveyor
        we don't have that option without a new commit---and sometimes we really need a build
        to succeed in order to get a release wheel"""
    )
    test__core_fork = Ignored(
        """fork watchers don't get called on windows
        because fork is not a concept windows has.
        See this file for a detailed explanation.""",
        when=WIN
    )
    test__greenletset = Flaky(
        when=WIN,
        ignore_coverage=PYPY
    )
    test__example_udp_client = test__example_udp_server = Flaky(
        """
        These both run on port 9000 and can step on each other...seems
        like the appveyor containers aren't fully port safe? Or it
        takes longer for the processes to shut down? Or we run them in
        a different order in the process pool than we do other places?
        On PyPy on Travis, this fails to get the correct results,
        sometimes. I can't reproduce locally
        """,
        when=APPVEYOR | (PYPY & TRAVIS)
    )
    # This one sometimes randomly closes connections, but no indication
    # of a server crash, only a client side close.
    test__server_pywsgi = Flaky(when=APPVEYOR)
    test_threading = Multi().ignored(
        """
        This one seems to just stop right after patching is done. It
        passes on a local win 10 vm, and the main test_threading_2.py
        does as well. Based on the printouts we added, it appears to
        not even finish importing:
        https://ci.appveyor.com/project/denik/gevent/build/1.0.1277/job/tpvhesij5gldjxqw#L1190
        Ignored because it takes two minutes to time out.
        """,
        when=APPVEYOR & LIBUV & PYPY
    ).flaky(
        """
        test_set_and_clear in Py3 relies on 5 threads all starting and
        coming to an Event wait point while a sixth thread sleeps for a half
        second. The sixth thread then does something and checks that
        the 5 threads were all at the wait point. But the timing is sometimes
        too tight for appveyor. This happens even if Event isn't
        monkey-patched
        """,
        when=APPVEYOR & PY3
    )
    test_ftplib = Flaky(
        r"""
        could be a problem of appveyor - not sure
        ======================================================================
        ERROR: test_af (__main__.TestIPv6Environment)
        ----------------------------------------------------------------------
        File "C:\Python27-x64\lib\ftplib.py", line 135, in connect
        self.sock = socket.create_connection((self.host, self.port), self.timeout)
        File "c:\projects\gevent\gevent\socket.py", line 73, in create_connection
        raise err
        error: [Errno 10049] [Error 10049] The requested address is not valid in its context.
        XXX: On Jan 3 2016 this suddenly started passing on Py27/64; no idea why, the python version
        was 2.7.11 before and after.
        """,
        when=APPVEYOR & BIT_64
    )
    test__backdoor = Flaky(when=LEAKTEST | PYPY)
    test__socket_errors = Flaky(when=LEAKTEST)
    test_signal = Multi().flaky(
        "On Travis, this very frequently fails due to timing",
        when=TRAVIS & LEAKTEST,
        # Partial workaround for the _testcapi issue on PyPy,
        # but also because signal delivery can sometimes be slow, and this
        # spawn processes of its own
        run_alone=APPVEYOR,
    ).ignored(
        """
        This fails to run a single test. It looks like just importing the module
        can hang. All I see is the output from patch_all()
        """,
        when=APPVEYOR & PYPY3
    )
    test__monkey_sigchld_2 = Ignored(
        """
        This hangs for no apparent reason when run by the testrunner,
        even wher maked standalone when run standalone from the
        command line, it's fine. Issue in pypy2 6.0?
        """,
        when=PYPY & LIBUV
    )
    test_ssl = Ignored(
        """
        PyPy 7.0 and 7.1 on Travis with Ubunto Xenial 16.04 can't
        allocate SSL Context objects, either in Python 2.7 or 3.6.
        There must be some library incompatibility. No point even
        running them. XXX: Remember to turn this back on.
        On Windows, with PyPy3.7 7.3.7, there seem to be all kind of certificate
        errors.
        """,
        when=(PYPY & TRAVIS) | (PYPY3 & WIN)
    )
    test_httpservers = Ignored(
        """
        All the CGI tests hang. There appear to be subprocess problems.
        """,
        when=PYPY3 & WIN
    )
    test__pywsgi = Ignored(
        """
        XXX: Re-enable this when we can investigate more. This has
        started crashing with a SystemError. I cannot reproduce with
        the same version on macOS and I cannot reproduce with the same
        version in a Linux vm. Commenting out individual tests just
        moves the crash around.
        https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
        On Appveyor 3.8.0, for some reason this takes *way* too long, about 100s, which
        often goes just over the default timeout of 100s. This makes no sense.
        But it also takes nearly that long in 3.7. 3.6 and earlier are much faster.
        It also takes just over 100s on PyPy 3.7.
        """,
        when=(PYPY & TRAVIS & LIBUV) | PY380_EXACTLY,
        # https://bitbucket.org/pypy/pypy/issues/2769/systemerror-unexpected-internal-exception
        run_alone=(CI & LEAKTEST & PY3) | (PYPY & LIBUV),
        # This often takes much longer on PyPy on CI.
        options={'timeout': (CI & PYPY, 180)},
    )
    test_subprocess = Multi().flaky(
        "Unknown, can't reproduce locally; times out one test",
        when=PYPY & PY3 & TRAVIS,
        ignore_coverage=ALWAYS,
    ).ignored(
        "Tests don't even start before the process times out.",
        when=PYPY3 & WIN
    )
    test__threadpool = Ignored(
        """
        XXX: Re-enable these when we have more time to investigate.
        This test, which normally takes ~60s, sometimes
        hangs forever after running several tests. I cannot reproduce,
        it seems highly load dependent. Observed with both libev and libuv.
        """,
        when=TRAVIS & (PYPY | OSX),
        # This often takes much longer on PyPy on CI.
        options={'timeout': (CI & PYPY, 180)},
    )
    test__threading_2 = Ignored(
        """
        This test, which normally takes 4-5s, sometimes
        hangs forever after running two tests. I cannot reproduce,
        it seems highly load dependent. Observed with both libev and libuv.
        """,
        when=TRAVIS & (PYPY | OSX),
        # This often takes much longer on PyPy on CI.
        options={'timeout': (CI & PYPY, 180)},
    )
    test__issue230 = Ignored(
        """
        This rarely hangs for unknown reasons. I cannot reproduce
        locally.
        """,
        when=TRAVIS & OSX
    )
    test_selectors = Flaky(
        """
        Timing issues on appveyor.
        """,
        when=PY3 & APPVEYOR,
        ignore_coverage=ALWAYS,
    )
    test__example_portforwarder = Flaky(
        """
        This one sometimes times out, often after output "The process
        with PID XXX could not be terminated. Reason: There is no
        running instance of the task.",
        """,
        when=APPVEYOR | COVERAGE
    )
    test__issue302monkey = test__threading_vs_settrace = Flaky(
        """
        The gevent concurrency plugin tends to slow things
        down and get us past our default timeout value. These
        tests in particular are sensitive to it. So in fact we just turn them
        off.
        """,
        when=COVERAGE,
        ignore_coverage=ALWAYS,
    )
    test__hub_join_timeout = Ignored(
        r"""
        This sometimes times out. It appears to happen when the
        times take too long and a test raises a FlakyTestTimeout error,
        aka a unittest.SkipTest error. This probably indicates that we're
        not cleaning something up correctly:
        .....ss
        GEVENTTEST_USE_RESOURCES=-network C:\Python38-x64\python.exe -u \
            -mgevent.tests.test__hub_join_timeout [code TIMEOUT] [took 100.4s]
        """,
        when=APPVEYOR
    )
    test__example_wsgiserver = test__example_webproxy = RunAlone(
        """
        These share the same port, which means they can conflict
        between concurrent test runs too
        XXX: Fix this by dynamically picking a port.
        """,
    )
    test__pool = RunAlone(
        """
        On a heavily loaded box, these can all take upwards of 200s.
        """,
        when=(CI & LEAKTEST) | (PYPY3 & APPVEYOR)
    )
    test_socket = RunAlone(
        "Sometimes has unexpected timeouts",
        when=CI & PYPY & PY3,
        ignore_coverage=ALWAYS, # times out
    )
    test__refcount = Ignored(
        "Sometimes fails to connect for no reason",
        when=(CI & OSX) | (CI & PYPY) | APPVEYOR,
        ignore_coverage=PYPY
    )
    test__doctests = Ignored(
        "Sometimes times out during/after gevent._config.Config",
        when=CI & OSX
    )
# tests that can't be run when coverage is enabled
# TODO: Now that we have this declarative, we could eliminate this list,
# just add them to the main IGNORED_TESTS list.
IGNORE_COVERAGE = [
]
# A mapping from test file basename to a dictionary of
# options that will be applied on top of the DEFAULT_RUN_OPTIONS.
TEST_FILE_OPTIONS = {
}
# Expected failures; entries prefixed with 'FLAKY ' have their result ignored.
FAILING_TESTS = []
# Tests that are skipped entirely (populated from Ignored actions).
IGNORED_TESTS = []
# tests that don't do well when run on busy box
# or that are mutually exclusive
RUN_ALONE = [
]
def populate(): # pylint:disable=too-many-branches
    """Flatten the declarative ``Definitions`` class into the module lists.

    Every ``_Action`` attribute (or each action inside a ``Multi``)
    contributes the test file named after the attribute to RUN_ALONE,
    IGNORE_COVERAGE, TEST_FILE_OPTIONS, IGNORED_TESTS and/or
    FAILING_TESTS, according to its conditions. The lists are sorted
    at the end.
    """
    # TODO: Maybe move to the metaclass.
    # TODO: This could be better.
    for attr_name, definition in Definitions.__dict__.items():
        test_name = attr_name + '.py'
        if isinstance(definition, Multi):
            actions = definition._conds
        else:
            actions = (definition,)
        for action in actions:
            if not isinstance(action, _Action):
                # Skip ordinary class-dict entries (__module__, etc.)
                continue
            if action.run_alone:
                RUN_ALONE.append(test_name)
            if action.ignore_coverage:
                IGNORE_COVERAGE.append(test_name)
            if action.options:
                for opt_name, (condition, value) in action.options.items():
                    # TODO: Verify that this doesn't match more than once.
                    if condition:
                        TEST_FILE_OPTIONS.setdefault(test_name, {})[opt_name] = value
            if action.when:
                if isinstance(action, Ignored):
                    IGNORED_TESTS.append(test_name)
                elif isinstance(action, Flaky):
                    FAILING_TESTS.append('FLAKY ' + test_name)
                elif isinstance(action, Failing):
                    FAILING_TESTS.append(test_name)
    FAILING_TESTS.sort()
    IGNORED_TESTS.sort()
    RUN_ALONE.sort()
populate()
if __name__ == '__main__':
    # Debugging aid: dump the lists computed by populate().
    print('known_failures:\n', FAILING_TESTS)
    print('ignored tests:\n', IGNORED_TESTS)
    print('run alone:\n', RUN_ALONE)
    print('options:\n', TEST_FILE_OPTIONS)
    print("ignore during coverage:\n", IGNORE_COVERAGE)
| 17,473 | 30.945155 | 107 | py |
gevent | gevent-master/src/gevent/tests/test__threading_holding_lock_while_monkey.py | from gevent import monkey
import threading
# Make sure that we can patch gevent while holding
# a threading lock. Under Python2, where RLock is implemented
# in python code, this used to throw RuntimeErro("Cannot release un-acquired lock")
# See https://github.com/gevent/gevent/issues/615
# pylint:disable=useless-with-lock
# Patch while the (pre-patch) RLock is held; this is the regression case.
with threading.RLock():
    monkey.patch_all() # pragma: testrunner-no-monkey-combine
| 411 | 40.2 | 83 | py |
gevent | gevent-master/src/gevent/tests/test__subprocess.py | import sys
import os
import errno
import unittest
import time
import tempfile
import gevent.testing as greentest
import gevent
from gevent.testing import mock
from gevent import subprocess
if not hasattr(subprocess, 'mswindows'):
    # PyPy3, native python subprocess
    subprocess.mswindows = False
PYPY = hasattr(sys, 'pypy_version_info')
PY3 = sys.version_info[0] >= 3
if subprocess.mswindows:
    # Code snippet prepended to child programs: puts stdout into binary
    # mode on Windows so newline translation doesn't skew the tests.
    SETBINARY = 'import msvcrt; msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY);'
else:
    SETBINARY = ''
python_universal_newlines = hasattr(sys.stdout, 'newlines')
# The stdlib of Python 3 on Windows doesn't properly handle universal newlines
# (it produces broken results compared to Python 2)
# See gevent.subprocess for more details.
python_universal_newlines_broken = PY3 and subprocess.mswindows
@greentest.skipWithoutResource('subprocess')
class TestPopen(greentest.TestCase):
    """Tests for gevent.subprocess.Popen: exit codes, pipes, newline
    translation, and file-descriptor hygiene."""
    # Use the normal error handling. Make sure that any background greenlets
    # subprocess spawns propagate errors as expected.
    error_fatal = False
    def test_exit(self):
        # wait() returns the child's exit code.
        popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(10)'])
        self.assertEqual(popen.wait(), 10)
    def test_wait(self):
        # A Popen object can be passed directly to gevent.wait().
        popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(11)'])
        gevent.wait([popen])
        self.assertEqual(popen.poll(), 11)
    def test_child_exception(self):
        # Spawning a nonexistent executable raises OSError with ENOENT (2).
        with self.assertRaises(OSError) as exc:
            subprocess.Popen(['*']).wait()
        self.assertEqual(exc.exception.errno, 2)
    def test_leak(self):
        # No file descriptors may remain open once the pipe is closed and
        # the Popen object is collected.
        num_before = greentest.get_number_open_files()
        p = subprocess.Popen([sys.executable, "-c", "print()"],
                             stdout=subprocess.PIPE)
        p.wait()
        p.stdout.close()
        del p
        num_after = greentest.get_number_open_files()
        self.assertEqual(num_before, num_after)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_communicate(self):
        # communicate() feeds stdin and collects stdout/stderr as bytes.
        p = subprocess.Popen([sys.executable, "-W", "ignore",
                              "-c",
                              'import sys,os;'
                              'sys.stderr.write("pineapple");'
                              'sys.stdout.write(sys.stdin.read())'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate(b"banana")
        self.assertEqual(stdout, b"banana")
        if sys.executable.endswith('-dbg'):
            assert stderr.startswith(b'pineapple')
        else:
            self.assertEqual(stderr, b"pineapple")
    @greentest.skipIf(subprocess.mswindows,
                      "Windows does weird things here")
    @greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults")
    def test_communicate_universal(self):
        # Native string all the things. See https://github.com/gevent/gevent/issues/1039
        p = subprocess.Popen(
            [
                sys.executable,
                "-W", "ignore",
                "-c",
                'import sys,os;'
                'sys.stderr.write("pineapple\\r\\n\\xff\\xff\\xf2\\xf9\\r\\n");'
                'sys.stdout.write(sys.stdin.read())'
            ],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True
        )
        (stdout, stderr) = p.communicate('banana\r\n\xff\xff\xf2\xf9\r\n')
        self.assertIsInstance(stdout, str)
        self.assertIsInstance(stderr, str)
        self.assertEqual(stdout,
                         'banana\n\xff\xff\xf2\xf9\n')
        self.assertEqual(stderr,
                         'pineapple\n\xff\xff\xf2\xf9\n')
    @greentest.skipOnWindows("Windows IO is weird; this doesn't raise")
    def test_communicate_undecodable(self):
        # If the subprocess writes non-decodable data, `communicate` raises the
        # same UnicodeDecodeError that the stdlib does, instead of
        # printing it to the hub. This only applies to Python 3, because only it
        # will actually use text mode.
        # See https://github.com/gevent/gevent/issues/1510
        with subprocess.Popen(
                [
                    sys.executable,
                    '-W', 'ignore',
                    '-c',
                    "import os, sys; "
                    r'os.write(sys.stdout.fileno(), b"\xff")'
                ],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True, universal_newlines=True
        ) as p:
            with self.assertRaises(UnicodeDecodeError):
                p.communicate()
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_universal1(self):
        # Mixed \n, \r and \r\n output all normalizes to \n in
        # universal-newlines mode (modulo the quirks asserted below).
        with subprocess.Popen(
                [
                    sys.executable, "-c",
                    'import sys,os;' + SETBINARY +
                    'sys.stdout.write("line1\\n");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line2\\r");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line3\\r\\n");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line4\\r");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("\\nline5");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("\\nline6");'
                ],
                stdout=subprocess.PIPE,
                universal_newlines=1,
                bufsize=1
        ) as p:
            stdout = p.stdout.read()
            if python_universal_newlines:
                # Interpreter with universal newline support
                if not python_universal_newlines_broken:
                    self.assertEqual(stdout,
                                     "line1\nline2\nline3\nline4\nline5\nline6")
                else:
                    # Note the extra newline after line 3
                    self.assertEqual(stdout,
                                     'line1\nline2\nline3\n\nline4\n\nline5\nline6')
            else:
                # Interpreter without universal newline support
                self.assertEqual(stdout,
                                 "line1\nline2\rline3\r\nline4\r\nline5\nline6")
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_universal2(self):
        # As test_universal1, but with \r\n split across two writes.
        with subprocess.Popen(
                [
                    sys.executable, "-c",
                    'import sys,os;' + SETBINARY +
                    'sys.stdout.write("line1\\n");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line2\\r");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line3\\r\\n");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("line4\\r\\nline5");'
                    'sys.stdout.flush();'
                    'sys.stdout.write("\\nline6");'
                ],
                stdout=subprocess.PIPE,
                universal_newlines=1,
                bufsize=1
        ) as p:
            stdout = p.stdout.read()
            if python_universal_newlines:
                # Interpreter with universal newline support
                if not python_universal_newlines_broken:
                    self.assertEqual(stdout,
                                     "line1\nline2\nline3\nline4\nline5\nline6")
                else:
                    # Note the extra newline after line 3
                    self.assertEqual(stdout,
                                     'line1\nline2\nline3\n\nline4\n\nline5\nline6')
            else:
                # Interpreter without universal newline support
                self.assertEqual(stdout,
                                 "line1\nline2\rline3\r\nline4\r\nline5\nline6")
    @greentest.skipOnWindows("Uses 'grep' command")
    def test_nonblock_removed(self):
        # see issue #134
        r, w = os.pipe()
        stdin = subprocess.FileObject(r)
        with subprocess.Popen(['grep', 'text'], stdin=stdin) as p:
            try:
                # Closing one half of the pipe causes Python 3 on OS X to terminate the
                # child process; it exits with code 1 and the assert that p.poll is None
                # fails. Removing the close lets it pass under both Python 3 and 2.7.
                # If subprocess.Popen._remove_nonblock_flag is changed to a noop, then
                # the test fails (as expected) even with the close removed
                #os.close(w)
                time.sleep(0.1)
                self.assertEqual(p.poll(), None)
            finally:
                if p.poll() is None:
                    p.kill()
                stdin.close()
                os.close(w)
    def test_issue148(self):
        # Repeated failed spawns must consistently raise ENOENT.
        for _ in range(7):
            with self.assertRaises(OSError) as exc:
                with subprocess.Popen('this_name_must_not_exist'):
                    pass
            self.assertEqual(exc.exception.errno, errno.ENOENT)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_check_output_keyword_error(self):
        # check_output propagates the child's exit code in CalledProcessError.
        with self.assertRaises(subprocess.CalledProcessError) as exc: # pylint:disable=no-member
            subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)'])
        self.assertEqual(exc.exception.returncode, 44)
    @greentest.skipOnPy3("The default buffer changed in Py3")
    def test_popen_bufsize(self):
        # Test that subprocess has unbuffered output by default
        # (as the vanilla subprocess module)
        with subprocess.Popen(
                [sys.executable, '-u', '-c',
                 'import sys; sys.stdout.write(sys.stdin.readline())'],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE
        ) as p:
            p.stdin.write(b'foobar\n')
            r = p.stdout.readline()
        self.assertEqual(r, b'foobar\n')
    @greentest.ignores_leakcheck
    @greentest.skipOnWindows("Not sure why?")
    def test_subprocess_in_native_thread(self):
        # gevent.subprocess doesn't work from a background
        # native thread. See #688
        from gevent import monkey
        # must be a native thread; defend against monkey-patching
        ex = []
        Thread = monkey.get_original('threading', 'Thread')
        def fn():
            with self.assertRaises(TypeError) as exc:
                gevent.subprocess.Popen('echo 123', shell=True)
                ex.append(exc.exception)
        thread = Thread(target=fn)
        thread.start()
        thread.join()
        self.assertEqual(len(ex), 1)
        self.assertTrue(isinstance(ex[0], TypeError), ex)
        self.assertEqual(ex[0].args[0], 'child watchers are only available on the default loop')
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def __test_no_output(self, kwargs, kind):
        # Helper: a child that writes nothing must yield a stdout of type
        # *kind* (and a None stderr) from communicate().
        with subprocess.Popen(
                [sys.executable, '-c', 'pass'],
                stdout=subprocess.PIPE,
                **kwargs
        ) as proc:
            stdout, stderr = proc.communicate()
        self.assertIsInstance(stdout, kind)
        self.assertIsNone(stderr)
    @greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults; "
                                     "https://travis-ci.org/gevent/gevent/jobs/327357682")
    def test_universal_newlines_text_mode_no_output_is_always_str(self):
        # If the file is in universal_newlines mode, we should always get a str when
        # there is no output.
        # https://github.com/gevent/gevent/pull/939
        self.__test_no_output({'universal_newlines': True}, str)
    @greentest.skipIf(sys.version_info[:2] < (3, 6), "Need encoding argument")
    def test_encoded_text_mode_no_output_is_str(self):
        # If the file is in universal_newlines mode, we should always get a str when
        # there is no output.
        # https://github.com/gevent/gevent/pull/939
        self.__test_no_output({'encoding': 'utf-8'}, str)
    def test_default_mode_no_output_is_always_str(self):
        # If the file is in default mode, we should always get a str when
        # there is no output.
        # https://github.com/gevent/gevent/pull/939
        self.__test_no_output({}, bytes)
@greentest.skipOnWindows("Testing POSIX fd closing")
class TestFDs(unittest.TestCase):
    """Unit tests (with mocks, no real processes) for the private
    file-descriptor-closing helpers on gevent.subprocess.Popen."""
    @mock.patch('os.closerange')
    @mock.patch('gevent.subprocess._set_inheritable')
    @mock.patch('os.close')
    def test_close_fds_brute_force(self, close, set_inheritable, closerange):
        # fds in *keep* are made inheritable; everything else from 3 up to
        # MAXFD is closed, using closerange for the contiguous spans.
        keep = (
            4, 5,
            # Leave a hole
            # 6,
            7,
        )
        subprocess.Popen._close_fds_brute_force(keep, None)
        closerange.assert_has_calls([
            mock.call(3, 4),
            mock.call(8, subprocess.MAXFD),
        ])
        set_inheritable.assert_has_calls([
            mock.call(4, True),
            mock.call(5, True),
        ])
        close.assert_called_once_with(6)
    @mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
    @mock.patch('os.listdir')
    def test_close_fds_from_path_bad_values(self, listdir, brute_force):
        # Non-integer entries in the fd directory fall back to brute force.
        listdir.return_value = 'Not an Integer'
        subprocess.Popen._close_fds_from_path('path', [], 42)
        brute_force.assert_called_once_with([], 42)
    @mock.patch('os.listdir')
    @mock.patch('os.closerange')
    @mock.patch('gevent.subprocess._set_inheritable')
    @mock.patch('os.close')
    def test_close_fds_from_path(self, close, set_inheritable, closerange, listdir):
        # Only fds actually listed in the path are closed (except those in
        # *keep*, which are made inheritable); closerange is never used.
        keep = (
            4, 5,
            # Leave a hole
            # 6,
            7,
        )
        listdir.return_value = ['1', '6', '37']
        subprocess.Popen._close_fds_from_path('path', keep, 5)
        self.assertEqual([], closerange.mock_calls)
        set_inheritable.assert_has_calls([
            mock.call(4, True),
            mock.call(7, True),
        ])
        close.assert_has_calls([
            mock.call(6),
            mock.call(37),
        ])
    @mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
    @mock.patch('os.path.isdir')
    def test_close_fds_no_dir(self, isdir, brute_force):
        # Without /proc/self/fd or /dev/fd, fall back to brute force.
        isdir.return_value = False
        subprocess.Popen._close_fds([], 42)
        brute_force.assert_called_once_with([], 42)
        isdir.assert_has_calls([
            mock.call('/proc/self/fd'),
            mock.call('/dev/fd'),
        ])
    @mock.patch('gevent.subprocess.Popen._close_fds_from_path')
    @mock.patch('gevent.subprocess.Popen._close_fds_brute_force')
    @mock.patch('os.path.isdir')
    def test_close_fds_with_dir(self, isdir, brute_force, from_path):
        # With an fd directory available, the path-based strategy is used.
        isdir.return_value = True
        subprocess.Popen._close_fds([7], 42)
        self.assertEqual([], brute_force.mock_calls)
        from_path.assert_called_once_with('/proc/self/fd', [7], 42)
class RunFuncTestCase(greentest.TestCase):
    """Tests for the cooperative ``subprocess.run`` convenience function."""
    # Based on code from python 3.6+
    __timeout__ = greentest.LARGE_TIMEOUT
    @greentest.skipWithoutResource('subprocess')
    def run_python(self, code, **kwargs):
        """Run Python code in a subprocess using subprocess.run"""
        argv = [sys.executable, "-c", code]
        return subprocess.run(argv, **kwargs)
    def test_returncode(self):
        # call() function with sequence argument
        cp = self.run_python("import sys; sys.exit(47)")
        self.assertEqual(cp.returncode, 47)
        with self.assertRaises(subprocess.CalledProcessError): # pylint:disable=no-member
            cp.check_returncode()
    def test_check(self):
        # check=True turns a non-zero exit into CalledProcessError.
        with self.assertRaises(subprocess.CalledProcessError) as c: # pylint:disable=no-member
            self.run_python("import sys; sys.exit(47)", check=True)
        self.assertEqual(c.exception.returncode, 47)
    def test_check_zero(self):
        # check_returncode shouldn't raise when returncode is zero
        cp = self.run_python("import sys; sys.exit(0)", check=True)
        self.assertEqual(cp.returncode, 0)
    def test_timeout(self):
        # run() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires. If the child isn't
        # killed, this call will deadlock since subprocess.run waits for the
        # child.
        with self.assertRaises(subprocess.TimeoutExpired):
            self.run_python("while True: pass", timeout=0.0001)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_capture_stdout(self):
        # capture stdout with zero return code
        cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stdout)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_capture_stderr(self):
        # stderr can be captured independently of stdout.
        cp = self.run_python("import sys; sys.stderr.write('BDFL')",
                             stderr=subprocess.PIPE)
        self.assertIn(b'BDFL', cp.stderr)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_check_output_stdin_arg(self):
        # run() can be called with stdin set to a file
        with tempfile.TemporaryFile() as tf:
            tf.write(b'pear')
            tf.seek(0)
            cp = self.run_python(
                "import sys; sys.stdout.write(sys.stdin.read().upper())",
                stdin=tf, stdout=subprocess.PIPE)
            self.assertIn(b'PEAR', cp.stdout)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        cp = self.run_python(
            "import sys; sys.stdout.write(sys.stdin.read().upper())",
            input=b'pear', stdout=subprocess.PIPE)
        self.assertIn(b'PEAR', cp.stdout)
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_check_output_stdin_with_input_arg(self):
        # run() refuses to accept 'stdin' with 'input'
        with tempfile.TemporaryFile() as tf:
            tf.write(b'pear')
            tf.seek(0)
            with self.assertRaises(ValueError,
                                   msg="Expected ValueError when stdin and input args supplied.") as c:
                self.run_python("print('will not be run')",
                                stdin=tf, input=b'hare')
            self.assertIn('stdin', c.exception.args[0])
            self.assertIn('input', c.exception.args[0])
    @greentest.skipOnLibuvOnPyPyOnWin("hangs")
    def test_check_output_timeout(self):
        # On timeout, whatever was already captured is on the exception.
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            self.run_python(
                (
                    "import sys, time\n"
                    "sys.stdout.write('BDFL')\n"
                    "sys.stdout.flush()\n"
                    "time.sleep(3600)"
                ),
                # Some heavily loaded buildbots (sparc Debian 3.x) require
                # this much time to start and print.
                timeout=3, stdout=subprocess.PIPE)
        self.assertEqual(c.exception.output, b'BDFL')
        # output is aliased to stdout
        self.assertEqual(c.exception.stdout, b'BDFL')
    def test_run_kwargs(self):
        # Extra keyword arguments (here env=) are forwarded to Popen.
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        cp = self.run_python(('import sys, os;'
                              'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)
    # This test _might_ wind up a bit fragile on loaded build+test machines
    # as it depends on the timing with wide enough margins for normal situations
    # but does assert that it happened "soon enough" to believe the right thing
    # happened.
    @greentest.skipOnWindows("requires posix like 'sleep' shell command")
    def test_run_with_shell_timeout_and_capture_output(self):
        #Output capturing after a timeout mustn't hang forever on open filehandles
        with self.runs_in_given_time(0.1):
            with self.assertRaises(subprocess.TimeoutExpired):
                subprocess.run('sleep 3', shell=True, timeout=0.1,
                               capture_output=True) # New session unspecified.
if __name__ == '__main__':
    greentest.main()
| 20,208 | 38.013514 | 103 | py |
gevent | gevent-master/src/gevent/tests/test__threading.py | """
Tests specifically for the monkey-patched threading module.
"""
# Patch first, before anything else is imported: the ordering is load-bearing.
from gevent import monkey; monkey.patch_all() # pragma: testrunner-no-monkey-combine
import gevent.hub
# check that the locks initialized by 'threading' did not init the hub
assert gevent.hub._get_hub() is None, 'monkey.patch_all() should not init hub'
import gevent
import gevent.testing as greentest
import threading
def helper():
    # Register this greenlet as a (dummy) thread, then yield long enough
    # for the test to observe it in threading._active.
    threading.current_thread()
    gevent.sleep(0.2)
class TestCleanup(greentest.TestCase):
    """Greenlets registered as dummy threads must leave threading._active."""
    def _do_test(self, spawn):
        before = len(threading._active)
        g = spawn(helper)
        gevent.sleep(0.1)
        # While the helper sleeps, it is registered as an active thread.
        self.assertEqual(len(threading._active), before + 1)
        try:
            g.join()
        except AttributeError:
            while not g.dead:
                gevent.sleep()
            # Raw greenlet has no join(), uses a weakref to cleanup.
            # so the greenlet has to die. On CPython, it's enough to
            # simply delete our reference.
            del g
            # On PyPy, it might take a GC, but for some reason, even
            # running several GC's doesn't clean it up under 5.6.0.
            # So we skip the test.
            #import gc
            #gc.collect()
        self.assertEqual(len(threading._active), before)
    def test_cleanup_gevent(self):
        self._do_test(gevent.spawn)
    @greentest.skipOnPyPy("weakref is not cleaned up in a timely fashion")
    def test_cleanup_raw(self):
        self._do_test(gevent.spawn_raw)
class TestLockThread(greentest.TestCase):
    """Check that a failed non-blocking Lock.acquire still yields control."""

    def _spawn(self, func):
        """Run *func* concurrently in a (monkey-patched) thread."""
        worker = threading.Thread(target=func)
        worker.start()
        return worker

    def test_spin_lock_switches(self):
        # Regression test for https://github.com/gevent/gevent/issues/1464
        # pylint:disable=consider-using-with
        held = threading.Lock()
        held.acquire()
        started = []

        def spinner():
            started.append(True)
            acquired = held.acquire(False)
            while not acquired:
                acquired = held.acquire(False)

        spinner_thread = threading.Thread(target=spinner)
        # If a failed lock.acquire(False) never yielded to the hub,
        # starting the thread would hang here forever.
        spinner_thread.start()
        # The worker must have begun executing by now.
        self.assertEqual(started, [True])
        # Joining with a zero timeout returns without the thread finishing.
        spinner_thread.join(0)
        # Once the lock is released the spinner can grab it and finish.
        held.release()
        spinner_thread.join()
class TestLockGreenlet(TestLockThread):
    """Repeat the lock tests using raw greenlets instead of threads."""
    def _spawn(self, func):
        return gevent.spawn(func)
if __name__ == '__main__':
    greentest.main()
| 2,585 | 27.108696 | 84 | py |
gevent | gevent-master/src/gevent/tests/test__server_pywsgi.py | import socket
import gevent.testing as greentest
import gevent
from gevent import pywsgi
from gevent.tests import test__server
def application(environ, start_response):
    """Tiny WSGI app driving the server tests.

    ``/`` and ``/ping`` answer immediately with ``PONG``; ``/short`` and
    ``/long`` sleep (0.5s / 10s) before an empty 200 response; anything
    else gets a 404.
    """
    path = environ['PATH_INFO']
    if path in ('/', '/ping'):
        start_response("200 OK", [])
        return [b"PONG"]
    if path == '/short':
        gevent.sleep(0.5)
        start_response("200 OK", [])
        return []
    if path == '/long':
        gevent.sleep(10)
        start_response("200 OK", [])
        return []
    # Unknown path: deliberately odd reason phrase so it is easy to spot.
    start_response("404 pywsgi WTF?", [])
    return []
class SimpleWSGIServer(pywsgi.WSGIServer):
    """A WSGIServer pre-wired with the test ``application`` above."""
    application = staticmethod(application)
# Expected fragments of pywsgi's canned 500 response, converted to HTTP
# (CRLF) line endings.
internal_error_start = b'HTTP/1.1 500 Internal Server Error\n'.replace(b'\n', b'\r\n')
internal_error_end = b'\n\nInternal Server Error'.replace(b'\n', b'\r\n')
# The complete canned 503 response sent when the worker pool is full.
internal_error503 = b'''HTTP/1.1 503 Service Unavailable
Connection: close
Content-type: text/plain
Content-length: 31
Service Temporarily Unavailable'''.replace(b'\n', b'\r\n')
class Settings(test__server.Settings):
    """Settings that adapt the generic test__server suite to pywsgi servers."""
    ServerClass = pywsgi.WSGIServer
    ServerSubClass = SimpleWSGIServer
    restartable = False
    # NOTE: this attribute used to be assigned twice (True, then False);
    # only the final assignment ever took effect, so keep just the
    # effective value.
    close_socket_detected = False
    @staticmethod
    def assert500(inst):
        # A 500 response must carry pywsgi's canned error banner.
        with inst.makefile() as conn:
            conn.write(b'GET / HTTP/1.0\r\n\r\n')
            result = conn.read()
            inst.assertTrue(result.startswith(internal_error_start),
                            (result, internal_error_start))
            inst.assertTrue(result.endswith(internal_error_end),
                            (result, internal_error_end))
    @staticmethod
    def assert503(inst):
        # A 503 is the fixed "Service Temporarily Unavailable" response.
        with inst.makefile() as conn:
            conn.write(b'GET / HTTP/1.0\r\n\r\n')
            result = conn.read()
            inst.assertEqual(result, internal_error503)
    @staticmethod
    def assertPoolFull(inst):
        # When the pool is exhausted the request simply times out.
        with inst.assertRaises(socket.timeout):
            inst.assertRequestSucceeded()
    @staticmethod
    def assertAcceptedConnectionError(inst):
        # The connection is accepted but closed without sending anything.
        with inst.makefile() as conn:
            result = conn.read()
            inst.assertFalse(result)
    @staticmethod
    def fill_default_server_args(inst, kwargs):
        kwargs = test__server.Settings.fill_default_server_args(inst, kwargs)
        # Silence the request log during the tests.
        kwargs.setdefault('log', pywsgi._NoopLog())
        return kwargs
# Re-run the generic server test suite from test__server against the
# pywsgi-based servers by swapping in the Settings defined above.
class TestCase(test__server.TestCase):
    Settings = Settings
class TestDefaultSpawn(test__server.TestDefaultSpawn):
    Settings = Settings
class TestSSLSocketNotAllowed(test__server.TestSSLSocketNotAllowed):
    Settings = Settings
class TestRawSpawn(test__server.TestRawSpawn): # pylint:disable=too-many-ancestors
    Settings = Settings
class TestSSLGetCertificate(test__server.TestSSLGetCertificate):
    Settings = Settings
class TestPoolSpawn(test__server.TestPoolSpawn): # pylint:disable=too-many-ancestors
    Settings = Settings
if __name__ == '__main__':
    greentest.main()
| 3,074 | 27.738318 | 86 | py |
gevent | gevent-master/src/gevent/tests/test__GreenletExit.py | from gevent import GreenletExit
assert issubclass(GreenletExit, BaseException)
assert not issubclass(GreenletExit, Exception)
| 127 | 24.6 | 46 | py |
gevent | gevent-master/src/gevent/tests/test__hub_join.py | from contextlib import contextmanager
import unittest
import gevent
from gevent.testing import ignores_leakcheck
class TestJoin(unittest.TestCase):
    """Joining and destroying hubs must not leak hubs or greenlets."""
    def test_join_many_times(self):
        # hub.join() guarantees that loop has exited cleanly
        res = gevent.get_hub().join()
        self.assertTrue(res)
        self.assertFalse(gevent.get_hub().dead)
        res = gevent.get_hub().join()
        self.assertTrue(res)
        # but it is still possible to use gevent afterwards
        gevent.sleep(0.01)
        res = gevent.get_hub().join()
        self.assertTrue(res)
    @staticmethod
    def __clean():
        # Force a thorough garbage collection; multiple passes handle
        # reference cycles that take more than one collection (and PyPy).
        import gc
        for _ in range(2):
            while gc.collect():
                pass
    @contextmanager
    def assert_no_greenlet_growth(self):
        """Fail if the body of the ``with`` block adds reachable greenlets."""
        from gevent._greenlet_primitives import get_reachable_greenlets
        clean = self.__clean
        clean()
        count_before = len(get_reachable_greenlets())
        yield
        count_after = len(get_reachable_greenlets())
        if count_after > count_before:
            # We could be off by exactly 1. Not entirely clear where.
            # But it only happens the first time.
            count_after -= 1
        # If we were run in multiple process, our count could actually have
        # gone down due to the GC's we did — so allow <=, not strict
        # equality. (The original used assertEqual, which contradicted the
        # comment above and could fail spuriously when the count shrank.)
        self.assertLessEqual(count_after, count_before)
    @ignores_leakcheck
    def test_join_in_new_thread_doesnt_leak_hub_or_greenlet(self):
        # https://github.com/gevent/gevent/issues/1601
        import threading
        clean = self.__clean
        def thread_main():
            g = gevent.Greenlet(run=lambda: 0)
            g.start()
            g.join()
            hub = gevent.get_hub()
            hub.join()
            hub.destroy(destroy_loop=True)
            del hub
        def tester(main):
            t = threading.Thread(target=main)
            t.start()
            t.join()
            clean()
        with self.assert_no_greenlet_growth():
            for _ in range(10):
                tester(thread_main)
        del tester
        del thread_main
    @ignores_leakcheck
    def test_destroy_in_main_thread_from_new_thread(self):
        # https://github.com/gevent/gevent/issues/1631
        import threading
        clean = self.__clean
        class Thread(threading.Thread):
            hub = None
            def run(self):
                g = gevent.Greenlet(run=lambda: 0)
                g.start()
                g.join()
                del g
                hub = gevent.get_hub()
                hub.join()
                self.hub = hub
        def tester(Thread, clean):
            t = Thread()
            t.start()
            t.join()
            # Destroy the worker's hub from this (the main) thread.
            t.hub.destroy(destroy_loop=True)
            t.hub = None
            del t
            clean()
        # Unfortunately, this WILL leak greenlets,
        # at least on CPython. The frames of the dead threads
        # are referenced by the hub in some sort of cycle, and
        # greenlets don't participate in GC.
        for _ in range(10):
            tester(Thread, clean)
        del tester
        del Thread
if __name__ == '__main__':
    unittest.main()
| 3,217 | 26.271186 | 75 | py |
gevent | gevent-master/src/gevent/tests/test__issue230.py | import gevent.monkey
gevent.monkey.patch_all()
import socket
import multiprocessing
from gevent import testing as greentest
# Make sure that using the resolver in a forked process
# doesn't hang forever.
def block():
socket.getaddrinfo('localhost', 8001)
class Test(greentest.TestCase):
    """Regression test: the resolver must keep working after a fork."""

    def test(self):
        # Use the resolver once in the parent so its state exists pre-fork.
        socket.getaddrinfo('localhost', 8001)
        # A forked child (see block()) must be able to resolve too,
        # instead of hanging forever.
        child = multiprocessing.Process(target=block)
        child.start()
        child.join()

if __name__ == '__main__':
    greentest.main()
| 500 | 16.892857 | 55 | py |
gevent | gevent-master/src/gevent/tests/test__example_echoserver.py | from gevent.socket import create_connection, timeout
import gevent.testing as greentest
import gevent
from gevent.testing import util
from gevent.testing import params
class Test(util.TestServer):
    """Start examples/echoserver.py and exercise it with two concurrent clients."""
    example = 'echoserver.py'
    def _run_all_tests(self):
        def test_client(message):
            # makefile()'s buffering argument was renamed between Py2 and Py3.
            if greentest.PY3:
                kwargs = {'buffering': 1}
            else:
                kwargs = {'bufsize': 1}
            kwargs['mode'] = 'rb'
            conn = create_connection((params.DEFAULT_LOCAL_HOST_ADDR, 16000))
            conn.settimeout(greentest.DEFAULT_XPC_SOCKET_TIMEOUT)
            rfile = conn.makefile(**kwargs)
            # The server greets each client before echoing.
            welcome = rfile.readline()
            self.assertIn(b'Welcome', welcome)
            conn.sendall(message)
            received = rfile.read(len(message))
            self.assertEqual(received, message)
            # Nothing further should arrive; recv must hit the timeout.
            self.assertRaises(timeout, conn.recv, 1)
            rfile.close()
            conn.close()
        client1 = gevent.spawn(test_client, b'hello\r\n')
        client2 = gevent.spawn(test_client, b'world\r\n')
        gevent.joinall([client1, client2], raise_error=True)
if __name__ == '__main__':
    greentest.main()
| 1,198 | 28.243902 | 77 | py |
gevent | gevent-master/src/gevent/tests/test__threading_monkey_in_thread.py | # We can monkey-patch in a thread, but things don't work as expected.
from __future__ import print_function
import threading
from gevent import monkey
import gevent.testing as greentest
class Test(greentest.TestCase):
    """Monkey-patching from a non-main thread works, but with caveats."""
    @greentest.ignores_leakcheck # can't be run multiple times
    def test_patch_in_thread(self):
        all_warnings = []
        try:
            get_ident = threading.get_ident
        except AttributeError:
            # Python 2 spelling.
            get_ident = threading._get_ident
        def process_warnings(warnings):
            all_warnings.extend(warnings)
        # Capture the warnings monkey would otherwise emit.
        monkey._process_warnings = process_warnings
        current = threading.current_thread()
        current_id = get_ident()
        def target():
            tcurrent = threading.current_thread()
            monkey.patch_all() # pragma: testrunner-no-monkey-combine
            tcurrent2 = threading.current_thread()
            self.assertIsNot(tcurrent, current)
            # We get a dummy thread now
            self.assertIsNot(tcurrent, tcurrent2)
        thread = threading.Thread(target=target)
        thread.start()
        try:
            thread.join()
        except: # pylint:disable=bare-except
            # XXX: This can raise LoopExit in some cases.
            greentest.reraiseFlakyTestRaceCondition()
        self.assertNotIsInstance(current, threading._DummyThread)
        self.assertIsInstance(current, monkey.get_original('threading', 'Thread'))
        # We generated some warnings
        if greentest.PY3:
            self.assertEqual(
                all_warnings,
                ['Monkey-patching outside the main native thread. Some APIs will not be '
                 'available. Expect a KeyError to be printed at shutdown.',
                 'Monkey-patching not on the main thread; threading.main_thread().join() '
                 'will hang from a greenlet'])
        else:
            self.assertEqual(
                all_warnings,
                ['Monkey-patching outside the main native thread. Some APIs will not be '
                 'available. Expect a KeyError to be printed at shutdown.'])
        # Manual clean up so we don't get a KeyError
        del threading._active[current_id]
        threading._active[(getattr(threading, 'get_ident', None) or threading._get_ident)()] = current
if __name__ == '__main__':
    greentest.main()
| 2,366 | 33.304348 | 102 | py |
gevent | gevent-master/src/gevent/tests/test__ares_timeout.py | from __future__ import print_function
import unittest
import gevent
try:
    from gevent.resolver.ares import Resolver
except ImportError:
    # The c-ares resolver is an optional extension; when it isn't built,
    # define the name so the skip condition below can test for it.
    # (The previous ``as ex`` binding was never used.)
    Resolver = None
from gevent import socket
import gevent.testing as greentest
from gevent.testing.sockets import udp_listener
@unittest.skipIf(
    Resolver is None,
    "Needs ares resolver"
)
class TestTimeout(greentest.TestCase):
    """A c-ares resolver pointed at a black-hole DNS server must time out."""
    __timeout__ = 30
    def test(self):
        # The listener receives the DNS queries but never answers them.
        listener = self._close_on_teardown(udp_listener())
        address = listener.getsockname()
        def reader():
            while True:
                listener.recvfrom(10000)
        greader = gevent.spawn(reader)
        self._close_on_teardown(greader.kill)
        # Tiny timeout and a single try so the failure is fast.
        r = Resolver(servers=[address[0]], timeout=0.001, tries=1,
                     udp_port=address[-1])
        self._close_on_teardown(r)
        with self.assertRaisesRegex(socket.herror, "ARES_ETIMEOUT"):
            r.gethostbyname('www.google.com')
if __name__ == '__main__':
    greentest.main()
| 1,015 | 21.577778 | 68 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_ssl_warning3.py | import unittest
import warnings
import sys
# All supported python versions now provide SSLContext.
# We subclass without importing by name. Compare with
# warning2.py
import ssl
class MySubclass(ssl.SSLContext):
    # Defined *before* monkey-patching so patch_all() can detect and warn
    # about existing user subclasses of classes it replaces.
    pass
# This file should only have this one test in it
# because we have to be careful about our imports
# and because we need to be careful about our patching.
class Test(unittest.TestCase):
    """Verify the MonkeyPatchWarning mentions SSLContext subclasses."""
    @unittest.skipIf(sys.version_info[:2] < (3, 6),
                     "Only on Python 3.6+")
    def test_ssl_subclass_and_module_reference(self):
        from gevent import monkey
        # Patching must not have happened yet.
        self.assertFalse(monkey.saved)
        with warnings.catch_warnings(record=True) as issued_warnings:
            warnings.simplefilter('always')
            # The second patch_all() is what produces the warning.
            monkey.patch_all()
            monkey.patch_all()
        issued_warnings = [x for x in issued_warnings
                           if isinstance(x.message, monkey.MonkeyPatchWarning)]
        self.assertEqual(1, len(issued_warnings))
        message = str(issued_warnings[0].message)
        self.assertNotIn("Modules that had direct imports", message)
        self.assertIn("Subclasses (NOT patched)", message)
        # the gevent subclasses should not be in here.
        self.assertNotIn('gevent.', message)
if __name__ == '__main__':
    unittest.main()
| 1,330 | 26.729167 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__issue1686.py | # -*- coding: utf-8 -*-
"""
Tests for https://github.com/gevent/gevent/issues/1686
which is about destroying a hub when there are active
callbacks or IO in operation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
from gevent import testing as greentest
# Don't let the testrunner put us in a process with other
# tests; we are strict on the state of the hub and greenlets.
# pragma: testrunner-no-combine
@greentest.skipOnWindows("Uses os.fork")
class TestDestroyInChildWithActiveSpawn(unittest.TestCase):
    """Destroying the hub in a forked child must cancel pending callbacks."""
    def test(self): # pylint:disable=too-many-locals
        # If this test is broken, there are a few failure modes.
        # - In the original examples, the parent process just hangs, because the
        # child has raced ahead, spawned the greenlet and read the data. When the
        # greenlet goes to read in the parent, it blocks, and the hub and loop
        # wait for it.
        # - Here, our child detects the greenlet ran when it shouldn't and
        # raises an error, which translates to a non-zero exit status,
        # which the parent checks for and fails by raising an exception before
        # returning control to the hub. We can replicate the hang by removing the
        # assertion in the child.
        from time import sleep as hang
        from gevent import get_hub
        from gevent import spawn
        from gevent.socket import wait_read
        from gevent.os import nb_read
        from gevent.os import nb_write
        from gevent.os import make_nonblocking
        from gevent.os import fork
        from gevent.os import waitpid
        pipe_read_fd, pipe_write_fd = os.pipe()
        make_nonblocking(pipe_read_fd)
        make_nonblocking(pipe_write_fd)
        run = []
        def reader():
            run.append(1)
            return nb_read(pipe_read_fd, 4096)
        # Put data in the pipe
        DATA = b'test'
        nb_write(pipe_write_fd, DATA)
        # Make sure we're ready to read it
        wait_read(pipe_read_fd)
        # Schedule a greenlet to start
        reader = spawn(reader)
        hub = get_hub()
        pid = fork()
        if pid == 0:
            # Child destroys the hub. The reader should not have run.
            hub.destroy(destroy_loop=True)
            self.assertFalse(run)
            os._exit(0)
            return # pylint:disable=unreachable
        # The parent.
        # Briefly prevent us from spinning our event loop.
        hang(0.5)
        wait_child_result = waitpid(pid, 0)
        self.assertEqual(wait_child_result, (pid, 0))
        # We should get the data; the greenlet only runs in the parent.
        data = reader.get()
        self.assertEqual(run, [1])
        self.assertEqual(data, DATA)
| 2,878 | 32.476744 | 83 | py |
gevent | gevent-master/src/gevent/tests/test__pywsgi.py | # Copyright (c) 2007, Linden Research, Inc.
# Copyright (c) 2009-2010 gevent contributors
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pylint: disable=too-many-lines,unused-argument,too-many-ancestors
from __future__ import print_function
from gevent import monkey
monkey.patch_all()
from contextlib import contextmanager
try:
from urllib.parse import parse_qs
except ImportError:
# Python 2
from urlparse import parse_qs
import os
import sys
try:
# On Python 2, we want the C-optimized version if
# available; it has different corner-case behaviour than
# the Python implementation, and it used by socket.makefile
# by default.
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import weakref
import unittest
from wsgiref.validate import validator
import gevent.testing as greentest
import gevent
from gevent.testing import PY3, PYPY
from gevent.testing.exception import ExpectedException
from gevent import socket
from gevent import pywsgi
from gevent.pywsgi import Input
class ExpectedAssertionError(ExpectedException, AssertionError):
    """An expected assertion error"""
# Canonical header name used when reading/checking response bodies.
CONTENT_LENGTH = 'Content-Length'
CONN_ABORTED_ERRORS = greentest.CONN_ABORTED_ERRORS
# Default reason phrases expected for these status codes (see Response.read).
REASONS = {
    200: 'OK',
    500: 'Internal Server Error'
}
class ConnectionClosed(Exception):
    # Raised by read_headers() when the peer closes before sending anything.
    pass
def read_headers(fd):
    """Read an HTTP status line and header block from file-like *fd*.

    Returns ``(status_line, headers)``: the raw first line (still ending
    in CRLF, decoded as latin-1) and a dict of header name -> value.
    Raises :class:`ConnectionClosed` if the peer closed before sending a
    status line; asserts no header is sent twice (case-insensitively).
    """
    status_line = fd.readline()
    if not status_line:
        raise ConnectionClosed
    status_line = status_line.decode('latin-1')
    headers = {}
    while True:
        raw = fd.readline().strip()
        if not raw:
            # Blank line: end of the header block.
            break
        raw = raw.decode('latin-1')
        try:
            name, value = raw.split(': ', 1)
        except:
            print('Failed to split: %r' % (raw, ))
            raise
        assert name.lower() not in {x.lower() for x in headers}, 'Header %r:%r sent more than once: %r' % (name, value, headers)
        headers[name] = value
    return status_line, headers
def iread_chunks(fd):
    """Yield the payload of each chunk of a chunked-encoded body from *fd*.

    Stops after consuming the zero-length terminating chunk (and its
    trailing CRLF); asserts every chunk is properly CRLF-terminated.
    """
    while True:
        size_line = fd.readline()
        chunk_len = int(size_line.strip(), 16)
        if chunk_len == 0:
            terminator = fd.read(2)
            assert terminator == b'\r\n', repr(terminator)
            return
        yield fd.read(chunk_len)
        trailer = fd.read(2)
        assert trailer == b'\r\n', repr(trailer)
class Response(object):
    """A parsed HTTP response with assertion helpers for the tests.

    The status line is parsed eagerly in the constructor; the body and
    chunk list are filled in by :meth:`read`.
    """
    def __init__(self, status_line, headers):
        self.status_line = status_line
        self.headers = headers
        self.body = None
        self.chunks = False
        try:
            # status_line still ends with CRLF; strip it before splitting.
            version, code, self.reason = status_line[:-2].split(' ', 2)
            self.code = int(code)
            HTTP, self.version = version.split('/')
            assert HTTP == 'HTTP', repr(HTTP)
            assert self.version in ('1.0', '1.1'), repr(self.version)
        except Exception:
            print('Error: %r' % status_line)
            raise
    def __iter__(self):
        yield self.status_line
        yield self.headers
        yield self.body
    def __str__(self):
        args = (self.__class__.__name__, self.status_line, self.headers, self.body, self.chunks)
        return '<%s status_line=%r headers=%r body=%r chunks=%r>' % args
    def assertCode(self, code):
        # *code* may be one status or any container of acceptable statuses.
        if hasattr(code, '__contains__'):
            assert self.code in code, 'Unexpected code: %r (expected %r)\n%s' % (self.code, code, self)
        else:
            assert self.code == code, 'Unexpected code: %r (expected %r)\n%s' % (self.code, code, self)
    def assertReason(self, reason):
        assert self.reason == reason, 'Unexpected reason: %r (expected %r)\n%s' % (self.reason, reason, self)
    def assertVersion(self, version):
        assert self.version == version, 'Unexpected version: %r (expected %r)\n%s' % (self.version, version, self)
    def assertHeader(self, header, value):
        # Pass value=False to assert that the header is absent.
        real_value = self.headers.get(header, False)
        assert real_value == value, \
            'Unexpected header %r: %r (expected %r)\n%s' % (header, real_value, value, self)
    def assertBody(self, body):
        if isinstance(body, str) and PY3:
            body = body.encode("ascii")
        assert self.body == body, 'Unexpected body: %r (expected %r)\n%s' % (self.body, body, self)
    @classmethod
    def read(cls, fd, code=200, reason='default', version='1.1',
             body=None, chunks=None, content_length=None):
        """Read a full response from *fd* and assert the given expectations.

        Pass ``None`` for *code*, *reason* or *version* to skip that check;
        ``reason='default'`` looks the phrase up in REASONS by status code.
        """
        # pylint:disable=too-many-branches
        _status_line, headers = read_headers(fd)
        self = cls(_status_line, headers)
        if code is not None:
            self.assertCode(code)
        if reason == 'default':
            reason = REASONS.get(code)
        if reason is not None:
            self.assertReason(reason)
        if version is not None:
            self.assertVersion(version)
        if self.code == 100:
            # 100 Continue has no body; the real response follows it.
            return self
        if content_length is not None:
            if isinstance(content_length, int):
                content_length = str(content_length)
            self.assertHeader('Content-Length', content_length)
        if 'chunked' in headers.get('Transfer-Encoding', ''):
            if CONTENT_LENGTH in headers:
                print("WARNING: server used chunked transfer-encoding despite having Content-Length header (libevent 1.x's bug)")
            self.chunks = list(iread_chunks(fd))
            self.body = b''.join(self.chunks)
        elif CONTENT_LENGTH in headers:
            num = int(headers[CONTENT_LENGTH])
            self.body = fd.read(num)
        else:
            self.body = fd.read()
        if body is not None:
            self.assertBody(body)
        if chunks is not None:
            assert chunks == self.chunks, (chunks, self.chunks)
        return self
# Convenient module-level alias used throughout the tests below.
read_http = Response.read
class TestCase(greentest.TestCase):
    """Base class: starts a WSGIServer around ``self.application`` and
    provides raw-socket client helpers (connect/makefile/urlopen)."""
    server = None
    validator = staticmethod(validator)
    application = None
    # Bind to default address, which should give us ipv6 (when available)
    # and ipv4. (see self.connect())
    listen_addr = greentest.DEFAULT_BIND_ADDR
    # connect on ipv4, even though we bound to ipv6 too
    # to prove ipv4 works...except on Windows, it apparently doesn't.
    # So use the hostname.
    connect_addr = greentest.DEFAULT_LOCAL_HOST_ADDR
    class handler_class(pywsgi.WSGIHandler):
        # Application failures raise our "expected" assertion error so the
        # test harness doesn't treat them as unexpected crashes.
        ApplicationError = ExpectedAssertionError
    def init_logger(self):
        # Keep the server quiet during tests.
        import logging
        logger = logging.getLogger('gevent.tests.pywsgi')
        logger.setLevel(logging.CRITICAL)
        return logger
    def init_server(self, application):
        # Port 0: let the OS pick a free port; read it back in setUp().
        logger = self.logger = self.init_logger()
        self.server = pywsgi.WSGIServer(
            (self.listen_addr, 0),
            application,
            log=logger, error_log=logger,
            handler_class=self.handler_class,
        )
    def setUp(self):
        application = self.application
        if self.validator is not None:
            application = self.validator(application)
        self.init_server(application)
        self.server.start()
        # NOTE(review): this busy-waits without yielding; presumably
        # start() has already published the port — confirm.
        while not self.server.server_port:
            print("Waiting on server port")
        self.port = self.server.server_port
        assert self.port
        greentest.TestCase.setUp(self)
    if greentest.CPYTHON and greentest.PY2:
        # Keeping raw sockets alive keeps SSL sockets
        # from being closed too, at least on CPython2, so we
        # need to use weakrefs.
        # In contrast, on PyPy, *only* having a weakref lets the
        # original socket die and leak
        def _close_on_teardown(self, resource):
            self.close_on_teardown.append(weakref.ref(resource))
            return resource
        def _tearDownCloseOnTearDown(self):
            # Dereference the weakrefs that are still alive before the
            # superclass closes them.
            self.close_on_teardown = [r() for r in self.close_on_teardown if r() is not None]
            super(TestCase, self)._tearDownCloseOnTearDown()
    def tearDown(self):
        greentest.TestCase.tearDown(self)
        if self.server is not None:
            with gevent.Timeout.start_new(0.5):
                self.server.stop()
        self.server = None
        if greentest.PYPY:
            import gc
            gc.collect()
            gc.collect()
    @contextmanager
    def connect(self):
        # Yields a connected client socket (wrapped in a small proxy on
        # Python 3 so makefile() accepts the Python 2 'bufsize' argument
        # and transparently encodes str writes).
        conn = socket.create_connection((self.connect_addr, self.port))
        result = conn
        if PY3:
            conn_makefile = conn.makefile
            def makefile(*args, **kwargs):
                if 'bufsize' in kwargs:
                    kwargs['buffering'] = kwargs.pop('bufsize')
                if 'mode' in kwargs:
                    return conn_makefile(*args, **kwargs)
                # Under Python3, you can't read and write to the same
                # makefile() opened in (default) r, and r+ is not allowed
                kwargs['mode'] = 'rwb'
                rconn = conn_makefile(*args, **kwargs)
                _rconn_write = rconn.write
                def write(data):
                    if isinstance(data, str):
                        data = data.encode('ascii')
                    return _rconn_write(data)
                rconn.write = write
                self._close_on_teardown(rconn)
                return rconn
            class proxy(object):
                def __getattribute__(self, name):
                    if name == 'makefile':
                        return makefile
                    return getattr(conn, name)
            result = proxy()
        try:
            yield result
        finally:
            result.close()
    @contextmanager
    def makefile(self):
        # A line-buffered read/write file over a fresh connection.
        with self.connect() as sock:
            try:
                result = sock.makefile(bufsize=1) # pylint:disable=unexpected-keyword-arg
                yield result
            finally:
                result.close()
    def urlopen(self, *args, **kwargs):
        """Issue a simple GET / and return the parsed Response."""
        with self.connect() as sock:
            with sock.makefile(bufsize=1) as fd: # pylint:disable=unexpected-keyword-arg
                fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
                return read_http(fd, *args, **kwargs)
    HTTP_CLIENT_VERSION = '1.1'
    DEFAULT_EXTRA_CLIENT_HEADERS = {}
    def format_request(self, method='GET', path='/', **headers):
        """Build a raw HTTP request string with the given extra headers."""
        def_headers = self.DEFAULT_EXTRA_CLIENT_HEADERS.copy()
        def_headers.update(headers)
        headers = def_headers
        headers = '\r\n'.join('%s: %s' % item for item in headers.items())
        headers = headers + '\r\n' if headers else headers
        result = (
            '%(method)s %(path)s HTTP/%(http_ver)s\r\n'
            'Host: localhost\r\n'
            '%(headers)s'
            '\r\n'
        )
        result = result % dict(
            method=method,
            path=path,
            http_ver=self.HTTP_CLIENT_VERSION,
            headers=headers
        )
        return result
class CommonTestMixin(object):
    """Request/keep-alive/pipelining tests shared by several server setups."""
    PIPELINE_NOT_SUPPORTED_EXS = ()
    # Whether the server is expected to send Connection: close.
    EXPECT_CLOSE = False
    EXPECT_KEEPALIVE = False
    def test_basic(self):
        with self.makefile() as fd:
            fd.write(self.format_request())
            response = read_http(fd, body='hello world')
            if response.headers.get('Connection') == 'close':
                self.assertTrue(self.EXPECT_CLOSE, "Server closed connection, not expecting that")
                return response, None
            self.assertFalse(self.EXPECT_CLOSE)
            if self.EXPECT_KEEPALIVE:
                response.assertHeader('Connection', 'keep-alive')
            # The connection stays usable for further requests.
            fd.write(self.format_request(path='/notexist'))
            dne_response = read_http(fd, code=404, reason='Not Found', body='not found')
            fd.write(self.format_request())
            response = read_http(fd, body='hello world')
            return response, dne_response
    def test_pipeline(self):
        # Send two requests back-to-back; servers without pipelining
        # support may legitimately drop the second one.
        exception = AssertionError('HTTP pipelining not supported; the second request is thrown away')
        with self.makefile() as fd:
            fd.write(self.format_request() + self.format_request(path='/notexist'))
            read_http(fd, body='hello world')
            try:
                timeout = gevent.Timeout.start_new(0.5, exception=exception)
                try:
                    read_http(fd, code=404, reason='Not Found', body='not found')
                finally:
                    timeout.close()
            except self.PIPELINE_NOT_SUPPORTED_EXS:
                pass
            except AssertionError as ex:
                # Only swallow the timeout we armed above.
                if ex is not exception:
                    raise
    def test_connection_close(self):
        with self.makefile() as fd:
            fd.write(self.format_request())
            response = read_http(fd)
            if response.headers.get('Connection') == 'close':
                self.assertTrue(self.EXPECT_CLOSE, "Server closed connection, not expecting that")
                return
            self.assertFalse(self.EXPECT_CLOSE)
            if self.EXPECT_KEEPALIVE:
                response.assertHeader('Connection', 'keep-alive')
            # Explicitly ask the server to close after this request.
            fd.write(self.format_request(Connection='close'))
            read_http(fd)
            fd.write(self.format_request())
            # This may either raise, or it may return an empty response,
            # depend on timing and the Python version.
            try:
                result = fd.readline()
            except socket.error as ex:
                if ex.args[0] not in CONN_ABORTED_ERRORS:
                    raise
            else:
                self.assertFalse(
                    result,
                    'The remote side is expected to close the connection, but it sent %r'
                    % (result,))
    @unittest.skip("Not sure")
    def test_006_reject_long_urls(self):
        path_parts = []
        for _ in range(3000):
            path_parts.append('path')
        path = '/'.join(path_parts)
        with self.makefile() as fd:
            request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path
            fd.write(request)
            result = fd.readline()
            status = result.split(' ')[1]
            self.assertEqual(status, '414')
class TestNoChunks(CommonTestMixin, TestCase):
    # when returning a list of strings a shortcut is employed by the server:
    # it calculates the content-length and joins all the chunks before sending
    validator = None
    # The WSGI environ of the most recent request, captured for inspection.
    last_environ = None

    def _check_environ(self, input_terminated=True):
        # The server should mark the input stream as self-terminating
        # except for upgraded (e.g. websocket) connections.
        if input_terminated:
            self.assertTrue(self.last_environ.get('wsgi.input_terminated'))
        else:
            self.assertFalse(self.last_environ['wsgi.input_terminated'])

    def application(self, env, start_response):
        self.last_environ = env
        path = env['PATH_INFO']
        if path == '/':
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [b'hello ', b'world']
        if path == '/websocket':
            write = start_response('101 Switching Protocols',
                                   [('Content-Type', 'text/plain'),
                                    # Con:close is to make our simple client
                                    # happy; otherwise it wants to read data from the
                                    # body that's being kept open.
                                    ('Connection', 'close')])
            write(b'')  # Trigger finalizing the headers now.
            return [b'upgrading to', b'websocket']
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return [b'not ', b'found']

    def test_basic(self):
        # On top of the mixin's checks: the response must NOT be chunked,
        # and the joined-list Content-Length must be correct.
        response, dne_response = super(TestNoChunks, self).test_basic()
        self._check_environ()
        self.assertFalse(response.chunks)
        response.assertHeader('Content-Length', '11')
        if dne_response is not None:
            self.assertFalse(dne_response.chunks)
            dne_response.assertHeader('Content-Length', '9')

    def test_dne(self):
        # A 404 from the list-of-strings shortcut also gets a Content-Length.
        with self.makefile() as fd:
            fd.write(self.format_request(path='/notexist'))
            response = read_http(fd, code=404, reason='Not Found', body='not found')
        self.assertFalse(response.chunks)
        self._check_environ()
        response.assertHeader('Content-Length', '9')
class TestConnectionUpgrades(TestNoChunks):
    """101-switching responses: after an upgrade request the server must not
    chunk the body and must not mark ``wsgi.input`` as terminated."""

    def test_connection_upgrade(self):
        # Upgrade signalled through the Connection header.
        with self.makefile() as fd:
            fd.write(self.format_request(path='/websocket', Connection='upgrade'))
            response = read_http(fd, code=101)

        self._check_environ(input_terminated=False)
        self.assertFalse(response.chunks)

    def test_upgrade_websocket(self):
        # Upgrade signalled through the Upgrade header.
        with self.makefile() as fd:
            fd.write(self.format_request(path='/websocket', Upgrade='websocket'))
            response = read_http(fd, code=101)

        self._check_environ(input_terminated=False)
        self.assertFalse(response.chunks)
class TestNoChunks10(TestNoChunks):
    """Same as TestNoChunks but speaking HTTP/1.0, where the server closes
    the connection after each response and cannot pipeline."""
    HTTP_CLIENT_VERSION = '1.0'
    PIPELINE_NOT_SUPPORTED_EXS = (ConnectionClosed,)
    EXPECT_CLOSE = True
class TestNoChunks10KeepAlive(TestNoChunks10):
    """HTTP/1.0 with an explicit 'Connection: keep-alive' from the client:
    the server should honor it and keep the connection open."""
    DEFAULT_EXTRA_CLIENT_HEADERS = {
        'Connection': 'keep-alive',
    }
    EXPECT_CLOSE = False
    EXPECT_KEEPALIVE = True
class TestExplicitContentLength(TestNoChunks): # pylint:disable=too-many-ancestors
    # when returning a list of strings a shortcut is employed by the
    # server - it calculates the content-length.
    # Here the application also supplies an explicit Content-Length header.

    def application(self, env, start_response):
        self.last_environ = env
        self.assertTrue(env.get('wsgi.input_terminated'))
        path = env['PATH_INFO']
        if path == '/':
            start_response('200 OK', [('Content-Type', 'text/plain'), ('Content-Length', '11')])
            return [b'hello ', b'world']

        start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', '9')])
        return [b'not ', b'found']
class TestYield(CommonTestMixin, TestCase):
    """The application is a generator, so the server cannot pre-compute a
    Content-Length and must stream the body."""

    @staticmethod
    def application(env, start_response):
        path = env['PATH_INFO']
        if path == '/':
            start_response('200 OK', [('Content-Type', 'text/plain')])
            yield b"hello world"
        else:
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            yield b"not found"
class TestBytearray(CommonTestMixin, TestCase):
    """The application returns bytearray objects rather than bytes; the
    server must accept them as body chunks."""

    # The wsgiref validator rejects bytearray, so it must be disabled here.
    validator = None

    @staticmethod
    def application(env, start_response):
        path = env['PATH_INFO']
        if path == '/':
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [bytearray(b"hello "), bytearray(b"world")]
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return [bytearray(b"not found")]
class TestMultiLineHeader(TestCase):
    """A header value folded across multiple lines (obsolete line folding)
    must be reassembled into a single CONTENT_TYPE value."""

    @staticmethod
    def application(env, start_response):
        # The folded continuation line must have been joined into the value.
        assert "test.submit" in env["CONTENT_TYPE"]
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b"ok"]

    def test_multiline_116(self):
        """issue #116"""
        request = '\r\n'.join((
            'POST / HTTP/1.0',
            'Host: localhost',
            'Content-Type: multipart/related; boundary="====XXXX====";',
            ' type="text/xml";start="test.submit"',  # leading space = folded continuation
            'Content-Length: 0',
            '', ''))
        with self.makefile() as fd:
            fd.write(request)
            read_http(fd)
class TestGetArg(TestCase):
    """Reading a sized portion of the request body and parsing it as a
    query string; trailing junk on the socket must be ignored."""

    @staticmethod
    def application(env, start_response):
        body = env['wsgi.input'].read(3)
        if PY3:
            body = body.decode('ascii')
        a = parse_qs(body).get('a', [1])[0]
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [('a is %s, body is %s' % (a, body)).encode('ascii')]

    def test_007_get_arg(self):
        # define a new handler that does a get_arg as well as a read_body
        request = '\r\n'.join((
            'POST / HTTP/1.0',
            'Host: localhost',
            'Content-Length: 3',
            '',
            'a=a'))
        with self.makefile() as fd:
            fd.write(request)

            # send some junk after the actual request
            fd.write('01234567890123456789')
            read_http(fd, body='a is a, body is a=a')
class TestCloseIter(TestCase):
    """The server must call ``close()`` on the application's returned
    iterable exactly once after consuming it (PEP 3333)."""

    # The *Validator* closes the iterators!
    validator = None

    def application(self, env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        # Return the test case itself; it is iterable and has a close().
        return self

    def __iter__(self):
        yield bytearray(b"Hello World")
        yield b"!"

    # Counts how many times close() was invoked. Previously initialized to
    # False and incremented, relying on bool/int coercion; use a plain
    # integer counter for clarity. ``assertEqual(..., 1)`` is unchanged.
    closed = 0

    def close(self):
        self.closed += 1

    def test_close_is_called(self):
        self.closed = 0
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
            read_http(fd, body=b"Hello World!", chunks=[b'Hello World', b'!'])
        # We got closed exactly once.
        self.assertEqual(self.closed, 1)
class TestChunkedApp(TestCase):
    """A generator application under HTTP/1.1 gets a chunked response;
    under HTTP/1.0 the body is sent unchunked."""

    # The individual body pieces the application yields.
    chunks = [b'this', b'is', b'chunked']

    def body(self):
        # The full expected response body.
        return b''.join(self.chunks)

    def application(self, env, start_response):
        self.assertTrue(env.get('wsgi.input_terminated'))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        for chunk in self.chunks:
            yield chunk

    def test_chunked_response(self):
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            response = read_http(fd, body=self.body(), chunks=None)

        response.assertHeader('Transfer-Encoding', 'chunked')
        self.assertEqual(response.chunks, self.chunks)

    def test_no_chunked_http_1_0(self):
        # HTTP/1.0 clients cannot parse chunked encoding; the server must
        # fall back to a plain body.
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            response = read_http(fd)

        self.assertEqual(response.body, self.body())
        self.assertEqual(response.headers.get('Transfer-Encoding'), None)
        content_length = response.headers.get('Content-Length')
        if content_length is not None:
            self.assertEqual(content_length, str(len(self.body())))
class TestBigChunks(TestChunkedApp):
    """Same as TestChunkedApp but with chunks large enough (8KiB each) to
    exceed internal buffer sizes."""
    chunks = [b'a' * 8192] * 3
class TestNegativeRead(TestCase):
    """``wsgi.input.read(-1)`` must behave like read-to-end for both
    chunked and Content-Length bodies."""

    def application(self, env, start_response):
        self.assertTrue(env.get('wsgi.input_terminated'))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        if env['PATH_INFO'] == '/read':
            data = env['wsgi.input'].read(-1)
            return [data]

    def test_negative_chunked_read(self):
        data = (b'POST /read HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')

    def test_negative_nonchunked_read(self):
        data = (b'POST /read HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Content-Length: 6\r\n\r\n'
                b'oh hai')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')
class TestNegativeReadline(TestCase):
    """``wsgi.input.readline(-1)`` must behave like an unlimited readline
    for both chunked and Content-Length bodies."""

    # readline(-1) is not part of the WSGI spec, so skip validation.
    validator = None

    @staticmethod
    def application(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        if env['PATH_INFO'] == '/readline':
            data = env['wsgi.input'].readline(-1)
            return [data]

    def test_negative_chunked_readline(self):
        data = (b'POST /readline HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')

    def test_negative_nonchunked_readline(self):
        data = (b'POST /readline HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Content-Length: 6\r\n\r\n'
                b'oh hai')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')
class TestChunkedPost(TestCase):
    """Reading chunked request bodies, including malformed chunk framing
    from the client (which must yield 400, not a hang)."""

    def application(self, env, start_response):
        self.assertTrue(env.get('wsgi.input_terminated'))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        if env['PATH_INFO'] == '/a':
            # Single sized read spanning multiple chunks.
            data = env['wsgi.input'].read(6)
            return [data]

        if env['PATH_INFO'] == '/b':
            # Repeated sized reads until EOF.
            lines = list(iter(lambda: env['wsgi.input'].read(6), b''))
            return lines

        if env['PATH_INFO'] == '/c':
            # Byte-at-a-time reads until EOF.
            return list(iter(lambda: env['wsgi.input'].read(1), b''))

    def test_014_chunked_post(self):
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')
        # self.close_opened() # XXX: Why?

        with self.makefile() as fd:
            fd.write(data.replace(b'/a', b'/b'))
            read_http(fd, body='oh hai')

        with self.makefile() as fd:
            fd.write(data.replace(b'/a', b'/c'))
            read_http(fd, body='oh hai')

    def test_229_incorrect_chunk_no_newline(self):
        # Giving both a Content-Length and a Transfer-Encoding,
        # TE is preferred. But if the chunking is bad from the client,
        # missing its terminating newline,
        # the server doesn't hang
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Content-Length: 12\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'{"hi": "ho"}')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, code=400)

    def test_229_incorrect_chunk_non_hex(self):
        # Giving both a Content-Length and a Transfer-Encoding,
        # TE is preferred. But if the chunking is bad from the client,
        # the server doesn't hang
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Content-Length: 12\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'{"hi": "ho"}\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, code=400)

    def test_229_correct_chunk_quoted_ext(self):
        # A quoted-string chunk extension is legal framing.
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2;token="oh hi"\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')

    def test_229_correct_chunk_token_ext(self):
        # A token chunk extension is legal framing.
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2;token=oh_hi\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, body='oh hai')

    def test_229_incorrect_chunk_token_ext_too_long(self):
        # An absurdly long chunk extension must be rejected with 400.
        data = (b'POST /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                b'Transfer-Encoding: chunked\r\n\r\n'
                b'2;token=oh_hi\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        data = data.replace(b'oh_hi', b'_oh_hi' * 4000)
        with self.makefile() as fd:
            fd.write(data)
            read_http(fd, code=400)
class TestUseWrite(TestCase):
    """Using the ``write`` callable returned by ``start_response``: with an
    explicit Content-Length nothing is chunked; without one the server
    must switch to chunked transfer encoding."""

    body = b'abcde'
    end = b'end'
    content_length = str(len(body + end))

    def application(self, env, start_response):
        if env['PATH_INFO'] == '/explicit-content-length':
            write = start_response('200 OK', [('Content-Type', 'text/plain'),
                                              ('Content-Length', self.content_length)])
            write(self.body)
        elif env['PATH_INFO'] == '/no-content-length':
            write = start_response('200 OK', [('Content-Type', 'text/plain')])
            write(self.body)
        elif env['PATH_INFO'] == '/no-content-length-twice':
            write = start_response('200 OK', [('Content-Type', 'text/plain')])
            write(self.body)
            write(self.body)
        else:
            # pylint:disable-next=broad-exception-raised
            raise Exception('Invalid url')
        # The iterable's content is appended after whatever write() sent.
        return [self.end]

    def test_explicit_content_length(self):
        with self.makefile() as fd:
            fd.write('GET /explicit-content-length HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            response = read_http(fd, body=self.body + self.end)
        response.assertHeader('Content-Length', self.content_length)
        response.assertHeader('Transfer-Encoding', False)

    def test_no_content_length(self):
        with self.makefile() as fd:
            fd.write('GET /no-content-length HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            response = read_http(fd, body=self.body + self.end)
        response.assertHeader('Content-Length', False)
        response.assertHeader('Transfer-Encoding', 'chunked')

    def test_no_content_length_twice(self):
        with self.makefile() as fd:
            fd.write('GET /no-content-length-twice HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            response = read_http(fd, body=self.body + self.body + self.end)
        response.assertHeader('Content-Length', False)
        response.assertHeader('Transfer-Encoding', 'chunked')
        # Each write() and the final iterable item become separate chunks.
        self.assertEqual(response.chunks, [self.body, self.body, self.end])
class HttpsTestCase(TestCase):
    """Base for HTTPS tests: serves with a self-signed certificate and
    talks to the server over an SSL-wrapped client socket."""

    certfile = os.path.join(os.path.dirname(__file__), 'test_server.crt')
    keyfile = os.path.join(os.path.dirname(__file__), 'test_server.key')

    def init_server(self, application):
        self.server = pywsgi.WSGIServer((self.listen_addr, 0), application,
                                        certfile=self.certfile, keyfile=self.keyfile)

    def urlopen(self, method='GET', post_body=None, **kwargs): # pylint:disable=arguments-differ
        import ssl
        with self.connect() as raw_sock:
            with ssl.wrap_socket(raw_sock) as sock: # pylint:disable=deprecated-method
                with sock.makefile(bufsize=1) as fd: # pylint:disable=unexpected-keyword-arg
                    fd.write('%s / HTTP/1.1\r\nHost: localhost\r\n' % method)
                    if post_body is not None:
                        fd.write('Content-Length: %s\r\n\r\n' % len(post_body))
                        fd.write(post_body)
                        # Default the expected response body to what was posted.
                        if kwargs.get('body') is None:
                            kwargs['body'] = post_body
                    else:
                        fd.write('\r\n')
                    fd.flush()
                    return read_http(fd, **kwargs)

    def application(self, environ, start_response):
        # The server must report the https scheme to the application.
        assert environ['wsgi.url_scheme'] == 'https', environ['wsgi.url_scheme']
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['wsgi.input'].read(10)]
import gevent.ssl
# Feature-probe for ssl.create_default_context (missing on very old
# Python/ssl builds). getattr needs the None default: without it an
# absent attribute raises AttributeError instead of making this falsy.
HAVE_SSLCONTEXT = getattr(gevent.ssl, 'create_default_context', None)
if HAVE_SSLCONTEXT:
    class HttpsSslContextTestCase(HttpsTestCase):
        """HTTPS base that configures the server through an SSLContext
        instead of the certfile/keyfile shortcut arguments."""

        def init_server(self, application):
            # On 2.7, our certs don't line up with hostname.
            # If we just use create_default_context as-is, we get
            # `ValueError: check_hostname requires server_hostname`.
            # If we set check_hostname to False, we get
            # `SSLError: [SSL: PEER_DID_NOT_RETURN_A_CERTIFICATE] peer did not return a certificate`
            # (Neither of which happens in Python 3.) But the unverified context
            # works both places. See also test___example_servers.py
            from gevent.ssl import _create_unverified_context # pylint:disable=no-name-in-module
            context = _create_unverified_context()
            context.load_cert_chain(certfile=self.certfile, keyfile=self.keyfile)
            self.server = pywsgi.WSGIServer((self.listen_addr, 0),
                                            application, ssl_context=context)
class TestHttps(HttpsTestCase):
    """Basic POST/GET over HTTPS."""

    # NOTE(review): socket.ssl only existed on Python < 2.6/3.x, so on
    # modern Pythons these tests are skipped by this guard — confirm intent.
    if hasattr(socket, 'ssl'):

        def test_012_ssl_server(self):
            result = self.urlopen(method="POST", post_body='abc')
            self.assertEqual(result.body, 'abc')

        def test_013_empty_return(self):
            result = self.urlopen()
            self.assertEqual(result.body, '')
if HAVE_SSLCONTEXT:
    # Re-run the HTTPS tests with the server configured via SSLContext.
    class TestHttpsWithContext(HttpsSslContextTestCase, TestHttps): # pylint:disable=too-many-ancestors
        pass
class TestInternational(TestCase):
    """Percent-encoded non-ASCII path and query string: the server must
    decode the path per WSGI rules (latin-1 on Py3) and leave the query
    string percent-encoded."""

    validator = None  # wsgiref.validate.IteratorWrapper([]) does not have __len__

    def application(self, environ, start_response):
        path_bytes = b'/\xd0\xbf\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82'
        if PY3:
            # Under PY3, the escapes were decoded as latin-1
            path_bytes = path_bytes.decode('latin-1')
        self.assertEqual(environ['PATH_INFO'], path_bytes)
        self.assertEqual(environ['QUERY_STRING'], '%D0%B2%D0%BE%D0%BF%D1%80%D0%BE%D1%81=%D0%BE%D1%82%D0%B2%D0%B5%D1%82')
        start_response("200 PASSED", [('Content-Type', 'text/plain')])
        return []

    def test(self):
        with self.connect() as sock:
            # The blank line inside the literal terminates the request
            # (becomes \r\n\r\n after the replace).
            sock.sendall(
                b'''GET /%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82?%D0%B2%D0%BE%D0%BF%D1%80%D0%BE%D1%81=%D0%BE%D1%82%D0%B2%D0%B5%D1%82 HTTP/1.1
Host: localhost
Connection: close

'''.replace(b'\n', b'\r\n'))
            with sock.makefile() as fd:
                read_http(fd, reason='PASSED', chunks=False, body='', content_length=0)
class TestNonLatin1HeaderFromApplication(TestCase):
    """An application header value that cannot be encoded as latin-1 must
    make start_response raise (on Py3, where it's a native str)."""

    error_fatal = False # Allow sending the exception response, don't kill the greenlet
    validator = None # Don't validate the application, it's deliberately bad
    header = b'\xe1\xbd\x8a3' # U+1F4A + '3' in utf-8 bytes (see subclass note)
    should_error = PY3 # non-native string under Py3

    def setUp(self):
        super(TestNonLatin1HeaderFromApplication, self).setUp()
        # Collected (type, value) pairs from start_response failures.
        self.errors = []

    def tearDown(self):
        self.errors = []
        super(TestNonLatin1HeaderFromApplication, self).tearDown()

    def application(self, environ, start_response):
        # We return a header that cannot be encoded in latin-1
        try:
            start_response("200 PASSED",
                           [('Content-Type', 'text/plain'),
                            ('Custom-Header', self.header)])
        except:
            self.errors.append(sys.exc_info()[:2])
            raise
        return []

    def test(self):
        with self.connect() as sock:
            self.expect_one_error()
            sock.sendall(b'''GET / HTTP/1.1\r\n\r\n''')
            with sock.makefile() as fd:
                if self.should_error:
                    read_http(fd, code=500, reason='Internal Server Error')
                    self.assert_error(where_type=pywsgi.SecureEnviron)
                    self.assertEqual(len(self.errors), 1)
                    _, v = self.errors[0]
                    self.assertIsInstance(v, UnicodeError)
                else:
                    read_http(fd, code=200, reason='PASSED')
                    self.assertEqual(len(self.errors), 0)
class TestNonLatin1UnicodeHeaderFromApplication(TestNonLatin1HeaderFromApplication):
    # Flip-flop of the superclass: Python 3 native string, Python 2 unicode object
    # NOTE: \u1f4a3 parses as U+1F4A followed by the character '3' (matching
    # the superclass's utf-8 bytes), not the U+1F4A3 "bomb" emoji.
    header = u"\u1f4a3"
    # Error both on py3 and py2. On py2, non-native string. On py3, native string
    # that cannot be encoded to latin-1
    should_error = True
class TestInputReadline(TestCase):
    # this test relies on the fact that readline() returns '' after it reached EOF
    # this behaviour is not mandated by WSGI spec, it's just happens that gevent.wsgi behaves like that
    # as such, this may change in the future

    validator = None

    def application(self, environ, start_response):
        input = environ['wsgi.input']
        lines = []
        while True:
            line = input.readline()
            if not line:
                break
            line = line.decode('ascii') if PY3 else line
            lines.append(repr(line) + ' ')
        start_response('200 hello', [])
        return [l.encode('ascii') for l in lines] if PY3 else lines

    def test(self):
        # The body deliberately ends without a trailing newline ('123').
        with self.makefile() as fd:
            content = 'hello\n\nworld\n123'
            fd.write('POST / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                     'Content-Length: %s\r\n\r\n%s' % (len(content), content))
            fd.flush()
            read_http(fd, reason='hello', body="'hello\\n' '\\n' 'world\\n' '123' ")
class TestInputIter(TestInputReadline):
    """Same as TestInputReadline but iterating wsgi.input directly."""

    def application(self, environ, start_response):
        input = environ['wsgi.input']
        lines = []
        for line in input:
            if not line:
                break
            line = line.decode('ascii') if PY3 else line
            lines.append(repr(line) + ' ')
        start_response('200 hello', [])
        return [l.encode('ascii') for l in lines] if PY3 else lines
class TestInputReadlines(TestInputReadline):
    """Same as TestInputReadline but using wsgi.input.readlines()."""

    def application(self, environ, start_response):
        input = environ['wsgi.input']
        lines = [l.decode('ascii') if PY3 else l for l in input.readlines()]
        lines = [repr(line) + ' ' for line in lines]
        start_response('200 hello', [])
        return [l.encode('ascii') for l in lines] if PY3 else lines
class TestInputN(TestCase):
    # testing for this:
    # File "/home/denis/work/gevent/gevent/pywsgi.py", line 70, in _do_read
    #   if length and length > self.content_length - self.position:
    # TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'
    # i.e. read(n) on a request with no Content-Length must not blow up.

    validator = None

    def application(self, environ, start_response):
        environ['wsgi.input'].read(5)
        start_response('200 OK', [])
        return []

    def test(self):
        self.urlopen()
class TestErrorInApplication(TestCase):
    """An exception raised before start_response produces a 500 and is
    reported through the hub's error handler."""

    error = object()
    error_fatal = False  # Let the server send the 500 instead of dying.

    def application(self, env, start_response):
        self.error = greentest.ExpectedException('TestError.application')
        raise self.error

    def test(self):
        self.expect_one_error()
        self.urlopen(code=500)
        self.assert_error(greentest.ExpectedException, self.error)
class TestError_after_start_response(TestErrorInApplication):
    """Same, but the exception is raised after start_response was called."""

    def application(self, env, start_response):
        self.error = greentest.ExpectedException('TestError_after_start_response.application')
        start_response('200 OK', [('Content-Type', 'text/plain')])
        raise self.error
class TestEmptyYield(TestCase):
    """A generator that only yields empty strings must produce a clean
    empty-body response with no trailing garbage on the socket."""

    @staticmethod
    def application(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield b""
        yield b""

    def test_err(self):
        chunks = []

        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            read_http(fd, body='', chunks=chunks)

            garbage = fd.read()
        self.assertEqual(garbage, b"", "got garbage: %r" % garbage)
class TestFirstEmptyYield(TestCase):
    """An empty first yield must not confuse the chunked writer; the
    following non-empty yield is delivered as the sole chunk."""

    @staticmethod
    def application(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield b""
        yield b"hello"

    def test_err(self):
        chunks = [b'hello']

        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            read_http(fd, body='hello', chunks=chunks)

            garbage = fd.read()
        self.assertEqual(garbage, b"")
class TestEmptyYield304(TestCase):
    """A 304 response with only empty yields must have no body and no
    chunked framing at all."""

    @staticmethod
    def application(env, start_response):
        start_response('304 Not modified', [])
        yield b""
        yield b""

    def test_err(self):
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            read_http(fd, code=304, body='', chunks=False)
            garbage = fd.read()
        self.assertEqual(garbage, b"")
class TestContentLength304(TestCase):
    """start_response must reject a non-zero Content-Length on a 304."""

    validator = None

    def application(self, env, start_response):
        try:
            start_response('304 Not modified', [('Content-Length', '100')])
        except AssertionError as ex:
            # Report the rejection message back to the client for inspection.
            start_response('200 Raised', [])
            return ex.args
        raise AssertionError('start_response did not fail but it should')

    def test_err(self):
        body = "Invalid Content-Length for 304 response: '100' (must be absent or zero)"
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            read_http(fd, code=200, reason='Raised', body=body, chunks=False)
            garbage = fd.read()
        self.assertEqual(garbage, b"")
class TestBody304(TestCase):
    """Returning a body iterable with a 304 status is an error that
    read_http detects on the client side."""

    validator = None

    def application(self, env, start_response):
        start_response('304 Not modified', [])
        return [b'body']

    def test_err(self):
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            with self.assertRaises(AssertionError) as exc:
                read_http(fd)
        ex = exc.exception
        self.assertEqual(str(ex), 'The 304 response must have no body')
class TestWrite304(TestCase):
    """The write() callable must refuse to emit a body for a 304."""

    validator = None
    # Set by the application: did write() raise as required?
    error_raised = None

    def application(self, env, start_response):
        write = start_response('304 Not modified', [])
        self.error_raised = False
        try:
            write('body')
        except AssertionError as ex:
            self.error_raised = True
            # Re-raise under the test's expected-error type.
            raise ExpectedAssertionError(*ex.args)

    def test_err(self):
        with self.makefile() as fd:
            fd.write(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            with self.assertRaises(AssertionError) as exc:
                read_http(fd)
        ex = exc.exception
        self.assertEqual(str(ex), 'The 304 response must have no body')
        self.assertTrue(self.error_raised, 'write() must raise')
class TestEmptyWrite(TestEmptyYield):
    """Like TestEmptyYield, but the empty strings go through write()."""

    @staticmethod
    def application(env, start_response):
        write = start_response('200 OK', [('Content-Type', 'text/plain')])
        write(b"")
        write(b"")
        return []
class BadRequestTests(TestCase):
    """Malformed requests (bad Content-Length, bad request line) must get
    a 400 without the handler printing a traceback."""

    validator = None
    # pywsgi checks content-length, but wsgi does not
    content_length = None

    assert TestCase.handler_class._print_unexpected_exc

    class handler_class(TestCase.handler_class):
        # Bad requests are expected here; a printed traceback means the
        # server treated one as an unexpected internal error.
        def _print_unexpected_exc(self):
            raise AssertionError("Should not print a traceback")

    def application(self, env, start_response):
        self.assertEqual(env['CONTENT_LENGTH'], self.content_length)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    def test_negative_content_length(self):
        self.content_length = '-100'
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: %s\r\n\r\n' % self.content_length)
            # Either tolerated (200) or rejected (400) is acceptable.
            read_http(fd, code=(200, 400))

    def test_illegal_content_length(self):
        self.content_length = 'abc'
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: %s\r\n\r\n' % self.content_length)
            read_http(fd, code=(200, 400))

    def test_bad_request_line_with_percent(self):
        # If the request is invalid and contains Python formatting characters (%)
        # we don't fail to log the error and we do generate a 400.
        # https://github.com/gevent/gevent/issues/1708
        bad_request = 'GET / HTTP %\r\n'
        with self.makefile() as fd:
            fd.write(bad_request)
            read_http(fd, code=400)
class ChunkedInputTests(TestCase):
    """Chunked request bodies: short reads, line iteration, chunk
    extensions ('dirt'), and a client disconnect mid-chunk."""

    # Extra junk appended after each chunk-size line (chunk extension data).
    dirt = ""
    validator = None

    def application(self, env, start_response):
        input = env['wsgi.input']
        response = []

        pi = env["PATH_INFO"]

        if pi == "/short-read":
            # Read fewer bytes than the body contains.
            d = input.read(10)
            response = [d]
        elif pi == "/lines":
            for x in input:
                response.append(x)
        elif pi == "/ping":
            input.read(1)
            response.append(b"pong")
        else:
            raise RuntimeError("bad path")

        start_response('200 OK', [('Content-Type', 'text/plain')])
        return response

    def chunk_encode(self, chunks, dirt=None):
        if dirt is None:
            dirt = self.dirt
        return chunk_encode(chunks, dirt=dirt)

    def body(self, dirt=None):
        # A multi-chunk body ending with the zero-size terminator chunk.
        return self.chunk_encode(["this", " is ", "chunked", "\nline", " 2", "\n", "line3", ""], dirt=dirt)

    def ping(self, fd):
        # A follow-up request proving the connection is still usable.
        fd.write("GET /ping HTTP/1.1\r\n\r\n")
        read_http(fd, body="pong")

    def ping_if_possible(self, fd):
        self.ping(fd)

    def test_short_read_with_content_length(self):
        body = self.body()
        req = b"POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\nContent-Length:1000\r\n\r\n" + body
        with self.connect() as conn:
            with conn.makefile(bufsize=1) as fd: # pylint:disable=unexpected-keyword-arg
                fd.write(req)
                read_http(fd, body="this is ch")

                self.ping_if_possible(fd)

    def test_short_read_with_zero_content_length(self):
        body = self.body()
        # Transfer-Encoding must win over the (zero) Content-Length.
        req = b"POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\nContent-Length:0\r\n\r\n" + body
        #print("REQUEST:", repr(req))

        with self.makefile() as fd:
            fd.write(req)
            read_http(fd, body="this is ch")

            self.ping_if_possible(fd)

    def test_short_read(self):
        body = self.body()
        req = b"POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body

        with self.makefile() as fd:
            fd.write(req)
            read_http(fd, body="this is ch")

            self.ping_if_possible(fd)

    def test_dirt(self):
        # Chunk extensions (including an embedded NUL) must be tolerated.
        body = self.body(dirt="; here is dirt\0bla")
        req = b"POST /ping HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body

        with self.makefile() as fd:
            fd.write(req)
            read_http(fd, body="pong")

            self.ping_if_possible(fd)

    def test_chunked_readline(self):
        body = self.body()
        req = "POST /lines HTTP/1.1\r\nContent-Length: %s\r\ntransfer-encoding: Chunked\r\n\r\n" % (len(body))
        req = req.encode('latin-1')
        req += body

        with self.makefile() as fd:
            fd.write(req)
            read_http(fd, body='this is chunked\nline 2\nline3')

    def test_close_before_finished(self):
        # Client promises a 4-byte chunk but disconnects after 3 bytes:
        # the server must see an IOError, not hang.
        self.expect_one_error()
        body = b'4\r\nthi'
        req = b"POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body

        with self.connect() as sock:
            with sock.makefile(bufsize=1, mode='wb') as fd:# pylint:disable=unexpected-keyword-arg
                fd.write(req)
                fd.close()

            # Python 3 keeps the socket open even though the only
            # makefile is gone; python 2 closed them both (because there were
            # no outstanding references to the socket). Closing is essential for the server
            # to get the message that the read will fail. It's better to be explicit
            # to avoid a ResourceWarning
            sock.close()
            # Under Py2 it still needs to go away, which was implicit before
            del fd
        del sock

        gevent.get_hub().loop.update_now()
        gevent.sleep(0.01) # timing needed for cpython

        if greentest.PYPY:
            # XXX: Something is keeping the socket alive,
            # by which I mean, the close event is not propagating to the server
            # and waking up its recv() loop...we are stuck with the three bytes of
            # 'thi' in the buffer and trying to read the forth. No amount of tinkering
            # with the timing changes this...the only thing that does is running a
            # GC and letting some object get collected. Might this be a problem in real life?
            import gc
            gc.collect()
            gevent.sleep(0.01)
            gevent.get_hub().loop.update_now()
            gc.collect()
            gevent.sleep(0.01)

        # XXX2: Sometimes windows and PyPy/Travis fail to get this error, leading to a test failure.
        # This would have to be due to the socket being kept around and open,
        # not closed at the low levels. I haven't seen this locally.
        # In the PyPy case, I've seen the IOError reported on the console, but not
        # captured in the variables.
        # https://travis-ci.org/gevent/gevent/jobs/329232976#L1374
        self.assert_error(IOError, 'unexpected end of file while parsing chunked data')
class Expect100ContinueTests(TestCase):
    """Expect: 100-continue handling: oversized bodies get 417 up front;
    acceptable ones get exactly one interim 100 response."""

    validator = None

    def application(self, environ, start_response):
        content_length = int(environ['CONTENT_LENGTH'])
        if content_length > 1024:
            start_response('417 Expectation Failed', [('Content-Length', '7'), ('Content-Type', 'text/plain')])
            return [b'failure']

        # pywsgi did sent a "100 continue" for each read
        # see http://code.google.com/p/gevent/issues/detail?id=93
        # Two reads here verify only one 100 response is emitted.
        text = environ['wsgi.input'].read(1)
        text += environ['wsgi.input'].read(content_length - 1)
        start_response('200 OK', [('Content-Length', str(len(text))), ('Content-Type', 'text/plain')])
        return [text]

    def test_continue(self):
        with self.makefile() as fd:
            fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\nExpect: 100-continue\r\n\r\n')
            read_http(fd, code=417, body="failure")

            fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\nExpect: 100-continue\r\n\r\ntesting')
            read_http(fd, code=100)
            read_http(fd, body="testing")
class MultipleCookieHeadersTest(TestCase):
    """Repeated request headers of the same name must be joined with
    '; ' into a single environ value, preserving order per name."""

    validator = None

    def application(self, environ, start_response):
        self.assertEqual(environ['HTTP_COOKIE'], 'name1="value1"; name2="value2"')
        self.assertEqual(environ['HTTP_COOKIE2'], 'nameA="valueA"; nameB="valueB"')
        start_response('200 OK', [])
        return []

    def test(self):
        with self.makefile() as fd:
            # Cookie and Cookie2 headers deliberately interleaved.
            fd.write('''GET / HTTP/1.1
Host: localhost
Cookie: name1="value1"
Cookie2: nameA="valueA"
Cookie2: nameB="valueB"
Cookie: name2="value2"\n\n'''.replace('\n', '\r\n'))
            read_http(fd)
class TestLeakInput(TestCase):
    """Holding references to wsgi.input / the environ (even via a captured
    frame) after the request must not break 'Connection: close' handling."""

    # Deliberately retained references to provoke potential leaks.
    _leak_wsgi_input = None
    _leak_environ = None

    def tearDown(self):
        TestCase.tearDown(self)
        self._leak_wsgi_input = None
        self._leak_environ = None

    def application(self, environ, start_response):
        pi = environ["PATH_INFO"]
        self._leak_wsgi_input = environ["wsgi.input"]
        self._leak_environ = environ
        if pi == "/leak-frame":
            # Stashing the current frame creates a reference cycle through
            # the environ.
            environ["_leak"] = sys._getframe(0)

        text = b"foobar"
        start_response('200 OK', [('Content-Length', str(len(text))), ('Content-Type', 'text/plain')])
        return [text]

    def test_connection_close_leak_simple(self):
        with self.makefile() as fd:
            fd.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
            d = fd.read()
        self.assertTrue(d.startswith(b"HTTP/1.1 200 OK"), d)

    def test_connection_close_leak_frame(self):
        with self.makefile() as fd:
            fd.write(b"GET /leak-frame HTTP/1.0\r\nConnection: close\r\n\r\n")
            d = fd.read()
        self.assertTrue(d.startswith(b"HTTP/1.1 200 OK"), d)
        # Break the cycle so tearDown can actually release the environ.
        self._leak_environ.pop('_leak')
class TestHTTPResponseSplitting(TestCase):
    """start_response must reject CR/LF embedded in the status line or in
    header names/values (HTTP response-splitting defense)."""

    # The validator would prevent the app from doing the
    # bad things it needs to do
    validator = None

    status = '200 OK'
    headers = ()
    # The exception start_response raised, captured for assertion.
    start_exc = None

    def setUp(self):
        TestCase.setUp(self)
        self.start_exc = None
        # Reset to the class defaults in case a previous test mutated them.
        self.status = TestHTTPResponseSplitting.status
        self.headers = TestHTTPResponseSplitting.headers

    def tearDown(self):
        TestCase.tearDown(self)
        self.start_exc = None

    def application(self, environ, start_response):
        try:
            start_response(self.status, self.headers)
        except Exception as e: # pylint: disable=broad-except
            self.start_exc = e
        else:
            self.start_exc = None
        return ()

    def _assert_failure(self, message):
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
            fd.read()
        self.assertIsInstance(self.start_exc, ValueError)
        self.assertEqual(self.start_exc.args[0], message)

    def test_newline_in_status(self):
        self.status = '200 OK\r\nConnection: close\r\nContent-Length: 0\r\n\r\n'
        self._assert_failure('carriage return or newline in status')

    def test_newline_in_header_value(self):
        self.headers = [('Test', 'Hi\r\nConnection: close')]
        self._assert_failure('carriage return or newline in header value')

    def test_newline_in_header_name(self):
        self.headers = [('Test\r\n', 'Hi')]
        self._assert_failure('carriage return or newline in header name')
class TestInvalidEnviron(TestCase):
    """A request with no body/headers must not grow spurious environ keys."""
    validator = None
    # check that WSGIServer does not insert any default values for CONTENT_LENGTH
    def application(self, environ, start_response):
        # Only HTTP_HOST is expected; anything else content/HTTP-related
        # would have been invented by the server.
        for key, value in environ.items():
            if key in ('CONTENT_LENGTH', 'CONTENT_TYPE') or key.startswith('HTTP_'):
                if key != 'HTTP_HOST':
                    raise ExpectedAssertionError('Unexpected environment variable: %s=%r' % (
                        key, value))
        start_response('200 OK', [])
        return []
    def test(self):
        # Exercise both protocol versions.
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
            read_http(fd)
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
            read_http(fd)
class TestInvalidHeadersDropped(TestCase):
    """Header names containing an underscore must not reach the environ
    (they could spoof legitimate CGI variables like HTTP_X_AUTH_USER)."""
    validator = None
    # check that invalid headers with a _ are dropped
    def application(self, environ, start_response):
        self.assertNotIn('HTTP_X_AUTH_USER', environ)
        start_response('200 OK', [])
        return []
    def test(self):
        with self.makefile() as fd:
            # 'x-auth_user' contains an underscore and must be discarded.
            fd.write('GET / HTTP/1.0\r\nx-auth_user: bob\r\n\r\n')
            read_http(fd)
class TestHandlerSubclass(TestCase):
    """A handler subclass may hijack read_requestline to speak a
    non-HTTP protocol (here: Flash policy-file requests) on the same port."""
    validator = None
    class handler_class(TestCase.handler_class):
        def read_requestline(self):
            # Peek at the first 7 bytes to distinguish HTTP from a
            # '<policy-file-request/>' XML probe.
            data = self.rfile.read(7)
            if data[0] == b'<'[0]: # py3: indexing bytes returns ints. sigh.
                # Returning nothing stops handle_one_request()
                # Note that closing or even deleting self.socket() here
                # can lead to the read side throwing Connection Reset By Peer,
                # depending on the Python version and OS
                data += self.rfile.read(15)
                if data.lower() == b'<policy-file-request/>':
                    self.socket.sendall(b'HELLO')
                else:
                    self.log_error('Invalid request: %r', data)
                return None
            # Looks like HTTP: return the full request line for normal handling.
            return data + self.rfile.readline()
    def application(self, environ, start_response):
        start_response('200 OK', [])
        return []
    def test(self):
        # 1. Policy-file probe gets the raw 'HELLO' reply.
        with self.makefile() as fd:
            fd.write(b'<policy-file-request/>\x00')
            fd.flush() # flush() is needed on PyPy, apparently it buffers slightly differently
            self.assertEqual(fd.read(), b'HELLO')
        # 2. Plain HTTP still works on the same server.
        with self.makefile() as fd:
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
            fd.flush()
            read_http(fd)
        # 3. Malformed probe: connection is dropped with no response.
        with self.makefile() as fd:
            # Trigger an error
            fd.write('<policy-file-XXXuest/>\x00')
            fd.flush()
            self.assertEqual(fd.read(), b'')
class TestErrorAfterChunk(TestCase):
    """An exception raised after a chunk has already been sent must
    abort the response body (the client sees a truncated chunked stream)."""
    validator = None
    @staticmethod
    def application(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        yield b"hello"
        raise greentest.ExpectedException('TestErrorAfterChunk')
    def test(self):
        with self.makefile() as fd:
            # The server-side exception is expected; register it so the
            # test harness doesn't flag it as an unexpected error.
            self.expect_one_error()
            fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
            # read_http chokes on the truncated chunked body.
            with self.assertRaises(ValueError):
                read_http(fd)
            self.assert_error(greentest.ExpectedException)
def chunk_encode(chunks, dirt=None):
    """Encode *chunks* (an iterable of native strings) as an HTTP/1.1
    chunked-transfer-encoded byte string.

    Each chunk is emitted as ``<hex-size><dirt>\\r\\n<data>\\r\\n``; an
    empty string therefore produces the terminating zero-length chunk.
    *dirt*, if given, is extra text appended after the hex size (a fake
    chunk extension) used to exercise lenient parsers.
    """
    if dirt is None:
        dirt = ""
    # Accumulate pieces in a list and join once: repeated ``bytes +=``
    # is quadratic in the number of chunks.
    parts = []
    for c in chunks:
        parts.append(("%x%s\r\n%s\r\n" % (len(c), dirt, c)).encode('ascii'))
    return b"".join(parts)
class TestInputRaw(greentest.BaseTestCase):
    """Unit tests for pywsgi.Input driven directly from a StringIO,
    covering short bodies, chunked decoding, and huge content lengths."""
    def make_input(self, data, content_length=None, chunked_input=False):
        # A list of strings is convenience syntax for a chunked body.
        if isinstance(data, list):
            data = chunk_encode(data)
            chunked_input = True
        elif isinstance(data, str) and PY3:
            data = data.encode("ascii")
        return Input(StringIO(data), content_length=content_length, chunked_input=chunked_input)
    if PY3:
        # On Python 3 Input returns bytes; let tests compare against
        # native-string literals by encoding the expectation.
        def assertEqual(self, first, second, msg=None):
            if isinstance(second, str):
                second = second.encode('ascii')
            super(TestInputRaw, self).assertEqual(first, second, msg)
    def test_short_post(self):
        # Body shorter than Content-Length: reading must fail.
        i = self.make_input("1", content_length=2)
        self.assertRaises(IOError, i.read)
    def test_short_post_read_with_length(self):
        i = self.make_input("1", content_length=2)
        self.assertRaises(IOError, i.read, 2)
    def test_short_post_readline(self):
        i = self.make_input("1", content_length=2)
        self.assertRaises(IOError, i.readline)
    def test_post(self):
        i = self.make_input("12", content_length=2)
        data = i.read()
        self.assertEqual(data, "12")
    def test_post_read_with_length(self):
        i = self.make_input("12", content_length=2)
        data = i.read(10)
        self.assertEqual(data, "12")
    def test_chunked(self):
        i = self.make_input(["1", "2", ""])
        data = i.read()
        self.assertEqual(data, "12")
    def test_chunked_read_with_length(self):
        i = self.make_input(["1", "2", ""])
        data = i.read(10)
        self.assertEqual(data, "12")
    def test_chunked_missing_chunk(self):
        # No terminating zero-length chunk: stream is truncated.
        i = self.make_input(["1", "2"])
        self.assertRaises(IOError, i.read)
    def test_chunked_missing_chunk_read_with_length(self):
        i = self.make_input(["1", "2"])
        self.assertRaises(IOError, i.read, 10)
    def test_chunked_missing_chunk_readline(self):
        i = self.make_input(["1", "2"])
        self.assertRaises(IOError, i.readline)
    def test_chunked_short_chunk(self):
        # Declared chunk size 2 but only 1 byte of data follows.
        i = self.make_input("2\r\n1", chunked_input=True)
        self.assertRaises(IOError, i.read)
    def test_chunked_short_chunk_read_with_length(self):
        i = self.make_input("2\r\n1", chunked_input=True)
        self.assertRaises(IOError, i.read, 2)
    def test_chunked_short_chunk_readline(self):
        i = self.make_input("2\r\n1", chunked_input=True)
        self.assertRaises(IOError, i.readline)
    def test_32bit_overflow(self):
        # https://github.com/gevent/gevent/issues/289
        # Should not raise an OverflowError on Python 2
        data = b'asdf\nghij\n'
        long_data = b'a' * (pywsgi.MAX_REQUEST_LINE + 10)
        long_data += b'\n'
        data += long_data
        partial_data = b'qjk\n' # Note terminating \n
        n = 25 * 1000000000
        if hasattr(n, 'bit_length'):
            # Sanity check: n really does exceed 32 bits.
            self.assertEqual(n.bit_length(), 35)
        if not PY3 and not PYPY:
            # Make sure we have the impl we think we do
            self.assertRaises(OverflowError, StringIO(data).readline, n)
        i = self.make_input(data, content_length=n)
        # No size hint, but we have too large a content_length to fit
        self.assertEqual(i.readline(), b'asdf\n')
        # Large size hint
        self.assertEqual(i.readline(n), b'ghij\n')
        self.assertEqual(i.readline(n), long_data)
        # Now again with the real content length, assuring we can't read past it
        i = self.make_input(data + partial_data, content_length=len(data) + 1)
        self.assertEqual(i.readline(), b'asdf\n')
        self.assertEqual(i.readline(n), b'ghij\n')
        self.assertEqual(i.readline(n), long_data)
        # Now we've reached content_length so we shouldn't be able to
        # read anymore except the one byte remaining
        self.assertEqual(i.readline(n), b'q')
class Test414(TestCase):
    """A request line longer than the server limit must produce
    414 Request-URI Too Long without invoking the application."""
    @staticmethod
    def application(env, start_response):
        raise AssertionError('should not get there')
    def test(self):
        # 20000 chars comfortably exceeds MAX_REQUEST_LINE.
        longline = 'x' * 20000
        with self.makefile() as fd:
            fd.write(('''GET /%s HTTP/1.0\r\nHello: world\r\n\r\n''' % longline).encode('latin-1'))
            read_http(fd, code=414)
class TestLogging(TestCase):
    """Tests for wrapping a user-supplied logger object in
    pywsgi.LoggingLogAdapter (issues #663, #664, #756)."""
    # Something that gets wrapped in a LoggingLogAdapter
    class Logger(object):
        accessed = None
        logged = None
        thing = None
        def log(self, level, msg):
            self.logged = (level, msg)
        def access(self, msg):
            self.accessed = msg
        def get_thing(self):
            return self.thing
    def init_logger(self):
        return self.Logger()
    @staticmethod
    def application(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    # Tests for issue #663
    def test_proxy_methods_on_log(self):
        # An object that looks like a logger gets wrapped
        # with a proxy that
        self.assertTrue(isinstance(self.server.log, pywsgi.LoggingLogAdapter))
        self.server.log.access("access")
        self.server.log.write("write")
        self.assertEqual(self.server.log.accessed, "access")
        # 20 is logging.INFO: write() is forwarded as an INFO-level log call.
        self.assertEqual(self.server.log.logged, (20, "write"))
    def test_set_attributes(self):
        # Not defined by the wrapper, it goes to the logger
        self.server.log.thing = 42
        self.assertEqual(self.server.log.get_thing(), 42)
        del self.server.log.thing
        self.assertEqual(self.server.log.get_thing(), None)
    def test_status_log(self):
        # Issue 664: Make sure we format the status line as a string
        self.urlopen()
        msg = self.server.log.logged[1]
        self.assertTrue('"GET / HTTP/1.1" 200 ' in msg, msg)
        # Issue 756: Make sure we don't throw a newline on the end
        self.assertTrue('\n' not in msg, msg)
class TestEnviron(TestCase):
    """Tests for pywsgi.SecureEnviron: masked repr by default,
    whitelisting, copy/pickle behavior."""
    # The wsgiref validator asserts type(environ) is dict.
    # https://mail.python.org/pipermail/web-sig/2016-March/005455.html
    validator = None
    def init_server(self, application):
        super(TestEnviron, self).init_server(application)
        self.server.environ_class = pywsgi.SecureEnviron
    def application(self, env, start_response):
        self.assertIsInstance(env, pywsgi.SecureEnviron)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return []
    def test_environ_is_secure_by_default(self):
        self.urlopen()
    def test_default_secure_repr(self):
        environ = pywsgi.SecureEnviron()
        # By default only the key count leaks through repr/str.
        self.assertIn('<pywsgi.SecureEnviron dict (keys: 0) at', repr(environ))
        self.assertIn('<pywsgi.SecureEnviron dict (keys: 0) at', str(environ))
        environ['key'] = 'value'
        self.assertIn('<pywsgi.SecureEnviron dict (keys: 1) at', repr(environ))
        self.assertIn('<pywsgi.SecureEnviron dict (keys: 1) at', str(environ))
        # Turning secure_repr off exposes the plain dict repr.
        environ.secure_repr = False
        self.assertEqual(str({'key': 'value'}), str(environ))
        self.assertEqual(repr({'key': 'value'}), repr(environ))
        del environ.secure_repr
        # Keys not on the whitelist have their values masked.
        environ.whitelist_keys = ('missing value',)
        self.assertEqual(str({'key': "<MASKED>"}), str(environ))
        self.assertEqual(repr({'key': "<MASKED>"}), repr(environ))
        environ.whitelist_keys = ('key',)
        self.assertEqual(str({'key': 'value'}), str(environ))
        self.assertEqual(repr({'key': 'value'}), repr(environ))
        del environ.whitelist_keys
    def test_override_class_defaults(self):
        # Per-class defaults propagate to existing instances.
        class EnvironClass(pywsgi.SecureEnviron):
            __slots__ = ()
        environ = EnvironClass()
        self.assertTrue(environ.secure_repr)
        EnvironClass.default_secure_repr = False
        self.assertFalse(environ.secure_repr)
        self.assertEqual(str({}), str(environ))
        self.assertEqual(repr({}), repr(environ))
        EnvironClass.default_secure_repr = True
        EnvironClass.default_whitelist_keys = ('key',)
        environ['key'] = 1
        self.assertEqual(str({'key': 1}), str(environ))
        self.assertEqual(repr({'key': 1}), repr(environ))
        # Clean up for leaktests
        del environ
        del EnvironClass
        import gc; gc.collect()
    def test_copy_still_secure(self):
        for cls in (pywsgi.Environ, pywsgi.SecureEnviron):
            self.assertIsInstance(cls().copy(), cls)
    def test_pickle_copy_returns_dict(self):
        # Anything going through copy.copy/pickle should
        # return the same pickle that a dict would.
        import pickle
        import json
        for cls in (pywsgi.Environ, pywsgi.SecureEnviron):
            bltin = {'key': 'value'}
            env = cls(bltin)
            self.assertIsInstance(env, cls)
            self.assertEqual(bltin, env)
            self.assertEqual(env, bltin)
            for protocol in range(0, pickle.HIGHEST_PROTOCOL + 1):
                # It's impossible to get a subclass of dict to pickle
                # identically, but it can restore identically
                env_dump = pickle.dumps(env, protocol)
                self.assertNotIn(b'Environ', env_dump)
                loaded = pickle.loads(env_dump)
                self.assertEqual(type(loaded), dict)
            self.assertEqual(json.dumps(bltin), json.dumps(env))
if __name__ == '__main__':
greentest.main()
| 67,741 | 34.785526 | 138 | py |
gevent | gevent-master/src/gevent/tests/test__destroy_default_loop.py | from __future__ import print_function
import gevent
import unittest
class TestDestroyDefaultLoop(unittest.TestCase):
    """Destroying the default event loop must not crash the interpreter
    (issue #1098), even when done repeatedly or with a live hub."""
    def tearDown(self):
        self._reset_hub()
        super(TestDestroyDefaultLoop, self).tearDown()
    def _reset_hub(self):
        # Destroy any existing hub (with its loop) and clear the
        # thread-local hub/loop slots so later tests start fresh.
        from gevent._hub_local import set_hub
        from gevent._hub_local import set_loop
        from gevent._hub_local import get_hub_if_exists
        hub = get_hub_if_exists()
        if hub is not None:
            hub.destroy(destroy_loop=True)
        set_hub(None)
        set_loop(None)
    def test_destroy_gc(self):
        # Issue 1098: destroying the default loop
        # while using the C extension could crash
        # the interpreter when it exits
        # Create the hub greenlet. This creates one loop
        # object pointing to the default loop.
        gevent.get_hub()
        # Get a new loop object, but using the default
        # C loop
        loop = gevent.config.loop(default=True)
        self.assertTrue(loop.default)
        # Destroy it
        loop.destroy()
        # It no longer claims to be the default
        self.assertFalse(loop.default)
        # Delete it
        del loop
        # Delete the hub. This prompts garbage
        # collection of it and its loop object.
        # (making this test more repeatable; the exit
        # crash only happened when that greenlet object
        # was collected at exit time, which was most common
        # in CPython 3.5)
        self._reset_hub()
    def test_destroy_two(self):
        # Get two new loop object, but using the default
        # C loop
        loop1 = gevent.config.loop(default=True)
        loop2 = gevent.config.loop(default=True)
        self.assertTrue(loop1.default)
        self.assertTrue(loop2.default)
        # Destroy the first
        loop1.destroy()
        # It no longer claims to be the default
        self.assertFalse(loop1.default)
        # Destroy the second. This doesn't crash.
        loop2.destroy()
        self.assertFalse(loop2.default)
        self.assertFalse(loop2.ptr)
        # After resetting, a fresh hub gets a usable loop again.
        self._reset_hub()
        self.assertTrue(gevent.get_hub().loop.ptr)
if __name__ == '__main__':
unittest.main()
| 2,199 | 29.136986 | 59 | py |
gevent | gevent-master/src/gevent/tests/test__hub_join_timeout.py | import functools
import unittest
import gevent
import gevent.core
from gevent.event import Event
from gevent.testing.testcase import TimeAssertMixin
SMALL_TICK = 0.05
# setting up signal does not affect join()
gevent.signal_handler(1, lambda: None) # wouldn't work on windows
def repeated(func, repetitions=2):
    """Decorator: invoke ``func(self)`` *repetitions* times per call.

    Used so a test method runs more than once in the same process,
    catching state leaked by the first execution.
    """
    @functools.wraps(func)
    def wrapper(self):
        remaining = repetitions
        while remaining:
            func(self)
            remaining -= 1
    return wrapper
class Test(TimeAssertMixin, unittest.TestCase):
    """Tests for gevent.wait()/hub join semantics: when it returns,
    how timeouts behave, and how unref'd watchers are ignored."""
    @repeated
    def test_callback(self):
        # exiting because the spawned greenlet finished execution (spawn (=callback) variant)
        x = gevent.spawn(lambda: 5)
        with self.runs_in_no_time():
            result = gevent.wait(timeout=10)
        self.assertTrue(result)
        self.assertTrue(x.dead, x)
        self.assertEqual(x.value, 5)
    @repeated
    def test_later(self):
        # exiting because the spawned greenlet finished execution (spawn_later (=timer) variant)
        x = gevent.spawn_later(SMALL_TICK, lambda: 5)
        with self.runs_in_given_time(SMALL_TICK):
            result = gevent.wait(timeout=10)
        self.assertTrue(result)
        self.assertTrue(x.dead, x)
    @repeated
    def test_timeout(self):
        # exiting because of timeout (the spawned greenlet still runs)
        x = gevent.spawn_later(10, lambda: 5)
        with self.runs_in_given_time(SMALL_TICK):
            result = gevent.wait(timeout=SMALL_TICK)
        self.assertFalse(result)
        self.assertFalse(x.dead, x)
        # Kill the pending greenlet so a follow-up wait() returns at once.
        x.kill()
        with self.runs_in_no_time():
            result = gevent.wait()
        self.assertTrue(result)
    @repeated
    def test_event(self):
        # exiting because of event (the spawned greenlet still runs)
        x = gevent.spawn_later(10, lambda: 5)
        event = Event()
        event_set = gevent.spawn_later(SMALL_TICK, event.set)
        with self.runs_in_given_time(SMALL_TICK):
            result = gevent.wait([event])
        self.assertEqual(result, [event])
        self.assertFalse(x.dead, x)
        self.assertTrue(event_set.dead)
        self.assertTrue(event.is_set)
        x.kill()
        with self.runs_in_no_time():
            result = gevent.wait()
        self.assertTrue(result)
    @repeated
    def test_ref_arg(self):
        # checking "ref=False" argument
        # An unreferenced watcher must not keep wait() blocked.
        gevent.get_hub().loop.timer(10, ref=False).start(lambda: None)
        with self.runs_in_no_time():
            result = gevent.wait()
        self.assertTrue(result)
    @repeated
    def test_ref_attribute(self):
        # checking "ref=False" attribute
        w = gevent.get_hub().loop.timer(10)
        w.start(lambda: None)
        w.ref = False
        with self.runs_in_no_time():
            result = gevent.wait()
        self.assertTrue(result)
class TestAgain(Test):
    """Run the entire Test suite a second time to catch leaked state."""
if __name__ == '__main__':
unittest.main()
| 2,913 | 28.14 | 96 | py |
gevent | gevent-master/src/gevent/tests/test__example_wsgiserver.py | import sys
try:
from urllib import request as urllib2
except ImportError:
import urllib2
import socket
import ssl
import gevent.testing as greentest
from gevent.testing import DEFAULT_XPC_SOCKET_TIMEOUT
from gevent.testing import util
from gevent.testing import params
@greentest.skipOnCI("Timing issues sometimes lead to a connection refused")
class Test_wsgiserver(util.TestServer):
    """Functional test for examples/wsgiserver.py: basic routing plus
    the key property that one stalled client doesn't block the server."""
    example = 'wsgiserver.py'
    URL = 'http://%s:8088' % (params.DEFAULT_LOCAL_HOST_ADDR,)
    PORT = 8088
    not_found_message = b'<h1>Not Found</h1>'
    ssl_ctx = None
    # Subclasses flip this to exercise the same flow over TLS.
    _use_ssl = False
    def read(self, path='/'):
        url = self.URL + path
        try:
            kwargs = {}
            if self.ssl_ctx is not None:
                kwargs = {'context': self.ssl_ctx}
            response = urllib2.urlopen(url, None,
                                       DEFAULT_XPC_SOCKET_TIMEOUT,
                                       **kwargs)
        except urllib2.HTTPError:
            # urlopen raises for 4xx/5xx; the error object still carries
            # the status and body we want to return.
            response = sys.exc_info()[1]
        result = '%s %s' % (response.code, response.msg), response.read()
        # XXX: It looks like under PyPy this isn't directly closing the socket
        # when SSL is in use. It takes a GC cycle to make that true.
        response.close()
        return result
    def _test_hello(self):
        status, data = self.read('/')
        self.assertEqual(status, '200 OK')
        self.assertEqual(data, b"<b>hello world</b>")
    def _test_not_found(self):
        status, data = self.read('/xxx')
        self.assertEqual(status, '404 Not Found')
        self.assertEqual(data, self.not_found_message)
    def _do_test_a_blocking_client(self):
        # We spawn this in a separate server because if it's broken
        # the whole server hangs
        with self.running_server():
            # First, make sure we can talk to it.
            self._test_hello()
            # Now create a connection and only partway finish
            # the transaction
            sock = socket.create_connection((params.DEFAULT_LOCAL_HOST_ADDR, self.PORT))
            ssl_sock = None
            if self._use_ssl:
                context = ssl.SSLContext()
                ssl_sock = context.wrap_socket(sock)
                sock_file = ssl_sock.makefile(mode='rwb')
            else:
                sock_file = sock.makefile(mode='rwb')
            # write an incomplete request
            sock_file.write(b'GET /xxx HTTP/1.0\r\n')
            sock_file.flush()
            # Leave it open and not doing anything
            # while the other request runs to completion.
            # This demonstrates that a blocking client
            # doesn't hang the whole server
            self._test_hello()
            # now finish the original request
            sock_file.write(b'\r\n')
            sock_file.flush()
            line = sock_file.readline()
            self.assertEqual(line, b'HTTP/1.1 404 Not Found\r\n')
            sock_file.close()
            if ssl_sock is not None:
                ssl_sock.close()
            sock.close()
    def test_a_blocking_client(self):
        self._do_test_a_blocking_client()
if __name__ == '__main__':
greentest.main()
| 3,202 | 32.715789 | 88 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_hub_in_thread.py | from gevent.monkey import patch_all
patch_all(thread=False)
from threading import Thread
import time
# The first time we init the hub is in the native
# thread with time.sleep(), needing multiple
# threads at the same time. Note: this is very timing
# dependent.
# See #687
def func():
    # NOTE(review): after patch_all() at module top, time.sleep is
    # presumably gevent.sleep, whose seconds argument defaults to 0 --
    # so the no-argument call yields to the hub rather than raising
    # TypeError as the stdlib sleep would. Confirm against gevent.sleep.
    time.sleep()
def main():
    """Spawn three native threads running func() and wait for them all.

    The threads are started as they are created so several are alive
    at once, which is what this regression test needs (see #687).
    """
    workers = []
    for _ in range(3):
        worker = Thread(target=func)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
if __name__ == '__main__':
main()
| 520 | 16.965517 | 53 | py |
gevent | gevent-master/src/gevent/tests/test__refcount_core.py | import sys
import weakref
from gevent import testing as greentest
class Dummy(object):
    """Instantiation imports gevent.core as a side effect; the instance
    itself holds no references, so on a refcounting interpreter a
    weakref to it dies as soon as the object does."""
    def __init__(self):
        __import__('gevent.core')
@greentest.skipIf(weakref.ref(Dummy())() is not None,
                  "Relies on refcounting for fast weakref cleanup")
class Test(greentest.TestCase):
    """A closed, dereferenced socket must be collected immediately
    (no cycles keeping it alive) on refcounting interpreters."""
    def test(self):
        from gevent import socket
        s = socket.socket()
        r = weakref.ref(s)
        s.close()
        del s
        # With no cycles, dropping the last reference kills the weakref.
        self.assertIsNone(r())
# Sanity check for the skipIf condition above: on refcounted CPython the
# weakref dies immediately; PyPy (non-refcounting GC) is exempt.
assert weakref.ref(Dummy())() is None or hasattr(sys, 'pypy_version_info')
if __name__ == '__main__':
greentest.main()
| 600 | 22.115385 | 74 | py |
gevent | gevent-master/src/gevent/tests/lock_tests.py | """
Various tests for synchronization primitives.
"""
# pylint:disable=no-member,abstract-method
import sys
import time
try:
from thread import start_new_thread, get_ident
except ImportError:
from _thread import start_new_thread, get_ident
import threading
import unittest
from gevent.testing import support
from gevent.testing.testcase import TimeAssertMixin
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """A group of raw threads all running the same callable.

    Thread idents are recorded in ``started`` and ``finished`` so tests
    can poll for progress without using the primitives under test.
    """
    def __init__(self, f, n, wait_before_exit=False):
        """Start `n` threads, each running `f`.

        With ``wait_before_exit=True`` the threads linger (busy-waiting)
        after `f` returns until :meth:`do_finish` releases them.
        """
        self.f = f
        self.n = n
        self.started = []
        self.finished = []
        self._can_exit = not wait_before_exit
        def task():
            ident = get_ident()
            self.started.append(ident)
            try:
                f()
            finally:
                # Record completion even if f() raised, then optionally
                # hold the thread alive until released.
                self.finished.append(ident)
                while not self._can_exit:
                    _wait()
        for _ in range(n):
            start_new_thread(task, ())
    def wait_for_started(self):
        # Poll until every thread has checked in.
        while len(self.started) < self.n:
            _wait()
    def wait_for_finished(self):
        # Poll until every thread has run f() to completion.
        while len(self.finished) < self.n:
            _wait()
    def do_finish(self):
        # Release threads held by wait_before_exit=True.
        self._can_exit = True
class BaseTestCase(TimeAssertMixin, unittest.TestCase):
    """Shared fixture: snapshot live threads before each test and insist
    they are cleaned up (and child processes reaped) afterwards."""
    def setUp(self):
        self._threads = support.threading_setup()
    def tearDown(self):
        support.threading_cleanup(*self._threads)
        support.reap_children()
class BaseLockTests(BaseTestCase):
    """
    Tests for both recursive and non-recursive locks.
    """
    def locktype(self):
        # Concrete subclasses supply the lock class under test.
        raise NotImplementedError()
    def test_constructor(self):
        lock = self.locktype()
        del lock
    def test_acquire_destroy(self):
        # Destroying a held lock must not error.
        lock = self.locktype()
        lock.acquire()
        del lock
    def test_acquire_release(self):
        lock = self.locktype()
        lock.acquire()
        lock.release()
        del lock
    def test_try_acquire(self):
        lock = self.locktype()
        self.assertTrue(lock.acquire(False))
        lock.release()
    def test_try_acquire_contended(self):
        # A non-blocking acquire from another thread fails while held.
        lock = self.locktype()
        lock.acquire()
        result = []
        def f():
            result.append(lock.acquire(False))
        Bunch(f, 1).wait_for_finished()
        self.assertFalse(result[0])
        lock.release()
    def test_acquire_contended(self):
        # N threads block on a held lock; all proceed once it's released.
        lock = self.locktype()
        lock.acquire()
        N = 5
        def f():
            lock.acquire()
            lock.release()
        b = Bunch(f, N)
        b.wait_for_started()
        _wait()
        self.assertEqual(len(b.finished), 0)
        lock.release()
        b.wait_for_finished()
        self.assertEqual(len(b.finished), N)
    def test_with(self):
        # 'with lock:' releases on both normal exit and exception.
        lock = self.locktype()
        def f():
            lock.acquire()
            lock.release()
        def _with(err=None):
            with lock:
                if err is not None:
                    raise err # pylint:disable=raising-bad-type
        _with()
        # Check the lock is unacquired
        Bunch(f, 1).wait_for_finished()
        self.assertRaises(TypeError, _with, TypeError)
        # Check the lock is unacquired
        Bunch(f, 1).wait_for_finished()
    def test_thread_leak(self):
        # The lock shouldn't leak a Thread instance when used from a foreign
        # (non-threading) thread.
        lock = self.locktype()
        def f():
            lock.acquire()
            lock.release()
        n = len(threading.enumerate())
        # We run many threads in the hope that existing threads ids won't
        # be recycled.
        Bunch(f, 15).wait_for_finished()
        self.assertEqual(n, len(threading.enumerate()))
class LockTests(BaseLockTests): # pylint:disable=abstract-method
    """
    Tests for non-recursive, weak locks
    (which can be acquired and released from different threads).
    """
    def test_reacquire(self):
        # Lock needs to be released before re-acquiring.
        lock = self.locktype()
        phase = []
        def f():
            lock.acquire()
            phase.append(None)
            # Second acquire blocks until the main thread releases.
            lock.acquire()
            phase.append(None)
        start_new_thread(f, ())
        while not phase:
            _wait()
        _wait()
        self.assertEqual(len(phase), 1)
        lock.release()
        while len(phase) == 1:
            _wait()
        self.assertEqual(len(phase), 2)
    def test_different_thread(self):
        # Lock can be released from a different thread.
        lock = self.locktype()
        lock.acquire()
        def f():
            lock.release()
        b = Bunch(f, 1)
        b.wait_for_finished()
        lock.acquire()
        lock.release()
class RLockTests(BaseLockTests):
    """
    Tests for recursive locks.
    """
    def test_reacquire(self):
        # The owning thread may acquire repeatedly; releases must balance.
        lock = self.locktype()
        lock.acquire()
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()
        lock.release()
    def test_release_unacquired(self):
        # Cannot release an unacquired lock
        lock = self.locktype()
        self.assertRaises(RuntimeError, lock.release)
        lock.acquire()
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()
        lock.release()
        # Fully released again: one more release is an error.
        self.assertRaises(RuntimeError, lock.release)
    def test_different_thread(self):
        # Cannot release from a different thread
        lock = self.locktype()
        def f():
            lock.acquire()
        b = Bunch(f, 1, True)
        try:
            self.assertRaises(RuntimeError, lock.release)
        finally:
            b.do_finish()
    def test__is_owned(self):
        lock = self.locktype()
        self.assertFalse(lock._is_owned())
        lock.acquire()
        self.assertTrue(lock._is_owned())
        lock.acquire()
        self.assertTrue(lock._is_owned())
        result = []
        def f():
            # Ownership is per-thread: this thread does not own the lock.
            result.append(lock._is_owned())
        Bunch(f, 1).wait_for_finished()
        self.assertFalse(result[0])
        lock.release()
        self.assertTrue(lock._is_owned())
        lock.release()
        self.assertFalse(lock._is_owned())
class EventTests(BaseTestCase):
    """
    Tests for Event objects.
    """
    def eventtype(self):
        # Concrete subclasses supply the Event class under test.
        raise NotImplementedError()
    def test_is_set(self):
        # set()/clear() are idempotent.
        evt = self.eventtype()
        self.assertFalse(evt.is_set())
        evt.set()
        self.assertTrue(evt.is_set())
        evt.set()
        self.assertTrue(evt.is_set())
        evt.clear()
        self.assertFalse(evt.is_set())
        evt.clear()
        self.assertFalse(evt.is_set())
    def _check_notify(self, evt):
        # All threads get notified
        N = 5
        results1 = []
        results2 = []
        def f():
            evt.wait()
            results1.append(evt.is_set())
            evt.wait()
            results2.append(evt.is_set())
        b = Bunch(f, N)
        b.wait_for_started()
        _wait()
        self.assertEqual(len(results1), 0)
        evt.set()
        b.wait_for_finished()
        self.assertEqual(results1, [True] * N)
        self.assertEqual(results2, [True] * N)
    def test_notify(self):
        evt = self.eventtype()
        self._check_notify(evt)
        # Another time, after an explicit clear()
        evt.set()
        evt.clear()
        self._check_notify(evt)
    def test_timeout(self):
        evt = self.eventtype()
        results1 = []
        results2 = []
        N = 5
        def f():
            # A zero timeout returns immediately without the event set.
            evt.wait(0.0)
            results1.append(evt.is_set())
            t1 = time.time()
            evt.wait(0.2)
            r = evt.is_set()
            t2 = time.time()
            results2.append((r, t2 - t1))
        Bunch(f, N).wait_for_finished()
        self.assertEqual(results1, [False] * N)
        for r, dt in results2:
            self.assertFalse(r)
            self.assertTimeWithinRange(dt, 0.18, 10)
        # The event is set
        results1 = []
        results2 = []
        evt.set()
        Bunch(f, N).wait_for_finished()
        self.assertEqual(results1, [True] * N)
        for r, dt in results2:
            self.assertTrue(r)
class ConditionTests(BaseTestCase):
    """
    Tests for condition variables.
    """
    def condtype(self, *args):
        # Concrete subclasses supply the Condition class under test.
        raise NotImplementedError()
    def test_acquire(self):
        cond = self.condtype()
        # Be default we have an RLock: the condition can be acquired multiple
        # times.
        # pylint:disable=consider-using-with
        cond.acquire()
        cond.acquire()
        cond.release()
        cond.release()
        # With an explicit (non-recursive) Lock, acquiring the condition
        # acquires the underlying lock and re-acquisition fails.
        lock = threading.Lock()
        cond = self.condtype(lock)
        cond.acquire()
        self.assertFalse(lock.acquire(False))
        cond.release()
        self.assertTrue(lock.acquire(False))
        self.assertFalse(cond.acquire(False))
        lock.release()
        with cond:
            self.assertFalse(lock.acquire(False))
    def test_unacquired_wait(self):
        cond = self.condtype()
        self.assertRaises(RuntimeError, cond.wait)
    def test_unacquired_notify(self):
        cond = self.condtype()
        self.assertRaises(RuntimeError, cond.notify)
    def _check_notify(self, cond):
        # Wake waiters in batches of 3, 5, then all; phase_num (read by
        # the closure) records which notify round released each thread.
        N = 5
        results1 = []
        results2 = []
        phase_num = 0
        def f():
            cond.acquire()
            cond.wait()
            cond.release()
            results1.append(phase_num)
            cond.acquire()
            cond.wait()
            cond.release()
            results2.append(phase_num)
        b = Bunch(f, N)
        b.wait_for_started()
        _wait()
        self.assertEqual(results1, [])
        # Notify 3 threads at first
        cond.acquire()
        cond.notify(3)
        _wait()
        phase_num = 1
        cond.release()
        while len(results1) < 3:
            _wait()
        self.assertEqual(results1, [1] * 3)
        self.assertEqual(results2, [])
        # Notify 5 threads: they might be in their first or second wait
        cond.acquire()
        cond.notify(5)
        _wait()
        phase_num = 2
        cond.release()
        while len(results1) + len(results2) < 8:
            _wait()
        self.assertEqual(results1, [1] * 3 + [2] * 2)
        self.assertEqual(results2, [2] * 3)
        # Notify all threads: they are all in their second wait
        cond.acquire()
        cond.notify_all()
        _wait()
        phase_num = 3
        cond.release()
        while len(results2) < 5:
            _wait()
        self.assertEqual(results1, [1] * 3 + [2] * 2)
        self.assertEqual(results2, [2] * 3 + [3] * 2)
        b.wait_for_finished()
    def test_notify(self):
        cond = self.condtype()
        self._check_notify(cond)
        # A second time, to check internal state is still ok.
        self._check_notify(cond)
    def test_timeout(self):
        cond = self.condtype()
        results = []
        N = 5
        def f():
            cond.acquire()
            t1 = time.time()
            cond.wait(0.2)
            t2 = time.time()
            cond.release()
            results.append(t2 - t1)
        Bunch(f, N).wait_for_finished()
        self.assertEqual(len(results), 5)
        for dt in results:
            # XXX: libuv sometimes produces 0.19958
            self.assertTimeWithinRange(dt, 0.19, 2.0)
class BaseSemaphoreTests(BaseTestCase):
    """
    Common tests for {bounded, unbounded} semaphore objects.
    """
    def semtype(self, *args):
        # Concrete subclasses supply the semaphore class under test.
        raise NotImplementedError()
    def test_constructor(self):
        self.assertRaises(ValueError, self.semtype, value=-1)
        # Py3 doesn't have sys.maxint
        self.assertRaises((ValueError, OverflowError), self.semtype,
                          value=-getattr(sys, 'maxint', getattr(sys, 'maxsize', None)))
    def test_acquire(self):
        sem = self.semtype(1)
        sem.acquire()
        sem.release()
        sem = self.semtype(2)
        sem.acquire()
        sem.acquire()
        sem.release()
        sem.release()
    def test_acquire_destroy(self):
        sem = self.semtype()
        sem.acquire()
        del sem
    def test_acquire_contended(self):
        # 10 threads each acquire twice against an initial value of 7
        # (one unit is pre-acquired); releases happen in two rounds and
        # phase_num tags which round each acquisition succeeded in.
        sem = self.semtype(7)
        sem.acquire()
        #N = 10
        results1 = []
        results2 = []
        phase_num = 0
        def f():
            sem.acquire()
            results1.append(phase_num)
            sem.acquire()
            results2.append(phase_num)
        b = Bunch(f, 10)
        b.wait_for_started()
        while len(results1) + len(results2) < 6:
            _wait()
        self.assertEqual(results1 + results2, [0] * 6)
        phase_num = 1
        for _ in range(7):
            sem.release()
        while len(results1) + len(results2) < 13:
            _wait()
        self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7)
        phase_num = 2
        for _ in range(6):
            sem.release()
        while len(results1) + len(results2) < 19:
            _wait()
        self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7 + [2] * 6)
        # The semaphore is still locked
        self.assertFalse(sem.acquire(False))
        # Final release, to let the last thread finish
        sem.release()
        b.wait_for_finished()
    def test_try_acquire(self):
        sem = self.semtype(2)
        self.assertTrue(sem.acquire(False))
        self.assertTrue(sem.acquire(False))
        self.assertFalse(sem.acquire(False))
        sem.release()
        self.assertTrue(sem.acquire(False))
    def test_try_acquire_contended(self):
        # Value 4 with one pre-acquired: exactly 3 of 10 non-blocking
        # acquires across 5 threads can succeed.
        sem = self.semtype(4)
        sem.acquire()
        results = []
        def f():
            results.append(sem.acquire(False))
            results.append(sem.acquire(False))
        Bunch(f, 5).wait_for_finished()
        # There can be a thread switch between acquiring the semaphore and
        # appending the result, therefore results will not necessarily be
        # ordered.
        self.assertEqual(sorted(results), [False] * 7 + [True] * 3)
    def test_default_value(self):
        # The default initial value is 1.
        sem = self.semtype()
        sem.acquire()
        def f():
            sem.acquire()
            sem.release()
        b = Bunch(f, 1)
        b.wait_for_started()
        _wait()
        self.assertFalse(b.finished)
        sem.release()
        b.wait_for_finished()
    def test_with(self):
        sem = self.semtype(2)
        def _with(err=None):
            with sem:
                self.assertTrue(sem.acquire(False))
                sem.release()
                with sem:
                    self.assertFalse(sem.acquire(False))
                    if err:
                        raise err # pylint:disable=raising-bad-type
        _with()
        self.assertTrue(sem.acquire(False))
        sem.release()
        self.assertRaises(TypeError, _with, TypeError)
        # Exception inside 'with' must still have released both units.
        self.assertTrue(sem.acquire(False))
        sem.release()
class SemaphoreTests(BaseSemaphoreTests):
    """
    Tests for unbounded semaphores.
    """
    def test_release_unacquired(self):
        # Unbounded releases are allowed and increment the semaphore's value
        sem = self.semtype(1)
        sem.release()
        # Value is now 2, so two acquires succeed without blocking.
        sem.acquire()
        sem.acquire()
        sem.release()
class BoundedSemaphoreTests(BaseSemaphoreTests):
    """
    Tests for bounded semaphores.
    """
    def test_release_unacquired(self):
        # Cannot go past the initial value
        sem = self.semtype()
        self.assertRaises(ValueError, sem.release)
        sem.acquire()
        sem.release()
        # Back at the initial value: another release overflows again.
        self.assertRaises(ValueError, sem.release)
class BarrierTests(BaseTestCase):
    """
    Tests for Barrier objects.

    ``self.barriertype`` is supplied by the concrete subclass; each test
    drives N-1 worker threads plus the calling thread through the barrier.
    """
    # Number of parties each barrier is created for.
    N = 5
    # Default per-barrier timeout; individual tests use shorter waits.
    defaultTimeout = 2.0
    def setUp(self):
        self.barrier = self.barriertype(self.N, timeout=self.defaultTimeout)
    def tearDown(self):
        # Break the barrier so any straggler threads are released.
        self.barrier.abort()
    def run_threads(self, f):
        # Run f in N-1 background threads AND in the calling thread, so
        # exactly N parties participate.
        b = Bunch(f, self.N-1)
        f()
        b.wait_for_finished()
    def multipass(self, results, n):
        # Cross the barrier 2*n times, checking after each crossing that
        # every party finished the previous phase (lockstep progress).
        m = self.barrier.parties
        self.assertEqual(m, self.N)
        for i in range(n):
            results[0].append(True)
            self.assertEqual(len(results[1]), i * m)
            self.barrier.wait()
            results[1].append(True)
            self.assertEqual(len(results[0]), (i + 1) * m)
            self.barrier.wait()
        self.assertEqual(self.barrier.n_waiting, 0)
        self.assertFalse(self.barrier.broken)
    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [[], []]
        def f():
            self.multipass(results, passes)
        self.run_threads(f)
    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)
    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        results = []
        def f():
            r = self.barrier.wait()
            results.append(r)
        self.run_threads(f)
        # wait() hands out each index 0..N-1 exactly once.
        self.assertEqual(sum(results), sum(range(self.N)))
    def test_action(self):
        """
        Test the 'action' callback
        """
        results = []
        def action():
            results.append(True)
        barrier = self.barriertype(self.N, action)
        def f():
            barrier.wait()
            # The action ran exactly once, before anyone was released.
            self.assertEqual(len(results), 1)
        self.run_threads(f)
    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = []
        results2 = []
        def f():
            try:
                i = self.barrier.wait()
                if i == self.N//2:
                    raise RuntimeError
                self.barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
            except RuntimeError:
                self.barrier.abort()
        self.run_threads(f)
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)
    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = []
        results2 = []
        results3 = []
        def f():
            i = self.barrier.wait()
            if i == self.N//2:
                # Wait until the other threads are all in the barrier.
                while self.barrier.n_waiting < self.N-1:
                    time.sleep(0.001)
                self.barrier.reset()
            else:
                try:
                    self.barrier.wait()
                    results1.append(True)
                except threading.BrokenBarrierError:
                    results2.append(True)
            # Now, pass the barrier again
            self.barrier.wait()
            results3.append(True)
        self.run_threads(f)
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = []
        results2 = []
        results3 = []
        barrier2 = self.barriertype(self.N)
        def f():
            try:
                i = self.barrier.wait()
                if i == self.N//2:
                    raise RuntimeError
                self.barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
            except RuntimeError:
                self.barrier.abort()
            # Synchronize and reset the barrier. Must synchronize first so
            # that everyone has left it when we reset, and after so that no
            # one enters it before the reset.
            if barrier2.wait() == self.N//2:
                self.barrier.reset()
            barrier2.wait()
            self.barrier.wait()
            results3.append(True)
        self.run_threads(f)
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
    def test_timeout(self):
        """
        Test wait(timeout)
        """
        def f():
            i = self.barrier.wait()
            if i == self.N // 2:
                # One thread is late!
                time.sleep(1.0)
            # Default timeout is 2.0, so this is shorter.
            self.assertRaises(threading.BrokenBarrierError,
                              self.barrier.wait, 0.5)
        self.run_threads(f)
    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        # create a barrier with a low default timeout
        barrier = self.barriertype(self.N, timeout=0.3)
        def f():
            i = barrier.wait()
            if i == self.N // 2:
                # One thread is later than the default timeout of 0.3s.
                time.sleep(1.0)
            self.assertRaises(threading.BrokenBarrierError, barrier.wait)
        self.run_threads(f)
    def test_single_thread(self):
        # A one-party barrier never blocks.
        b = self.barriertype(1)
        b.wait()
        b.wait()
# This module is a library of test mixins, not a runnable test module.
if __name__ == '__main__':
    print("This module contains no tests; it is used by other test cases like test_threading_2")
| 21,858 | 27.573856 | 96 | py |
gevent | gevent-master/src/gevent/tests/_blocks_at_top_level.py | from gevent import sleep
sleep(0.01)
x = "done"
| 48 | 11.25 | 24 | py |
gevent | gevent-master/src/gevent/tests/test__selectors.py | # Tests for gevent.selectors in its native form, without
# monkey-patching.
import gevent
from gevent import socket
from gevent import selectors
import gevent.testing as greentest
class SelectorTestMixin(object):
    """
    Mixin providing an echo workflow over a socketpair for exercising
    a selector implementation.
    """
    @staticmethod
    def run_selector_once(sel, timeout=3):
        # Run in a background greenlet, leaving the main
        # greenlet free to send data.
        events = sel.select(timeout=timeout)
        for key, mask in events:
            # key.data holds the callback passed to register().
            key.data(sel, key.fileobj, mask)
        gevent.sleep()
    # Whether the echo callback unregisters the connection before closing it.
    unregister_after_send = True
    def read_from_ready_socket_and_reply(self, selector, conn, _events):
        # Echo callback registered for EVENT_READ readiness.
        data = conn.recv(100) # Should be ready
        if data:
            conn.send(data) # Hope it won't block
        # Must unregister before we close.
        if self.unregister_after_send:
            selector.unregister(conn)
        conn.close()
    def _check_selector(self, sel):
        # Round-trip a small payload through a socketpair using *sel*.
        server, client = socket.socketpair()
        glet = None
        try:
            sel.register(server, selectors.EVENT_READ, self.read_from_ready_socket_and_reply)
            glet = gevent.spawn(self.run_selector_once, sel)
            DATA = b'abcdef'
            client.send(DATA)
            data = client.recv(50) # here is probably where we yield to the event loop
            self.assertEqual(data, DATA)
        finally:
            sel.close()
            server.close()
            client.close()
            if glet is not None:
                glet.join(10)
        self.assertTrue(glet is not None and glet.ready())
class GeventSelectorTest(SelectorTestMixin,
                         greentest.TestCase):
    """
    Tests for :class:`gevent.selectors.GeventSelector` without
    monkey-patching.
    """
    def test_select_using_socketpair(self):
        # Basic test.
        with selectors.GeventSelector() as sel:
            self._check_selector(sel)
    def test_select_many_sockets(self):
        try:
            AF_UNIX = socket.AF_UNIX
        except AttributeError:
            # Platforms (Windows) without AF_UNIX socketpairs.
            AF_UNIX = None
        pairs = [socket.socketpair() for _ in range(10)]
        try:
            server_sel = selectors.GeventSelector()
            client_sel = selectors.GeventSelector()
            for i, pair in enumerate(pairs):
                server, client = pair
                server_sel.register(server, selectors.EVENT_READ,
                                    self.read_from_ready_socket_and_reply)
                client_sel.register(client, selectors.EVENT_READ, i)
                # Prime them all to be ready at once.
                data = str(i).encode('ascii')
                client.send(data)
            # Read and reply to all the clients..
            # Everyone should be ready, so we ask not to block.
            # The call to gevent.idle() is there to make sure that
            # all event loop implementations (looking at you, libuv)
            # get a chance to poll for IO. Without it, libuv
            # doesn't find any results here.
            # Not blocking only works for AF_UNIX sockets, though.
            # If we got AF_INET (Windows) the data may need some time to
            # traverse through the layers.
            gevent.idle()
            self.run_selector_once(
                server_sel,
                timeout=-1 if pairs[0][0].family == AF_UNIX else 3)
            found = 0
            for key, _ in client_sel.select(timeout=3):
                # key.data is the index registered above; the echoed
                # payload must match it.
                expected = str(key.data).encode('ascii')
                data = key.fileobj.recv(50)
                self.assertEqual(data, expected)
                found += 1
            self.assertEqual(found, len(pairs))
        finally:
            server_sel.close()
            client_sel.close()
            for pair in pairs:
                for s in pair:
                    s.close()
if __name__ == '__main__':
greentest.main()
| 3,789 | 32.839286 | 93 | py |
gevent | gevent-master/src/gevent/tests/getaddrinfo_module.py | import socket
import gevent.socket as gevent_socket
gevent_socket.getaddrinfo(u'gevent.org', None, socket.AF_INET)
| 116 | 22.4 | 62 | py |
gevent | gevent-master/src/gevent/tests/_import_patch.py | import gevent.monkey
gevent.monkey.patch_all()
| 47 | 15 | 25 | py |
gevent | gevent-master/src/gevent/tests/test__examples.py | """
Test the contents of the ``examples/`` directory.
If an existing test in *this* directory named ``test__example_<fn>.py`` exists,
where ``<fn>`` is the base filename of an example file, it will not be tested
here.
Examples can specify that they need particular test resources to be enabled
by commenting (one per line) ``# gevent-test-requires-resource: <resource>``;
most commonly the resource will be ``network``. You can use this technique to specify
non-existant resources for things that should never be tested.
"""
import re
import os
import glob
import time
import unittest
import gevent.testing as greentest
from gevent.testing import util
this_dir = os.path.dirname(__file__)
def _find_files_to_ignore():
    """
    Return the example basenames that already have a dedicated
    ``test__example_<name>.py`` module (plus platform-specific skips).
    """
    # Switch into this directory so glob() yields bare file names;
    # always switch back, even if globbing fails.
    saved_cwd = os.getcwd()
    os.chdir(this_dir)
    try:
        prefix = 'test__example_'
        ignored = [name[len(prefix):]
                   for name in glob.glob(prefix + '*.py')]
        if greentest.PYPY and greentest.RUNNING_ON_APPVEYOR:
            # For some reason on Windows with PyPy, this times out,
            # when it should be very fast.
            ignored.append("processes.py")
    finally:
        os.chdir(saved_cwd)
    return ignored
# (min_seconds, max_seconds) an example is expected to take to run.
default_time_range = (2, 10)
# Per-example overrides of default_time_range, keyed by example basename.
time_ranges = { # pylint:disable=consider-using-namedtuple-or-dataclass
    'concurrent_download.py': (0, 30),
    'processes.py': (0, default_time_range[-1])
}
class _AbstractTestMixin(util.ExampleMixin):
    """
    Runs the example script named by ``example`` and checks that it
    exits successfully within ``time_range`` (min, max) seconds.
    """
    time_range = default_time_range
    example = None
    def _check_resources(self):
        # Skip the test unless every resource the example declares with a
        # '# gevent-test-requires-resource: <name>' comment is enabled.
        from gevent.testing import resources
        # pylint:disable=unspecified-encoding
        with open(os.path.join(self.cwd, self.example), 'r') as f:
            contents = f.read()
        pattern = re.compile('^# gevent-test-requires-resource: (.*)$', re.MULTILINE)
        for match in pattern.finditer(contents):
            # Use the captured group directly instead of re-slicing the
            # file contents by the match offsets.
            resources.skip_without_resource(match.group(1))
    def test_runs(self):
        self._check_resources()
        start = time.time()
        min_time, max_time = self.time_range
        self.start_kwargs = {
            'timeout': max_time,
            'quiet': True,
            'buffer_output': True,
            'nested': True,
            'setenv': {'GEVENT_DEBUG': 'error'}
        }
        if not self.run_example():
            self.fail("Failed example: " + self.example)
        else:
            took = time.time() - start
            # The example must have done real work, not exited instantly.
            self.assertGreaterEqual(took, min_time)
def _build_test_classes():
    # Build a {class_name: TestCase subclass} dict, one class per example
    # script that does not already have a hand-written test module.
    result = {}
    try:
        example_dir = util.ExampleMixin().cwd
    except unittest.SkipTest:
        # No examples shipped (e.g. an installed-only layout).
        util.log("WARNING: No examples dir found", color='suboptimal-behaviour')
        return result
    ignore = _find_files_to_ignore()
    for filename in glob.glob(example_dir + '/*.py'):
        bn = os.path.basename(filename)
        if bn in ignore:
            continue
        tc = type(
            'Test_' + bn,
            (_AbstractTestMixin, greentest.TestCase),
            {
                'example': bn,
                'time_range': time_ranges.get(bn, _AbstractTestMixin.time_range)
            }
        )
        result[tc.__name__] = tc
    return result
# Install the generated classes into this module's namespace so unittest
# discovery finds them. (At module level, locals() is globals().)
for k, v in _build_test_classes().items():
    locals()[k] = v
if __name__ == '__main__':
greentest.main()
| 3,336 | 29.336364 | 107 | py |
gevent | gevent-master/src/gevent/tests/test__issue330.py | # A greenlet that's killed before it is ever started
# should never be switched to
import gevent
import gevent.testing as greentest
class MyException(Exception):
pass
class TestSwitch(greentest.TestCase):
    """
    A greenlet killed before it is ever started must never be
    switched to, no matter how it is killed (issue #330).
    """
    def setUp(self):
        super(TestSwitch, self).setUp()
        self.switched_to = [False, False]
        self.caught = None
    def should_never_run(self, i): # pragma: no cover
        # Records that a greenlet body executed; the tests assert it never does.
        self.switched_to[i] = True
    def check(self, g, g2):
        # Common assertions: neither greenlet ran, and both finished with
        # a clean GreenletExit rather than a reported error.
        gevent.joinall((g, g2))
        self.assertEqual([False, False], self.switched_to)
        # They both have a GreenletExit as their value
        self.assertIsInstance(g.value, gevent.GreenletExit)
        self.assertIsInstance(g2.value, gevent.GreenletExit)
        # They both have no reported exc_info
        self.assertIsNone(g.exc_info)
        self.assertIsNone(g2.exc_info)
        self.assertIsNone(g.exception)
        self.assertIsNone(g2.exception)
    def test_gevent_kill(self):
        g = gevent.spawn(self.should_never_run, 0) # create but do not switch to
        g2 = gevent.spawn(self.should_never_run, 1) # create but do not switch to
        # Using gevent.kill
        gevent.kill(g)
        gevent.kill(g2)
        self.check(g, g2)
    def test_greenlet_kill(self):
        # killing directly
        g = gevent.spawn(self.should_never_run, 0)
        g2 = gevent.spawn(self.should_never_run, 1)
        g.kill()
        g2.kill()
        self.check(g, g2)
    def test_throw(self):
        # throwing
        g = gevent.spawn(self.should_never_run, 0)
        g2 = gevent.spawn(self.should_never_run, 1)
        g.throw(gevent.GreenletExit)
        g2.throw(gevent.GreenletExit)
        self.check(g, g2)
    def catcher(self):
        # Yields forever until killed; stores the exception it was killed with.
        try:
            while True:
                gevent.sleep(0)
        except MyException as e:
            self.caught = e
    def test_kill_exception(self):
        # Killing with gevent.kill gets the right exception,
        # and we can pass exception objects, not just exception classes.
        g = gevent.spawn(self.catcher)
        g.start()
        gevent.sleep()
        gevent.kill(g, MyException())
        gevent.sleep()
        self.assertIsInstance(self.caught, MyException)
        # The greenlet handled the exception itself, so none is reported.
        # (This previously passed MyException as the assertion *msg*
        # argument, which was meaningless.)
        self.assertIsNone(g.exception)
if __name__ == '__main__':
greentest.main()
| 2,333 | 27.120482 | 81 | py |
gevent | gevent-master/src/gevent/tests/test__signal.py | from __future__ import print_function
import signal
import gevent.testing as greentest
import gevent
import pkg_resources
# Determine the installed CFFI version, if any; test_reload is skipped
# on CFFI backends older than 1.11.3 (see the skipIf below).
try:
    cffi_version = pkg_resources.get_distribution('cffi').parsed_version
except Exception: # pylint:disable=broad-except
    # No cffi installed. Shouldn't happen to gevent standard tests,
    # but maybe some downstream distributor removed it.
    cffi_version = None
class Expected(Exception):
    """Marker exception raised by the SIGALRM handler under test."""
    pass
def raise_Expected():
    """Signal handler: raise Expected so tests can observe delivery."""
    raise Expected('TestSignal')
@greentest.skipUnless(hasattr(signal, 'SIGALRM'),
                      "Uses SIGALRM")
class TestSignal(greentest.TestCase):
    """
    Tests for gevent.signal_handler, driven by SIGALRM.
    """
    error_fatal = False
    __timeout__ = greentest.LARGE_TIMEOUT
    def test_handler(self):
        # The handler argument must be callable.
        with self.assertRaises(TypeError):
            gevent.signal_handler(signal.SIGALRM, 1)
    def test_alarm(self):
        sig = gevent.signal_handler(signal.SIGALRM, raise_Expected)
        # Signal watchers do not keep the loop alive by default, but
        # the ref flag is writable.
        self.assertFalse(sig.ref)
        sig.ref = True
        self.assertTrue(sig.ref)
        sig.ref = False
        def test():
            signal.alarm(1)
            with self.assertRaises(Expected) as exc:
                gevent.sleep(2)
            ex = exc.exception
            self.assertEqual(str(ex), 'TestSignal')
        try:
            test()
            # also let's check that the handler stays installed.
            test()
        finally:
            sig.cancel()
    @greentest.skipIf((greentest.PY3
                       and greentest.CFFI_BACKEND
                       and cffi_version < pkg_resources.parse_version('1.11.3')),
                      "https://bitbucket.org/cffi/cffi/issues/352/systemerror-returned-a-result-with-an")
    @greentest.ignores_leakcheck
    def test_reload(self):
        # The site module tries to set attributes
        # on all the modules that are loaded (specifically, __file__).
        # If gevent.signal is loaded, and is our compatibility shim,
        # this used to fail on Python 2: sys.modules['gevent.signal'] has no
        # __loader__ attribute, so site.py's main() function tries to do
        # gevent.signal.__file__ = os.path.abspath(gevent.signal.__file__), which
        # used to not be allowed. (Under Python 3, __loader__ is present so this
        # doesn't happen). See
        # https://github.com/gevent/gevent/issues/805
        # This fails on Python 3.5 under linux (travis CI) but not
        # locally on macOS with (for both libuv and libev cffi); sometimes it
        # failed with libuv on Python 3.6 too, but not always:
        # AttributeError: cffi library 'gevent.libuv._corecffi' has no function,
        # constant or global variable named '__loader__'
        # which in turn leads to:
        # SystemError: <built-in function getattr> returned a result with an error set
        # It's not safe to continue after a SystemError, so we just skip the test there.
        # As of Jan 2018 with CFFI 1.11.2 this happens reliably on macOS 3.6 and 3.7
        # as well.
        # See https://bitbucket.org/cffi/cffi/issues/352/systemerror-returned-a-result-with-an
        # This is fixed in 1.11.3
        import gevent.signal # make sure it's in sys.modules pylint:disable=redefined-outer-name
        assert gevent.signal
        import site
        if greentest.PY3:
            from importlib import reload as reload_module
        else:
            # builtin on py2
            reload_module = reload # pylint:disable=undefined-variable
        try:
            reload_module(site)
        except TypeError:
            # Non-CFFI on Travis triggers this, for some reason,
            # but only on 3.6, not 3.4 or 3.5, and not yet on 3.7.
            # The only module seen to trigger this is __main__, i.e., this module.
            # This is hard to trigger in a virtualenv since it appears they
            # install their own site.py, different from the one that ships with
            # Python 3.6., and at least the version I have doesn't mess with
            # __cached__
            assert greentest.PY36
            import sys
            for m in set(sys.modules.values()):
                try:
                    if m.__cached__ is None:
                        print("Module has None __cached__", m, file=sys.stderr)
                except AttributeError:
                    continue
if __name__ == '__main__':
greentest.main()
| 4,385 | 35.247934 | 105 | py |
gevent | gevent-master/src/gevent/tests/test__subprocess_interrupted.py | import sys
if 'runtestcase' in sys.argv[1:]: # pragma: no cover
import gevent
import gevent.subprocess
gevent.spawn(sys.exit, 'bye')
# Look closely, this doesn't actually do anything, that's a string
# not a division
gevent.subprocess.Popen([sys.executable, '-c', '"1/0"'])
gevent.sleep(1)
else:
# XXX: Handle this more automatically. See comments in the testrunner.
from gevent.testing.resources import exit_without_resource
exit_without_resource('subprocess')
import subprocess
for _ in range(5):
# not on Py2 pylint:disable=consider-using-with
out, err = subprocess.Popen([sys.executable, '-W', 'ignore',
__file__, 'runtestcase'],
stderr=subprocess.PIPE).communicate()
# We've seen a few unexpected forms of output.
#
# The first involves 'refs'; I don't remember what that was
# about, but I think it had to do with debug builds of Python.
#
# The second is the classic "Unhandled exception in thread
# started by \nsys.excepthook is missing\nlost sys.stderr".
# This is a race condition between closing sys.stderr and
# writing buffered data to a pipe that hasn't been read. We
# only see this using GEVENT_FILE=thread (which makes sense);
# likewise, on Python 2 with thread, we can sometimes get
# `super() argument 1 must be type, not None`; this happens on module
# cleanup.
#
# The third is similar to the second: "AssertionError:
# ...\nIOError: close() called during concurrent operation on
# the same file object.\n"
if b'refs' in err or b'sys.excepthook' in err or b'concurrent' in err:
assert err.startswith(b'bye'), repr(err) # pragma: no cover
else:
assert err.strip() == b'bye', repr(err)
| 1,922 | 43.72093 | 78 | py |
gevent | gevent-master/src/gevent/tests/test__socket_ssl.py | #!/usr/bin/python
from gevent import monkey
monkey.patch_all()
try:
import httplib
except ImportError:
from http import client as httplib
import socket
import gevent.testing as greentest
@greentest.skipUnless(
    hasattr(socket, 'ssl'),
    "Needs socket.ssl (Python 2)"
)
@greentest.skipWithoutExternalNetwork("Tries to access amazon.com")
class AmazonHTTPSTests(greentest.TestCase):
    """
    Smoke tests for the legacy socket.ssl API against a live HTTPS host.
    """
    # Generous timeout: these tests contact live amazon.com servers.
    __timeout__ = 30
    def test_amazon_response(self):
        # A monkey-patched HTTPS request completes end to end.
        conn = httplib.HTTPSConnection('sdb.amazonaws.com')
        conn.request('GET', '/')
        conn.getresponse()
    def test_str_and_repr(self):
        # The ssl wrapper object supports str() and repr().
        conn = socket.socket()
        conn.connect(('sdb.amazonaws.com', 443))
        ssl_conn = socket.ssl(conn) # pylint:disable=no-member
        assert str(ssl_conn)
        assert repr(ssl_conn)
if __name__ == "__main__":
greentest.main()
| 865 | 20.65 | 67 | py |
gevent | gevent-master/src/gevent/tests/test__all__.py | # Check __all__, __implements__, __extensions__, __imports__ of the modules
from __future__ import print_function
from __future__ import absolute_import
import functools
import sys
import unittest
import types
import importlib
import warnings
from gevent.testing import six
from gevent.testing import modules
from gevent.testing.sysinfo import PLATFORM_SPECIFIC_SUFFIXES
from gevent.testing.util import debug
from gevent._patcher import MAPPING
class ANY(object):
    """A universal collection: membership tests always succeed."""
    def __contains__(self, _item):
        # Every possible item is "in" this object; the argument is ignored.
        return True
# Replace the class with a singleton instance; only `in` tests are ever
# performed on it.
ANY = ANY()
# Names a gevent module legitimately does not implement from its stdlib
# counterpart; the ANY sentinel means "skip the check for this module".
NOT_IMPLEMENTED = {
    'socket': ['CAPI'],
    'thread': ['allocate', 'exit_thread', 'interrupt_main', 'start_new'],
    'select': ANY,
    'os': ANY,
    'threading': ANY,
    '__builtin__' if six.PY2 else 'builtins': ANY,
    'signal': ANY,
}
# Names that may be absent from the stdlib module depending on the
# platform or build options.
COULD_BE_MISSING = {
    'socket': ['create_connection', 'RAND_add', 'RAND_egd', 'RAND_status'],
    'subprocess': ['_posixsubprocess'],
}
# Things without an __all__ should generally be internal implementation
# helpers
NO_ALL = {
    'gevent.threading',
    'gevent._compat',
    'gevent._corecffi',
    'gevent._ffi',
    'gevent._fileobjectcommon',
    'gevent._fileobjectposix',
    'gevent._patcher',
    'gevent._socketcommon',
    'gevent._tblib',
    'gevent._util',
    'gevent.resolver._addresses',
    'gevent.resolver._hostsfile',
}
# Modules allowed to define __implements__ even without a stdlib
# counterpart module.
ALLOW_IMPLEMENTS = [
    'gevent._queue',
    # 'gevent.resolver.dnspython',
    # 'gevent.resolver_thread',
    # 'gevent.resolver.blocking',
    # 'gevent.resolver_ares',
    # 'gevent.server',
    # 'gevent._resolver.hostfile',
    # 'gevent.util',
    # 'gevent.threadpool',
    # 'gevent.timeout',
]
# A list of modules that may contain things that aren't actually, technically,
# extensions, but that need to be in __extensions__ anyway due to the way,
# for example, monkey patching, needs to work.
EXTRA_EXTENSIONS = []
if sys.platform.startswith('win'):
    EXTRA_EXTENSIONS.append('gevent.signal')
# Sentinel distinguishing "attribute absent" from any real value.
_MISSING = '<marker object>'
def skip_if_no_stdlib_counterpart(f):
    """
    Method decorator: skip the wrapped test when the test class has no
    stdlib module to compare the gevent module against.
    """
    @functools.wraps(f)
    def wrapper(self):
        # Guard clause: bail out via skipTest (which raises) when there
        # is nothing to compare with.
        if not self.stdlib_module:
            self.skipTest("Need stdlib counterpart to %s" % self.modname)
        f(self)
    return wrapper
class AbstractTestMixin(object):
    """
    Compares the gevent module named by ``modname`` against its stdlib
    counterpart (if any): __all__, __implements__, __imports__ and
    __extensions__ must all be mutually consistent.
    """
    # Class attributes filled in by setUpClass.
    modname = None
    stdlib_has_all = False
    stdlib_all = None
    stdlib_name = None
    stdlib_module = None
    @classmethod
    def setUpClass(cls):
        modname = cls.modname
        if modname.endswith(PLATFORM_SPECIFIC_SUFFIXES):
            raise unittest.SkipTest("Module %s is platform specific" % modname)
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            try:
                cls.module = importlib.import_module(modname)
            except ImportError:
                if modname in modules.OPTIONAL_MODULES:
                    msg = "Unable to import %s" % modname
                    raise unittest.SkipTest(msg)
                raise
        cls.__implements__ = getattr(cls.module, '__implements__', None)
        cls.__imports__ = getattr(cls.module, '__imports__', [])
        cls.__extensions__ = getattr(cls.module, '__extensions__', [])
        cls.stdlib_name = MAPPING.get(modname)
        if cls.stdlib_name is not None:
            try:
                cls.stdlib_module = __import__(cls.stdlib_name)
            except ImportError:
                pass
            else:
                cls.stdlib_has_all = True
                cls.stdlib_all = getattr(cls.stdlib_module, '__all__', None)
                if cls.stdlib_all is None:
                    # No declared __all__; fall back to the public,
                    # non-module names from dir().
                    cls.stdlib_has_all = False
                    cls.stdlib_all = [
                        name
                        for name in dir(cls.stdlib_module)
                        if not name.startswith('_')
                        and not isinstance(getattr(cls.stdlib_module, name), types.ModuleType)
                    ]
    def skipIfNoAll(self):
        # Modules without __all__ must be known internal helpers.
        if not hasattr(self.module, '__all__'):
            self.assertIn(self.modname, NO_ALL)
            self.skipTest("%s Needs __all__" % self.modname)
    def test_all(self):
        # Check that __all__ is present in the gevent module,
        # and only includes things that actually exist and can be
        # imported from it.
        self.skipIfNoAll()
        names = {}
        six.exec_("from %s import *" % self.modname, names)
        names.pop('__builtins__', None)
        self.maxDiff = None
        # It should match both as a set
        self.assertEqual(set(names), set(self.module.__all__))
        # and it should not contain duplicates.
        self.assertEqual(sorted(names), sorted(self.module.__all__))
    def test_all_formula(self):
        self.skipIfNoAll()
        # Check __all__ = __implements__ + __extensions__ + __imported__
        # This is disabled because it was previously being skipped entirely
        # back when we had to call things manually. In that time, it drifted
        # out of sync. It should be enabled again and problems corrected.
        all_calculated = (
            tuple(self.__implements__ or ())
            + tuple(self.__imports__ or ())
            + tuple(self.__extensions__ or ())
        )
        try:
            self.assertEqual(sorted(all_calculated),
                             sorted(self.module.__all__))
        except AssertionError:
            self.skipTest("Module %s fails the all formula; fix it" % self.modname)
    def test_implements_presence_justified(self):
        # Check that __implements__ is present only if the module is modeled
        # after a module from stdlib (like gevent.socket).
        if self.modname in ALLOW_IMPLEMENTS:
            return
        if self.__implements__ is not None and self.stdlib_module is None:
            raise AssertionError(
                '%s (%r) has __implements__ (%s) but no stdlib counterpart module exists (%s)'
                % (self.modname, self.module, self.__implements__, self.stdlib_name))
    @skip_if_no_stdlib_counterpart
    def test_implements_subset_of_stdlib_all(self):
        # Check that __implements__ + __imports__ is a subset of the
        # corresponding standard module __all__ or dir()
        for name in tuple(self.__implements__ or ()) + tuple(self.__imports__):
            if name in self.stdlib_all:
                continue
            if name in COULD_BE_MISSING.get(self.stdlib_name, ()):
                continue
            if name in dir(self.stdlib_module): # like thread._local which is not in thread.__all__
                continue
            raise AssertionError('%r is not found in %r.__all__ nor in dir(%r)' % (name, self.stdlib_module, self.stdlib_module))
    @skip_if_no_stdlib_counterpart
    def test_implements_actually_implements(self):
        # Check that the module actually implements the entries from
        # __implements__
        for name in self.__implements__ or ():
            item = getattr(self.module, name)
            try:
                stdlib_item = getattr(self.stdlib_module, name)
                # "Implements" means a replacement object, not a re-export.
                self.assertIsNot(item, stdlib_item)
            except AttributeError:
                if name not in COULD_BE_MISSING.get(self.stdlib_name, []):
                    raise
    @skip_if_no_stdlib_counterpart
    def test_imports_actually_imports(self):
        # Check that the module actually imports the entries from
        # __imports__
        for name in self.__imports__:
            item = getattr(self.module, name)
            stdlib_item = getattr(self.stdlib_module, name)
            self.assertIs(item, stdlib_item)
    @skip_if_no_stdlib_counterpart
    def test_extensions_actually_extend(self):
        # Check that the module actually defines new entries in
        # __extensions__
        if self.modname in EXTRA_EXTENSIONS:
            return
        for name in self.__extensions__:
            if hasattr(self.stdlib_module, name):
                raise AssertionError("'%r' is not an extension, it is found in %r" % (name, self.stdlib_module))
    @skip_if_no_stdlib_counterpart
    def test_completeness(self): # pylint:disable=too-many-branches
        # Check that __all__ (or dir()) of the corresponsing stdlib is
        # a subset of __all__ of this module
        missed = []
        for name in self.stdlib_all:
            if name not in getattr(self.module, '__all__', []):
                missed.append(name)
        # handle stuff like ssl.socket and ssl.socket_error which have no reason to be in gevent.ssl.__all__
        if not self.stdlib_has_all:
            for name in missed[:]:
                if hasattr(self.module, name):
                    missed.remove(name)
        # remove known misses
        not_implemented = NOT_IMPLEMENTED.get(self.stdlib_name)
        if not_implemented is not None:
            result = []
            for name in missed:
                if name in not_implemented:
                    # We often don't want __all__ to be set because we wind up
                    # documenting things that we just copy in from the stdlib.
                    # But if we implement it, don't print a warning
                    if getattr(self.module, name, _MISSING) is _MISSING:
                        debug('IncompleteImplWarning: %s.%s' % (self.modname, name))
                else:
                    result.append(name)
            missed = result
        if missed:
            if self.stdlib_has_all:
                msg = '''The following items
in %r.__all__
are missing from %r:
%r''' % (self.stdlib_module, self.module, missed)
            else:
                msg = '''The following items
in dir(%r)
are missing from %r:
%r''' % (self.stdlib_module, self.module, missed)
            raise AssertionError(msg)
def _create_tests():
    """
    Generate one TestCase subclass per gevent submodule and install it
    in this module's globals so unittest discovery finds it.
    """
    module_walk = modules.walk_modules(include_so=False, recursive=True,
                                       check_optional=False)
    for _, dotted_name in module_walk:
        # Platform-specific helper modules are skipped by setUpClass
        # anyway; don't even generate classes for them.
        if dotted_name.endswith(PLATFORM_SPECIFIC_SUFFIXES):
            continue
        test_name = 'Test_' + dotted_name.replace('.', '_')
        test_class = type(
            test_name,
            (AbstractTestMixin, unittest.TestCase),
            {'__module__': __name__, 'modname': dotted_name},
        )
        globals()[test_name] = test_class
_create_tests()
if __name__ == "__main__":
unittest.main()
| 10,489 | 33.735099 | 129 | py |
gevent | gevent-master/src/gevent/tests/test___config.py | # Copyright 2018 gevent contributors. See LICENSE for details.
import os
import unittest
import sys
from gevent import _config
class TestResolver(unittest.TestCase):
    """
    Tests for the GEVENT_RESOLVER setting of gevent._config.
    """
    # Saved value of GEVENT_RESOLVER, restored in tearDown.
    old_resolver = None
    def setUp(self):
        # Run each test with a clean environment.
        if 'GEVENT_RESOLVER' in os.environ:
            self.old_resolver = os.environ['GEVENT_RESOLVER']
            del os.environ['GEVENT_RESOLVER']
    def tearDown(self):
        if self.old_resolver:
            os.environ['GEVENT_RESOLVER'] = self.old_resolver
    def test_key(self):
        self.assertEqual(_config.Resolver.environment_key, 'GEVENT_RESOLVER')
    def test_default(self):
        from gevent.resolver.thread import Resolver
        conf = _config.Resolver()
        self.assertEqual(conf.get(), Resolver)
    def test_env(self):
        from gevent.resolver.blocking import Resolver
        # The first recognized short name in the list wins.
        os.environ['GEVENT_RESOLVER'] = 'foo,bar,block,dnspython'
        conf = _config.Resolver()
        self.assertEqual(conf.get(), Resolver)
        os.environ['GEVENT_RESOLVER'] = 'dnspython'
        # The existing value is unchanged
        self.assertEqual(conf.get(), Resolver)
        # A new object reflects it
        try:
            from gevent.resolver.dnspython import Resolver as DResolver
        except ImportError: # pragma: no cover
            # dnspython is optional; skip it.
            import warnings
            warnings.warn('dnspython not installed')
        else:
            conf = _config.Resolver()
            self.assertEqual(conf.get(), DResolver)
    def test_set_str_long(self):
        # A fully-qualified dotted path is accepted.
        from gevent.resolver.blocking import Resolver
        conf = _config.Resolver()
        conf.set('gevent.resolver.blocking.Resolver')
        self.assertEqual(conf.get(), Resolver)
    def test_set_str_short(self):
        # A registered short name is accepted.
        from gevent.resolver.blocking import Resolver
        conf = _config.Resolver()
        conf.set('block')
        self.assertEqual(conf.get(), Resolver)
    def test_set_class(self):
        # The class object itself is accepted.
        from gevent.resolver.blocking import Resolver
        conf = _config.Resolver()
        conf.set(Resolver)
        self.assertEqual(conf.get(), Resolver)
    def test_set_through_config(self):
        from gevent.resolver.thread import Resolver as Default
        from gevent.resolver.blocking import Resolver
        conf = _config.Config()
        self.assertEqual(conf.resolver, Default)
        conf.resolver = 'block'
        self.assertEqual(conf.resolver, Resolver)
class TestFunctions(unittest.TestCase):
    """
    Tests for the validation helpers in gevent._config.
    """
    def test_validate_bool(self):
        # Only the documented string forms are accepted.
        self.assertTrue(_config.validate_bool('on'))
        self.assertTrue(_config.validate_bool('1'))
        self.assertFalse(_config.validate_bool('off'))
        self.assertFalse(_config.validate_bool('0'))
        self.assertFalse(_config.validate_bool(''))
        with self.assertRaises(ValueError):
            _config.validate_bool(' hmm ')
    def test_validate_invalid(self):
        # validate_invalid always rejects its argument.
        with self.assertRaises(ValueError):
            _config.validate_invalid(self)
class TestConfig(unittest.TestCase):
    """
    Tests for the global gevent._config.config object.
    """
    def test__dir__(self):
        # dir() on the config object exposes exactly its settings.
        self.assertEqual(sorted(_config.config.settings),
                         sorted(dir(_config.config)))
    def test_getattr(self):
        # Bypass the property that might be set here
        self.assertIsNotNone(_config.config.__getattr__('resolver'))
    def test__getattr__invalid(self):
        with self.assertRaises(AttributeError):
            getattr(_config.config, 'no_such_setting')
    def test_set_invalid(self):
        with self.assertRaises(AttributeError):
            _config.config.set('no such setting', True)
class TestImportableSetting(unittest.TestCase):
    """
    Tests for gevent._config.ImportableSetting's import helpers.
    """
    def test_empty_list(self):
        i = _config.ImportableSetting()
        with self.assertRaisesRegex(ImportError,
                                    "Cannot import from empty list"):
            i._import_one_of([])
    def test_path_not_supported(self):
        # A 'path/to/module' spec is rejected without mutating sys.path
        # or emitting warnings.
        import warnings
        i = _config.ImportableSetting()
        path = list(sys.path)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with self.assertRaisesRegex(ImportError,
                                        "Cannot import 'foo/bar/gevent.no_such_module'"):
                i._import_one('foo/bar/gevent.no_such_module')
        # We restored the path
        self.assertEqual(path, sys.path)
        # We did not issue a warning
        self.assertEqual(len(w), 0)
    def test_non_string(self):
        # Non-string values are returned unchanged.
        i = _config.ImportableSetting()
        self.assertIs(i._import_one(self), self)
    def test_get_options(self):
        i = _config.ImportableSetting()
        self.assertEqual({}, i.get_options())
        i.shortname_map = {'foo': 'bad/path'}
        options = i.get_options()
        self.assertIn('foo', options)
if __name__ == '__main__':
unittest.main()
| 4,889 | 29.5625 | 89 | py |
gevent | gevent-master/src/gevent/tests/test__threadpool_executor_patched.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from gevent import monkey; monkey.patch_all()
import gevent.testing as greentest
from . import test__threadpool
class TestPatchedTPE(test__threadpool.TestTPE): # pylint:disable=too-many-ancestors
    # Re-run the thread pool executor tests with the stdlib monkey-patched
    # (patch_all() ran at import time above).
    MONKEY_PATCHED = True
if __name__ == '__main__':
greentest.main()
| 386 | 20.5 | 83 | py |
gevent | gevent-master/src/gevent/tests/test__core_watcher.py | from __future__ import absolute_import, print_function
import gevent.testing as greentest
from gevent import config
from gevent.testing.sysinfo import CFFI_BACKEND
from gevent.core import READ # pylint:disable=no-name-in-module
from gevent.core import WRITE # pylint:disable=no-name-in-module
class Test(greentest.TestCase):
    """
    Tests for core loop watcher objects (timers and io watchers).
    """
    # These tests drive a private loop directly; no global test timeout.
    __timeout__ = None
    def setUp(self):
        super(Test, self).setUp()
        # A private loop and timer, torn down explicitly below.
        self.loop = config.loop(default=False)
        self.timer = self.loop.timer(0.01)
    def tearDown(self):
        if self.timer is not None:
            self.timer.close()
        if self.loop is not None:
            self.loop.destroy()
        self.loop = self.timer = None
        super(Test, self).tearDown()
    def test_non_callable_to_start(self):
        # test that cannot pass non-callable thing to start()
        self.assertRaises(TypeError, self.timer.start, None)
        self.assertRaises(TypeError, self.timer.start, 5)
    def test_non_callable_after_start(self):
        # test that cannot set 'callback' to non-callable thing later either
        lst = []
        timer = self.timer
        timer.start(lst.append)
        with self.assertRaises(TypeError):
            timer.callback = False
        with self.assertRaises(TypeError):
            timer.callback = 5
    def test_args_can_be_changed_after_start(self):
        lst = []
        timer = self.timer
        self.timer.start(lst.append)
        self.assertEqual(timer.args, ())
        timer.args = (1, 2, 3)
        self.assertEqual(timer.args, (1, 2, 3))
        # Only tuple can be args
        with self.assertRaises(TypeError):
            timer.args = 5
        with self.assertRaises(TypeError):
            timer.args = [4, 5]
        # A rejected assignment leaves the previous value in place.
        self.assertEqual(timer.args, (1, 2, 3))
        # None also works, means empty tuple
        # XXX why?
        timer.args = None
        self.assertEqual(timer.args, None)
    def test_run(self):
        loop = self.loop
        lst = []
        self.timer.start(lambda *args: lst.append(args))
        loop.run()
        loop.update_now()
        self.assertEqual(lst, [()])
        # Even if we lose all references to it, the ref in the callback
        # keeps it alive
        self.timer.start(reset, self.timer, lst)
        self.timer = None
        loop.run()
        self.assertEqual(lst, [(), 25])
    def test_invalid_fd(self):
        loop = self.loop
        # Negative case caught everywhere. ValueError
        # on POSIX, OSError on Windows Py3, IOError on Windows Py2
        with self.assertRaises((ValueError, OSError, IOError)):
            loop.io(-1, READ)
    @greentest.skipOnWindows("Stdout can't be watched on Win32")
    def test_reuse_io(self):
        loop = self.loop
        # Watchers aren't reused once all outstanding
        # refs go away BUT THEY MUST BE CLOSED
        tty_watcher = loop.io(1, WRITE)
        watcher_handle = tty_watcher._watcher if CFFI_BACKEND else tty_watcher
        tty_watcher.close()
        del tty_watcher
        # XXX: Note there is a cycle in the CFFI code
        # from watcher_handle._handle -> watcher_handle.
        # So it doesn't go away until a GC runs.
        import gc
        gc.collect()
        tty_watcher = loop.io(1, WRITE)
        self.assertIsNot(tty_watcher._watcher if CFFI_BACKEND else tty_watcher, watcher_handle)
        tty_watcher.close()
def reset(watcher, lst):
    """Timer callback for Test.test_run: record a sentinel, then detach and close the watcher.

    Clearing ``args`` and installing a no-op callback drops the watcher's
    references to test state before it is released.
    """
    # Record the sentinel first; it is independent of the teardown below.
    lst.append(25)
    watcher.args = None
    watcher.callback = lambda: None
    watcher.close()
if __name__ == '__main__':
greentest.main()
| 3,573 | 27.592 | 95 | py |
gevent | gevent-master/src/gevent/tests/test__pool.py | from time import time
import gevent
import gevent.pool
from gevent.event import Event
from gevent.queue import Queue
import gevent.testing as greentest
import gevent.testing.timing
import random
from gevent.testing import ExpectedException
import unittest
class TestCoroutinePool(unittest.TestCase):
klass = gevent.pool.Pool
def test_apply_async(self):
done = Event()
def some_work(_):
done.set()
pool = self.klass(2)
pool.apply_async(some_work, ('x', ))
done.wait()
def test_apply(self):
value = 'return value'
def some_work():
return value
pool = self.klass(2)
result = pool.apply(some_work)
self.assertEqual(value, result)
def test_apply_raises(self):
pool = self.klass(1)
def raiser():
raise ExpectedException()
try:
pool.apply(raiser)
except ExpectedException:
pass
else:
self.fail("Should have raised ExpectedException")
# Don't let the metaclass automatically force any error
# that reaches the hub from a spawned greenlet to become
# fatal; that defeats the point of the test.
test_apply_raises.error_fatal = False
def test_multiple_coros(self):
evt = Event()
results = []
def producer():
gevent.sleep(0.001)
results.append('prod')
evt.set()
def consumer():
results.append('cons1')
evt.wait()
results.append('cons2')
pool = self.klass(2)
done = pool.spawn(consumer)
pool.apply_async(producer)
done.get()
self.assertEqual(['cons1', 'prod', 'cons2'], results)
def dont_test_timer_cancel(self):
timer_fired = []
def fire_timer():
timer_fired.append(True)
def some_work():
gevent.timer(0, fire_timer) # pylint:disable=no-member
pool = self.klass(2)
pool.apply(some_work)
gevent.sleep(0)
self.assertEqual(timer_fired, [])
def test_reentrant(self):
pool = self.klass(1)
result = pool.apply(pool.apply, (lambda a: a + 1, (5, )))
self.assertEqual(result, 6)
evt = Event()
pool.apply_async(evt.set)
evt.wait()
@greentest.skipOnPyPy("Does not work on PyPy") # Why?
def test_stderr_raising(self):
# testing that really egregious errors in the error handling code
# (that prints tracebacks to stderr) don't cause the pool to lose
# any members
import sys
pool = self.klass(size=1)
# we're going to do this by causing the traceback.print_exc in
# safe_apply to raise an exception and thus exit _main_loop
normal_err = sys.stderr
try:
sys.stderr = FakeFile()
waiter = pool.spawn(crash)
with gevent.Timeout(2):
self.assertRaises(RuntimeError, waiter.get)
# the pool should have something free at this point since the
# waiter returned
# pool.Pool change: if an exception is raised during execution of a link,
# the rest of the links are scheduled to be executed on the next hub iteration
# this introduces a delay in updating pool.sem which makes pool.free_count() report 0
# therefore, sleep:
gevent.sleep(0)
self.assertEqual(pool.free_count(), 1)
# shouldn't block when trying to get
with gevent.Timeout.start_new(0.1):
pool.apply(gevent.sleep, (0, ))
finally:
sys.stderr = normal_err
pool.join()
def crash(*_args, **_kw):
    """Helper that always fails: ignore any arguments and raise RuntimeError."""
    error = RuntimeError("Whoa")
    raise error
class FakeFile(object):
    """File-like stand-in whose write() always fails; used to break error-printing paths."""

    def write(self, *_args):
        """Reject every write attempt by raising RuntimeError."""
        failure = RuntimeError('Whaaa')
        raise failure
class PoolBasicTests(greentest.TestCase):
klass = gevent.pool.Pool
def test_execute_async(self):
p = self.klass(size=2)
self.assertEqual(p.free_count(), 2)
r = []
first = p.spawn(r.append, 1)
self.assertEqual(p.free_count(), 1)
first.get()
self.assertEqual(r, [1])
gevent.sleep(0)
self.assertEqual(p.free_count(), 2)
#Once the pool is exhausted, calling an execute forces a yield.
p.apply_async(r.append, (2, ))
self.assertEqual(1, p.free_count())
self.assertEqual(r, [1])
p.apply_async(r.append, (3, ))
self.assertEqual(0, p.free_count())
self.assertEqual(r, [1])
p.apply_async(r.append, (4, ))
self.assertEqual(r, [1])
gevent.sleep(0.01)
self.assertEqual(sorted(r), [1, 2, 3, 4])
def test_discard(self):
p = self.klass(size=1)
first = p.spawn(gevent.sleep, 1000)
p.discard(first)
first.kill()
self.assertFalse(first)
self.assertEqual(len(p), 0)
self.assertEqual(p._semaphore.counter, 1)
def test_add_method(self):
p = self.klass(size=1)
first = gevent.spawn(gevent.sleep, 1000)
try:
second = gevent.spawn(gevent.sleep, 1000)
try:
self.assertEqual(p.free_count(), 1)
self.assertEqual(len(p), 0)
p.add(first)
self.assertEqual(p.free_count(), 0)
self.assertEqual(len(p), 1)
with self.assertRaises(gevent.Timeout):
with gevent.Timeout(0.1):
p.add(second)
self.assertEqual(p.free_count(), 0)
self.assertEqual(len(p), 1)
finally:
second.kill()
finally:
first.kill()
@greentest.ignores_leakcheck
def test_add_method_non_blocking(self):
p = self.klass(size=1)
first = gevent.spawn(gevent.sleep, 1000)
try:
second = gevent.spawn(gevent.sleep, 1000)
try:
p.add(first)
with self.assertRaises(gevent.pool.PoolFull):
p.add(second, blocking=False)
finally:
second.kill()
finally:
first.kill()
@greentest.ignores_leakcheck
def test_add_method_timeout(self):
p = self.klass(size=1)
first = gevent.spawn(gevent.sleep, 1000)
try:
second = gevent.spawn(gevent.sleep, 1000)
try:
p.add(first)
with self.assertRaises(gevent.pool.PoolFull):
p.add(second, timeout=0.100)
finally:
second.kill()
finally:
first.kill()
@greentest.ignores_leakcheck
def test_start_method_timeout(self):
p = self.klass(size=1)
first = gevent.spawn(gevent.sleep, 1000)
try:
second = gevent.Greenlet(gevent.sleep, 1000)
try:
p.add(first)
with self.assertRaises(gevent.pool.PoolFull):
p.start(second, timeout=0.100)
finally:
second.kill()
finally:
first.kill()
def test_apply(self):
p = self.klass()
result = p.apply(lambda a: ('foo', a), (1, ))
self.assertEqual(result, ('foo', 1))
def test_init_error(self):
self.switch_expected = False
self.assertRaises(ValueError, self.klass, -1)
#
# tests from standard library test/test_multiprocessing.py
class TimingWrapper(object):
    """Callable proxy that times each invocation of the wrapped function.

    After a call, ``elapsed`` holds the wall-clock seconds the call took
    (even if the call raised); before the first call it is None.
    """

    def __init__(self, func):
        self.func = func
        self.elapsed = None

    def __call__(self, *args, **kwds):
        started = time()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the duration whether the call returned or raised.
            self.elapsed = time() - started
def sqr(x, wait=0.0):
    """Return ``x * x`` after cooperatively sleeping for *wait* seconds."""
    gevent.sleep(wait)
    return x * x
def squared(x):
    """Return the square of *x* (no sleeping; used to build expected results)."""
    result = x * x
    return result
def sqr_random_sleep(x):
    """Return ``x * x`` after sleeping a random interval in [0, 0.1) seconds.

    The jitter exercises out-of-order completion in imap/imap_unordered tests.
    """
    gevent.sleep(random.random() * 0.1)
    return x * x
def final_sleep():
    """Yield 0, 1, 2, then sleep before exhausting.

    Exercises iterables that block *after* producing their last item
    (see gevent issue #311).
    """
    for i in range(3):
        yield i
    gevent.sleep(0.2)
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.082, 0.035, 0.14
SMALL_RANGE = 10
LARGE_RANGE = 1000
if (greentest.PYPY and greentest.WIN) or greentest.RUN_LEAKCHECKS or greentest.RUN_COVERAGE:
# See comments in test__threadpool.py.
LARGE_RANGE = 25
elif greentest.RUNNING_ON_CI or greentest.EXPECT_POOR_TIMER_RESOLUTION:
LARGE_RANGE = 100
class TestPool(greentest.TestCase): # pylint:disable=too-many-public-methods
__timeout__ = greentest.LARGE_TIMEOUT
size = 1
def setUp(self):
greentest.TestCase.setUp(self)
self.pool = gevent.pool.Pool(self.size)
def cleanup(self):
self.pool.join()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), 25)
self.assertEqual(papply(sqr, (), {'x': 3}), 9)
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(SMALL_RANGE)), list(map(squared, range(SMALL_RANGE))))
self.assertEqual(pmap(sqr, range(100)), list(map(squared, range(100))))
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)
def test_async_callback(self):
result = []
res = self.pool.apply_async(sqr, (7, TIMEOUT1,), callback=result.append)
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT1, 1)
gevent.sleep(0) # lets the callback run
self.assertEqual(result, [49])
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(gevent.Timeout, get, timeout=TIMEOUT2)
self.assertTimeoutAlmostEqual(get.elapsed, TIMEOUT2, 1)
self.pool.join()
def test_imap_list_small(self):
it = self.pool.imap(sqr, range(SMALL_RANGE))
self.assertEqual(list(it), list(map(sqr, range(SMALL_RANGE))))
def test_imap_it_small(self):
it = self.pool.imap(sqr, range(SMALL_RANGE))
for i in range(SMALL_RANGE):
self.assertEqual(next(it), i * i)
self.assertRaises(StopIteration, next, it)
def test_imap_it_large(self):
it = self.pool.imap(sqr, range(LARGE_RANGE))
for i in range(LARGE_RANGE):
self.assertEqual(next(it), i * i)
self.assertRaises(StopIteration, next, it)
def test_imap_random(self):
it = self.pool.imap(sqr_random_sleep, range(SMALL_RANGE))
self.assertEqual(list(it), list(map(squared, range(SMALL_RANGE))))
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(LARGE_RANGE))))
it = self.pool.imap_unordered(sqr, range(LARGE_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(LARGE_RANGE))))
def test_imap_unordered_random(self):
it = self.pool.imap_unordered(sqr_random_sleep, range(SMALL_RANGE))
self.assertEqual(sorted(it), list(map(squared, range(SMALL_RANGE))))
def test_empty_imap_unordered(self):
it = self.pool.imap_unordered(sqr, [])
self.assertEqual(list(it), [])
def test_empty_imap(self):
it = self.pool.imap(sqr, [])
self.assertEqual(list(it), [])
def test_empty_map(self):
self.assertEqual(self.pool.map(sqr, []), [])
def test_terminate(self):
result = self.pool.map_async(gevent.sleep, [0.1] * ((self.size or 10) * 2))
gevent.sleep(0.1)
kill = TimingWrapper(self.pool.kill)
kill()
self.assertTimeWithinRange(kill.elapsed, 0.0, 0.5)
result.join()
def sleep(self, x):
gevent.sleep(float(x) / 10.)
return str(x)
def test_imap_unordered_sleep(self):
# testing that imap_unordered returns items in competion order
result = list(self.pool.imap_unordered(self.sleep, [10, 1, 2]))
if self.pool.size == 1:
expected = ['10', '1', '2']
else:
expected = ['1', '2', '10']
self.assertEqual(result, expected)
# https://github.com/gevent/gevent/issues/423
def test_imap_no_stop(self):
q = Queue()
q.put(123)
gevent.spawn_later(0.1, q.put, StopIteration)
result = list(self.pool.imap(lambda _: _, q))
self.assertEqual(result, [123])
def test_imap_unordered_no_stop(self):
q = Queue()
q.put(1234)
gevent.spawn_later(0.1, q.put, StopIteration)
result = list(self.pool.imap_unordered(lambda _: _, q))
self.assertEqual(result, [1234])
# same issue, but different test: https://github.com/gevent/gevent/issues/311
def test_imap_final_sleep(self):
result = list(self.pool.imap(sqr, final_sleep()))
self.assertEqual(result, [0, 1, 4])
def test_imap_unordered_final_sleep(self):
result = list(self.pool.imap_unordered(sqr, final_sleep()))
self.assertEqual(result, [0, 1, 4])
# Issue 638
def test_imap_unordered_bounded_queue(self):
iterable = list(range(100))
running = [0]
def short_running_func(i, _j):
running[0] += 1
return i
def make_reader(mapping):
# Simulate a long running reader. No matter how many workers
# we have, we will never have a queue more than size 1
def reader():
result = []
for i, x in enumerate(mapping):
self.assertTrue(running[0] <= i + 2, running[0])
result.append(x)
gevent.sleep(0.01)
self.assertTrue(len(mapping.queue) <= 2, len(mapping.queue))
return result
return reader
# Send two iterables to make sure varargs and kwargs are handled
# correctly
for meth in self.pool.imap_unordered, self.pool.imap:
running[0] = 0
mapping = meth(short_running_func, iterable, iterable,
maxsize=1)
reader = make_reader(mapping)
l = reader()
self.assertEqual(sorted(l), iterable)
# Re-run the TestPool suite at larger and unbounded pool sizes.
@greentest.ignores_leakcheck
class TestPool2(TestPool):
    size = 2
@greentest.ignores_leakcheck
class TestPool3(TestPool):
    size = 3
@greentest.ignores_leakcheck
class TestPool10(TestPool):
    size = 10
# size=None means the pool places no limit on concurrency.
class TestPoolUnlimit(TestPool):
    size = None
class TestPool0(greentest.TestCase):
    """A Pool of size 0 is permanently full and never has free slots."""
    size = 0
    def test_wait_full(self):
        p = gevent.pool.Pool(size=0)
        self.assertEqual(0, p.free_count())
        self.assertTrue(p.full())
        # wait_available() must time out, since a slot can never open.
        self.assertEqual(0, p.wait_available(timeout=0.01))
class TestJoinSleep(gevent.testing.timing.AbstractGenericWaitTestCase):
    """Pool.join(timeout=...) should behave like a generic timed wait."""
    def wait(self, timeout):
        """Block in join() on a pool holding one long-running greenlet."""
        p = gevent.pool.Pool()
        g = p.spawn(gevent.sleep, 10)
        try:
            p.join(timeout=timeout)
        finally:
            # Always reclaim the long sleeper so it doesn't outlive the test.
            g.kill()
class TestJoinSleep_raise_error(gevent.testing.timing.AbstractGenericWaitTestCase):
    """Same as TestJoinSleep, but with raise_error=True passed to join()."""
    def wait(self, timeout):
        p = gevent.pool.Pool()
        g = p.spawn(gevent.sleep, 10)
        try:
            p.join(timeout=timeout, raise_error=True)
        finally:
            g.kill()
class TestJoinEmpty(greentest.TestCase):
    """join() on an empty pool returns True immediately, without switching."""
    switch_expected = False
    def test(self):
        p = gevent.pool.Pool()
        res = p.join()
        self.assertTrue(res, "empty should return true")
class TestSpawn(greentest.TestCase):
    """spawn() on a size-1 pool blocks until the previous greenlet finishes."""
    switch_expected = True
    def test(self):
        p = gevent.pool.Pool(1)
        self.assertEqual(len(p), 0)
        p.spawn(gevent.sleep, 0.1)
        self.assertEqual(len(p), 1)
        p.spawn(gevent.sleep, 0.1) # this spawn blocks until the old one finishes
        self.assertEqual(len(p), 1)
        # Sleep long enough for both 0.1s greenlets to complete.
        gevent.sleep(0.19 if not greentest.EXPECT_POOR_TIMER_RESOLUTION else 0.5)
        self.assertEqual(len(p), 0)
    def testSpawnAndWait(self):
        """join() with a short timeout returns False while work is pending, True once done."""
        p = gevent.pool.Pool(1)
        self.assertEqual(len(p), 0)
        p.spawn(gevent.sleep, 0.1)
        self.assertEqual(len(p), 1)
        res = p.join(0.01)
        self.assertFalse(res, "waiting on a full pool should return false")
        res = p.join()
        self.assertTrue(res, "waiting to finish should be true")
        self.assertEqual(len(p), 0)
def error_iter():
    """Yield two values, then raise ExpectedException.

    Used to verify that errors raised by the *input* iterable propagate
    out of map/imap_unordered.
    """
    yield 1
    yield 2
    raise ExpectedException
class TestErrorInIterator(greentest.TestCase):
    """An exception raised by the source iterable surfaces from map()/imap_unordered()."""
    # The raised ExpectedException is part of the test, not a fatal hub error.
    error_fatal = False
    def test(self):
        p = gevent.pool.Pool(3)
        self.assertRaises(ExpectedException, p.map, lambda x: None, error_iter())
        gevent.sleep(0.001)
    def test_unordered(self):
        p = gevent.pool.Pool(3)
        def unordered():
            return list(p.imap_unordered(lambda x: None, error_iter()))
        self.assertRaises(ExpectedException, unordered)
        gevent.sleep(0.001)
def divide_by(x):
    """Return the reciprocal of *x*; raises ZeroDivisionError when x == 0."""
    quotient = 1.0 / x
    return quotient
class TestErrorInHandler(greentest.TestCase):
    """An exception raised by the mapped *function* surfaces to the consumer."""
    # ZeroDivisionError here is deliberate, not a fatal hub error.
    error_fatal = False
    def test_map(self):
        p = gevent.pool.Pool(3)
        self.assertRaises(ZeroDivisionError, p.map, divide_by, [1, 0, 2])
    def test_imap(self):
        """imap re-raises the failing item in order, then continues with later items."""
        p = gevent.pool.Pool(1)
        it = p.imap(divide_by, [1, 0, 2])
        self.assertEqual(next(it), 1.0)
        self.assertRaises(ZeroDivisionError, next, it)
        self.assertEqual(next(it), 0.5)
        self.assertRaises(StopIteration, next, it)
    def test_imap_unordered(self):
        p = gevent.pool.Pool(1)
        it = p.imap_unordered(divide_by, [1, 0, 2])
        self.assertEqual(next(it), 1.0)
        self.assertRaises(ZeroDivisionError, next, it)
        self.assertEqual(next(it), 0.5)
        self.assertRaises(StopIteration, next, it)
if __name__ == '__main__':
greentest.main()
| 17,935 | 28.695364 | 97 | py |
gevent | gevent-master/src/gevent/tests/test__hub.py | # Copyright (c) 2009 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import time
import unittest
import gevent.testing as greentest
import gevent.testing.timing
import gevent
from gevent import socket
from gevent.hub import Waiter, get_hub
from gevent._compat import NativeStrIO
from gevent._compat import get_this_psutil_process
DELAY = 0.1
class TestCloseSocketWhilePolling(greentest.TestCase):
    """Closing a socket from a timer callback while connect() is polling raises socket.error."""
    def test(self):
        sock = socket.socket()
        self._close_on_teardown(sock)
        # A 0-delay timer closes the socket as soon as the hub runs.
        t = get_hub().loop.timer(0)
        t.start(sock.close)
        with self.assertRaises(socket.error):
            try:
                sock.connect(('python.org', 81))
            finally:
                t.close()
        gevent.sleep(0)
class TestExceptionInMainloop(greentest.TestCase):
    """The hub keeps scheduling timers even after a callback raised an error."""
    def test_sleep(self):
        # even if there was an error in the mainloop, the hub should continue to work
        start = time.time()
        gevent.sleep(DELAY)
        delay = time.time() - start
        delay_range = DELAY * 0.9
        self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range)
        error = greentest.ExpectedException('TestExceptionInMainloop.test_sleep/fail')
        def fail():
            raise error
        with get_hub().loop.timer(0.001) as t:
            t.start(fail)
            self.expect_one_error()
            start = time.time()
            gevent.sleep(DELAY)
            delay = time.time() - start
            # The error was reported, and sleep still took about DELAY seconds.
            self.assert_error(value=error)
            self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range)
class TestSleep(gevent.testing.timing.AbstractGenericWaitTestCase):
    """gevent.sleep() should satisfy the generic timed-wait contract."""
    def wait(self, timeout):
        gevent.sleep(timeout)
    def test_simple(self):
        # sleep(0) just yields to the hub and returns.
        gevent.sleep(0)
class TestWaiterGet(gevent.testing.timing.AbstractGenericWaitTestCase):
    """Waiter.get(), woken by a timer, should satisfy the generic timed-wait contract."""
    def setUp(self):
        super(TestWaiterGet, self).setUp()
        self.waiter = Waiter()
    def wait(self, timeout):
        # The timer switches back into the waiter after *timeout* seconds.
        with get_hub().loop.timer(timeout) as evt:
            evt.start(self.waiter.switch, None)
            return self.waiter.get()
class TestWaiter(greentest.TestCase):
    """Waiter repr/str, value delivery via switch(), and error delivery via throw()."""
    def test(self):
        waiter = Waiter()
        self.assertEqual(str(waiter), '<Waiter greenlet=None>')
        # A value switched in before anyone waits is stored and shown in str().
        waiter.switch(25)
        self.assertEqual(str(waiter), '<Waiter greenlet=None value=25>')
        self.assertEqual(waiter.get(), 25)
        # throw() stores exc_info and get() re-raises it.
        waiter = Waiter()
        waiter.throw(ZeroDivisionError)
        assert re.match('^<Waiter greenlet=None exc_info=.*ZeroDivisionError.*$', str(waiter)), str(waiter)
        self.assertRaises(ZeroDivisionError, waiter.get)
        # While a greenlet is blocked in get(), str() names that greenlet.
        waiter = Waiter()
        g = gevent.spawn(waiter.get)
        g.name = 'AName'
        gevent.sleep(0)
        str_waiter = str(waiter)
        self.assertTrue(str_waiter.startswith('<Waiter greenlet=<Greenlet "AName'),
                        str_waiter)
        g.kill()
@greentest.skipOnCI("Racy on CI")
class TestPeriodicMonitoringThread(greentest.TestCase):
def _reset_hub(self):
hub = get_hub()
try:
del hub.exception_stream
except AttributeError:
pass
if hub._threadpool is not None:
hub.threadpool.join()
hub.threadpool.kill()
del hub.threadpool
def setUp(self):
super(TestPeriodicMonitoringThread, self).setUp()
self.monitor_thread = gevent.config.monitor_thread
gevent.config.monitor_thread = True
from gevent.monkey import get_original
self.lock = get_original('threading', 'Lock')()
self.monitor_fired = 0
self.monitored_hubs = set()
self._reset_hub()
def tearDown(self):
hub = get_hub()
if not self.monitor_thread and hub.periodic_monitoring_thread:
# If it was true, nothing to do. If it was false, tear things down.
hub.periodic_monitoring_thread.kill()
hub.periodic_monitoring_thread = None
gevent.config.monitor_thread = self.monitor_thread
self.monitored_hubs = None
self._reset_hub()
super(TestPeriodicMonitoringThread, self).tearDown()
def _monitor(self, hub):
with self.lock:
self.monitor_fired += 1
if self.monitored_hubs is not None:
self.monitored_hubs.add(hub)
def test_config(self):
self.assertEqual(0.1, gevent.config.max_blocking_time)
def _run_monitoring_threads(self, monitor, kill=True):
self.assertTrue(monitor.should_run)
from threading import Condition
cond = Condition()
cond.acquire()
def monitor_cond(_hub):
cond.acquire()
cond.notify_all()
cond.release()
if kill:
# Only run once. Especially helpful on PyPy, where
# formatting stacks is expensive.
monitor.kill()
monitor.add_monitoring_function(monitor_cond, 0.01)
cond.wait()
cond.release()
monitor.add_monitoring_function(monitor_cond, None)
@greentest.ignores_leakcheck
def test_kill_removes_trace(self):
from greenlet import gettrace
hub = get_hub()
hub.start_periodic_monitoring_thread()
self.assertIsNotNone(gettrace())
hub.periodic_monitoring_thread.kill()
self.assertIsNone(gettrace())
@greentest.ignores_leakcheck
def test_blocking_this_thread(self):
hub = get_hub()
stream = hub.exception_stream = NativeStrIO()
monitor = hub.start_periodic_monitoring_thread()
self.assertIsNotNone(monitor)
basic_monitor_func_count = 1
if get_this_psutil_process() is not None:
# psutil is installed
basic_monitor_func_count += 1
self.assertEqual(basic_monitor_func_count,
len(monitor.monitoring_functions()))
monitor.add_monitoring_function(self._monitor, 0.1)
self.assertEqual(basic_monitor_func_count + 1,
len(monitor.monitoring_functions()))
self.assertEqual(self._monitor, monitor.monitoring_functions()[-1].function)
self.assertEqual(0.1, monitor.monitoring_functions()[-1].period)
# We must make sure we have switched greenlets at least once,
# otherwise we can't detect a failure.
gevent.sleep(hub.loop.approx_timer_resolution)
assert hub.exception_stream is stream
try:
time.sleep(0.3) # Thrice the default
self._run_monitoring_threads(monitor)
finally:
monitor.add_monitoring_function(self._monitor, None)
self.assertEqual(basic_monitor_func_count,
len(monitor._monitoring_functions))
assert hub.exception_stream is stream
monitor.kill()
del hub.exception_stream
self.assertGreaterEqual(self.monitor_fired, 1)
data = stream.getvalue()
self.assertIn('appears to be blocked', data)
self.assertIn('PeriodicMonitoringThread', data)
def _prep_worker_thread(self):
hub = get_hub()
threadpool = hub.threadpool
worker_hub = threadpool.apply(get_hub)
assert hub is not worker_hub
stream = NativeStrIO()
# It does not have a monitoring thread yet
self.assertIsNone(worker_hub.periodic_monitoring_thread)
# So switch to it and give it one by letting it run.
# XXX: Python 3.10 appears to have made some changes in the memory model.
# Specifically, reading values from the background that are set in the
# background hub *from this thread* is flaky. It takes them awhile to show up.
# Really, that's correct and expected from a standard C point of view, as we
# don't insert any memory barriers or things like that. It just always used to
# work in the past. So now, rather than read them directly, we need to read them
# from the background thread itself. The same, apparently, goes for
# writing.
# Need to figure out what exactly the change was.
def task():
get_hub().exception_stream = stream
gevent.sleep(0.01)
mon = get_hub().periodic_monitoring_thread
mon.add_monitoring_function(self._monitor, 0.1)
return mon
worker_monitor = threadpool.apply(task)
self.assertIsNotNone(worker_monitor)
return worker_hub, stream, worker_monitor
@greentest.ignores_leakcheck
def test_blocking_threadpool_thread_task_queue(self):
# A threadpool thread spends much of its time
# blocked on the native Lock object. Unless we take
# care, if that thread had created a hub, it will constantly
# be reported as blocked.
worker_hub, stream, worker_monitor = self._prep_worker_thread()
# Now wait until the monitoring threads have run.
self._run_monitoring_threads(worker_monitor)
worker_monitor.kill()
# We did run the monitor in the worker thread, but it
# did NOT report itself blocked by the worker thread sitting there.
with self.lock:
self.assertIn(worker_hub, self.monitored_hubs)
self.assertEqual(stream.getvalue(), '')
@greentest.ignores_leakcheck
def test_blocking_threadpool_thread_one_greenlet(self):
# If the background threadpool thread has no other greenlets to run
# and never switches, then even if it has a hub
# we don't report it blocking. The threadpool is *meant* to run
# tasks that block.
hub = get_hub()
threadpool = hub.threadpool
worker_hub, stream, worker_monitor = self._prep_worker_thread()
task = threadpool.spawn(time.sleep, 0.3)
# Now wait until the monitoring threads have run.
self._run_monitoring_threads(worker_monitor)
# and be sure the task ran
task.get()
worker_monitor.kill()
# We did run the monitor in the worker thread, but it
# did NOT report itself blocked by the worker thread
with self.lock:
self.assertIn(worker_hub, self.monitored_hubs)
self.assertEqual(stream.getvalue(), '')
@greentest.ignores_leakcheck
def test_blocking_threadpool_thread_multi_greenlet(self):
# If the background threadpool thread ever switches
# greenlets, monitoring goes into affect.
hub = get_hub()
threadpool = hub.threadpool
worker_hub, stream, worker_monitor = self._prep_worker_thread()
def task():
g = gevent.spawn(time.sleep, 0.7)
g.join()
task = threadpool.spawn(task)
# Now wait until the monitoring threads have run.
self._run_monitoring_threads(worker_monitor, kill=False)
# and be sure the task ran
task.get()
worker_monitor.kill()
# We did run the monitor in the worker thread, and it
# DID report itself blocked by the worker thread
self.assertIn(worker_hub, self.monitored_hubs)
data = stream.getvalue()
self.assertIn('appears to be blocked', data)
self.assertIn('PeriodicMonitoringThread', data)
class TestLoopInterface(unittest.TestCase):
    """The hub's loop and its callback objects must implement the declared interfaces."""
    # NOTE(review): method name has a typo ("implemensts"); kept because
    # renaming test methods is outside a documentation-only change.
    def test_implemensts_ILoop(self):
        from gevent.testing import verify
        from gevent._interfaces import ILoop
        loop = get_hub().loop
        verify.verifyObject(ILoop, loop)
    def test_callback_implements_ICallback(self):
        from gevent.testing import verify
        from gevent._interfaces import ICallback
        loop = get_hub().loop
        cb = loop.run_callback(lambda: None)
        verify.verifyObject(ICallback, cb)
    def test_callback_ts_implements_ICallback(self):
        from gevent.testing import verify
        from gevent._interfaces import ICallback
        loop = get_hub().loop
        cb = loop.run_callback_threadsafe(lambda: None)
        verify.verifyObject(ICallback, cb)
class TestHandleError(unittest.TestCase):
    """A custom hub.handle_error that itself raises must not crash the hub."""
    def tearDown(self):
        # Remove the instance attribute so later tests get the default handler.
        try:
            del get_hub().handle_error
        except AttributeError:
            pass
    def test_exception_in_custom_handle_error_does_not_crash(self):
        def bad_handle_error(*args):
            raise AttributeError
        get_hub().handle_error = bad_handle_error
        class MyException(Exception):
            pass
        def raises():
            raise MyException
        # The original MyException still propagates to the joiner.
        with self.assertRaises(MyException):
            gevent.spawn(raises).get()
if __name__ == '__main__':
greentest.main()
| 13,728 | 32.898765 | 107 | py |
gevent | gevent-master/src/gevent/tests/test__greenio.py | # Copyright (c) 2006-2007, Linden Research, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import gevent
from gevent import socket
from gevent import testing as greentest
from gevent.testing import TestCase, tcp_listener
from gevent.testing import gc_collect_if_needed
from gevent.testing import skipOnPyPy
from gevent.testing import params
PY3 = sys.version_info[0] >= 3
def _write_to_closed(f, s):
try:
r = f.write(s)
except ValueError:
assert PY3
else:
assert r is None, r
class TestGreenIo(TestCase):
def test_close_with_makefile(self):
def accept_close_early(listener):
# verify that the makefile and the socket are truly independent
# by closing the socket prior to using the made file
try:
conn, _ = listener.accept()
fd = conn.makefile(mode='wb')
conn.close()
fd.write(b'hello\n')
fd.close()
_write_to_closed(fd, b'a')
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def accept_close_late(listener):
# verify that the makefile and the socket are truly independent
# by closing the made file and then sending a character
try:
conn, _ = listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello')
fd.close()
conn.send(b'\n')
conn.close()
_write_to_closed(fd, b'a')
self.assertRaises(socket.error, conn.send, b'b')
finally:
listener.close()
def did_it_work(server):
client = socket.create_connection((params.DEFAULT_CONNECT, server.getsockname()[1]))
fd = client.makefile(mode='rb')
client.close()
self.assertEqual(fd.readline(), b'hello\n')
self.assertFalse(fd.read())
fd.close()
server = tcp_listener()
server_greenlet = gevent.spawn(accept_close_early, server)
did_it_work(server)
server_greenlet.kill()
server = tcp_listener()
server_greenlet = gevent.spawn(accept_close_late, server)
did_it_work(server)
server_greenlet.kill()
@skipOnPyPy("Takes multiple GCs and issues a warning we can't catch")
def test_del_closes_socket(self):
import warnings
def accept_once(listener):
# delete/overwrite the original conn
# object, only keeping the file object around
# closing the file object should close everything
# This is not *exactly* true on Python 3. This produces
# a ResourceWarning, which we silence below. (Previously we actually
# *saved* a reference to the socket object, so we
# weren't testing what we thought we were.)
# It's definitely not true on PyPy, which needs GC to
# reliably close everything; sometimes this is more than
# one collection cycle. And PyPy issues a warning with -X
# track-resources that we cannot catch.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
try:
conn = listener.accept()[0]
# Note that we overwrite the original variable,
# losing our reference to the socket.
conn = conn.makefile(mode='wb')
conn.write(b'hello\n')
conn.close()
_write_to_closed(conn, b'a')
finally:
listener.close()
del listener
del conn
gc_collect_if_needed()
gc_collect_if_needed()
server = tcp_listener()
gevent.spawn(accept_once, server)
client = socket.create_connection((params.DEFAULT_CONNECT, server.getsockname()[1]))
with gevent.Timeout.start_new(0.5):
fd = client.makefile()
client.close()
self.assertEqual(fd.read(), 'hello\n')
# If the socket isn't closed when 'accept_once' finished,
# then this will hang and exceed the timeout
self.assertEqual(fd.read(), '')
fd.close()
del client
del fd
if __name__ == '__main__':
greentest.main()
| 5,523 | 36.578231 | 96 | py |
gevent | gevent-master/src/gevent/tests/test__example_wsgiserver_ssl.py | import ssl
import gevent.testing as greentest
from gevent.testing import params
from . import test__example_wsgiserver
@greentest.skipOnCI("Timing issues sometimes lead to a connection refused")
class Test_wsgiserver_ssl(test__example_wsgiserver.Test_wsgiserver):
example = 'wsgiserver_ssl.py'
URL = 'https://%s:8443' % (params.DEFAULT_LOCAL_HOST_ADDR,)
PORT = 8443
_use_ssl = True
if hasattr(ssl, '_create_unverified_context'):
# Disable verification for our self-signed cert
# on Python >= 2.7.9 and 3.4
ssl_ctx = ssl._create_unverified_context()
if __name__ == '__main__':
greentest.main()
| 649 | 25 | 75 | py |
gevent | gevent-master/src/gevent/tests/test__issue639.py | # Test idle
import gevent
from gevent import testing as greentest
class Test(greentest.TestCase):
    # Regression test for issue #639: gevent.idle().

    def test(self):
        """Smoke test: sleep() and idle() both return without error."""
        gevent.sleep()
        gevent.idle()
greentest.main()
| 214 | 15.538462 | 39 | py |
gevent | gevent-master/src/gevent/tests/test__fileobject.py | from __future__ import print_function
from __future__ import absolute_import
import functools
import gc
import io
import os
import sys
import tempfile
import unittest
import gevent
from gevent import fileobject
from gevent._fileobjectcommon import OpenDescriptor
try:
from gevent._fileobjectposix import GreenOpenDescriptor
except ImportError:
GreenOpenDescriptor = None
import gevent.testing as greentest
from gevent.testing import sysinfo
# pylint:disable=unspecified-encoding
def Writer(fobj, line):
    """Greenlet target: write every item of *line* to *fobj* one at a time.

    After all items are written, flush once and close *fobj* so the
    reading side sees EOF.
    """
    for item in line:
        fobj.write(item)
    fobj.flush()
    fobj.close()
def close_fd_quietly(fd):
    """Close file descriptor *fd*, swallowing OSError.

    Used as a test cleanup: the code under test may already have
    closed the descriptor, which would otherwise raise EBADF here.
    """
    try:
        os.close(fd)
    except OSError:
        pass
def skipUnlessWorksWithRegularFiles(func):
    """Decorator: skip the wrapped test unless regular files are supported.

    The ``WORKS_WITH_REGULAR_FILES`` flag is read off the TestCase
    instance at call time, so subclasses can opt out dynamically.
    """
    @functools.wraps(func)
    def wrapper(self):
        if self.WORKS_WITH_REGULAR_FILES:
            func(self)
        else:
            self.skipTest("Doesn't work with regular files")
    return wrapper
class CleanupMixin(object):
    """TestCase mixin: temp files and pipes that clean themselves up."""

    def _mkstemp(self, suffix):
        """Create a temp file; return ``(fd, path)``, both reclaimed on teardown."""
        fd, name = tempfile.mkstemp(suffix)
        # Cleanups run LIFO: the fd is closed first, then the file removed.
        self.addCleanup(os.remove, name)
        self.addCleanup(close_fd_quietly, fd)
        return fd, name

    def _pipe(self):
        """Create a pipe; return ``(read_fd, write_fd)``, closed on teardown."""
        read_end, write_end = os.pipe()
        self.addCleanup(close_fd_quietly, read_end)
        self.addCleanup(close_fd_quietly, write_end)
        return read_end, write_end
class TestFileObjectBlock(CleanupMixin,
greentest.TestCase):
# serves as a base for the concurrent tests too
WORKS_WITH_REGULAR_FILES = True
def _getTargetClass(self):
return fileobject.FileObjectBlock
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _test_del(self, **kwargs):
r, w = self._pipe()
self._do_test_del((r, w), **kwargs)
def _do_test_del(self, pipe, **kwargs):
r, w = pipe
s = self._makeOne(w, 'wb', **kwargs)
s.write(b'x')
try:
s.flush()
except IOError:
# Sometimes seen on Windows/AppVeyor
print("Failed flushing fileobject", repr(s), file=sys.stderr)
import traceback
traceback.print_exc()
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', ResourceWarning)
# Deliberately getting ResourceWarning with FileObject(Thread) under Py3
del s
gc.collect() # PyPy
if kwargs.get("close", True):
with self.assertRaises((OSError, IOError)):
# expected, because FileObject already closed it
os.close(w)
else:
os.close(w)
with self._makeOne(r, 'rb') as fobj:
self.assertEqual(fobj.read(), b'x')
def test_del(self):
# Close should be true by default
self._test_del()
def test_del_close(self):
self._test_del(close=True)
@skipUnlessWorksWithRegularFiles
def test_seek(self):
fileno, path = self._mkstemp('.gevent.test__fileobject.test_seek')
s = b'a' * 1024
os.write(fileno, b'B' * 15)
os.write(fileno, s)
os.close(fileno)
with open(path, 'rb') as f:
f.seek(15)
native_data = f.read(1024)
with open(path, 'rb') as f_raw:
f = self._makeOne(f_raw, 'rb', close=False)
# On Python 3, all objects should have seekable.
# On Python 2, only our custom objects do.
self.assertTrue(f.seekable())
f.seek(15)
self.assertEqual(15, f.tell())
# Note that a duplicate close() of the underlying
# file descriptor can look like an OSError from this line
# as we exit the with block
fileobj_data = f.read(1024)
self.assertEqual(native_data, s)
self.assertEqual(native_data, fileobj_data)
    def __check_native_matches(self, byte_data, open_mode,
                               meth='read', open_path=True,
                               **open_kwargs):
        """Write *byte_data* to a temp file and compare two read paths.

        Reads the file once with the stdlib ``io.open`` and once with the
        class under test (``self._makeOne``), invoking *meth* on each;
        asserts both produce identical data and returns the gevent-side
        result so callers can make further type assertions.

        When *open_path* is false, the object under test wraps an
        already-open file object instead of the path.
        """
        fileno, path = self._mkstemp('.gevent_test_' + open_mode)
        os.write(fileno, byte_data)
        os.close(fileno)
        with io.open(path, open_mode, **open_kwargs) as f:
            native_data = getattr(f, meth)()
        if open_path:
            with self._makeOne(path, open_mode, **open_kwargs) as f:
                gevent_data = getattr(f, meth)()
        else:
            # Wrap an already-opened raw file object instead of the path.
            # (The old note about avoiding ``io.open()`` on Python 2 was
            # historical; ``io.open`` is used unconditionally here.)
            opener = io.open
            with opener(path, open_mode, **open_kwargs) as raw:
                with self._makeOne(raw) as f:
                    gevent_data = getattr(f, meth)()
        self.assertEqual(native_data, gevent_data)
        return gevent_data
@skipUnlessWorksWithRegularFiles
def test_str_default_to_native(self):
# With no 'b' or 't' given, read and write native str.
gevent_data = self.__check_native_matches(b'abcdefg', 'r')
self.assertIsInstance(gevent_data, str)
@skipUnlessWorksWithRegularFiles
def test_text_encoding(self):
gevent_data = self.__check_native_matches(
u'\N{SNOWMAN}'.encode('utf-8'),
'r+',
buffering=5, encoding='utf-8'
)
self.assertIsInstance(gevent_data, str)
    @skipUnlessWorksWithRegularFiles
    def test_does_not_leak_on_exception(self):
        # If an exception occurs during opening,
        # everything still gets cleaned up.
        # NOTE(review): this test is a stub -- the body is ``pass``, so it
        # asserts nothing and always passes. TODO: implement it (e.g. open
        # with an invalid mode and verify no descriptor is leaked).
        pass
@skipUnlessWorksWithRegularFiles
def test_rbU_produces_bytes_readline(self):
if sys.version_info > (3, 11):
self.skipTest("U file mode was removed in 3.11")
# Including U in rb still produces bytes.
# Note that the universal newline behaviour is
# essentially ignored in explicit bytes mode.
gevent_data = self.__check_native_matches(
b'line1\nline2\r\nline3\rlastline\n\n',
'rbU',
meth='readlines',
)
self.assertIsInstance(gevent_data[0], bytes)
self.assertEqual(len(gevent_data), 4)
@skipUnlessWorksWithRegularFiles
def test_rU_produces_native(self):
if sys.version_info > (3, 11):
self.skipTest("U file mode was removed in 3.11")
gevent_data = self.__check_native_matches(
b'line1\nline2\r\nline3\rlastline\n\n',
'rU',
meth='readlines',
)
self.assertIsInstance(gevent_data[0], str)
@skipUnlessWorksWithRegularFiles
def test_r_readline_produces_native(self):
gevent_data = self.__check_native_matches(
b'line1\n',
'r',
meth='readline',
)
self.assertIsInstance(gevent_data, str)
@skipUnlessWorksWithRegularFiles
def test_r_readline_on_fobject_produces_native(self):
gevent_data = self.__check_native_matches(
b'line1\n',
'r',
meth='readline',
open_path=False,
)
self.assertIsInstance(gevent_data, str)
def test_close_pipe(self):
# Issue #190, 203
r, w = os.pipe()
x = self._makeOne(r)
y = self._makeOne(w, 'w')
x.close()
y.close()
@skipUnlessWorksWithRegularFiles
@greentest.ignores_leakcheck
def test_name_after_close(self):
fileno, path = self._mkstemp('.gevent_test_named_path_after_close')
# Passing the fileno; the name is the same as the fileno, and
# doesn't change when closed.
f = self._makeOne(fileno)
nf = os.fdopen(fileno)
# On Python 2, os.fdopen() produces a name of <fdopen>;
# we follow the Python 3 semantics everywhere.
nf_name = '<fdopen>' if greentest.PY2 else fileno
self.assertEqual(f.name, fileno)
self.assertEqual(nf.name, nf_name)
# A file-like object that has no name; we'll close the
# `f` after this because we reuse the fileno, which
# gets passed to fcntl and so must still be valid
class Nameless(object):
def fileno(self):
return fileno
close = flush = isatty = closed = writable = lambda self: False
seekable = readable = lambda self: True
nameless = self._makeOne(Nameless(), 'rb')
with self.assertRaises(AttributeError):
getattr(nameless, 'name')
nameless.close()
with self.assertRaises(AttributeError):
getattr(nameless, 'name')
f.close()
try:
nf.close()
except OSError:
# OSError: Py3, IOError: Py2
pass
self.assertEqual(f.name, fileno)
self.assertEqual(nf.name, nf_name)
def check(arg):
f = self._makeOne(arg)
self.assertEqual(f.name, path)
f.close()
# Doesn't change after closed.
self.assertEqual(f.name, path)
# Passing the string
check(path)
# Passing an opened native object
with open(path) as nf:
check(nf)
# An io object
with io.open(path) as nf:
check(nf)
@skipUnlessWorksWithRegularFiles
def test_readinto_serial(self):
fileno, path = self._mkstemp('.gevent_test_readinto')
os.write(fileno, b'hello world')
os.close(fileno)
buf = bytearray(32)
mbuf = memoryview(buf)
def assertReadInto(byte_count, expected_data):
bytes_read = f.readinto(mbuf[:byte_count])
self.assertEqual(bytes_read, len(expected_data))
self.assertEqual(buf[:bytes_read], expected_data)
with self._makeOne(path, 'rb') as f:
assertReadInto(2, b'he')
assertReadInto(1, b'l')
assertReadInto(32, b'lo world')
assertReadInto(32, b'')
class ConcurrentFileObjectMixin(object):
# Additional tests for fileobjects that cooperate
# and we have full control of the implementation
def test_read1_binary_present(self):
# Issue #840
r, w = self._pipe()
reader = self._makeOne(r, 'rb')
self._close_on_teardown(reader)
writer = self._makeOne(w, 'w')
self._close_on_teardown(writer)
self.assertTrue(hasattr(reader, 'read1'), dir(reader))
def test_read1_text_not_present(self):
# Only defined for binary.
r, w = self._pipe()
reader = self._makeOne(r, 'rt')
self._close_on_teardown(reader)
self.addCleanup(os.close, w)
self.assertFalse(hasattr(reader, 'read1'), dir(reader))
def test_read1_default(self):
# If just 'r' is given, whether it has one or not
# depends on if we're Python 2 or 3.
r, w = self._pipe()
self.addCleanup(os.close, w)
reader = self._makeOne(r)
self._close_on_teardown(reader)
self.assertFalse(hasattr(reader, 'read1'))
def test_bufsize_0(self):
# Issue #840
r, w = self._pipe()
x = self._makeOne(r, 'rb', bufsize=0)
y = self._makeOne(w, 'wb', bufsize=0)
self._close_on_teardown(x)
self._close_on_teardown(y)
y.write(b'a')
b = x.read(1)
self.assertEqual(b, b'a')
y.writelines([b'2'])
b = x.read(1)
self.assertEqual(b, b'2')
def test_newlines(self):
import warnings
r, w = self._pipe()
lines = [b'line1\n', b'line2\r', b'line3\r\n', b'line4\r\nline5', b'\nline6']
g = gevent.spawn(Writer, self._makeOne(w, 'wb'), lines)
try:
with warnings.catch_warnings():
if sys.version_info > (3, 11):
# U is removed in Python 3.11
mode = 'r'
self.skipTest("U file mode was removed in 3.11")
else:
# U is deprecated in Python 3, shows up on FileObjectThread
warnings.simplefilter('ignore', DeprecationWarning)
mode = 'rU'
fobj = self._makeOne(r, mode)
result = fobj.read()
fobj.close()
self.assertEqual('line1\nline2\nline3\nline4\nline5\nline6', result)
finally:
g.kill()
def test_readinto(self):
# verify that .readinto() is cooperative.
# if .readinto() is not cooperative spawned greenlet will not be able
# to run and call to .readinto() will block forever.
r, w = self._pipe()
rf = self._close_on_teardown(self._makeOne(r, 'rb'))
wf = self._close_on_teardown(self._makeOne(w, 'wb'))
g = gevent.spawn(Writer, wf, [b'hello'])
try:
buf1 = bytearray(32)
buf2 = bytearray(32)
n1 = rf.readinto(buf1)
n2 = rf.readinto(buf2)
self.assertEqual(n1, 5)
self.assertEqual(buf1[:n1], b'hello')
self.assertEqual(n2, 0)
finally:
g.kill()
class TestFileObjectThread(ConcurrentFileObjectMixin, # pylint:disable=too-many-ancestors
                           TestFileObjectBlock):
    """Runs the shared file-object tests against ``FileObjectThread``."""

    def _getTargetClass(self):
        return fileobject.FileObjectThread

    def test_del_noclose(self):
        # In the past, we used os.fdopen() when given a file descriptor,
        # and that has a destructor that can't be bypassed, so
        # close=false wasn't allowed. Now that we do everything with the
        # io module, it is allowed.
        self._test_del(close=False)

    # We don't test this with FileObjectThread. Sometimes the
    # visibility of the 'close' operation, which happens in a
    # background thread, doesn't make it to the foreground
    # thread in a timely fashion, leading to 'os.close(4) must
    # not succeed' in test_del_close. We have the same thing
    # with flushing and closing in test_newlines. Both of
    # these are most commonly (only?) observed on Py27/64-bit.
    # They also appear on 64-bit 3.6 with libuv

    def test_del(self):
        raise unittest.SkipTest("Race conditions")

    def test_del_close(self):
        raise unittest.SkipTest("Race conditions")
@unittest.skipUnless(
hasattr(fileobject, 'FileObjectPosix'),
"Needs FileObjectPosix"
)
class TestFileObjectPosix(ConcurrentFileObjectMixin, # pylint:disable=too-many-ancestors
TestFileObjectBlock):
if sysinfo.LIBUV and sysinfo.LINUX:
# On Linux, initializing the watcher for a regular
# file results in libuv raising EPERM. But that works
# fine on other platforms.
WORKS_WITH_REGULAR_FILES = False
def _getTargetClass(self):
return fileobject.FileObjectPosix
def test_seek_raises_ioerror(self):
# https://github.com/gevent/gevent/issues/1323
# Get a non-seekable file descriptor
r, _w = self._pipe()
with self.assertRaises(OSError) as ctx:
os.lseek(r, 0, os.SEEK_SET)
os_ex = ctx.exception
with self.assertRaises(IOError) as ctx:
f = self._makeOne(r, 'r', close=False)
# Seek directly using the underlying GreenFileDescriptorIO;
# the buffer may do different things, depending
# on the version of Python (especially 3.7+)
f.fileio.seek(0)
io_ex = ctx.exception
self.assertEqual(io_ex.errno, os_ex.errno)
self.assertEqual(io_ex.strerror, os_ex.strerror)
self.assertEqual(io_ex.args, os_ex.args)
self.assertEqual(str(io_ex), str(os_ex))
class TestTextMode(CleanupMixin, unittest.TestCase):
    """Text mode must honor the platform line separator after patching."""

    def test_default_mode_writes_linesep(self):
        # See https://github.com/gevent/gevent/issues/1282
        # libuv 1.x interferes with the default line mode on
        # Windows.
        # First, make sure we initialize gevent
        gevent.get_hub()
        fileno, path = self._mkstemp('.gevent.test__fileobject.test_default')
        os.close(fileno)
        with open(path, "w") as f:
            f.write("\n")
        with open(path, "rb") as f:
            data = f.read()
        # A "\n" written in text mode must read back, in binary mode,
        # as the platform separator ("\r\n" on Windows).
        self.assertEqual(data, os.linesep.encode('ascii'))
class TestOpenDescriptor(CleanupMixin, greentest.TestCase):
    """Argument validation of ``OpenDescriptor``.

    Most test methods are generated from ``CASES`` by the module-level
    ``pop()`` function below.
    """

    def _getTargetClass(self):
        return OpenDescriptor

    def _makeOne(self, *args, **kwargs):
        return self._getTargetClass()(*args, **kwargs)

    def _check(self, regex, kind, *args, **kwargs):
        # Constructing with the given arguments must raise *kind*
        # with a message matching *regex*.
        with self.assertRaisesRegex(kind, regex):
            self._makeOne(*args, **kwargs)

    # Case builders: (message-regex, exception-type, bad-kwargs) triples.
    case = lambda re, **kwargs: (re, TypeError, kwargs)
    vase = lambda re, **kwargs: (re, ValueError, kwargs)
    CASES = (
        case('mode', mode=42),
        case('buffering', buffering='nope'),
        case('encoding', encoding=42),
        case('errors', errors=42),
        vase('mode', mode='aoeug'),
        vase('mode U cannot be combined', mode='wU'),
        vase('text and binary', mode='rtb'),
        vase('append mode at once', mode='rw'),
        vase('exactly one', mode='+'),
        vase('take an encoding', mode='rb', encoding='ascii'),
        vase('take an errors', mode='rb', errors='strict'),
        vase('take a newline', mode='rb', newline='\n'),
    )

    def test_atomicwrite_fd(self):
        from gevent._fileobjectcommon import WriteallMixin
        # It basically only does something when buffering is otherwise disabled
        fileno, _w = self._pipe()
        desc = self._makeOne(fileno, 'wb',
                             buffering=0,
                             closefd=False,
                             atomic_write=True)
        self.assertTrue(desc.atomic_write)
        fobj = desc.opened()
        self.assertIsInstance(fobj, WriteallMixin)
        os.close(fileno)
def pop():
    """Attach one ``test_*`` method to TestOpenDescriptor per CASES entry.

    Each generated test feeds the recorded bad keyword arguments to
    ``_check`` and expects the recorded exception type and message.
    """
    for pattern, exc_kind, bad_kwargs in TestOpenDescriptor.CASES:
        test_name = 'test_' + pattern.replace(' ', '_')
        # Bind the loop values as defaults so each closure keeps its own.
        def _test(self, _re=pattern, _kind=exc_kind, _kw=bad_kwargs):
            self._check(_re, _kind, 1, **_kw)
        setattr(TestOpenDescriptor, test_name, _test)
pop()
@unittest.skipIf(GreenOpenDescriptor is None, "No support for non-blocking IO")
class TestGreenOpenDescripton(TestOpenDescriptor):
    # Re-runs the OpenDescriptor validation suite (including the tests
    # generated by pop()) against the cooperative GreenOpenDescriptor.
    # NOTE(review): the class name has a typo ("Descripton"); kept as-is
    # so existing test selection by name keeps working.
    def _getTargetClass(self):
        return GreenOpenDescriptor
if __name__ == '__main__':
greentest.main()
| 18,345 | 31.586146 | 93 | py |
gevent | gevent-master/src/gevent/tests/test__systemerror.py | import sys
import gevent.testing as greentest
import gevent
from gevent.hub import get_hub
def raise_(ex):
    """Raise *ex*; lets tests schedule raising an exception as a plain callable."""
    raise ex
MSG = 'should be re-raised and caught'
class Test(greentest.TestCase):
    """Fatal errors raised in ``start()``-scheduled code must propagate
    into the main greenlet when it next yields to the hub.

    Subclasses define ``start()`` (callback vs. spawned greenlet).
    """
    # Handle to whatever start() created, for teardown checks.
    x = None
    # These tests deliberately raise "fatal" errors; don't let the
    # test framework treat them as fatal itself.
    error_fatal = False
    def start(self, *args):
        raise NotImplementedError
    def setUp(self):
        self.x = None
    def test_sys_exit(self):
        self.start(sys.exit, MSG)
        try:
            gevent.sleep(0.001)
        except SystemExit as ex:
            assert str(ex) == MSG, repr(str(ex))
        else:
            raise AssertionError('must raise SystemExit')
    def test_keyboard_interrupt(self):
        self.start(raise_, KeyboardInterrupt)
        try:
            gevent.sleep(0.001)
        except KeyboardInterrupt:
            pass
        else:
            raise AssertionError('must raise KeyboardInterrupt')
    def test_keyboard_interrupt_stderr_patched(self):
        # XXX: This one non-top-level call prevents us from being
        # run in a process with other tests.
        from gevent import monkey
        monkey.patch_sys(stdin=False, stdout=False, stderr=True)
        try:
            try:
                self.start(raise_, KeyboardInterrupt)
                while True:
                    gevent.sleep(0.1)
            except KeyboardInterrupt:
                pass # expected
        finally:
            # Restore the real stderr regardless of outcome.
            sys.stderr = monkey.get_original('sys', 'stderr')
    def test_system_error(self):
        self.start(raise_, SystemError(MSG))
        with self.assertRaisesRegex(SystemError,
                                    MSG):
            gevent.sleep(0.002)
    def test_exception(self):
        # Ordinary exceptions are reported but must NOT propagate.
        self.start(raise_, Exception('regular exception must not kill the program'))
        gevent.sleep(0.001)
class TestCallback(Test):
def tearDown(self):
if self.x is not None:
# libuv: See the notes in libuv/loop.py:loop._start_callback_timer
# If that's broken, test_exception can fail sporadically.
# If the issue is the same, then adding `gevent.sleep(0)` here
# will solve it. There's also a race condition for the first loop,
# so we sleep twice.
assert not self.x.pending, self.x
def start(self, *args):
self.x = get_hub().loop.run_callback(*args)
if greentest.LIBUV:
def test_exception(self):
# This call will enter the loop for the very first time (if we're running
# standalone). On libuv, where timers run first, that means that depending on the
# amount of time that elapses between the call to uv_timer_start and uv_run,
# this timer might fire before our check or prepare watchers, and hence callbacks,
# run.
# We make this call now so that the call in the super class is guaranteed to be
# somewhere in the loop and not subject to that race condition.
gevent.sleep(0.001)
super(TestCallback, self).test_exception()
class TestSpawn(Test):
    """Runs the propagation tests with the error raised in a spawned greenlet."""

    def tearDown(self):
        # Give the spawned greenlet a chance to finish, then verify it died.
        gevent.sleep(0.0001)
        if self.x is not None:
            assert self.x.dead, self.x

    def start(self, *args):
        self.x = gevent.spawn(*args)
del Test
if __name__ == '__main__':
greentest.main()
| 3,295 | 28.693694 | 94 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_sigchld_3.py | # Mimics what gunicorn workers do *if* the arbiter is also monkey-patched:
# After forking from the master monkey-patched process, the child
# resets signal handlers to SIG_DFL. If we then fork and watch *again*,
# we shouldn't hang. (Note that we carefully handle this so as not to break
# os.popen)
from __future__ import print_function
# Patch in the parent process.
import gevent.monkey
gevent.monkey.patch_all()
from gevent import get_hub
import os
import sys
import signal
import subprocess
def _waitpid(p):
    """Reap child process *p* and assert it exited cleanly.

    Retries the waitpid once if it is interrupted by a signal
    (``OSError``/EINTR).
    """
    try:
        _, status = os.waitpid(p, 0)
    except OSError:
        # Interrupted system call; try once more.
        _, status = os.waitpid(p, 0)
    assert status == 0, status
if hasattr(signal, 'SIGCHLD'):
if sys.version_info[:2] >= (3, 8) and os.environ.get("PYTHONDEVMODE"):
# See test__monkey_sigchld.py
print("Ran 1 tests in 0.0s (skipped=1)")
sys.exit(0)
# Do what subprocess does and make sure we have the watcher
# in the parent
get_hub().loop.install_sigchld()
pid = os.fork()
if pid: # parent
_waitpid(pid)
else:
# Child resets.
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Go through subprocess because we expect it to automatically
# set up the waiting for us.
# not on Py2 pylint:disable=consider-using-with
popen = subprocess.Popen([sys.executable, '-c', 'import sys'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
popen.stderr.read()
popen.stdout.read()
popen.wait() # This hangs if it doesn't.
popen.stderr.close()
popen.stdout.close()
sys.exit(0)
else:
print("No SIGCHLD, not testing")
print("Ran 1 tests in 0.0s (skipped=1)")
| 1,755 | 28.266667 | 80 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_select.py | # Tests for the monkey-patched select module.
from gevent import monkey
monkey.patch_all()
import select
import gevent.testing as greentest
class TestSelect(greentest.TestCase):
    """After monkey-patching, the polling mechanisms gevent cannot emulate
    (epoll, kqueue, kevent, devpoll) must be absent from ``select``."""

    def _make_test(name, ns): # pylint:disable=no-self-argument
        # Class-body helper, not a method: injects one test into the class
        # namespace *ns* asserting that ``select.<name>`` does not exist.
        def test(self):
            # getattr with ``self`` as the default distinguishes
            # "attribute missing" from "attribute is None".
            self.assertIs(getattr(select, name, self), self)
            self.assertFalse(hasattr(select, name))
        test.__name__ = 'test_' + name + '_removed'
        ns[test.__name__] = test

    for name in (
            'epoll',
            'kqueue',
            'kevent',
            'devpoll',
    ):
        _make_test(name, locals()) # pylint:disable=too-many-function-args
    # Don't leave the loop variable or the helper behind as class attributes.
    del name
    del _make_test
if __name__ == '__main__':
greentest.main()
| 741 | 21.484848 | 74 | py |
gevent | gevent-master/src/gevent/tests/_import_import_patch.py | __import__('_import_patch')
| 28 | 13.5 | 27 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_sigchld_2.py | # Mimics what gunicorn workers do: monkey patch in the child process
# and try to reset signal handlers to SIG_DFL.
# NOTE: This breaks again when gevent.subprocess is used, or any child
# watcher.
import os
import sys
import signal
def handle(*_args):
    # SIGCHLD handler. ``pid`` is the module global assigned by the
    # os.fork() below: 0 in the child, the child's pid in the parent.
    if not pid:
        # We only do this in the child so our
        # parent's waitpid can get the status.
        # This is the opposite of gunicorn.
        os.waitpid(-1, os.WNOHANG)
# The signal watcher must be installed *before* monkey patching
if hasattr(signal, 'SIGCHLD'):
if sys.version_info[:2] >= (3, 8) and os.environ.get("PYTHONDEVMODE"):
# See test__monkey_sigchld.py
print("Ran 1 tests in 0.0s (skipped=1)")
sys.exit(0)
# On Python 2, the signal handler breaks the platform
# module, because it uses os.popen. pkg_resources uses the platform
# module.
# Cache that info.
import platform
platform.uname()
signal.signal(signal.SIGCHLD, handle)
pid = os.fork()
if pid: # parent
try:
_, stat = os.waitpid(pid, 0)
except OSError:
# Interrupted system call
_, stat = os.waitpid(pid, 0)
assert stat == 0, stat
else:
# Under Python 2, os.popen() directly uses the popen call, and
# popen's file uses the pclose() system call to
# wait for the child. If it's already waited on,
# it raises the same exception.
# Python 3 uses the subprocess module directly which doesn't
# have this problem.
import gevent.monkey
gevent.monkey.patch_all()
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
f = os.popen('true')
f.close()
sys.exit(0)
else:
print("No SIGCHLD, not testing")
| 1,763 | 29.947368 | 74 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_logging.py | # If the logging module is imported *before* monkey patching,
# the existing handlers are correctly monkey patched to use gevent locks
import logging
logging.basicConfig()
import threading
import sys
PY2 = sys.version_info[0] == 2
def _inner_lock(lock):
    """Return the semaphore wrapped by *lock*, or None if not found.

    The private attribute name differs between Python 2 and 3.
    """
    # The inner attribute changed between 2 and 3
    attr_name = '_RLock__block' if PY2 else '_block'
    return getattr(lock, attr_name, None)
def _check_type(root, lock, inner_semaphore, kind):
if not isinstance(inner_semaphore, kind):
raise AssertionError(
"Expected <object>.[_]lock._block to be of type %s, "
"but it was of type %s.\n"
"\t<object>.[_]lock=%r\n"
"\t<object>.[_]lock._block=%r\n"
"\t<object>=%r" % (
kind,
type(inner_semaphore),
lock,
inner_semaphore,
root
)
)
def checkLocks(kind, ignore_none=True):
    """Verify every logging handler lock, and ``logging._lock`` itself,
    wraps an inner semaphore of type *kind*.

    *kind* is the native lock type before monkey-patching and the gevent
    lock type afterwards. When *ignore_none* is true, locks whose inner
    block attribute cannot be found are skipped.
    """
    handlers = logging._handlerList
    assert handlers
    for weakref in handlers:
        # In py26, these are actual handlers, not weakrefs
        handler = weakref() if callable(weakref) else weakref
        block = _inner_lock(handler.lock)
        if block is None and ignore_none:
            continue
        _check_type(handler, handler.lock, block, kind)
    attr = _inner_lock(logging._lock)
    if attr is None and ignore_none:
        return
    _check_type(logging, logging._lock, attr, kind)
checkLocks(type(threading._allocate_lock()))
import gevent.monkey
gevent.monkey.patch_all()
import gevent.lock
checkLocks(type(gevent.thread.allocate_lock()), ignore_none=False)
| 1,640 | 27.789474 | 72 | py |
gevent | gevent-master/src/gevent/tests/test__contextvars.py | # gevent: copied from 3.7 to test our monkey-patch.
# Modified to work on all versions of Python.
from gevent import monkey
monkey.patch_all()
# pylint:disable=superfluous-parens,pointless-statement,not-callable
# pylint:disable=unused-argument,too-many-public-methods,unused-variable
# pylint:disable=too-many-branches,too-many-statements
import concurrent.futures
try:
import contextvars
except ImportError:
from gevent import contextvars
import functools
# import gc
import random
import time
import unittest
# import weakref
# try:
# from _testcapi import hamt
# except ImportError:
# hamt = None
hamt = None
def isolated_context(func):
    """Run *func* in a fresh Context (needed for reftracking test mode).

    Ensures ContextVar writes made by *func* cannot leak into, or out
    of, other tests.
    """
    @functools.wraps(func)
    def runner(*args, **kwargs):
        fresh = contextvars.Context()
        return fresh.run(func, *args, **kwargs)
    return runner
class ContextTest(unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_context_var_new_1(self):
with self.assertRaises(TypeError):
contextvars.ContextVar()
# gevent: Doesn't raise
# with self.assertRaisesRegex(TypeError, 'must be a str'):
# contextvars.ContextVar(1)
c = contextvars.ContextVar('aaa')
self.assertEqual(c.name, 'aaa')
with self.assertRaises(AttributeError):
c.name = 'bbb'
self.assertNotEqual(hash(c), hash('aaa'))
@isolated_context
def test_context_var_repr_1(self):
c = contextvars.ContextVar('a')
self.assertIn('a', repr(c))
c = contextvars.ContextVar('a', default=123)
self.assertIn('123', repr(c))
lst = []
c = contextvars.ContextVar('a', default=lst)
lst.append(c)
self.assertIn('...', repr(c))
self.assertIn('...', repr(lst))
t = c.set(1)
self.assertIn(repr(c), repr(t))
self.assertNotIn(' used ', repr(t))
c.reset(t)
self.assertIn(' used ', repr(t))
# gevent: Doesn't raise
# def test_context_subclassing_1(self):
# with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
# class MyContextVar(contextvars.ContextVar):
# # Potentially we might want ContextVars to be subclassable.
# pass
# with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
# class MyContext(contextvars.Context):
# pass
# with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
# class MyToken(contextvars.Token):
# pass
def test_context_new_1(self):
with self.assertRaises(TypeError):
contextvars.Context(1)
with self.assertRaises(TypeError):
contextvars.Context(1, a=1)
with self.assertRaises(TypeError):
contextvars.Context(a=1)
contextvars.Context(**{})
def test_context_typerrors_1(self):
ctx = contextvars.Context()
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
ctx[1]
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
1 in ctx
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
ctx.get(1)
def test_context_get_context_1(self):
ctx = contextvars.copy_context()
self.assertIsInstance(ctx, contextvars.Context)
# gevent: This doesn't raise
# def test_context_run_1(self):
# ctx = contextvars.Context()
# with self.assertRaisesRegex(TypeError, 'missing 1 required'):
# ctx.run()
def test_context_run_2(self):
ctx = contextvars.Context()
def func(*args, **kwargs):
kwargs['spam'] = 'foo'
args += ('bar',)
return args, kwargs
for f in (func, functools.partial(func)):
# partial doesn't support FASTCALL
self.assertEqual(ctx.run(f), (('bar',), {'spam': 'foo'}))
self.assertEqual(ctx.run(f, 1), ((1, 'bar'), {'spam': 'foo'}))
self.assertEqual(
ctx.run(f, a=2),
(('bar',), {'a': 2, 'spam': 'foo'}))
self.assertEqual(
ctx.run(f, 11, a=2),
((11, 'bar'), {'a': 2, 'spam': 'foo'}))
a = {}
self.assertEqual(
ctx.run(f, 11, **a),
((11, 'bar'), {'spam': 'foo'}))
self.assertEqual(a, {})
def test_context_run_3(self):
ctx = contextvars.Context()
def func(*args, **kwargs):
1 / 0
with self.assertRaises(ZeroDivisionError):
ctx.run(func)
with self.assertRaises(ZeroDivisionError):
ctx.run(func, 1, 2)
with self.assertRaises(ZeroDivisionError):
ctx.run(func, 1, 2, a=123)
@isolated_context
def test_context_run_4(self):
ctx1 = contextvars.Context()
ctx2 = contextvars.Context()
var = contextvars.ContextVar('var')
def func2():
self.assertIsNone(var.get(None))
def func1():
self.assertIsNone(var.get(None))
var.set('spam')
ctx2.run(func2)
self.assertEqual(var.get(None), 'spam')
cur = contextvars.copy_context()
self.assertEqual(len(cur), 1)
self.assertEqual(cur[var], 'spam')
return cur
returned_ctx = ctx1.run(func1)
self.assertEqual(ctx1, returned_ctx)
self.assertEqual(returned_ctx[var], 'spam')
self.assertIn(var, returned_ctx)
def test_context_run_5(self):
ctx = contextvars.Context()
var = contextvars.ContextVar('var')
def func():
self.assertIsNone(var.get(None))
var.set('spam')
1 / 0
with self.assertRaises(ZeroDivisionError):
ctx.run(func)
self.assertIsNone(var.get(None))
def test_context_run_6(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('a', default=0)
def fun():
self.assertEqual(c.get(), 0)
self.assertIsNone(ctx.get(c))
c.set(42)
self.assertEqual(c.get(), 42)
self.assertEqual(ctx.get(c), 42)
ctx.run(fun)
def test_context_run_7(self):
ctx = contextvars.Context()
def fun():
with self.assertRaisesRegex(RuntimeError, 'is already entered'):
ctx.run(fun)
ctx.run(fun)
@isolated_context
def test_context_getset_1(self):
c = contextvars.ContextVar('c')
with self.assertRaises(LookupError):
c.get()
self.assertIsNone(c.get(None))
t0 = c.set(42)
self.assertEqual(c.get(), 42)
self.assertEqual(c.get(None), 42)
self.assertIs(t0.old_value, t0.MISSING)
self.assertIs(t0.old_value, contextvars.Token.MISSING)
self.assertIs(t0.var, c)
t = c.set('spam')
self.assertEqual(c.get(), 'spam')
self.assertEqual(c.get(None), 'spam')
self.assertEqual(t.old_value, 42)
c.reset(t)
self.assertEqual(c.get(), 42)
self.assertEqual(c.get(None), 42)
c.set('spam2')
with self.assertRaisesRegex(RuntimeError, 'has already been used'):
c.reset(t)
self.assertEqual(c.get(), 'spam2')
ctx1 = contextvars.copy_context()
self.assertIn(c, ctx1)
c.reset(t0)
with self.assertRaisesRegex(RuntimeError, 'has already been used'):
c.reset(t0)
self.assertIsNone(c.get(None))
self.assertIn(c, ctx1)
self.assertEqual(ctx1[c], 'spam2')
self.assertEqual(ctx1.get(c, 'aa'), 'spam2')
self.assertEqual(len(ctx1), 1)
self.assertEqual(list(ctx1.items()), [(c, 'spam2')])
self.assertEqual(list(ctx1.values()), ['spam2'])
self.assertEqual(list(ctx1.keys()), [c])
self.assertEqual(list(ctx1), [c])
ctx2 = contextvars.copy_context()
self.assertNotIn(c, ctx2)
with self.assertRaises(KeyError):
ctx2[c]
self.assertEqual(ctx2.get(c, 'aa'), 'aa')
self.assertEqual(len(ctx2), 0)
self.assertEqual(list(ctx2), [])
@isolated_context
def test_context_getset_2(self):
v1 = contextvars.ContextVar('v1')
v2 = contextvars.ContextVar('v2')
t1 = v1.set(42)
with self.assertRaisesRegex(ValueError, 'by a different'):
v2.reset(t1)
@isolated_context
def test_context_getset_3(self):
c = contextvars.ContextVar('c', default=42)
ctx = contextvars.Context()
def fun():
self.assertEqual(c.get(), 42)
with self.assertRaises(KeyError):
ctx[c]
self.assertIsNone(ctx.get(c))
self.assertEqual(ctx.get(c, 'spam'), 'spam')
self.assertNotIn(c, ctx)
self.assertEqual(list(ctx.keys()), [])
t = c.set(1)
self.assertEqual(list(ctx.keys()), [c])
self.assertEqual(ctx[c], 1)
c.reset(t)
self.assertEqual(list(ctx.keys()), [])
with self.assertRaises(KeyError):
ctx[c]
ctx.run(fun)
@isolated_context
def test_context_getset_4(self):
c = contextvars.ContextVar('c', default=42)
ctx = contextvars.Context()
tok = ctx.run(c.set, 1)
with self.assertRaisesRegex(ValueError, 'different Context'):
c.reset(tok)
@isolated_context
def test_context_getset_5(self):
c = contextvars.ContextVar('c', default=42)
c.set([])
def fun():
c.set([])
c.get().append(42)
self.assertEqual(c.get(), [42])
contextvars.copy_context().run(fun)
self.assertEqual(c.get(), [])
    def test_context_copy_1(self):
        """Context.copy() takes a snapshot: later sets in either context
        are not visible in the other."""
        ctx1 = contextvars.Context()
        c = contextvars.ContextVar('c', default=42)
        def ctx1_fun():
            c.set(10)
            ctx2 = ctx1.copy()
            self.assertEqual(ctx2[c], 10)
            c.set(20)
            self.assertEqual(ctx1[c], 20)
            self.assertEqual(ctx2[c], 10)
            ctx2.run(ctx2_fun)
            self.assertEqual(ctx1[c], 20)
            self.assertEqual(ctx2[c], 30)
        # Defined after ctx1_fun, which is fine: the closure resolves
        # the name at call time, not at definition time.
        def ctx2_fun():
            self.assertEqual(c.get(), 10)
            c.set(30)
            self.assertEqual(c.get(), 30)
        ctx1.run(ctx1_fun)
    @isolated_context
    def test_context_threads_1(self):
        """Each thread has its own value for a ContextVar: concurrent
        set/sleep/get cycles in ten threads never see each other's values."""
        cvar = contextvars.ContextVar('cvar')
        def sub(num):
            for i in range(10):
                cvar.set(num + i)
                # Sleep so the threads genuinely interleave.
                time.sleep(random.uniform(0.001, 0.05))
                self.assertEqual(cvar.get(), num + i)
            return num
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as tp:
            results = list(tp.map(sub, range(10)))
        self.assertEqual(results, list(range(10)))
# gevent: clases's can't be subscripted on Python 3.6
# def test_contextvar_getitem(self):
# clss = contextvars.ContextVar
# self.assertEqual(clss[str], clss)
# HAMT Tests
# class HashKey:
# _crasher = None
# def __init__(self, hash, name, error_on_eq_to=None):
# assert hash != -1
# self.name = name
# self.hash = hash
# self.error_on_eq_to = error_on_eq_to
# # def __repr__(self):
# # return f'<Key name:{self.name} hash:{self.hash}>'
# def __hash__(self):
# if self._crasher is not None and self._crasher.error_on_hash:
# raise HashingError
# return self.hash
# def __eq__(self, other):
# if not isinstance(other, HashKey):
# return NotImplemented
# if self._crasher is not None and self._crasher.error_on_eq:
# raise EqError
# if self.error_on_eq_to is not None and self.error_on_eq_to is other:
# raise ValueError#(f'cannot compare {self!r} to {other!r}')
# if other.error_on_eq_to is not None and other.error_on_eq_to is self:
# raise ValueError#(f'cannot compare {other!r} to {self!r}')
# return (self.name, self.hash) == (other.name, other.hash)
# class KeyStr(str):
# def __hash__(self):
# if HashKey._crasher is not None and HashKey._crasher.error_on_hash:
# raise HashingError
# return super().__hash__()
# def __eq__(self, other):
# if HashKey._crasher is not None and HashKey._crasher.error_on_eq:
# raise EqError
# return super().__eq__(other)
# class HaskKeyCrasher:
# def __init__(self, error_on_hash=False, error_on_eq=False):
# self.error_on_hash = error_on_hash
# self.error_on_eq = error_on_eq
# def __enter__(self):
# if HashKey._crasher is not None:
# raise RuntimeError('cannot nest crashers')
# HashKey._crasher = self
# def __exit__(self, *exc):
# HashKey._crasher = None
# class HashingError(Exception):
# pass
# class EqError(Exception):
# pass
# @unittest.skipIf(hamt is None, '_testcapi lacks "hamt()" function')
# class HamtTest(unittest.TestCase):
# def test_hashkey_helper_1(self):
# k1 = HashKey(10, 'aaa')
# k2 = HashKey(10, 'bbb')
# self.assertNotEqual(k1, k2)
# self.assertEqual(hash(k1), hash(k2))
# d = dict()
# d[k1] = 'a'
# d[k2] = 'b'
# self.assertEqual(d[k1], 'a')
# self.assertEqual(d[k2], 'b')
# def test_hamt_basics_1(self):
# h = hamt()
# h = None # NoQA
# def test_hamt_basics_2(self):
# h = hamt()
# self.assertEqual(len(h), 0)
# h2 = h.set('a', 'b')
# self.assertIsNot(h, h2)
# self.assertEqual(len(h), 0)
# self.assertEqual(len(h2), 1)
# self.assertIsNone(h.get('a'))
# self.assertEqual(h.get('a', 42), 42)
# self.assertEqual(h2.get('a'), 'b')
# h3 = h2.set('b', 10)
# self.assertIsNot(h2, h3)
# self.assertEqual(len(h), 0)
# self.assertEqual(len(h2), 1)
# self.assertEqual(len(h3), 2)
# self.assertEqual(h3.get('a'), 'b')
# self.assertEqual(h3.get('b'), 10)
# self.assertIsNone(h.get('b'))
# self.assertIsNone(h2.get('b'))
# self.assertIsNone(h.get('a'))
# self.assertEqual(h2.get('a'), 'b')
# h = h2 = h3 = None
# def test_hamt_basics_3(self):
# h = hamt()
# o = object()
# h1 = h.set('1', o)
# h2 = h1.set('1', o)
# self.assertIs(h1, h2)
# def test_hamt_basics_4(self):
# h = hamt()
# h1 = h.set('key', [])
# h2 = h1.set('key', [])
# self.assertIsNot(h1, h2)
# self.assertEqual(len(h1), 1)
# self.assertEqual(len(h2), 1)
# self.assertIsNot(h1.get('key'), h2.get('key'))
# def test_hamt_collision_1(self):
# k1 = HashKey(10, 'aaa')
# k2 = HashKey(10, 'bbb')
# k3 = HashKey(10, 'ccc')
# h = hamt()
# h2 = h.set(k1, 'a')
# h3 = h2.set(k2, 'b')
# self.assertEqual(h.get(k1), None)
# self.assertEqual(h.get(k2), None)
# self.assertEqual(h2.get(k1), 'a')
# self.assertEqual(h2.get(k2), None)
# self.assertEqual(h3.get(k1), 'a')
# self.assertEqual(h3.get(k2), 'b')
# h4 = h3.set(k2, 'cc')
# h5 = h4.set(k3, 'aa')
# self.assertEqual(h3.get(k1), 'a')
# self.assertEqual(h3.get(k2), 'b')
# self.assertEqual(h4.get(k1), 'a')
# self.assertEqual(h4.get(k2), 'cc')
# self.assertEqual(h4.get(k3), None)
# self.assertEqual(h5.get(k1), 'a')
# self.assertEqual(h5.get(k2), 'cc')
# self.assertEqual(h5.get(k2), 'cc')
# self.assertEqual(h5.get(k3), 'aa')
# self.assertEqual(len(h), 0)
# self.assertEqual(len(h2), 1)
# self.assertEqual(len(h3), 2)
# self.assertEqual(len(h4), 2)
# self.assertEqual(len(h5), 3)
# def test_hamt_stress(self):
# COLLECTION_SIZE = 7000
# TEST_ITERS_EVERY = 647
# CRASH_HASH_EVERY = 97
# CRASH_EQ_EVERY = 11
# RUN_XTIMES = 3
# for _ in range(RUN_XTIMES):
# h = hamt()
# d = dict()
# for i in range(COLLECTION_SIZE):
# key = KeyStr(i)
# if not (i % CRASH_HASH_EVERY):
# with HaskKeyCrasher(error_on_hash=True):
# with self.assertRaises(HashingError):
# h.set(key, i)
# h = h.set(key, i)
# if not (i % CRASH_EQ_EVERY):
# with HaskKeyCrasher(error_on_eq=True):
# with self.assertRaises(EqError):
# h.get(KeyStr(i)) # really trigger __eq__
# d[key] = i
# self.assertEqual(len(d), len(h))
# if not (i % TEST_ITERS_EVERY):
# self.assertEqual(set(h.items()), set(d.items()))
# self.assertEqual(len(h.items()), len(d.items()))
# self.assertEqual(len(h), COLLECTION_SIZE)
# for key in range(COLLECTION_SIZE):
# self.assertEqual(h.get(KeyStr(key), 'not found'), key)
# keys_to_delete = list(range(COLLECTION_SIZE))
# random.shuffle(keys_to_delete)
# for iter_i, i in enumerate(keys_to_delete):
# key = KeyStr(i)
# if not (iter_i % CRASH_HASH_EVERY):
# with HaskKeyCrasher(error_on_hash=True):
# with self.assertRaises(HashingError):
# h.delete(key)
# if not (iter_i % CRASH_EQ_EVERY):
# with HaskKeyCrasher(error_on_eq=True):
# with self.assertRaises(EqError):
# h.delete(KeyStr(i))
# h = h.delete(key)
# self.assertEqual(h.get(key, 'not found'), 'not found')
# del d[key]
# self.assertEqual(len(d), len(h))
# if iter_i == COLLECTION_SIZE // 2:
# hm = h
# dm = d.copy()
# if not (iter_i % TEST_ITERS_EVERY):
# self.assertEqual(set(h.keys()), set(d.keys()))
# self.assertEqual(len(h.keys()), len(d.keys()))
# self.assertEqual(len(d), 0)
# self.assertEqual(len(h), 0)
# # ============
# for key in dm:
# self.assertEqual(hm.get(str(key)), dm[key])
# self.assertEqual(len(dm), len(hm))
# for i, key in enumerate(keys_to_delete):
# hm = hm.delete(str(key))
# self.assertEqual(hm.get(str(key), 'not found'), 'not found')
# dm.pop(str(key), None)
# self.assertEqual(len(d), len(h))
# if not (i % TEST_ITERS_EVERY):
# self.assertEqual(set(h.values()), set(d.values()))
# self.assertEqual(len(h.values()), len(d.values()))
# self.assertEqual(len(d), 0)
# self.assertEqual(len(h), 0)
# self.assertEqual(list(h.items()), [])
# def test_hamt_delete_1(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(102, 'C')
# D = HashKey(103, 'D')
# E = HashKey(104, 'E')
# Z = HashKey(-100, 'Z')
# Er = HashKey(103, 'Er', error_on_eq_to=D)
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# orig_len = len(h)
# # BitmapNode(size=10 bitmap=0b111110000 id=0x10eadc618):
# # <Key name:A hash:100>: 'a'
# # <Key name:B hash:101>: 'b'
# # <Key name:C hash:102>: 'c'
# # <Key name:D hash:103>: 'd'
# # <Key name:E hash:104>: 'e'
# h = h.delete(C)
# self.assertEqual(len(h), orig_len - 1)
# with self.assertRaisesRegex(ValueError, 'cannot compare'):
# h.delete(Er)
# h = h.delete(D)
# self.assertEqual(len(h), orig_len - 2)
# h2 = h.delete(Z)
# self.assertIs(h2, h)
# h = h.delete(A)
# self.assertEqual(len(h), orig_len - 3)
# self.assertEqual(h.get(A, 42), 42)
# self.assertEqual(h.get(B), 'b')
# self.assertEqual(h.get(E), 'e')
# def test_hamt_delete_2(self):
# A = HashKey(100, 'A')
# B = HashKey(201001, 'B')
# C = HashKey(101001, 'C')
# D = HashKey(103, 'D')
# E = HashKey(104, 'E')
# Z = HashKey(-100, 'Z')
# Er = HashKey(201001, 'Er', error_on_eq_to=B)
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# orig_len = len(h)
# # BitmapNode(size=8 bitmap=0b1110010000):
# # <Key name:A hash:100>: 'a'
# # <Key name:D hash:103>: 'd'
# # <Key name:E hash:104>: 'e'
# # NULL:
# # BitmapNode(size=4 bitmap=0b100000000001000000000):
# # <Key name:B hash:201001>: 'b'
# # <Key name:C hash:101001>: 'c'
# with self.assertRaisesRegex(ValueError, 'cannot compare'):
# h.delete(Er)
# h = h.delete(Z)
# self.assertEqual(len(h), orig_len)
# h = h.delete(C)
# self.assertEqual(len(h), orig_len - 1)
# h = h.delete(B)
# self.assertEqual(len(h), orig_len - 2)
# h = h.delete(A)
# self.assertEqual(len(h), orig_len - 3)
# self.assertEqual(h.get(D), 'd')
# self.assertEqual(h.get(E), 'e')
# h = h.delete(A)
# h = h.delete(B)
# h = h.delete(D)
# h = h.delete(E)
# self.assertEqual(len(h), 0)
# def test_hamt_delete_3(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(100100, 'C')
# D = HashKey(100100, 'D')
# E = HashKey(104, 'E')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# orig_len = len(h)
# # BitmapNode(size=6 bitmap=0b100110000):
# # NULL:
# # BitmapNode(size=4 bitmap=0b1000000000000000000001000):
# # <Key name:A hash:100>: 'a'
# # NULL:
# # CollisionNode(size=4 id=0x108572410):
# # <Key name:C hash:100100>: 'c'
# # <Key name:D hash:100100>: 'd'
# # <Key name:B hash:101>: 'b'
# # <Key name:E hash:104>: 'e'
# h = h.delete(A)
# self.assertEqual(len(h), orig_len - 1)
# h = h.delete(E)
# self.assertEqual(len(h), orig_len - 2)
# self.assertEqual(h.get(C), 'c')
# self.assertEqual(h.get(B), 'b')
# def test_hamt_delete_4(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(100100, 'C')
# D = HashKey(100100, 'D')
# E = HashKey(100100, 'E')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# orig_len = len(h)
# # BitmapNode(size=4 bitmap=0b110000):
# # NULL:
# # BitmapNode(size=4 bitmap=0b1000000000000000000001000):
# # <Key name:A hash:100>: 'a'
# # NULL:
# # CollisionNode(size=6 id=0x10515ef30):
# # <Key name:C hash:100100>: 'c'
# # <Key name:D hash:100100>: 'd'
# # <Key name:E hash:100100>: 'e'
# # <Key name:B hash:101>: 'b'
# h = h.delete(D)
# self.assertEqual(len(h), orig_len - 1)
# h = h.delete(E)
# self.assertEqual(len(h), orig_len - 2)
# h = h.delete(C)
# self.assertEqual(len(h), orig_len - 3)
# h = h.delete(A)
# self.assertEqual(len(h), orig_len - 4)
# h = h.delete(B)
# self.assertEqual(len(h), 0)
# def test_hamt_delete_5(self):
# h = hamt()
# keys = []
# for i in range(17):
# key = HashKey(i, str(i))
# keys.append(key)
# h = h.set(key, 'val-{i}'.format(i=i))
# collision_key16 = HashKey(16, '18')
# h = h.set(collision_key16, 'collision')
# # ArrayNode(id=0x10f8b9318):
# # 0::
# # BitmapNode(size=2 count=1 bitmap=0b1):
# # <Key name:0 hash:0>: 'val-0'
# #
# # ... 14 more BitmapNodes ...
# #
# # 15::
# # BitmapNode(size=2 count=1 bitmap=0b1):
# # <Key name:15 hash:15>: 'val-15'
# #
# # 16::
# # BitmapNode(size=2 count=1 bitmap=0b1):
# # NULL:
# # CollisionNode(size=4 id=0x10f2f5af8):
# # <Key name:16 hash:16>: 'val-16'
# # <Key name:18 hash:16>: 'collision'
# self.assertEqual(len(h), 18)
# h = h.delete(keys[2])
# self.assertEqual(len(h), 17)
# h = h.delete(collision_key16)
# self.assertEqual(len(h), 16)
# h = h.delete(keys[16])
# self.assertEqual(len(h), 15)
# h = h.delete(keys[1])
# self.assertEqual(len(h), 14)
# h = h.delete(keys[1])
# self.assertEqual(len(h), 14)
# for key in keys:
# h = h.delete(key)
# self.assertEqual(len(h), 0)
# def test_hamt_items_1(self):
# A = HashKey(100, 'A')
# B = HashKey(201001, 'B')
# C = HashKey(101001, 'C')
# D = HashKey(103, 'D')
# E = HashKey(104, 'E')
# F = HashKey(110, 'F')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# h = h.set(F, 'f')
# it = h.items()
# self.assertEqual(
# set(list(it)),
# {(A, 'a'), (B, 'b'), (C, 'c'), (D, 'd'), (E, 'e'), (F, 'f')})
# def test_hamt_items_2(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(100100, 'C')
# D = HashKey(100100, 'D')
# E = HashKey(100100, 'E')
# F = HashKey(110, 'F')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# h = h.set(F, 'f')
# it = h.items()
# self.assertEqual(
# set(list(it)),
# {(A, 'a'), (B, 'b'), (C, 'c'), (D, 'd'), (E, 'e'), (F, 'f')})
# def test_hamt_keys_1(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(100100, 'C')
# D = HashKey(100100, 'D')
# E = HashKey(100100, 'E')
# F = HashKey(110, 'F')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(B, 'b')
# h = h.set(C, 'c')
# h = h.set(D, 'd')
# h = h.set(E, 'e')
# h = h.set(F, 'f')
# self.assertEqual(set(list(h.keys())), {A, B, C, D, E, F})
# self.assertEqual(set(list(h)), {A, B, C, D, E, F})
# def test_hamt_items_3(self):
# h = hamt()
# self.assertEqual(len(h.items()), 0)
# self.assertEqual(list(h.items()), [])
# def test_hamt_eq_1(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# C = HashKey(100100, 'C')
# D = HashKey(100100, 'D')
# E = HashKey(120, 'E')
# h1 = hamt()
# h1 = h1.set(A, 'a')
# h1 = h1.set(B, 'b')
# h1 = h1.set(C, 'c')
# h1 = h1.set(D, 'd')
# h2 = hamt()
# h2 = h2.set(A, 'a')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.set(B, 'b')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.set(C, 'c')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.set(D, 'd2')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.set(D, 'd')
# self.assertTrue(h1 == h2)
# self.assertFalse(h1 != h2)
# h2 = h2.set(E, 'e')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.delete(D)
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# h2 = h2.set(E, 'd')
# self.assertFalse(h1 == h2)
# self.assertTrue(h1 != h2)
# def test_hamt_eq_2(self):
# A = HashKey(100, 'A')
# Er = HashKey(100, 'Er', error_on_eq_to=A)
# h1 = hamt()
# h1 = h1.set(A, 'a')
# h2 = hamt()
# h2 = h2.set(Er, 'a')
# with self.assertRaisesRegex(ValueError, 'cannot compare'):
# h1 == h2
# with self.assertRaisesRegex(ValueError, 'cannot compare'):
# h1 != h2
# def test_hamt_gc_1(self):
# A = HashKey(100, 'A')
# h = hamt()
# h = h.set(0, 0) # empty HAMT node is memoized in hamt.c
# ref = weakref.ref(h)
# a = []
# a.append(a)
# a.append(h)
# b = []
# a.append(b)
# b.append(a)
# h = h.set(A, b)
# del h, a, b
# gc.collect()
# gc.collect()
# gc.collect()
# self.assertIsNone(ref())
# def test_hamt_gc_2(self):
# A = HashKey(100, 'A')
# B = HashKey(101, 'B')
# h = hamt()
# h = h.set(A, 'a')
# h = h.set(A, h)
# ref = weakref.ref(h)
# hi = h.items()
# next(hi)
# del h, hi
# gc.collect()
# gc.collect()
# gc.collect()
# self.assertIsNone(ref())
# def test_hamt_in_1(self):
# A = HashKey(100, 'A')
# AA = HashKey(100, 'A')
# B = HashKey(101, 'B')
# h = hamt()
# h = h.set(A, 1)
# self.assertTrue(A in h)
# self.assertFalse(B in h)
# with self.assertRaises(EqError):
# with HaskKeyCrasher(error_on_eq=True):
# AA in h
# with self.assertRaises(HashingError):
# with HaskKeyCrasher(error_on_hash=True):
# AA in h
# def test_hamt_getitem_1(self):
# A = HashKey(100, 'A')
# AA = HashKey(100, 'A')
# B = HashKey(101, 'B')
# h = hamt()
# h = h.set(A, 1)
# self.assertEqual(h[A], 1)
# self.assertEqual(h[AA], 1)
# with self.assertRaises(KeyError):
# h[B]
# with self.assertRaises(EqError):
# with HaskKeyCrasher(error_on_eq=True):
# h[AA]
# with self.assertRaises(HashingError):
# with HaskKeyCrasher(error_on_hash=True):
# h[AA]
if __name__ == "__main__":
unittest.main()
| 31,717 | 28.125803 | 80 | py |
gevent | gevent-master/src/gevent/tests/test__backdoor.py | from __future__ import print_function
from __future__ import absolute_import
import gevent
from gevent import socket
from gevent import backdoor
import gevent.testing as greentest
from gevent.testing.params import DEFAULT_BIND_ADDR_TUPLE
from gevent.testing.params import DEFAULT_CONNECT
def read_until(conn, postfix):
    """Read *conn* one byte at a time until the accumulated data ends
    with *postfix* (a bytestring), then return it decoded as UTF-8.

    Raises AssertionError if the peer closes before *postfix* arrives.
    """
    assert isinstance(postfix, bytes)
    buf = b''
    while True:
        if buf.endswith(postfix):
            break
        chunk = conn.recv(1)
        if not chunk:
            raise AssertionError('Connection ended before %r. Data read:\n%r' % (postfix, buf))
        buf = buf + chunk
    return buf if isinstance(buf, str) else buf.decode('utf-8')
def readline(conn):
    """Return one line read from *conn* via a temporary file object."""
    fobj = conn.makefile()
    try:
        return fobj.readline()
    finally:
        fobj.close()
class WorkerGreenlet(gevent.Greenlet):
    """Greenlet used by the tests; keeps spawn-tree stacks short so
    failure tracebacks stay concise."""
    # Limit how many frames of the spawning stack are recorded.
    spawning_stack_limit = 2
class SocketWithBanner(socket.socket):
    """A socket that remembers the interpreter banner read after connect.

    ``banner`` is filled in by the test helper that creates the
    connection; ``__slots__`` keeps the subclass lightweight.
    """
    __slots__ = ('banner',)
    def __init__(self, *args, **kwargs):
        self.banner = None
        super(SocketWithBanner, self).__init__(*args, **kwargs)
    def __enter__(self):
        # Delegate context management to the base socket explicitly.
        return socket.socket.__enter__(self)
    def __exit__(self, t, v, tb):
        return socket.socket.__exit__(self, t, v, tb)
@greentest.skipOnAppVeyor(
    "With the update to libev 4.31 and potentially closing sockets in the background, "
    "alternate tests started hanging on appveyor. Something like .E.E.E. "
    "See https://ci.appveyor.com/project/denik/gevent/build/job/n9fynkoyt2bvk8b5 "
    "It's not clear why, but presumably a socket isn't getting closed and a watcher is tied "
    "to the wrong file descriptor. I haven't been able to reproduce. If it were a systemic "
    "problem I'd expect to see more failures, so it is probably specific to resource management "
    "in this test."
)
class Test(greentest.TestCase):
    """Tests for gevent.backdoor.BackdoorServer: connect, run code at the
    REPL prompt, and shut down cleanly."""
    __timeout__ = 10
    def tearDown(self):
        gevent.sleep() # let spawned greenlets die
        super(Test, self).tearDown()
    def _make_and_start_server(self, *args, **kwargs):
        # Start a backdoor server on an ephemeral port and return it.
        server = backdoor.BackdoorServer(DEFAULT_BIND_ADDR_TUPLE, *args, **kwargs)
        server.start()
        return server
    def _create_connection(self, server):
        # Connect to *server* and stash the banner (everything up to the
        # first '>>> ' prompt) on the returned socket.
        conn = SocketWithBanner()
        conn.connect((DEFAULT_CONNECT, server.server_port)) # pylint:disable=not-callable
        try:
            banner = self._wait_for_prompt(conn)
        except:
            conn.close()
            raise
        conn.banner = banner
        return conn
    def _wait_for_prompt(self, conn):
        return read_until(conn, b'>>> ')
    def _close(self, conn, cmd=b'quit()\r\n)'):
        # NOTE(review): the default command ends with a stray ')' after the
        # CRLF; appears harmless because quit() closes the connection before
        # anything after it is read -- confirm whether this is intentional.
        conn.sendall(cmd)
        line = readline(conn)
        # The server closes the connection, so we read EOF (empty string).
        self.assertEqual(line, '')
        conn.close()
    @greentest.skipOnMacOnCI(
        "Sometimes fails to get the right answers; "
        "https://travis-ci.org/github/gevent/gevent/jobs/692184822"
    )
    @greentest.skipOnLibuvOnTravisOnCPython27(
        "segfaults; "
        "See https://github.com/gevent/gevent/pull/1156")
    def test_multi(self):
        # Ten concurrent clients each evaluate an expression at the prompt.
        with self._make_and_start_server() as server:
            def connect():
                with self._create_connection(server) as conn:
                    conn.sendall(b'2+2\r\n')
                    line = readline(conn)
                    self.assertEqual(line.strip(), '4', repr(line))
                    self._close(conn)
            jobs = [WorkerGreenlet.spawn(connect) for _ in range(10)]
            try:
                done = gevent.joinall(jobs, raise_error=True)
            finally:
                gevent.joinall(jobs, raise_error=False)
            self.assertEqual(len(done), len(jobs), done)
    def test_quit(self):
        with self._make_and_start_server() as server:
            with self._create_connection(server) as conn:
                self._close(conn)
    def test_sys_exit(self):
        # sys.exit() inside the backdoor must only end the session, not
        # the whole process.
        with self._make_and_start_server() as server:
            with self._create_connection(server) as conn:
                self._close(conn, b'import sys; sys.exit(0)\r\n')
    def test_banner(self):
        expected_banner = "Welcome stranger!" # native string
        with self._make_and_start_server(banner=expected_banner) as server:
            with self._create_connection(server) as conn:
                banner = conn.banner
                self._close(conn)
        self.assertEqual(banner[:len(expected_banner)], expected_banner, banner)
    def test_builtins(self):
        # locals() at the prompt should print a short, usable mapping, not
        # dump the whole builtins namespace.
        with self._make_and_start_server() as server:
            with self._create_connection(server) as conn:
                conn.sendall(b'locals()["__builtins__"]\r\n')
                response = read_until(conn, b'>>> ')
                self._close(conn)
        self.assertLess(
            len(response), 300,
            msg="locals() unusable: %s..." % response)
    def test_switch_exc(self):
        # Code run at the prompt may block and switch greenlets; the
        # session must survive a timeout raised and handled in between.
        from gevent.queue import Queue, Empty
        def bad():
            q = Queue()
            print('switching out, then throwing in')
            try:
                q.get(block=True, timeout=0.1)
            except Empty:
                print("Got Empty")
            print('switching out')
            gevent.sleep(0.1)
            print('switched in')
        with self._make_and_start_server(locals={'bad': bad}) as server:
            with self._create_connection(server) as conn:
                conn.sendall(b'bad()\r\n')
                response = self._wait_for_prompt(conn)
                self._close(conn)
        response = response.replace('\r\n', '\n')
        self.assertEqual(
            'switching out, then throwing in\nGot Empty\nswitching out\nswitched in\n>>> ',
            response)
if __name__ == '__main__':
greentest.main() # pragma: testrunner-no-combine
| 5,707 | 32.186047 | 97 | py |
gevent | gevent-master/src/gevent/tests/test__issues461_471.py | '''Test for GitHub issues 461 and 471.
When moving to Python 3, handling of KeyboardInterrupt exceptions caused
by a Ctrl-C raised an exception while printing the traceback for a
greenlet preventing the process from exiting. This test tests for proper
handling of KeyboardInterrupt.
'''
import sys
if sys.argv[1:] == ['subprocess']: # pragma: no cover
    # Child-process mode: print 'ready' so the parent knows we started,
    # then block in a greenlet until interrupted by the parent's signal.
    import gevent
    def task():
        sys.stdout.write('ready\n')
        sys.stdout.flush()
        gevent.sleep(30)
    try:
        # KeyboardInterrupt from Ctrl-C/SIGINT must be handled cleanly
        # (this is exactly what issues 461/471 were about).
        gevent.spawn(task).get()
    except KeyboardInterrupt:
        pass
    sys.exit(0)
else:
import signal
from subprocess import Popen, PIPE
import time
import unittest
import gevent.testing as greentest
from gevent.testing.sysinfo import CFFI_BACKEND
from gevent.testing.sysinfo import RUN_COVERAGE
from gevent.testing.sysinfo import WIN
from gevent.testing.sysinfo import PYPY3
    class Test(unittest.TestCase):
        """Spawn this file as a child in 'subprocess' mode and verify that
        sending an interrupt signal terminates it instead of hanging."""
        @unittest.skipIf(
            (CFFI_BACKEND and RUN_COVERAGE) or (PYPY3 and WIN),
            "Interferes with the timing; times out waiting for the child")
        def test_hang(self):
            # XXX: Why does PyPy3 on Win fail to kill the child? (This was before we switched
            # to pypy3w; perhaps that makes a difference?)
            if WIN:
                # SIGINT can't be delivered cross-process on Windows; a new
                # process group lets us send CTRL_BREAK_EVENT instead.
                from subprocess import CREATE_NEW_PROCESS_GROUP
                kwargs = {'creationflags': CREATE_NEW_PROCESS_GROUP}
            else:
                kwargs = {}
            # (not on Py2) pylint:disable=consider-using-with
            p = Popen([sys.executable, __file__, 'subprocess'], stdout=PIPE, **kwargs)
            line = p.stdout.readline()
            if not isinstance(line, str):
                line = line.decode('ascii')
            # Windows needs the \n in the string to write (because of buffering), but
            # because of newline handling it doesn't make it through the read; whereas
            # it does on other platforms. Universal newlines is broken on Py3, so the best
            # thing to do is to strip it
            line = line.strip()
            self.assertEqual(line, 'ready')
            # On Windows, we have to send the CTRL_BREAK_EVENT (which seems to terminate the process); SIGINT triggers
            # "ValueError: Unsupported signal: 2". The CTRL_C_EVENT is ignored on Python 3 (but not Python 2).
            # So this test doesn't test much on Windows.
            signal_to_send = signal.SIGINT if not WIN else getattr(signal, 'CTRL_BREAK_EVENT')
            p.send_signal(signal_to_send)
            # Wait a few seconds for child process to die. Sometimes signal delivery is delayed
            # or even swallowed by Python, so send the signal a few more times if necessary
            wait_seconds = 25.0
            now = time.time()
            midtime = now + (wait_seconds / 2.0)
            endtime = time.time() + wait_seconds
            while time.time() < endtime:
                if p.poll() is not None:
                    break
                if time.time() > midtime:
                    # Half the budget is gone; retry the signal once.
                    p.send_signal(signal_to_send)
                    midtime = endtime + 1 # only once
                time.sleep(0.1)
            else:
                # Kill unresponsive child and exit with error 1
                p.terminate()
                p.wait()
                raise AssertionError("Failed to wait for child")
            # If we get here, it's because we caused the process to exit; it
            # didn't hang. Under Windows, however, we have to use CTRL_BREAK_EVENT,
            # which has an arbitrary returncode depending on versions (so does CTRL_C_EVENT
            # on Python 2). We still
            # count this as success.
            self.assertEqual(p.returncode if not WIN else 0, 0)
            p.stdout.close()
if __name__ == '__main__':
greentest.main()
| 3,889 | 39.520833 | 118 | py |
gevent | gevent-master/src/gevent/tests/test__real_greenlet.py | """Testing that greenlet restores sys.exc_info.
Passes with CPython + greenlet 0.4.0
Fails with PyPy 2.2.1
"""
from __future__ import print_function
import sys
from gevent import testing as greentest
class Test(greentest.TestCase):
    def test(self):
        """Switching into a fresh greenlet must not leak the caller's
        in-flight sys.exc_info()."""
        import greenlet
        print('Your greenlet version: %s' % (getattr(greenlet, '__version__', None), ))
        captured = []
        def record_exc_info():
            # Runs in the child greenlet; should observe a clean exc_info.
            captured.append(repr(sys.exc_info()))
        child = greenlet.greenlet(record_exc_info)
        try:
            1 / 0
        except ZeroDivisionError:
            # Switch while a ZeroDivisionError is actively being handled.
            child.switch()
        self.assertEqual(captured, ['(None, None, None)'])
if __name__ == '__main__':
greentest.main()
| 693 | 18.828571 | 87 | py |
gevent | gevent-master/src/gevent/tests/test__socket_timeout.py | import gevent
from gevent import socket
import gevent.testing as greentest
class Test(greentest.TestCase):
    """Verify that a gevent socket honors settimeout() on recv()."""
    # Listening socket created in setUp.
    server = None
    # Greenlet running _accept.
    acceptor = None
    # Ephemeral port the listener bound to.
    server_port = None
    def _accept(self):
        # Accept a single connection (the server never sends anything,
        # so the client's recv must time out).
        try:
            conn, _ = self.server.accept()
            self._close_on_teardown(conn)
        except socket.error:
            pass
    def setUp(self):
        super(Test, self).setUp()
        self.server = self._close_on_teardown(greentest.tcp_listener(backlog=1))
        self.server_port = self.server.getsockname()[1]
        self.acceptor = gevent.spawn(self._accept)
        # Yield so the acceptor greenlet reaches accept() first.
        gevent.sleep(0)
    def tearDown(self):
        if self.acceptor is not None:
            self.acceptor.kill()
            self.acceptor = None
        if self.server is not None:
            self.server.close()
            self.server = None
        super(Test, self).tearDown()
    def test_timeout(self):
        gevent.sleep(0)
        sock = socket.socket()
        self._close_on_teardown(sock)
        sock.connect((greentest.DEFAULT_CONNECT_HOST, self.server_port))
        sock.settimeout(0.1)
        # Nothing is ever sent, so recv must raise the timeout error
        # with the standard message.
        with self.assertRaises(socket.error) as cm:
            sock.recv(1024)
        ex = cm.exception
        self.assertEqual(ex.args, ('timed out',))
        self.assertEqual(str(ex), 'timed out')
if __name__ == '__main__':
greentest.main()
| 1,351 | 25 | 80 | py |
gevent | gevent-master/src/gevent/tests/test__sleep0.py | import gevent
from gevent.testing.util import alarm
# Watchdog: kill the process if the loop below somehow never ends.
alarm(3)
# gevent.sleep(0) must yield to the hub, letting the 0.01s Timeout
# fire; exception=False makes the with-block swallow it and exit.
with gevent.Timeout(0.01, False):
    while True:
        gevent.sleep(0)
| 139 | 11.727273 | 37 | py |
gevent | gevent-master/src/gevent/tests/test__server.py | from __future__ import print_function, division
from contextlib import contextmanager
import unittest
import errno
import os
import gevent.testing as greentest
from gevent.testing import PY3
from gevent.testing import sysinfo
from gevent.testing import DEFAULT_SOCKET_TIMEOUT as _DEFAULT_SOCKET_TIMEOUT
from gevent.testing.timing import SMALLEST_RELIABLE_DELAY
from gevent.testing.sockets import tcp_listener
from gevent.testing import WIN
from gevent import socket
import gevent
from gevent.server import StreamServer
from gevent.exceptions import LoopExit
class SimpleStreamServer(StreamServer):
    """A minimal HTTP-ish server used by the tests.

    Responds PONG to GET /ping, streams on /long and /short until the
    client closes, and returns 404 for anything else.
    """
    def handle(self, client_socket, _address): # pylint:disable=method-hidden
        fd = client_socket.makefile()
        try:
            request_line = fd.readline()
            if not request_line:
                # Client connected and closed without sending anything.
                return
            try:
                _method, path, _rest = request_line.split(' ', 3)
            except Exception:
                print('Failed to parse request line: %r' % (request_line, ))
                raise
            if path == '/ping':
                client_socket.sendall(b'HTTP/1.0 200 OK\r\n\r\nPONG')
            elif path in ['/long', '/short']:
                client_socket.sendall(b'hello')
                # Keep the connection open, draining bytes until EOF.
                while True:
                    data = client_socket.recv(1)
                    if not data:
                        break
            else:
                client_socket.sendall(b'HTTP/1.0 404 WTF?\r\n\r\n')
        finally:
            fd.close()
def sleep_to_clear_old_sockets(*_args):
    """Yield to the event loop so queued socket-close callbacks can run.

    On Windows the loop must actually spin for a short interval so file
    descriptors are fully cleaned up; otherwise a freshly-accepted
    connection may reuse an FD that is still being closed and produce
    spurious connection errors. Elsewhere a zero-length sleep suffices.
    Failures are deliberately ignored (best effort).
    """
    delay = SMALLEST_RELIABLE_DELAY if WIN else 0
    try:
        gevent.sleep(delay)
    except Exception: # pylint:disable=broad-except
        pass
class Settings(object):
    """Pluggable expectations for the server tests.

    Subclasses (e.g. for WSGI servers) override the class attributes and
    the static assertion helpers; each helper receives the running
    TestCase as *inst*.
    """
    ServerClass = StreamServer
    ServerSubClass = SimpleStreamServer
    # Whether stop_accepting/start_accepting round-trips are supported.
    restartable = True
    close_socket_detected = True
    @staticmethod
    def assertAcceptedConnectionError(inst):
        # The connection is accepted but the handler dies: reads return
        # nothing (or time out).
        with inst.makefile() as conn:
            try:
                result = conn.read()
            except socket.timeout:
                result = None
        inst.assertFalse(result)
    assert500 = assertAcceptedConnectionError
    @staticmethod
    def assert503(inst):
        # regular reads timeout
        inst.assert500()
        # attempt to send anything reset the connection
        try:
            inst.send_request()
        except socket.error as ex:
            if ex.args[0] not in greentest.CONN_ABORTED_ERRORS:
                raise
    @staticmethod
    def assertPoolFull(inst):
        # With a full worker pool, a new request cannot be served in time.
        with inst.assertRaises(socket.timeout):
            inst.assertRequestSucceeded(timeout=0.01)
    @staticmethod
    def fill_default_server_args(inst, kwargs):
        kwargs.setdefault('spawn', inst.get_spawn())
        return kwargs
class TestCase(greentest.TestCase):
# pylint: disable=too-many-public-methods
__timeout__ = greentest.LARGE_TIMEOUT
Settings = Settings
server = None
def cleanup(self):
if getattr(self, 'server', None) is not None:
self.server.stop()
self.server = None
sleep_to_clear_old_sockets()
def get_listener(self):
return self._close_on_teardown(tcp_listener(backlog=5))
def get_server_host_port_family(self):
server_host = self.server.server_host
if not server_host:
server_host = greentest.DEFAULT_LOCAL_HOST_ADDR
elif server_host == '::':
server_host = greentest.DEFAULT_LOCAL_HOST_ADDR6
try:
family = self.server.socket.family
except AttributeError:
# server deletes socket when closed
family = socket.AF_INET
return server_host, self.server.server_port, family
@contextmanager
def makefile(self, timeout=_DEFAULT_SOCKET_TIMEOUT, bufsize=1, include_raw_socket=False):
server_host, server_port, family = self.get_server_host_port_family()
bufarg = 'buffering' if PY3 else 'bufsize'
makefile_kwargs = {bufarg: bufsize}
if PY3:
# Under Python3, you can't read and write to the same
# makefile() opened in r, and r+ is not allowed
makefile_kwargs['mode'] = 'rwb'
with socket.socket(family=family) as sock:
rconn = None
# We want the socket to be accessible from the fileobject
# we return. On Python 2, natively this is available as
# _sock, but Python 3 doesn't have that.
sock.connect((server_host, server_port))
sock.settimeout(timeout)
with sock.makefile(**makefile_kwargs) as rconn:
result = rconn if not include_raw_socket else (rconn, sock)
yield result
def send_request(self, url='/', timeout=_DEFAULT_SOCKET_TIMEOUT, bufsize=1):
with self.makefile(timeout=timeout, bufsize=bufsize) as conn:
self.send_request_to_fd(conn, url)
def send_request_to_fd(self, fd, url='/'):
fd.write(('GET %s HTTP/1.0\r\n\r\n' % url).encode('latin-1'))
fd.flush()
LOCAL_CONN_REFUSED_ERRORS = ()
if greentest.OSX:
# A kernel bug in OS X sometimes results in this
LOCAL_CONN_REFUSED_ERRORS = (errno.EPROTOTYPE,)
elif greentest.WIN and greentest.PYPY3:
# We see WinError 10049: The requested address is not valid
# which is not one of the errors we get anywhere else.
# Not sure which errno constant this is?
LOCAL_CONN_REFUSED_ERRORS = (10049,)
def assertConnectionRefused(self, in_proc_server=True):
try:
with self.assertRaises(socket.error) as exc:
with self.makefile() as conn:
conn.close()
except LoopExit:
if not in_proc_server:
raise
# A LoopExit is fine. If we've killed the server
# and don't have any other greenlets to run, then
# blocking to open the connection might raise this.
# This became likely on Windows once we stopped
# passing IP addresses through an extra call to
# ``getaddrinfo``, which changed the number of switches
return
ex = exc.exception
self.assertIn(ex.args[0],
(errno.ECONNREFUSED, errno.EADDRNOTAVAIL,
errno.ECONNRESET, errno.ECONNABORTED) + self.LOCAL_CONN_REFUSED_ERRORS,
(ex, ex.args))
def assert500(self):
self.Settings.assert500(self)
def assert503(self):
self.Settings.assert503(self)
def assertAcceptedConnectionError(self):
self.Settings.assertAcceptedConnectionError(self)
def assertPoolFull(self):
self.Settings.assertPoolFull(self)
def assertNotAccepted(self):
try:
with self.makefile(include_raw_socket=True) as (conn, sock):
conn.write(b'GET / HTTP/1.0\r\n\r\n')
conn.flush()
result = b''
try:
while True:
data = sock.recv(1)
if not data:
break
result += data
except socket.timeout:
self.assertFalse(result)
return
except LoopExit:
# See assertConnectionRefused
return
self.assertTrue(result.startswith(b'HTTP/1.0 500 Internal Server Error'), repr(result))
def assertRequestSucceeded(self, timeout=_DEFAULT_SOCKET_TIMEOUT):
with self.makefile(timeout=timeout) as conn:
conn.write(b'GET /ping HTTP/1.0\r\n\r\n')
result = conn.read()
self.assertTrue(result.endswith(b'\r\n\r\nPONG'), repr(result))
def start_server(self):
self.server.start()
self.assertRequestSucceeded()
self.assertRequestSucceeded()
def stop_server(self):
self.server.stop()
self.assertConnectionRefused()
def report_netstat(self, _msg):
# At one point this would call 'sudo netstat -anp | grep PID'
# with os.system. We can probably do better with psutil.
return
    def _create_server(self, *args, **kwargs):
        # ``server_kind`` and ``server_listen_addr`` are test-only kwargs;
        # everything else is forwarded to the server class.
        kind = kwargs.pop('server_kind', self.ServerSubClass)
        addr = kwargs.pop('server_listen_addr', (greentest.DEFAULT_BIND_ADDR, 0))
        return kind(addr, *args, **kwargs)
    def init_server(self, *args, **kwargs):
        # Create and start the server, then let the event loop settle
        # before the test begins making requests.
        self.server = self._create_server(*args, **kwargs)
        self.server.start()
        sleep_to_clear_old_sockets()
    @property
    def socket(self):
        # The listening socket of the server under test.
        return self.server.socket
    def _test_invalid_callback(self):
        """A handler with the wrong signature produces a 500 and reports a TypeError."""
        if sysinfo.RUNNING_ON_APPVEYOR:
            self.skipTest("Sometimes misses the error") # XXX: Why?
        try:
            # Can't use a kwarg here, WSGIServer and StreamServer
            # take different things (application and handle)
            self.init_server(lambda: None)
            self.expect_one_error()
            self.assert500()
            self.assert_error(TypeError)
        finally:
            self.server.stop()
        # XXX: There's something unreachable (with a traceback?)
        # We need to clear it to make the leak checks work on Travis;
        # so far I can't reproduce it locally on OS X.
        import gc; gc.collect()
    def fill_default_server_args(self, kwargs):
        # Let the Settings object supply any constructor defaults not given.
        return self.Settings.fill_default_server_args(self, kwargs)
    def ServerClass(self, *args, **kwargs):
        # Factory: the plain server class selected by the Settings.
        return self.Settings.ServerClass(*args,
                                         **self.fill_default_server_args(kwargs))
    def ServerSubClass(self, *args, **kwargs):
        # Factory: the subclassed server class selected by the Settings.
        return self.Settings.ServerSubClass(*args,
                                            **self.fill_default_server_args(kwargs))
    def get_spawn(self):
        # Base case: no spawn strategy. Subclasses override to test each kind.
        return None
class TestDefaultSpawn(TestCase):
    """Server lifecycle tests using the default ``gevent.spawn`` strategy."""
    def get_spawn(self):
        return gevent.spawn
    def _test_server_start_stop(self, restartable):
        # Core start/(optionally stop+restart accepting)/stop cycle.
        self.report_netstat('before start')
        self.start_server()
        self.report_netstat('after start')
        if restartable and self.Settings.restartable:
            self.server.stop_accepting()
            self.report_netstat('after stop_accepting')
            self.assertNotAccepted()
            self.server.start_accepting()
            self.report_netstat('after start_accepting')
            sleep_to_clear_old_sockets()
            self.assertRequestSucceeded()
        self.stop_server()
        self.report_netstat('after stop')
    def test_backlog_is_not_accepted_for_socket(self):
        self.switch_expected = False
        with self.assertRaises(TypeError):
            self.ServerClass(self.get_listener(), backlog=25)
    @greentest.skipOnLibuvOnCIOnPyPy("Sometimes times out")
    @greentest.skipOnAppVeyor("Sometimes times out.")
    def test_backlog_is_accepted_for_address(self):
        self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0), backlog=25)
        self.assertConnectionRefused()
        self._test_server_start_stop(restartable=False)
    def test_subclass_just_create(self):
        self.server = self.ServerSubClass(self.get_listener())
        self.assertNotAccepted()
    @greentest.skipOnAppVeyor("Sometimes times out.")
    def test_subclass_with_socket(self):
        self.server = self.ServerSubClass(self.get_listener())
        # the connection won't be refused, because there exists a
        # listening socket, but it won't be handled also
        self.assertNotAccepted()
        self._test_server_start_stop(restartable=True)
    def test_subclass_with_address(self):
        self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
        self.assertConnectionRefused()
        self._test_server_start_stop(restartable=True)
    def test_invalid_callback(self):
        self._test_invalid_callback()
    @greentest.reraises_flaky_timeout(socket.timeout)
    def _test_serve_forever(self):
        # serve_forever runs in a child greenlet; stopping the server
        # must make further connections fail.
        g = gevent.spawn(self.server.serve_forever)
        try:
            sleep_to_clear_old_sockets()
            self.assertRequestSucceeded()
            self.server.stop()
            self.assertFalse(self.server.started)
            self.assertConnectionRefused()
        finally:
            g.kill()
            g.get()
            self.server.stop()
    def test_serve_forever(self):
        self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
        self.assertFalse(self.server.started)
        self.assertConnectionRefused()
        self._test_serve_forever()
    def test_serve_forever_after_start(self):
        self.server = self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0))
        self.assertConnectionRefused()
        self.assertFalse(self.server.started)
        self.server.start()
        self.assertTrue(self.server.started)
        self._test_serve_forever()
    @greentest.skipIf(greentest.EXPECT_POOR_TIMER_RESOLUTION, "Sometimes spuriously fails")
    def test_server_closes_client_sockets(self):
        self.server = self.ServerClass((greentest.DEFAULT_BIND_ADDR, 0), lambda *args: [])
        self.server.start()
        sleep_to_clear_old_sockets()
        with self.makefile() as conn:
            self.send_request_to_fd(conn)
            # use assert500 below?
            with gevent.Timeout._start_new_or_dummy(1):
                try:
                    result = conn.read()
                    if result:
                        assert result.startswith('HTTP/1.0 500 Internal Server Error'), repr(result)
                except socket.timeout:
                    pass
                except socket.error as ex:
                    if ex.args[0] == 10053:
                        pass # "established connection was aborted by the software in your host machine"
                    elif ex.args[0] == errno.ECONNRESET:
                        pass
                    else:
                        raise
        self.stop_server()
    @property
    def socket(self):
        return self.server.socket
    def test_error_in_spawn(self):
        self.init_server()
        self.assertTrue(self.server.started)
        error = ExpectedError('test_error_in_spawn')
        def _spawn(*_args):
            gevent.getcurrent().throw(error)
        self.server._spawn = _spawn
        self.expect_one_error()
        self.assertAcceptedConnectionError()
        self.assert_error(ExpectedError, error)
    def test_server_repr_when_handle_is_instancemethod(self):
        # PR 501
        self.init_server()
        assert self.server.started
        self.assertIn('Server', repr(self.server))
        self.server.set_handle(self.server.handle)
        self.assertIn('handle=<bound method', repr(self.server))
        self.assertIn('of self>', repr(self.server))
        self.server.set_handle(self.test_server_repr_when_handle_is_instancemethod)
        self.assertIn('test_server_repr_when_handle_is_instancemethod', repr(self.server))
        def handle():
            pass
        self.server.set_handle(handle)
        self.assertIn('handle=<function', repr(self.server))
class TestRawSpawn(TestDefaultSpawn):
    # Same suite, but with the low-level spawn_raw strategy.
    def get_spawn(self):
        return gevent.spawn_raw
class TestPoolSpawn(TestDefaultSpawn):
    """Same suite, but spawning into a bounded pool (size 2)."""
    def get_spawn(self):
        return 2
    @greentest.skipIf(greentest.EXPECT_POOR_TIMER_RESOLUTION,
                      "If we have bad timer resolution and hence increase timeouts, "
                      "it can be hard to sleep for a correct amount of time that lets "
                      "requests in the pool be full.")
    def test_pool_full(self):
        self.init_server()
        with self.makefile() as long_request:
            with self.makefile() as short_request:
                self.send_request_to_fd(short_request, '/short')
                self.send_request_to_fd(long_request, '/long')
                # keep long_request in scope, otherwise the connection will be closed
                gevent.get_hub().loop.update_now()
                gevent.sleep(_DEFAULT_SOCKET_TIMEOUT / 10.0)
                self.assertPoolFull()
                self.assertPoolFull()
                # XXX Not entirely clear why this fails (timeout) on appveyor;
                # underlying socket timeout causing the long_request to close?
                self.assertPoolFull()
        # gevent.http and gevent.wsgi cannot detect socket close, so sleep a little
        # to let /short request finish
        gevent.sleep(_DEFAULT_SOCKET_TIMEOUT)
        # XXX: This tends to timeout. Which is weird, because what would have
        # been the third call to assertPoolFull() DID NOT timeout, hence why it
        # was removed.
        try:
            self.assertRequestSucceeded()
        except socket.timeout:
            greentest.reraiseFlakyTestTimeout()
    test_pool_full.error_fatal = False
class TestNoneSpawn(TestCase):
    """Tests with spawn=None: handlers run directly in the hub's callback."""
    def get_spawn(self):
        return None
    def test_invalid_callback(self):
        self._test_invalid_callback()
    @greentest.skipOnAppVeyor("Sometimes doesn't get the error.")
    def test_assertion_in_blocking_func(self):
        # A handler that blocks (gevent.sleep) must be rejected when
        # running directly in the event-loop callback.
        def sleep(*_args):
            gevent.sleep(SMALLEST_RELIABLE_DELAY)
        self.init_server(sleep, server_kind=self.ServerSubClass, spawn=None)
        self.expect_one_error()
        self.assert500()
        self.assert_error(AssertionError, 'Impossible to call blocking function in the event loop callback')
class ExpectedError(Exception):
    """Raised deliberately by tests; not a real failure."""
    pass
class TestSSLSocketNotAllowed(TestCase):
    """A legacy ``socket.ssl``-wrapped listener must be rejected with TypeError."""
    switch_expected = False
    def get_spawn(self):
        return gevent.spawn
    @unittest.skipUnless(hasattr(socket, 'ssl'), "Uses socket.ssl")
    def test(self):
        from gevent.socket import ssl
        listener = self._close_on_teardown(tcp_listener(backlog=5))
        listener = ssl(listener)
        self.assertRaises(TypeError, self.ServerSubClass, listener)
def _file(name, here=os.path.dirname(__file__)):
return os.path.abspath(os.path.join(here, name))
class BadWrapException(BaseException):
    # Derives from BaseException so generic ``except Exception`` handlers
    # in the server cannot swallow it.
    pass
class TestSSLGetCertificate(TestCase):
    """SSL server tests run against the checked-in server.key/server.crt pair."""
    def _create_server(self): # pylint:disable=arguments-differ
        return self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0),
                                   keyfile=_file('server.key'),
                                   certfile=_file('server.crt'))
    def get_spawn(self):
        return gevent.spawn
    def test_certificate(self):
        # Issue 801
        from gevent import monkey, ssl
        # only broken if *not* monkey patched
        self.assertFalse(monkey.is_module_patched('ssl'))
        self.assertFalse(monkey.is_module_patched('socket'))
        self.init_server()
        server_host, server_port, _family = self.get_server_host_port_family()
        ssl.get_server_certificate((server_host, server_port)) # pylint:disable=no-member
    def test_wrap_socket_and_handle_wrap_failure(self):
        # A failure to wrap the socket doesn't have follow on effects
        # like failing with a UnboundLocalError.
        # See https://github.com/gevent/gevent/issues/1236
        self.init_server()
        def bad_wrap(_client_socket, **_wrap_args):
            raise BadWrapException()
        self.server.wrap_socket = bad_wrap
        with self.assertRaises(BadWrapException):
            self.server._handle(None, None)
# test non-socket.error exception in accept call: fatal
# test error in spawn(): non-fatal
# test error in spawned handler: non-fatal
if __name__ == '__main__':
greentest.main()
| 19,882 | 34.19115 | 108 | py |
gevent | gevent-master/src/gevent/tests/test__doctests.py | from __future__ import print_function
import doctest
import functools
import os
import re
import sys
import unittest
# Ignore tracebacks: ZeroDivisionError
def myfunction(*_args, **_kwargs):
    """Doctest stub: accepts any arguments and does nothing."""
    return None
class RENormalizingOutputChecker(doctest.OutputChecker):
    """
    Output checker that canonicalizes both the expected and actual text
    with a sequence of regex substitutions before comparing them.
    Inspired by the checker used in zope.testing.
    """

    def __init__(self, patterns):
        # *patterns* is a sequence of (compiled_regex, replacement) pairs.
        self.transformers = []
        for pattern, replacement in patterns:
            self.transformers.append(functools.partial(pattern.sub, replacement))

    def check_output(self, want, got, optionflags):
        if got == want:
            return True
        for transform in self.transformers:
            want = transform(want)
            got = transform(got)
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
FORBIDDEN_MODULES = set()
class Modules(object):
    """Collects ``(name, path)`` pairs for gevent modules eligible for doctesting."""
    def __init__(self, allowed_modules):
        from gevent.testing import walk_modules
        self.allowed_modules = allowed_modules
        self.modules = set()
        for path, module in walk_modules(recursive=True):
            self.add_module(module, path)
    def add_module(self, name, path):
        # Respect both the caller's allow-list and the platform deny-list.
        if self.allowed_modules and name not in self.allowed_modules:
            return
        if name in FORBIDDEN_MODULES:
            return
        self.modules.add((name, path))
    def __bool__(self):
        return bool(self.modules)
    __nonzero__ = __bool__ # Python 2 spelling of __bool__
    def __iter__(self):
        return iter(self.modules)
def main(): # pylint:disable=too-many-locals
    """
    Collect gevent modules containing doctests (``>>>`` markers) and run
    them with a normalizing output checker. Module names given on the
    command line restrict the run to those modules.
    """
    cwd = os.getcwd()
    # Use pure_python to get the correct module source and docstrings
    os.environ['PURE_PYTHON'] = '1'
    import gevent
    from gevent import socket
    from gevent.testing import util
    from gevent.testing import sysinfo
    if sysinfo.WIN:
        FORBIDDEN_MODULES.update({
            # Uses commands only found on posix
            'gevent.subprocess',
        })
    try:
        allowed_modules = sys.argv[1:]
        sys.path.append('.')
        # Names made available to every doctest.
        globs = {
            'myfunction': myfunction,
            'gevent': gevent,
            'socket': socket,
        }
        modules = Modules(allowed_modules)
        if not modules:
            sys.exit('No modules found matching %s' % ' '.join(allowed_modules))
        suite = unittest.TestSuite()
        checker = RENormalizingOutputChecker((
            # Normalize subprocess.py: BSD ls is in the example, gnu ls outputs
            # 'cannot access'
            (re.compile(
                "ls: cannot access 'non_existent_file': No such file or directory"),
             "ls: non_existent_file: No such file or directory"),
            # Python 3 bytes add a "b".
            (re.compile(r'b(".*?")'), r"\1"),
            (re.compile(r"b('.*?')"), r"\1"),
        ))
        tests_count = 0
        modules_count = 0
        for m, path in sorted(modules):
            with open(path, 'rb') as f:
                contents = f.read()
            # Only build a doctest suite for modules that actually contain
            # interactive examples.
            if re.search(br'^\s*>>> ', contents, re.M):
                s = doctest.DocTestSuite(m, extraglobs=globs, checker=checker)
                test_count = len(s._tests)
                util.log('%s (from %s): %s tests', m, path, test_count)
                suite.addTest(s)
                modules_count += 1
                tests_count += test_count
        util.log('Total: %s tests in %s modules', tests_count, modules_count)
        # TODO: Pass this off to unittest.main()
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(suite)
    finally:
        os.chdir(cwd)
if __name__ == '__main__':
main()
| 3,613 | 25.970149 | 100 | py |
gevent | gevent-master/src/gevent/tests/__init__.py | 0 | 0 | 0 | py | |
gevent | gevent-master/src/gevent/tests/test__monkey_ssl_warning2.py | import unittest
import warnings
import sys
# All supported python versions now provide SSLContext.
# We import it by name and subclass it here by name.
# compare with warning3.py
from ssl import SSLContext
class MySubclass(SSLContext):
    """A direct subclass of the stdlib SSLContext; monkey-patching must NOT touch it."""
    pass
# This file should only have this one test in it
# because we have to be careful about our imports
# and because we need to be careful about our patching.
class Test(unittest.TestCase):
    """patch_all() warns exactly once about direct ssl imports and unpatched subclasses."""
    @unittest.skipIf(sys.version_info[:2] < (3, 6),
                     "Only on Python 3.6+")
    def test_ssl_subclass_and_module_reference(self):
        from gevent import monkey
        self.assertFalse(monkey.saved)
        with warnings.catch_warnings(record=True) as issued_warnings:
            warnings.simplefilter('always')
            # Patch twice: still only one MonkeyPatchWarning expected.
            monkey.patch_all()
            monkey.patch_all()
        issued_warnings = [x for x in issued_warnings
                           if isinstance(x.message, monkey.MonkeyPatchWarning)]
        self.assertEqual(1, len(issued_warnings))
        message = issued_warnings[0].message
        self.assertIn("Modules that had direct imports", str(message))
        self.assertIn("Subclasses (NOT patched)", str(message))
unittest.main()
| 1,255 | 26.911111 | 79 | py |
gevent | gevent-master/src/gevent/tests/_imports_imports_at_top_level.py | import gevent
# For reproducing #728: We spawn a greenlet at import time,
# that itself wants to import, and wait on it at import time.
# If we're the only greenlet running, and locks aren't granular
# enough, this results in a LoopExit (and also a lock deadlock)
def f():
    # Importing a sibling module from inside a spawned greenlet while the
    # main greenlet blocks at import time reproduces issue #728.
    __import__('_imports_at_top_level')
g = gevent.spawn(f)
g.get()
| 345 | 23.714286 | 63 | py |
gevent | gevent-master/src/gevent/tests/test__memleak.py | import sys
import unittest
from gevent.testing import TestCase
import gevent
from gevent.timeout import Timeout
@unittest.skipUnless(
    hasattr(sys, 'gettotalrefcount'),
    "Needs debug build"
)
# XXX: This name makes no sense. What was this for originally?
class TestQueue(TestCase): # pragma: no cover
    """Check that repeatedly raising Timeout does not grow the total refcount."""
    # pylint:disable=bare-except,no-member
    def test(self):
        refcounts = []
        for _ in range(15):
            try:
                Timeout.start_new(0.01)
                gevent.sleep(0.1)
                self.fail('must raise Timeout')
            except Timeout:
                pass
            refcounts.append(sys.gettotalrefcount())
        # Refcounts may go down, but not up
        # XXX: JAM: I think this may just be broken. Each time we add
        # a new integer to our list of refcounts, we'll be
        # creating a new reference. This makes sense when we see the list
        # go up by one each iteration:
        #
        # AssertionError: 530631 not less than or equal to 530630
        # : total refcount mismatch:
        # [530381, 530618, 530619, 530620, 530621,
        #  530622, 530623, 530624, 530625, 530626,
        #  530627, 530628, 530629, 530630, 530631]
        final = refcounts[-1]
        previous = refcounts[-2]
        self.assertLessEqual(
            final, previous,
            "total refcount mismatch: %s" % refcounts)
if __name__ == '__main__':
unittest.main()
| 1,466 | 29.5625 | 73 | py |
gevent | gevent-master/src/gevent/tests/test__queue.py | import unittest
import gevent.testing as greentest
from gevent.testing import TestCase
import gevent
from gevent.hub import get_hub, LoopExit
from gevent import util
from gevent import queue
from gevent.queue import Empty, Full
from gevent.event import AsyncResult
from gevent.testing.timing import AbstractGenericGetTestCase
# pylint:disable=too-many-ancestors
class TestQueue(TestCase):
    """Core Queue semantics: put/get ordering, peek, maxsize, and waiter handling."""
    def test_send_first(self):
        self.switch_expected = False
        q = queue.Queue()
        q.put('hi')
        self.assertEqual(q.peek(), 'hi')
        self.assertEqual(q.get(), 'hi')
    def test_peek_empty(self):
        q = queue.Queue()
        # No putters waiting, in the main loop: LoopExit
        with self.assertRaises(LoopExit):
            q.peek()
        def waiter(q):
            self.assertRaises(Empty, q.peek, timeout=0.01)
        g = gevent.spawn(waiter, q)
        gevent.sleep(0.1)
        g.join()
    def test_peek_multi_greenlet(self):
        q = queue.Queue()
        g = gevent.spawn(q.peek)
        g.start()
        gevent.sleep(0)
        q.put(1)
        g.join()
        self.assertTrue(g.exception is None)
        self.assertEqual(q.peek(), 1)
    def test_send_last(self):
        q = queue.Queue()
        def waiter(q):
            with gevent.Timeout(0.1 if not greentest.RUNNING_ON_APPVEYOR else 0.5):
                self.assertEqual(q.get(), 'hi2')
            return "OK"
        p = gevent.spawn(waiter, q)
        gevent.sleep(0.01)
        q.put('hi2')
        gevent.sleep(0.01)
        assert p.get(timeout=0) == "OK"
    def test_max_size(self):
        # A bounded queue blocks the putter once full and unblocks as
        # items are consumed.
        q = queue.Queue(2)
        results = []
        def putter(q):
            q.put('a')
            results.append('a')
            q.put('b')
            results.append('b')
            q.put('c')
            results.append('c')
            return "OK"
        p = gevent.spawn(putter, q)
        gevent.sleep(0)
        self.assertEqual(results, ['a', 'b'])
        self.assertEqual(q.get(), 'a')
        gevent.sleep(0)
        self.assertEqual(results, ['a', 'b', 'c'])
        self.assertEqual(q.get(), 'b')
        self.assertEqual(q.get(), 'c')
        assert p.get(timeout=0) == "OK"
    def test_zero_max_size(self):
        # A Channel has no buffer: put blocks until a getter arrives.
        q = queue.Channel()
        def sender(evt, q):
            q.put('hi')
            evt.set('done')
        def receiver(evt, q):
            x = q.get()
            evt.set(x)
        e1 = AsyncResult()
        e2 = AsyncResult()
        p1 = gevent.spawn(sender, e1, q)
        gevent.sleep(0.001)
        self.assertTrue(not e1.ready())
        p2 = gevent.spawn(receiver, e2, q)
        self.assertEqual(e2.get(), 'hi')
        self.assertEqual(e1.get(), 'done')
        with gevent.Timeout(0):
            gevent.joinall([p1, p2])
    def test_multiple_waiters(self):
        # tests that multiple waiters get their results back
        q = queue.Queue()
        def waiter(q, evt):
            evt.set(q.get())
        sendings = ['1', '2', '3', '4']
        evts = [AsyncResult() for x in sendings]
        for i, _ in enumerate(sendings):
            gevent.spawn(waiter, q, evts[i]) # XXX use waitall for them
        gevent.sleep(0.01) # get 'em all waiting
        results = set()
        def collect_pending_results():
            for e in evts:
                with gevent.Timeout(0.001, False):
                    x = e.get()
                    results.add(x)
            return len(results)
        q.put(sendings[0])
        self.assertEqual(collect_pending_results(), 1)
        q.put(sendings[1])
        self.assertEqual(collect_pending_results(), 2)
        q.put(sendings[2])
        q.put(sendings[3])
        self.assertEqual(collect_pending_results(), 4)
    def test_waiters_that_cancel(self):
        q = queue.Queue()
        def do_receive(q, evt):
            with gevent.Timeout(0, RuntimeError()):
                try:
                    result = q.get()
                    evt.set(result) # pragma: no cover (should have raised)
                except RuntimeError:
                    evt.set('timed out')
        evt = AsyncResult()
        gevent.spawn(do_receive, q, evt)
        self.assertEqual(evt.get(), 'timed out')
        q.put('hi')
        self.assertEqual(q.get(), 'hi')
    def test_senders_that_die(self):
        q = queue.Queue()
        def do_send(q):
            q.put('sent')
        gevent.spawn(do_send, q)
        self.assertEqual(q.get(), 'sent')
    def test_two_waiters_one_dies(self):
        # A waiter that times out must not consume the item meant for
        # the surviving waiter.
        def waiter(q, evt):
            evt.set(q.get())
        def do_receive(q, evt):
            with gevent.Timeout(0, RuntimeError()):
                try:
                    result = q.get()
                    evt.set(result) # pragma: no cover (should have raised)
                except RuntimeError:
                    evt.set('timed out')
        q = queue.Queue()
        dying_evt = AsyncResult()
        waiting_evt = AsyncResult()
        gevent.spawn(do_receive, q, dying_evt)
        gevent.spawn(waiter, q, waiting_evt)
        gevent.sleep(0.1)
        q.put('hi')
        self.assertEqual(dying_evt.get(), 'timed out')
        self.assertEqual(waiting_evt.get(), 'hi')
    def test_two_bogus_waiters(self):
        def do_receive(q, evt):
            with gevent.Timeout(0, RuntimeError()):
                try:
                    result = q.get()
                    evt.set(result) # pragma: no cover (should have raised)
                except RuntimeError:
                    evt.set('timed out')
        q = queue.Queue()
        e1 = AsyncResult()
        e2 = AsyncResult()
        gevent.spawn(do_receive, q, e1)
        gevent.spawn(do_receive, q, e2)
        gevent.sleep(0.1)
        q.put('sent')
        self.assertEqual(e1.get(), 'timed out')
        self.assertEqual(e2.get(), 'timed out')
        self.assertEqual(q.get(), 'sent')
class TestChannel(TestCase):
    """Channel rendezvous semantics: each put hands off directly to a get."""
    def test_send(self):
        channel = queue.Channel()
        events = []
        def another_greenlet():
            events.append(channel.get())
            events.append(channel.get())
        g = gevent.spawn(another_greenlet)
        events.append('sending')
        channel.put('hello')
        events.append('sent hello')
        channel.put('world')
        events.append('sent world')
        self.assertEqual(['sending', 'hello', 'sent hello', 'world', 'sent world'], events)
        g.get()
    def test_wait(self):
        channel = queue.Channel()
        events = []
        def another_greenlet():
            events.append('sending hello')
            channel.put('hello')
            events.append('sending world')
            channel.put('world')
            events.append('sent world')
        g = gevent.spawn(another_greenlet)
        events.append('waiting')
        events.append(channel.get())
        events.append(channel.get())
        self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world'], events)
        gevent.sleep(0)
        self.assertEqual(['waiting', 'sending hello', 'hello', 'sending world', 'world', 'sent world'], events)
        g.get()
    def test_iterable(self):
        # Iteration stops when StopIteration is received from the channel.
        channel = queue.Channel()
        gevent.spawn(channel.put, StopIteration)
        r = list(channel)
        self.assertEqual(r, [])
class TestJoinableQueue(TestCase):
    """task_done() bookkeeping on JoinableQueue."""
    def test_task_done(self):
        channel = queue.JoinableQueue()
        X = object()
        gevent.spawn(channel.put, X)
        result = channel.get()
        self.assertIs(result, X)
        self.assertEqual(1, channel.unfinished_tasks)
        channel.task_done()
        self.assertEqual(0, channel.unfinished_tasks)
class TestNoWait(TestCase):
    """get_nowait/put_nowait must work, including from hub (event-loop) callbacks."""
    def test_put_nowait_simple(self):
        result = []
        q = queue.Queue(1)
        def store_result(func, *args):
            result.append(func(*args))
        run_callback = get_hub().loop.run_callback
        run_callback(store_result, util.wrap_errors(Full, q.put_nowait), 2)
        run_callback(store_result, util.wrap_errors(Full, q.put_nowait), 3)
        gevent.sleep(0)
        assert len(result) == 2, result
        assert result[0] is None, result
        assert isinstance(result[1], queue.Full), result
    def test_get_nowait_simple(self):
        result = []
        q = queue.Queue(1)
        q.put(4)
        def store_result(func, *args):
            result.append(func(*args))
        run_callback = get_hub().loop.run_callback
        run_callback(store_result, util.wrap_errors(Empty, q.get_nowait))
        run_callback(store_result, util.wrap_errors(Empty, q.get_nowait))
        gevent.sleep(0)
        assert len(result) == 2, result
        assert result[0] == 4, result
        assert isinstance(result[1], queue.Empty), result
    # get_nowait must work from the mainloop
    def test_get_nowait_unlock(self):
        result = []
        q = queue.Queue(1)
        p = gevent.spawn(q.put, 5)
        def store_result(func, *args):
            result.append(func(*args))
        assert q.empty(), q
        gevent.sleep(0)
        assert q.full(), q
        get_hub().loop.run_callback(store_result, q.get_nowait)
        gevent.sleep(0)
        assert q.empty(), q
        assert result == [5], result
        assert p.ready(), p
        assert p.dead, p
        assert q.empty(), q
    def test_get_nowait_unlock_channel(self):
        # get_nowait runs fine in the hub, and
        # it switches to a waiting putter if needed.
        result = []
        q = queue.Channel()
        p = gevent.spawn(q.put, 5)
        def store_result(func, *args):
            result.append(func(*args))
        self.assertTrue(q.empty())
        self.assertTrue(q.full())
        gevent.sleep(0.001)
        self.assertTrue(q.empty())
        self.assertTrue(q.full())
        get_hub().loop.run_callback(store_result, q.get_nowait)
        gevent.sleep(0.001)
        self.assertTrue(q.empty())
        self.assertTrue(q.full())
        self.assertEqual(result, [5])
        self.assertTrue(p.ready())
        self.assertTrue(p.dead)
        self.assertTrue(q.empty())
    # put_nowait must work from the mainloop
    def test_put_nowait_unlock(self):
        result = []
        q = queue.Queue()
        p = gevent.spawn(q.get)
        def store_result(func, *args):
            result.append(func(*args))
        self.assertTrue(q.empty(), q)
        self.assertFalse(q.full(), q)
        gevent.sleep(0.001)
        self.assertTrue(q.empty(), q)
        self.assertFalse(q.full(), q)
        get_hub().loop.run_callback(store_result, q.put_nowait, 10)
        self.assertFalse(p.ready(), p)
        gevent.sleep(0.001)
        self.assertEqual(result, [None])
        self.assertTrue(p.ready(), p)
        self.assertFalse(q.full(), q)
        self.assertTrue(q.empty(), q)
class TestJoinEmpty(TestCase):
    def test_issue_45(self):
        """Test that join() exits immediately if no jobs were put into the queue"""
        self.switch_expected = False
        q = queue.JoinableQueue()
        q.join()
class AbstractTestWeakRefMixin(object):
    # Mixin: the object produced by _makeOne() must be weakly referenceable.
    def test_weak_reference(self):
        import weakref
        one = self._makeOne()
        ref = weakref.ref(one)
        self.assertIs(one, ref())
class TestGetInterrupt(AbstractTestWeakRefMixin, AbstractGenericGetTestCase):
    """A blocking get() must honor timeouts, raising Empty."""
    Timeout = Empty
    kind = queue.Queue
    def wait(self, timeout):
        return self._makeOne().get(timeout=timeout)
    def _makeOne(self):
        return self.kind()
class TestGetInterruptJoinableQueue(TestGetInterrupt):
    # Same get-interrupt suite against JoinableQueue.
    kind = queue.JoinableQueue
class TestGetInterruptLifoQueue(TestGetInterrupt):
    # Same get-interrupt suite against LifoQueue.
    kind = queue.LifoQueue
class TestGetInterruptPriorityQueue(TestGetInterrupt):
    # Same get-interrupt suite against PriorityQueue.
    kind = queue.PriorityQueue
class TestGetInterruptChannel(TestGetInterrupt):
    # Same get-interrupt suite against Channel.
    kind = queue.Channel
class TestPutInterrupt(AbstractGenericGetTestCase):
    """A blocking put() on a full queue must honor timeouts, raising Full."""
    kind = queue.Queue
    Timeout = Full
    def setUp(self):
        super(TestPutInterrupt, self).setUp()
        self.queue = self._makeOne()
    def wait(self, timeout):
        # Fill the queue first so the final put blocks.
        while not self.queue.full():
            self.queue.put(1)
        return self.queue.put(2, timeout=timeout)
    def _makeOne(self):
        return self.kind(1)
class TestPutInterruptJoinableQueue(TestPutInterrupt):
    # Same put-interrupt suite against JoinableQueue.
    kind = queue.JoinableQueue
class TestPutInterruptLifoQueue(TestPutInterrupt):
    # Same put-interrupt suite against LifoQueue.
    kind = queue.LifoQueue
class TestPutInterruptPriorityQueue(TestPutInterrupt):
    # Same put-interrupt suite against PriorityQueue.
    kind = queue.PriorityQueue
class TestPutInterruptChannel(TestPutInterrupt):
    # Channels take no maxsize argument; they're always "full" until a getter waits.
    kind = queue.Channel
    def _makeOne(self):
        return self.kind()
# SimpleQueue only exists where the stdlib provides it (Python 3.7+).
if hasattr(queue, 'SimpleQueue'):
    class TestGetInterruptSimpleQueue(TestGetInterrupt):
        kind = queue.SimpleQueue
        def test_raises_timeout_Timeout(self):
            raise unittest.SkipTest("Not supported")
        test_raises_timeout_Timeout_exc_customized = test_raises_timeout_Timeout
        test_outer_timeout_is_not_lost = test_raises_timeout_Timeout
# Keep the abstract base out of the module namespace so unittest
# discovery doesn't try to run it directly.
del AbstractGenericGetTestCase
if __name__ == '__main__':
greentest.main()
| 13,107 | 26.830149 | 111 | py |
gevent | gevent-master/src/gevent/tests/test__select.py | from gevent.testing import six
import sys
import os
import errno
from gevent import select, socket
import gevent.core
import gevent.testing as greentest
import gevent.testing.timing
import unittest
class TestSelect(gevent.testing.timing.AbstractGenericWaitTestCase):
    # Generic wait-timing contract, implemented with a bare select() timeout.
    def wait(self, timeout):
        select.select([], [], [], timeout)
@greentest.skipOnWindows("Cant select on files")
class TestSelectRead(gevent.testing.timing.AbstractGenericWaitTestCase):
    """select() on a never-ready pipe read end must respect the timeout."""
    def wait(self, timeout):
        r, w = os.pipe()
        try:
            select.select([r], [], [], timeout)
        finally:
            os.close(r)
            os.close(w)
    # Issue #12367: http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/155606
    @unittest.skipIf(sys.platform.startswith('freebsd'),
                     'skip because of a FreeBSD bug: kern/155606')
    def test_errno(self):
        # Backported from test_select.py in 3.4
        with open(__file__, 'rb') as fp:
            fd = fp.fileno()
            fp.close()
            try:
                select.select([fd], [], [], 0)
            except OSError as err:
                # Python 3
                self.assertEqual(err.errno, errno.EBADF)
            except select.error as err: # pylint:disable=duplicate-except
                # Python 2 (select.error is OSError on py3)
                self.assertEqual(err.args[0], errno.EBADF)
            else:
                self.fail("exception not raised")
@unittest.skipUnless(hasattr(select, 'poll'), "Needs poll")
@greentest.skipOnWindows("Cant poll on files")
class TestPollRead(gevent.testing.timing.AbstractGenericWaitTestCase):
    """poll() on a never-ready pipe read end must respect the timeout."""
    def wait(self, timeout):
        # On darwin, the read pipe is reported as writable
        # immediately, for some reason. So we carefully register
        # it only for read events (the default is read and write)
        r, w = os.pipe()
        try:
            poll = select.poll()
            poll.register(r, select.POLLIN)
            poll.poll(timeout * 1000)
        finally:
            poll.unregister(r)
            os.close(r)
            os.close(w)
    def test_unregister_never_registered(self):
        # "Attempting to remove a file descriptor that was
        # never registered causes a KeyError exception to be
        # raised."
        poll = select.poll()
        self.assertRaises(KeyError, poll.unregister, 5)
    def test_poll_invalid(self):
        self.skipTest(
            "libev >= 4.27 aborts the process if built with EV_VERIFY >= 2. "
            "For libuv, depending on whether the fileno is reused or not "
            "this either crashes or does nothing.")
        with open(__file__, 'rb') as fp:
            fd = fp.fileno()
            poll = select.poll()
            poll.register(fd, select.POLLIN)
            # Close after registering; libuv refuses to even
            # create a watcher if it would get EBADF (so this turns into
            # a test of whether or not we successfully initted the watcher).
            fp.close()
            result = poll.poll(0)
            self.assertEqual(result, [(fd, select.POLLNVAL)]) # pylint:disable=no-member
class TestSelectTypes(greentest.TestCase):
    """select() accepts int (and Python 2 long) file descriptors, rejects strings."""
    def test_int(self):
        sock = socket.socket()
        try:
            select.select([int(sock.fileno())], [], [], 0.001)
        finally:
            sock.close()
    if hasattr(six.builtins, 'long'):
        # Only defined on Python 2, where the 'long' builtin exists.
        def test_long(self):
            sock = socket.socket()
            try:
                select.select(
                    [six.builtins.long(sock.fileno())], [], [], 0.001)
            finally:
                sock.close()
    def test_string(self):
        self.switch_expected = False
        self.assertRaises(TypeError, select.select, ['hello'], [], [], 0.001)
if __name__ == '__main__':
greentest.main()
| 3,831 | 32.034483 | 88 | py |
gevent | gevent-master/src/gevent/tests/test__greenness.py | # Copyright (c) 2008 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Trivial test that a single process (and single thread) can both read
and write from green sockets (when monkey patched).
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from gevent import monkey
monkey.patch_all()
import gevent.testing as greentest
try:
from urllib import request as urllib2
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
except ImportError:
# Python 2
import urllib2
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import gevent
from gevent.testing import params
class QuietHandler(SimpleHTTPRequestHandler, object):
    """Request handler that records log messages on the server instead of printing."""
    def log_message(self, *args): # pylint:disable=arguments-differ
        self.server.messages += ((args,),)
class Server(HTTPServer, object):
    """Blocking stdlib HTTP server bound to the default test address."""
    # Accumulated log-message tuples and the count of requests served.
    messages = ()
    requests_handled = 0
    def __init__(self):
        HTTPServer.__init__(self,
                            params.DEFAULT_BIND_ADDR_TUPLE,
                            QuietHandler)
    def handle_request(self):
        HTTPServer.handle_request(self)
        self.requests_handled += 1
class TestGreenness(greentest.TestCase):
    """One monkey-patched thread can serve a request and fetch it with urllib."""
    check_totalrefcount = False
    def test_urllib2(self):
        httpd = Server()
        # Serve exactly one request in a greenlet while this greenlet fetches it.
        server_greenlet = gevent.spawn(httpd.handle_request)
        port = httpd.socket.getsockname()[1]
        rsp = urllib2.urlopen('http://127.0.0.1:%s' % port)
        rsp.read()
        rsp.close()
        server_greenlet.join()
        self.assertEqual(httpd.requests_handled, 1)
        httpd.server_close()
if __name__ == '__main__':
greentest.main()
| 2,790 | 30.715909 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__iwait.py | import gevent
import gevent.testing as greentest
from gevent.lock import Semaphore
class Testiwait(greentest.TestCase):
    """iwait() results are directly iterable and release links when closed early."""
    def test_noiter(self):
        # Test that gevent.iwait returns objects which can be iterated upon
        # without additional calls to iter()
        sem1 = Semaphore()
        sem2 = Semaphore()
        gevent.spawn(sem1.release)
        ready = next(gevent.iwait((sem1, sem2)))
        self.assertEqual(sem1, ready)
    def test_iwait_partial(self):
        # Test that the iwait context manager allows the iterator to be
        # consumed partially without a memory leak.
        sem = Semaphore()
        let = gevent.spawn(sem.release)
        with gevent.iwait((sem,), timeout=0.01) as iterator:
            self.assertEqual(sem, next(iterator))
        let.get()
    def test_iwait_nogarbage(self):
        # Exiting the context manager must drop the rendezvous links
        # on objects that never fired.
        sem1 = Semaphore()
        sem2 = Semaphore()
        let = gevent.spawn(sem1.release)
        with gevent.iwait((sem1, sem2)) as iterator:
            self.assertEqual(sem1, next(iterator))
            self.assertEqual(sem2.linkcount(), 1)
        self.assertEqual(sem2.linkcount(), 0)
        let.get()
if __name__ == '__main__':
greentest.main()
| 1,205 | 27.046512 | 75 | py |
gevent | gevent-master/src/gevent/tests/test__socket_errors.py | # Copyright (c) 2008-2009 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import gevent.testing as greentest
from gevent.testing import support
from gevent.testing import sysinfo
from gevent.socket import socket, error
from gevent.exceptions import LoopExit
class TestSocketErrors(greentest.TestCase):
    """Connecting to a port nobody listens on must raise a socket error."""
    __timeout__ = 5
    def test_connection_refused(self):
        port = support.find_unused_port()
        with socket() as s:
            try:
                with self.assertRaises(error) as exc:
                    s.connect((greentest.DEFAULT_CONNECT_HOST, port))
            except LoopExit:
                # With nothing else to run, the hub may raise LoopExit
                # instead of a socket error; tolerated here.
                return
            ex = exc.exception
            # errno must be one of the platform's "connection refused"
            # codes, and the message should mention the refusal.
            self.assertIn(ex.args[0], sysinfo.CONN_REFUSED_ERRORS, ex)
            self.assertIn('refused', str(ex).lower())
if __name__ == '__main__':
greentest.main()
| 1,869 | 37.163265 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__nondefaultloop.py | # test for issue #210
from gevent import core
from gevent.testing.util import alarm
alarm(1)
log = []
loop = core.loop(default=False)
loop.run_callback(log.append, 1)
loop.run()
assert log == [1], log
| 204 | 14.769231 | 37 | py |
gevent | gevent-master/src/gevent/tests/test__issue_728.py | #!/usr/bin/env python
from gevent.monkey import patch_all
patch_all()
if __name__ == '__main__':
# Reproducing #728 requires a series of nested
# imports
__import__('_imports_imports_at_top_level')
| 212 | 20.3 | 50 | py |
gevent | gevent-master/src/gevent/tests/test__os.py | from __future__ import print_function, absolute_import, division
import sys
from os import pipe
import gevent
from gevent import os
from gevent import Greenlet, joinall
from gevent import testing as greentest
from gevent.testing import mock
from gevent.testing import six
from gevent.testing.skipping import skipOnLibuvOnPyPyOnWin
class TestOS_tp(greentest.TestCase):
    """Exercise cooperative pipe I/O via the threadpool-based
    ``os.tp_read``/``os.tp_write`` functions."""
    __timeout__ = greentest.LARGE_TIMEOUT
    def pipe(self):
        # Plain OS pipe; the non-blocking subclass overrides this.
        return pipe()
    read = staticmethod(os.tp_read)
    write = staticmethod(os.tp_write)
    @skipOnLibuvOnPyPyOnWin("Sometimes times out")
    def _test_if_pipe_blocks(self, buffer_class):
        r, w = self.pipe()
        # set nbytes such that for sure it is > maximum pipe buffer
        nbytes = 1000000
        block = b'x' * 4096
        buf = buffer_class(block)
        # Lack of "nonlocal" keyword in Python 2.x:
        bytesread = [0]
        byteswritten = [0]
        def produce():
            while byteswritten[0] != nbytes:
                bytesleft = nbytes - byteswritten[0]
                byteswritten[0] += self.write(w, buf[:min(bytesleft, 4096)])
        def consume():
            while bytesread[0] != nbytes:
                bytesleft = nbytes - bytesread[0]
                bytesread[0] += len(self.read(r, min(bytesleft, 4096)))
        producer = Greenlet(produce)
        producer.start()
        consumer = Greenlet(consume)
        consumer.start_later(1)
        # If patching was not succesful, the producer will have filled
        # the pipe before the consumer starts, and would block the entire
        # process. Therefore the next line would never finish.
        joinall([producer, consumer])
        self.assertEqual(bytesread[0], nbytes)
        self.assertEqual(bytesread[0], byteswritten[0])
    if sys.version_info[0] < 3:
        # The 'buffer' builtin (and memoryview on 2.7+) only exist to be
        # tested on Python 2.
        def test_if_pipe_blocks_buffer(self):
            self._test_if_pipe_blocks(six.builtins.buffer)
        if sys.version_info[:2] >= (2, 7):
            def test_if_pipe_blocks_memoryview(self):
                self._test_if_pipe_blocks(six.builtins.memoryview)
@greentest.skipUnless(hasattr(os, 'make_nonblocking'),
                      "Only on POSIX")
class TestOS_nb(TestOS_tp):
    """Same pipe tests against the non-blocking ``os.nb_read``/``os.nb_write``,
    plus mock-based checks that the hub io watcher they create is closed."""
    def read(self, fd, count):
        return os.nb_read(fd, count)
    def write(self, fd, count):
        return os.nb_write(fd, count)
    def pipe(self):
        # The nb_* functions require non-blocking descriptors.
        r, w = super(TestOS_nb, self).pipe()
        os.make_nonblocking(r)
        os.make_nonblocking(w)
        return r, w
    def _make_ignored_oserror(self):
        # EINTR is the error the nb_* loop retries after waiting on the hub.
        import errno
        ignored_oserror = OSError()
        ignored_oserror.errno = errno.EINTR
        return ignored_oserror
    def _check_hub_event_closed(self, mock_get_hub, fd, event):
        # The io watcher must be created once for (fd, event) and closed once.
        mock_get_hub.assert_called_once_with()
        hub = mock_get_hub.return_value
        io = hub.loop.io
        io.assert_called_once_with(fd, event)
        event = io.return_value
        event.close.assert_called_once_with()
    def _test_event_closed_on_normal_io(self, nb_func, nb_arg,
                                        mock_io, mock_get_hub, event):
        # First call raises EINTR (forcing a hub wait), second succeeds.
        mock_io.side_effect = [self._make_ignored_oserror(), 42]
        fd = 100
        result = nb_func(fd, nb_arg)
        self.assertEqual(result, 42)
        self._check_hub_event_closed(mock_get_hub, fd, event)
    def _test_event_closed_on_io_error(self, nb_func, nb_arg,
                                       mock_io, mock_get_hub, event):
        # Even when the retried call fails, the watcher must be closed.
        mock_io.side_effect = [self._make_ignored_oserror(), ValueError()]
        fd = 100
        with self.assertRaises(ValueError):
            nb_func(fd, nb_arg)
        self._check_hub_event_closed(mock_get_hub, fd, event)
    @mock.patch('gevent.os.get_hub')
    @mock.patch('gevent.os._write')
    def test_event_closed_on_write(self, mock_write, mock_get_hub):
        self._test_event_closed_on_normal_io(os.nb_write, b'buf',
                                             mock_write, mock_get_hub,
                                             2)
    @mock.patch('gevent.os.get_hub')
    @mock.patch('gevent.os._write')
    def test_event_closed_on_write_error(self, mock_write, mock_get_hub):
        self._test_event_closed_on_io_error(os.nb_write, b'buf',
                                            mock_write, mock_get_hub,
                                            2)
    @mock.patch('gevent.os.get_hub')
    @mock.patch('gevent.os._read')
    def test_event_closed_on_read(self, mock_read, mock_get_hub):
        self._test_event_closed_on_normal_io(os.nb_read, b'buf',
                                             mock_read, mock_get_hub,
                                             1)
    @mock.patch('gevent.os.get_hub')
    @mock.patch('gevent.os._read')
    def test_event_closed_on_read_error(self, mock_read, mock_get_hub):
        self._test_event_closed_on_io_error(os.nb_read, b'buf',
                                            mock_read, mock_get_hub,
                                            1)
@greentest.skipUnless(hasattr(os, 'fork_and_watch'),
                      "Only on POSIX")
class TestForkAndWatch(greentest.TestCase):
    """``os.fork_and_watch`` children must be reapable via the patched waitpid."""
    __timeout__ = greentest.LARGE_TIMEOUT
    def test_waitpid_all(self):
        # Cover this specific case.
        pid = os.fork_and_watch()
        if pid:
            os.waitpid(-1, 0)
            # Can't assert on what the found pid actually was,
            # our testrunner may have spawned multiple children.
            os._reap_children(0) # make the leakchecker happy
        else: # pragma: no cover
            gevent.sleep(2)
            # The test framework will catch a regular SystemExit
            # from sys.exit(), we need to just kill the process.
            os._exit(0)
    def test_waitpid_wrong_neg(self):
        # A bogus negative pid (not -1) must raise, same as the stdlib.
        self.assertRaises(OSError, os.waitpid, -2, 0)
    def test_waitpid_wrong_pos(self):
        # pid 1 (init) is never our child, so this must raise too.
        self.assertRaises(OSError, os.waitpid, 1, 0)
if __name__ == '__main__':
greentest.main()
| 5,963 | 31.950276 | 76 | py |
gevent | gevent-master/src/gevent/tests/test__execmodules.py | import unittest
import warnings
from gevent.testing import modules
from gevent.testing import main
from gevent.testing.sysinfo import NON_APPLICABLE_SUFFIXES
from gevent.testing import six
def make_exec_test(path, module):
    """Build a test method that executes the source file at *path*.

    The returned callable reads the file and exec()s it with ``__file__``
    and ``__name__`` bound as if it had been imported as *module*.  An
    ImportError from a known-optional module becomes a skip; other
    ImportErrors propagate.
    """
    def test(_):
        with open(path, 'rb') as f:
            src = f.read()
        with warnings.catch_warnings():
            # The exec'd modules may emit DeprecationWarnings that are
            # not the subject of this test.
            warnings.simplefilter('ignore', DeprecationWarning)
            module_globals = {'__file__': path, '__name__': module}
            try:
                six.exec_(src, module_globals)
            except ImportError:
                if module not in modules.OPTIONAL_MODULES:
                    raise
                raise unittest.SkipTest(
                    "Unable to import optional module %s" % module)
    test.__name__ = "test_" + module.replace(".", "_")
    return test
def make_all_tests(cls):
    """Class decorator: attach one exec-test per discoverable gevent module.

    Modules whose names end in a platform-inapplicable suffix are skipped.
    """
    walker = modules.walk_modules(recursive=True, check_optional=False)
    for path, module in walker:
        if not module.endswith(NON_APPLICABLE_SUFFIXES):
            case = make_exec_test(path, module)
            setattr(cls, case.__name__, case)
    return cls
@make_all_tests
class Test(unittest.TestCase):
    # All test methods are generated and attached by the decorator.
    pass
if __name__ == '__main__':
# This should not be combined with other tests in the same process
# because it messes with global shared state.
# pragma: testrunner-no-combine
main()
| 1,327 | 27.869565 | 91 | py |
gevent | gevent-master/src/gevent/tests/test__refcount.py | # Copyright (c) 2008 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This test checks that underlying socket instances (gevent.socket.socket._sock)
are not leaked by the hub.
"""
from __future__ import print_function
from _socket import socket as c_socket
import sys
if sys.version_info[0] >= 3:
    # Python3 enforces that __weakref__ appears only once,
    # and not when a slotted class inherits from an unslotted class.
    # We mess around with the class MRO below and violate that rule
    # (because socket.socket defines __slots__ with __weakref__),
    # so import socket.socket before that can happen.
    __import__('socket')
    Socket = c_socket
else:
    class Socket(c_socket):
        "Something we can have a weakref to"
    import _socket
    _socket.socket = Socket
# Patch *after* the weakref-able Socket replacement is installed.
from gevent import monkey; monkey.patch_all()
import gevent.testing as greentest
from gevent.testing import support
from gevent.testing import params
try:
    from thread import start_new_thread
except ImportError:
    from _thread import start_new_thread
from time import sleep
import weakref
import gc
import socket
# Make the (patched) socket module hand out our weakref-able class.
socket._realsocket = Socket
SOCKET_TIMEOUT = 0.1
if greentest.RESOLVER_DNSPYTHON:
    # Takes a bit longer to resolve the client
    # address initially.
    SOCKET_TIMEOUT *= 2
if greentest.RUNNING_ON_CI:
    # CI machines are slow; give accept() more headroom.
    SOCKET_TIMEOUT *= 2
class Server(object):
    """One-shot TCP server used to probe for leaked socket objects.

    Accepts at most one connection, records what the client sent,
    replies ``b'bye'``, and always closes its listening socket.
    """
    listening = False
    client_data = None
    server_port = None

    def __init__(self, raise_on_timeout):
        self.raise_on_timeout = raise_on_timeout
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.server_port = support.bind_port(self.socket, params.DEFAULT_BIND_ADDR)
        except:
            # Never leak the freshly created socket if binding fails.
            self.close()
            raise

    def close(self):
        self.socket.close()
        self.socket = None

    def handle_request(self):
        try:
            self.socket.settimeout(SOCKET_TIMEOUT)
            self.socket.listen(5)
            self.listening = True
            try:
                client_conn, _addr = self.socket.accept() # pylint:disable=no-member
            except socket.timeout:
                if not self.raise_on_timeout:
                    return
                raise
            try:
                self.client_data = client_conn.recv(100)
                client_conn.send(b'bye')
            finally:
                client_conn.close()
        finally:
            # The listening socket is single-use either way.
            self.close()
class Client(object):
    """Minimal TCP client: send ``b'hello'``, record the reply, close."""
    server_data = None

    def __init__(self, server_port):
        self.server_port = server_port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def close(self):
        self.socket.close()
        self.socket = None

    def make_request(self):
        try:
            self.socket.connect((params.DEFAULT_CONNECT, self.server_port))
            self.socket.send(b'hello')
            self.server_data = self.socket.recv(100)
        finally:
            # Drop the socket reference no matter how the request went.
            self.close()
class Test(greentest.TestCase):
    """After a server/client interaction, the hidden server socket object
    must be collectable (no references retained by the hub)."""
    __timeout__ = greentest.LARGE_TIMEOUT
    def run_interaction(self, run_client):
        # Returns a weakref to the server's underlying _sock so the
        # caller can verify it died.
        server = Server(raise_on_timeout=run_client)
        wref_to_hidden_server_socket = weakref.ref(server.socket._sock)
        client = None
        start_new_thread(server.handle_request)
        if run_client:
            client = Client(server.server_port)
            start_new_thread(client.make_request)
        # Wait until we do our business; we will always close
        # the server; We may also close the client.
        # On PyPy, we may not actually see the changes they write to
        # their dicts immediately.
        for obj in server, client:
            if obj is None:
                continue
            while obj.socket is not None:
                sleep(0.01)
        # If we have a client, then we should have data
        if run_client:
            self.assertEqual(server.client_data, b'hello')
            self.assertEqual(client.server_data, b'bye')
        return wref_to_hidden_server_socket
    def run_and_check(self, run_client):
        wref_to_hidden_server_socket = self.run_interaction(run_client=run_client)
        greentest.gc_collect_if_needed()
        if wref_to_hidden_server_socket():
            # Leak detected: dump the referrer chains to help diagnose
            # who is still holding the socket, then fail.
            from pprint import pformat
            print(pformat(gc.get_referrers(wref_to_hidden_server_socket())))
            for x in gc.get_referrers(wref_to_hidden_server_socket()):
                print(pformat(x))
                for y in gc.get_referrers(x):
                    print('-', pformat(y))
            self.fail('server socket should be dead by now')
    def test_clean_exit(self):
        self.run_and_check(True)
        self.run_and_check(True)
    def test_timeout_exit(self):
        self.run_and_check(False)
        self.run_and_check(False)
if __name__ == '__main__':
greentest.main()
| 5,866 | 29.878947 | 87 | py |
gevent | gevent-master/src/gevent/tests/test__example_udp_client.py | from gevent import monkey
monkey.patch_all()
from gevent.server import DatagramServer
from gevent.testing import util
from gevent.testing import main
class Test_udp_client(util.TestServer):
    """Run the ``udp_client.py`` example against an in-process DatagramServer."""
    start_kwargs = {'timeout': 10}
    example = 'udp_client.py'
    example_args = ['Test_udp_client']
    def test(self):
        log = []
        def handle(message, address):
            # Record the datagram and echo a reply so the client can exit.
            log.append(message)
            server.sendto(b'reply-from-server', address)
        server = DatagramServer('127.0.0.1:9001', handle)
        server.start()
        try:
            self.run_example()
        finally:
            server.close()
        self.assertEqual(log, [b'Test_udp_client'])
if __name__ == '__main__':
# Running this following test__example_portforwarder on Appveyor
# doesn't work in the same process for some reason.
main() # pragma: testrunner-no-combine
| 884 | 23.583333 | 68 | py |
gevent | gevent-master/src/gevent/tests/test__semaphore.py | ###
# This file is test__semaphore.py only for organization purposes.
# The public API,
# and the *only* correct place to import Semaphore --- even in tests ---
# is ``gevent.lock``, never ``gevent._semaphore``.
##
from __future__ import print_function
from __future__ import absolute_import
import weakref
import gevent
import gevent.exceptions
from gevent.lock import Semaphore
from gevent.lock import BoundedSemaphore
import gevent.testing as greentest
from gevent.testing import timing
class TestSemaphore(greentest.TestCase):
    """Basic single-greenlet Semaphore behavior."""

    # issue 39
    def test_acquire_returns_false_after_timeout(self):
        s = Semaphore(value=0)
        result = s.acquire(timeout=0.01)
        # Use a unittest assertion rather than a bare ``assert`` so the
        # check still runs under ``python -O`` and reports consistently
        # with the rest of this class.
        self.assertIs(result, False)

    def test_release_twice(self):
        s = Semaphore()
        result = []
        s.rawlink(lambda s: result.append('a'))
        s.release()
        s.rawlink(lambda s: result.append('b'))
        s.release()
        gevent.sleep(0.001)
        # The order, though, is not guaranteed.
        self.assertEqual(sorted(result), ['a', 'b'])

    def test_semaphore_weakref(self):
        s = Semaphore()
        r = weakref.ref(s)
        self.assertEqual(s, r())

    @greentest.ignores_leakcheck
    def test_semaphore_in_class_with_del(self):
        # Issue #704. This used to crash the process
        # under PyPy through at least 4.0.1 if the Semaphore
        # was implemented with Cython.
        class X(object):
            def __init__(self):
                self.s = Semaphore()

            def __del__(self):
                self.s.acquire()

        X()
        import gc
        gc.collect()
        gc.collect()

    def test_rawlink_on_unacquired_runs_notifiers(self):
        # https://github.com/gevent/gevent/issues/1287
        # Rawlinking a ready semaphore should fire immediately,
        # not raise LoopExit
        s = Semaphore()
        gevent.wait([s])
class TestSemaphoreMultiThread(greentest.TestCase):
    """Acquiring a semaphore created in one native thread from another.

    Subclassed to run the same scenarios against BoundedSemaphore.
    """
    # Tests that the object can be acquired correctly across
    # multiple threads.
    # Used as a base class.
    # See https://github.com/gevent/gevent/issues/1437
    def _getTargetClass(self):
        return Semaphore
    def _makeOne(self):
        # Create an object that is associated with the current hub. If
        # we don't do this now, it gets initialized lazily the first
        # time it would have to block, which, in the event of threads,
        # would be from an arbitrary thread.
        return self._getTargetClass()(1)
    def _makeThreadMain(self, thread_running, thread_acquired, sem,
                        acquired, exc_info,
                        **thread_acquire_kwargs):
        # Returns a function for a background thread: it signals
        # *thread_running*, tries to acquire *sem* (appending the result
        # to *acquired*), records any exception in *exc_info*, cleans up
        # its hub, and finally signals *thread_acquired*.
        from gevent._hub_local import get_hub_if_exists
        import sys
        def thread_main():
            thread_running.set()
            try:
                acquired.append(
                    sem.acquire(**thread_acquire_kwargs)
                )
            except:
                exc_info[:] = sys.exc_info()
                raise # Print
            finally:
                hub = get_hub_if_exists()
                if hub is not None:
                    hub.join()
                    hub.destroy(destroy_loop=True)
                thread_acquired.set()
        return thread_main
    IDLE_ITERATIONS = 5
    def _do_test_acquire_in_one_then_another(self,
                                             release=True,
                                             require_thread_acquired_to_finish=False,
                                             **thread_acquire_kwargs):
        from gevent import monkey
        self.assertFalse(monkey.is_module_patched('threading'))
        import threading
        thread_running = threading.Event()
        thread_acquired = threading.Event()
        sem = self._makeOne()
        # Make future acquires block
        sem.acquire()
        exc_info = []
        acquired = []
        t = threading.Thread(target=self._makeThreadMain(
            thread_running, thread_acquired, sem,
            acquired, exc_info,
            **thread_acquire_kwargs
        ))
        t.daemon = True
        t.start()
        thread_running.wait(10) # implausibly large time
        if release:
            sem.release()
            # Spin the loop to be sure the release gets through.
            # (Release schedules the notifier to run, and when the
            # notifier run it sends the async notification to the
            # other thread. Depending on exactly where we are in the
            # event loop, and the limit to the number of callbacks
            # that get run (including time-based) the notifier may or
            # may not be immediately ready to run, so this can take up
            # to two iterations.)
            for _ in range(self.IDLE_ITERATIONS):
                gevent.idle()
                if thread_acquired.wait(timing.LARGE_TICK):
                    break
            self.assertEqual(acquired, [True])
        if not release and thread_acquire_kwargs.get("timeout"):
            # Spin the loop to be sure that the timeout has a chance to
            # process. Interleave this with something that drops the GIL
            # so the background thread has a chance to notice that.
            for _ in range(self.IDLE_ITERATIONS):
                gevent.idle()
                if thread_acquired.wait(timing.LARGE_TICK):
                    break
        thread_acquired.wait(timing.LARGE_TICK * 5)
        if require_thread_acquired_to_finish:
            self.assertTrue(thread_acquired.is_set())
        try:
            self.assertEqual(exc_info, [])
        finally:
            # Don't keep a traceback (and everything it references) alive.
            exc_info = None
        return sem, acquired
    def test_acquire_in_one_then_another(self):
        self._do_test_acquire_in_one_then_another(release=True)
    def test_acquire_in_one_then_another_timed(self):
        sem, acquired_in_thread = self._do_test_acquire_in_one_then_another(
            release=False,
            require_thread_acquired_to_finish=True,
            timeout=timing.SMALLEST_RELIABLE_DELAY)
        self.assertEqual([False], acquired_in_thread)
        # This doesn't, of course, notify anything, because
        # the waiter has given up.
        sem.release()
        notifier = getattr(sem, '_notifier', None)
        self.assertIsNone(notifier)
    def test_acquire_in_one_wait_greenlet_wait_thread_gives_up(self):
        # The waiter in the thread both arrives and gives up while
        # the notifier is already running...or at least, that's what
        # we'd like to arrange, but the _notify_links function doesn't
        # drop the GIL/object lock, so the other thread is stuck and doesn't
        # actually get to call into the acquire method.
        from gevent import monkey
        self.assertFalse(monkey.is_module_patched('threading'))
        import threading
        sem = self._makeOne()
        # Make future acquires block
        sem.acquire()
        def greenlet_one():
            ack = sem.acquire()
            # We're running in the notifier function right now. It switched to
            # us.
            thread.start()
            gevent.sleep(timing.LARGE_TICK)
            return ack
        exc_info = []
        acquired = []
        glet = gevent.spawn(greenlet_one)
        thread = threading.Thread(target=self._makeThreadMain(
            threading.Event(), threading.Event(),
            sem,
            acquired, exc_info,
            timeout=timing.LARGE_TICK
        ))
        thread.daemon = True
        gevent.idle()
        sem.release()
        glet.join()
        for _ in range(3):
            gevent.idle()
        thread.join(timing.LARGE_TICK)
        self.assertEqual(glet.value, True)
        self.assertEqual([], exc_info)
        self.assertEqual([False], acquired)
        self.assertTrue(glet.dead, glet)
        glet = None
    def assertOneHasNoHub(self, sem):
        self.assertIsNone(sem.hub, sem)
    @greentest.skipOnPyPyOnWindows("Flaky there; can't reproduce elsewhere")
    def test_dueling_threads(self, acquire_args=(), create_hub=None):
        # pylint:disable=too-many-locals,too-many-statements
        # Threads doing nothing but acquiring and releasing locks, without
        # having any other greenlets to switch to.
        # https://github.com/gevent/gevent/issues/1698
        from gevent import monkey
        from gevent._hub_local import get_hub_if_exists
        self.assertFalse(monkey.is_module_patched('threading'))
        import threading
        from time import sleep as native_sleep
        sem = self._makeOne()
        self.assertOneHasNoHub(sem)
        count = 10000
        results = [-1, -1]
        run = True
        def do_it(ix):
            if create_hub:
                gevent.get_hub()
            try:
                for i in range(count):
                    if not run:
                        break
                    acquired = sem.acquire(*acquire_args)
                    assert acquire_args or acquired
                    if acquired:
                        sem.release()
                    results[ix] = i
                    if not create_hub:
                        # We don't artificially create the hub.
                        self.assertIsNone(
                            get_hub_if_exists(),
                            (get_hub_if_exists(), ix, i)
                        )
                    if create_hub and i % 10 == 0:
                        gevent.sleep(timing.SMALLEST_RELIABLE_DELAY)
                    elif i % 100 == 0:
                        native_sleep(timing.SMALLEST_RELIABLE_DELAY)
            except Exception as ex: # pylint:disable=broad-except
                import traceback; traceback.print_exc()
                results[ix] = str(ex)
                ex = None
            finally:
                hub = get_hub_if_exists()
                if hub is not None:
                    hub.join()
                    hub.destroy(destroy_loop=True)
        t1 = threading.Thread(target=do_it, args=(0,))
        t1.daemon = True
        t2 = threading.Thread(target=do_it, args=(1,))
        t2.daemon = True
        t1.start()
        t2.start()
        t1.join(1)
        t2.join(1)
        while t1.is_alive() or t2.is_alive():
            cur = list(results)
            t1.join(7)
            t2.join(7)
            if cur == results:
                # Hmm, after two seconds, no progress
                run = False
                break
        self.assertEqual(results, [count - 1, count - 1])
    def test_dueling_threads_timeout(self):
        self.test_dueling_threads((True, 4))
    def test_dueling_threads_with_hub(self):
        self.test_dueling_threads(create_hub=True)
    # XXX: Need a test with multiple greenlets in a non-primary
    # thread. Things should work, just very slowly; instead of moving through
    # greenlet.switch(), they'll be moving with async watchers.
class TestBoundedSemaphoreMultiThread(TestSemaphoreMultiThread):
    # Run the same cross-thread scenarios against BoundedSemaphore.
    def _getTargetClass(self):
        return BoundedSemaphore
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase):
    # When the C extension is available, the public Semaphore must come
    # from the compiled module, not the pure-Python fallback.
    def test_c_extension(self):
        self.assertEqual(Semaphore.__module__,
                         'gevent._gevent_c_semaphore')
class SwitchWithFixedHash(object):
    """Callable stand-in for ``greenlet.switch`` carrying a chosen hash code.

    Replaces greenlet.switch with a callable object whose hash code we
    control.  That only mattered when these objects were stored in a
    hash-based container (which we used to do, but no longer do because
    it doesn't preserve order) -- so actually hashing or comparing one
    is now a test error.
    """

    def __init__(self, greenlet, hashcode):
        self.switch = greenlet.switch
        self.hashcode = hashcode

    def __hash__(self):
        raise AssertionError

    def __eq__(self, other):
        raise AssertionError

    def __call__(self, *args, **kwargs):
        # Pure delegation to the wrapped switch method.
        return self.switch(*args, **kwargs)

    def __repr__(self):
        return repr(self.switch)
class FirstG(gevent.Greenlet):
    # A greenlet whose switch method will have a low hashcode.
    hashcode = 10
    def __init__(self, *args, **kwargs):
        gevent.Greenlet.__init__(self, *args, **kwargs)
        # Replace the bound switch method with the fixed-hash wrapper.
        self.switch = SwitchWithFixedHash(self, self.hashcode)
class LastG(FirstG):
    # A greenlet whose switch method will have a high hashcode.
    # (See SwitchWithFixedHash for why the hash code is controlled.)
    hashcode = 12
def acquire_then_exit(sem, should_quit):
    # Take the semaphore once and tell the other greenlets to stop.
    sem.acquire()
    should_quit.append(True)
def acquire_then_spawn(sem, should_quit):
    # Ping-pong the semaphore with release_then_spawn until told to quit.
    if should_quit:
        return
    sem.acquire()
    g = FirstG.spawn(release_then_spawn, sem, should_quit)
    g.join()
def release_then_spawn(sem, should_quit):
    # Counterpart of acquire_then_spawn: release, then spawn another acquirer.
    sem.release()
    if should_quit: # pragma: no cover
        return
    g = FirstG.spawn(acquire_then_spawn, sem, should_quit)
    g.join()
class TestSemaphoreFair(greentest.TestCase):
    """The semaphore must hand off to waiters in FIFO (fair) order."""
    def test_fair_or_hangs(self):
        # If the lock isn't fair, this hangs, spinning between
        # the last two greenlets.
        # See https://github.com/gevent/gevent/issues/1487
        sem = Semaphore()
        should_quit = []
        keep_going1 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
        keep_going2 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
        exiting = LastG.spawn(acquire_then_exit, sem, should_quit)
        # With fair ordering 'exiting' eventually gets the semaphore and
        # sets should_quit; the remaining waiter then has nothing to
        # switch to, which surfaces as LoopExit.
        with self.assertRaises(gevent.exceptions.LoopExit):
            gevent.joinall([keep_going1, keep_going2, exiting])
        self.assertTrue(exiting.dead, exiting)
        self.assertTrue(keep_going2.dead, keep_going2)
        self.assertFalse(keep_going1.dead, keep_going1)
        # Unblock and dispose of the leftover greenlets.
        sem.release()
        keep_going1.kill()
        keep_going2.kill()
        exiting.kill()
        gevent.idle()
if __name__ == '__main__':
greentest.main()
| 13,756 | 31.293427 | 85 | py |
gevent | gevent-master/src/gevent/tests/test__core_stat.py | from __future__ import print_function
import os
import tempfile
import time
import gevent
import gevent.core
import gevent.testing as greentest
import gevent.testing.flaky
#pylint: disable=protected-access
DELAY = 0.5
WIN = greentest.WIN
LIBUV = greentest.LIBUV
class TestCoreStat(greentest.TestCase):
    """The loop's ``stat`` watcher must fire when the watched file changes."""
    __timeout__ = greentest.LARGE_TIMEOUT
    def setUp(self):
        super(TestCoreStat, self).setUp()
        fd, path = tempfile.mkstemp(suffix='.gevent_test_core_stat')
        os.close(fd)
        self.temp_path = path
        self.hub = gevent.get_hub()
        # If we don't specify an interval, we default to zero.
        # libev interprets that as meaning to use its default interval,
        # which is about 5 seconds. If we go below it's minimum check
        # threshold, it bumps it up to the minimum.
        self.watcher = self.hub.loop.stat(self.temp_path, interval=-1)
    def tearDown(self):
        self.watcher.close()
        if os.path.exists(self.temp_path):
            os.unlink(self.temp_path)
        super(TestCoreStat, self).tearDown()
    def _write(self):
        # Touch the watched file with unbuffered output so the change
        # hits the filesystem immediately.
        with open(self.temp_path, 'wb', buffering=0) as f:
            f.write(b'x')
    def _check_attr(self, name, none):
        # Deals with the complex behaviour of the 'attr' and 'prev'
        # attributes on Windows. This codifies it, rather than simply letting
        # the test fail, so we know exactly when and what changes it.
        try:
            x = getattr(self.watcher, name)
        except ImportError:
            if WIN:
                # the 'posix' module is not available
                pass
            else:
                raise
        else:
            if WIN and not LIBUV:
                # The ImportError is only raised for the first time;
                # after that, the attribute starts returning None
                self.assertIsNone(x, "Only None is supported on Windows")
            if none:
                self.assertIsNone(x, name)
            else:
                self.assertIsNotNone(x, name)
    def _wait_on_greenlet(self, func, *greenlet_args):
        # Schedule *func* to run after DELAY, then block on the stat
        # watcher; it must not fire before the change actually happens.
        start = time.time()
        self.hub.loop.update_now()
        greenlet = gevent.spawn_later(DELAY, func, *greenlet_args)
        with gevent.Timeout(5 + DELAY + 0.5):
            self.hub.wait(self.watcher)
        now = time.time()
        self.assertGreaterEqual(now, start, "Time must move forward")
        wait_duration = now - start
        reaction = wait_duration - DELAY
        if reaction <= 0.0:
            # Sigh. This is especially true on PyPy on Windows
            raise gevent.testing.flaky.FlakyTestRaceCondition(
                "Bad timer resolution (on Windows?), test is useless. Start %s, now %s" % (start, now))
        self.assertGreaterEqual(
            reaction, 0.0,
            'Watcher %s reacted too early: %.3fs' % (self.watcher, reaction))
        greenlet.join()
    def test_watcher_basics(self):
        watcher = self.watcher
        filename = self.temp_path
        self.assertEqual(watcher.path, filename)
        filenames = filename if isinstance(filename, bytes) else filename.encode('ascii')
        self.assertEqual(watcher._paths, filenames)
        self.assertEqual(watcher.interval, -1)
    def test_write(self):
        self._wait_on_greenlet(self._write)
        self._check_attr('attr', False)
        self._check_attr('prev', False)
        # The watcher interval changed after it started; -1 is illegal
        self.assertNotEqual(self.watcher.interval, -1)
    def test_unlink(self):
        # Deleting the file must also wake the watcher; 'attr' is then None.
        self._wait_on_greenlet(os.unlink, self.temp_path)
        self._check_attr('attr', True)
        self._check_attr('prev', False)
if __name__ == '__main__':
greentest.main()
| 3,754 | 30.554622 | 103 | py |
gevent | gevent-master/src/gevent/tests/test__socket_close.py | import gevent
from gevent import socket
from gevent import server
import gevent.testing as greentest
# XXX also test: send, sendall, recvfrom, recvfrom_into, sendto
def readall(sock, _):
    """Server handler: drain *sock* until EOF, then close it.

    The second (address) argument is ignored.
    """
    while True:
        chunk = sock.recv(1024) # pragma: no cover we never actually send the data
        if not chunk:
            break
    sock.close()
class Test(greentest.TestCase):
    """Behavior of a socket that is closed while a greenlet blocks in recv."""
    error_fatal = False
    def setUp(self):
        self.server = server.StreamServer(greentest.DEFAULT_BIND_ADDR_TUPLE, readall)
        self.server.start()
    def tearDown(self):
        self.server.stop()
    def test_recv_closed(self):
        # Closing the socket must wake the blocked recv with EBADF.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((greentest.DEFAULT_CONNECT_HOST, self.server.server_port))
        receiver = gevent.spawn(sock.recv, 25)
        try:
            gevent.sleep(0.001)
            sock.close()
            receiver.join(timeout=0.1)
            self.assertTrue(receiver.ready(), receiver)
            self.assertEqual(receiver.value, None)
            self.assertIsInstance(receiver.exception, socket.error)
            self.assertEqual(receiver.exception.errno, socket.EBADF)
        finally:
            receiver.kill()
    # XXX: This is possibly due to the bad behaviour of small sleeps?
    # The timeout is the global test timeout, 10s
    @greentest.skipOnLibuvOnCI("Sometimes randomly times out")
    def test_recv_twice(self):
        # A second recv while another greenlet is already blocked in recv
        # on the same socket is a programming error and must be rejected.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((greentest.DEFAULT_CONNECT_HOST, self.server.server_port))
        receiver = gevent.spawn(sock.recv, 25)
        try:
            gevent.sleep(0.001)
            self.assertRaises(AssertionError, sock.recv, 25)
            self.assertRaises(AssertionError, sock.recv, 25)
        finally:
            receiver.kill()
            sock.close()
if __name__ == '__main__':
greentest.main()
| 1,862 | 30.576271 | 85 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_queue.py | # Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
from gevent import monkey
monkey.patch_all()
from gevent import queue as Queue
import threading
import time
import unittest
QUEUE_SIZE = 5
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
    """Thread that waits briefly, signals that it started, then calls *fn*."""

    def __init__(self, fn, args):
        threading.Thread.__init__(self)
        self.fn = fn
        self.args = args
        # A gevent Event (not threading.Event) so the main greenlet can
        # wait on it cooperatively.
        #self.startedEvent = threading.Event()
        from gevent.event import Event
        self.startedEvent = Event()

    def run(self):
        # The sleep isn't necessary, but is intended to give the blocking
        # function in the main thread a chance at actually blocking before
        # we unclog it. But if the sleep is longer than the timeout-based
        # tests wait in their blocking functions, those tests will fail.
        # So we give them much longer timeout values compared to the
        # sleep here (I aimed at 10 seconds for blocking functions --
        # they should never actually wait that long - they should make
        # progress as soon as we call self.fn()).
        time.sleep(0.01)
        self.startedEvent.set()
        self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures, and one of those went rarely seen but
# undiagnosed for years. Now block_func must be unexceptional. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin(object):
    """Helpers that run a blocking function in the main thread while a
    trigger function unclogs it from a :class:`_TriggerThread`."""

    def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
        """Run *block_func* and return its result.

        *trigger_func* runs in a separate thread and must change queue
        state so that *block_func* can make progress and return.
        """
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        self.result = block_func(*block_args)
        # If block_func returned before our thread made the call, we failed!
        # NOTE: is_set() replaces the camelCase isSet() alias, which is
        # deprecated; this matches the is_alive() usage below.
        if not self.t.startedEvent.is_set():
            self.fail("blocking function '%r' appeared not to block" %
                      block_func)
        self.t.join(10) # make sure the thread terminates
        if self.t.is_alive():
            self.fail("trigger function '%r' appeared to not return" %
                      trigger_func)
        return self.result

    # Call this instead if block_func is supposed to raise an exception.
    def do_exceptional_blocking_test(self, block_func, block_args, trigger_func,
                                     trigger_args, expected_exception_class):
        """Like :meth:`do_blocking_test`, but *block_func* is expected to
        raise *expected_exception_class*."""
        self.t = _TriggerThread(trigger_func, trigger_args)
        self.t.start()
        try:
            with self.assertRaises(expected_exception_class):
                block_func(*block_args)
        finally:
            self.t.join(10) # make sure the thread terminates
            if self.t.is_alive():
                self.fail("trigger function '%r' appeared to not return" %
                          trigger_func)
            if not self.t.startedEvent.is_set():
                self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
    """Shared queue behavior tests; subclasses override ``type2test``."""
    type2test = Queue.Queue
    def setUp(self):
        self.cum = 0
        self.cumlock = threading.Lock()
    def simple_queue_test(self, q):
        """Exercise put/get/peek ordering, full/empty, and blocking behavior.

        *q* must be empty on entry and is left empty on exit, so the test
        can be run twice against the same instance.
        """
        if not q.empty():
            raise RuntimeError("Call this function with an empty queue")
        # I guess we better check things actually queue correctly a little :)
        q.put(111)
        q.put(333)
        q.put(222)
        q.put(444)
        # Expected front element per queue flavor (FIFO/LIFO/priority).
        target_first_items = dict(
            Queue=111,
            LifoQueue=444,
            PriorityQueue=111)
        actual_first_item = (q.peek(), q.get())
        self.assertEqual(actual_first_item,
                         (target_first_items[q.__class__.__name__],
                          target_first_items[q.__class__.__name__]),
                         "q.peek() and q.get() are not equal!")
        target_order = dict(Queue=[333, 222, 444],
                            LifoQueue=[222, 333, 111],
                            PriorityQueue=[222, 333, 444])
        actual_order = [q.get(), q.get(), q.get()]
        self.assertEqual(actual_order, target_order[q.__class__.__name__],
                         "Didn't seem to queue the correct data!")
        for i in range(QUEUE_SIZE-1):
            q.put(i)
        self.assertFalse(q.empty(), "Queue should not be empty")
        self.assertFalse(q.full(), "Queue should not be full")
        q.put(999)
        self.assertTrue(q.full(), "Queue should be full")
        try:
            q.put(888, block=0)
            self.fail("Didn't appear to block with a full queue")
        except Queue.Full:
            pass
        try:
            q.put(888, timeout=0.01)
            self.fail("Didn't appear to time-out with a full queue")
        except Queue.Full:
            pass
        self.assertEqual(q.qsize(), QUEUE_SIZE)
        # Test a blocking put
        self.do_blocking_test(q.put, (888,), q.get, ())
        self.do_blocking_test(q.put, (888, True, 10), q.get, ())
        # Empty it
        for i in range(QUEUE_SIZE):
            q.get()
        self.assertTrue(q.empty(), "Queue should be empty")
        try:
            q.get(block=0)
            self.fail("Didn't appear to block with an empty queue")
        except Queue.Empty:
            pass
        try:
            q.get(timeout=0.01)
            self.fail("Didn't appear to time-out with an empty queue")
        except Queue.Empty:
            pass
        # Test a blocking get
        self.do_blocking_test(q.get, (), q.put, ('empty',))
        self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
    def worker(self, q):
        """Consume items from *q*, accumulating into ``self.cum``; None stops."""
        while True:
            x = q.get()
            if x is None:
                q.task_done()
                return
            # The += below is not atomic in general; this test relies on
            # gevent's cooperative scheduling so the lock is left unused.
            #with self.cumlock:
            self.cum += x
            q.task_done()
    def queue_join_test(self, q):
        """Verify that q.join() blocks until all enqueued tasks are done."""
        self.cum = 0
        for i in (0, 1):
            threading.Thread(target=self.worker, args=(q,)).start()
        for i in range(100):
            q.put(i)
        q.join()
        self.assertEqual(self.cum, sum(range(100)),
                         "q.join() did not block until all tasks were done")
        for i in (0, 1):
            q.put(None) # instruct the threads to close
        q.join() # verify that you can join twice
    def test_queue_task_done(self):
        # Test to make sure a queue task completed successfully.
        q = Queue.JoinableQueue() # self.type2test()
        # XXX the same test in subclasses
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")
    def test_queue_join(self):
        # Test that a queue join()s successfully, and before anything else
        # (done twice for insurance).
        q = Queue.JoinableQueue() # self.type2test()
        # XXX the same test in subclass
        self.queue_join_test(q)
        self.queue_join_test(q)
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")
    def test_queue_task_done_with_items(self):
        # Passing items to the constructor allows for as
        # many task_done calls. Joining before all the task done
        # are called returns false
        # XXX the same test in subclass
        l = [1, 2, 3]
        q = Queue.JoinableQueue(items=l)
        for i in l:
            self.assertFalse(q.join(timeout=0.001))
            self.assertEqual(i, q.get())
            q.task_done()
        try:
            q.task_done()
        except ValueError:
            pass
        else:
            self.fail("Did not detect task count going negative")
        self.assertTrue(q.join(timeout=0.001))
    def test_simple_queue(self):
        # Do it a couple of times on the same queue.
        # Done twice to make sure works with same instance reused.
        q = self.type2test(QUEUE_SIZE)
        self.simple_queue_test(q)
        self.simple_queue_test(q)
class LifoQueueTest(BaseQueueTest):
    # Run the shared queue tests against the LIFO (stack-order) queue.
    type2test = Queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
    """Run the shared queue tests against the priority-ordered queue."""
    type2test = Queue.PriorityQueue
    def test__init(self):
        # Items handed to the constructor must come back in priority
        # order, regardless of the order they were supplied in.
        low, high = (1, 'a'), (2, 'b')
        q = self.type2test(items=[high, low])
        self.assertTupleEqual(low, q.get_nowait())
        self.assertTupleEqual(high, q.get_nowait())
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
    """Injected by :class:`FailingQueue` to simulate a put/get failure."""
class FailingQueue(Queue.Queue):
    """A Queue whose next ``_put`` or ``_get`` can be armed to fail once.

    Setting ``fail_next_put`` / ``fail_next_get`` makes the very next
    corresponding operation raise :class:`FailingQueueException`; the
    flag is cleared before raising, so only one operation fails.
    """
    def __init__(self, *args):
        self.fail_next_put = False
        self.fail_next_get = False
        Queue.Queue.__init__(self, *args)
    def _put(self, item):
        if not self.fail_next_put:
            return Queue.Queue._put(self, item)
        self.fail_next_put = False
        raise FailingQueueException("You Lose")
    def _get(self):
        if not self.fail_next_get:
            return Queue.Queue._get(self)
        self.fail_next_get = False
        raise FailingQueueException("You Lose")
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
    """Verify a queue survives injected put/get failures undamaged."""
    def failing_queue_test(self, q):
        """Drive *q* (a FailingQueue) through every failure mode.

        *q* must be empty on entry and is left empty on exit, so the
        test can be run twice against the same instance.
        """
        if not q.empty():
            raise RuntimeError("Call this function with an empty queue")
        for i in range(QUEUE_SIZE-1):
            q.put(i)
        # Test a failing non-blocking put.
        q.fail_next_put = True
        with self.assertRaises(FailingQueueException):
            q.put("oops", block=0)
        q.fail_next_put = True
        with self.assertRaises(FailingQueueException):
            q.put("oops", timeout=0.1)
        q.put(999)
        self.assertTrue(q.full(), "Queue should be full")
        # Test a failing blocking put
        q.fail_next_put = True
        with self.assertRaises(FailingQueueException):
            self.do_blocking_test(q.put, (888,), q.get, ())
        # Check the Queue isn't damaged.
        # put failed, but get succeeded - re-add
        q.put(999)
        # Test a failing timeout put
        q.fail_next_put = True
        self.do_exceptional_blocking_test(q.put, (888, True, 10), q.get, (),
                                          FailingQueueException)
        # Check the Queue isn't damaged.
        # put failed, but get succeeded - re-add
        q.put(999)
        self.assertTrue(q.full(), "Queue should be full")
        q.get()
        self.assertFalse(q.full(), "Queue should not be full")
        q.put(999)
        self.assertTrue(q.full(), "Queue should be full")
        # Test a blocking put
        self.do_blocking_test(q.put, (888,), q.get, ())
        # Empty it
        for i in range(QUEUE_SIZE):
            q.get()
        self.assertTrue(q.empty(), "Queue should be empty")
        q.put("first")
        q.fail_next_get = True
        with self.assertRaises(FailingQueueException):
            q.get()
        self.assertFalse(q.empty(), "Queue should not be empty")
        q.fail_next_get = True
        with self.assertRaises(FailingQueueException):
            q.get(timeout=0.1)
        self.assertFalse(q.empty(), "Queue should not be empty")
        q.get()
        self.assertTrue(q.empty(), "Queue should be empty")
        q.fail_next_get = True
        self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
                                          FailingQueueException)
        # put succeeded, but get failed.
        self.assertFalse(q.empty(), "Queue should not be empty")
        q.get()
        self.assertTrue(q.empty(), "Queue should be empty")
    def test_failing_queue(self):
        # Test to make sure a queue is functioning correctly.
        # Done twice to the same instance.
        q = FailingQueue(QUEUE_SIZE)
        self.failing_queue_test(q)
        self.failing_queue_test(q)
if __name__ == "__main__":
    # Standard unittest entry point; discovery picks up the classes above.
    unittest.main()
| 12,337 | 36.162651 | 83 | py |
gevent | gevent-master/src/gevent/tests/test__threading_before_monkey.py | # If stdlib threading is imported *BEFORE* monkey patching,
# we can still get the current (main) thread, and it's not a DummyThread.
import threading
from gevent import monkey
monkey.patch_all() # pragma: testrunner-no-monkey-combine
import gevent.testing as greentest
class Test(greentest.TestCase):
    """Importing threading before patching must still yield a real main thread."""
    def test_main_thread(self):
        current = threading.current_thread()
        # The main thread must not have been demoted to a DummyThread by
        # the monkey-patch (which happened at import time, above).
        self.assertFalse(isinstance(current, threading._DummyThread))
        self.assertTrue(isinstance(current, monkey.get_original('threading', 'Thread')))
        # in 3.4, if the patch is incorrectly done, getting the repr
        # of the thread fails
        repr(current)
if __name__ == '__main__':
    # Run via gevent's test harness rather than plain unittest.
    greentest.main()
| 714 | 28.791667 | 88 | py |
gevent | gevent-master/src/gevent/tests/test__monkey_ssl_warning.py | import unittest
import warnings
# This file should only have this one test in it
# because we have to be careful about our imports
# and because we need to be careful about our patching.
class Test(unittest.TestCase):
    """Monkey-patching after pkg_resources/ssl import must not warn."""
    def test_with_pkg_resources(self):
        # Issue 1108: Python 2, importing pkg_resources,
        # as is done for namespace packages, imports ssl,
        # leading to an unwanted SSL warning.
        # This is a deprecated API though.
        __import__('pkg_resources')
        from gevent import monkey
        # Nothing may have been patched yet; otherwise the test is invalid.
        self.assertFalse(monkey.saved)
        with warnings.catch_warnings(record=True) as issued_warnings:
            warnings.simplefilter('always')
            # Patch twice: the second call must also stay silent.
            monkey.patch_all()
            monkey.patch_all()
        issued_warnings = [x for x in issued_warnings
                           if isinstance(x.message, monkey.MonkeyPatchWarning)]
        self.assertFalse(issued_warnings, [str(i) for i in issued_warnings])
        self.assertEqual(0, len(issued_warnings))
if __name__ == '__main__':
    # Standard unittest entry point.
    unittest.main()
| 1,065 | 28.611111 | 79 | py |
gevent | gevent-master/src/gevent/tests/test__example_portforwarder.py | from __future__ import print_function, absolute_import
from gevent import monkey; monkey.patch_all()
import signal
import socket
from time import sleep
import gevent
from gevent.server import StreamServer
import gevent.testing as greentest
from gevent.testing import util
@greentest.skipOnLibuvOnCIOnPyPy("Timing issues sometimes lead to connection refused")
class Test(util.TestServer):
    """Run the portforwarder example and verify it forwards and shuts down cleanly."""
    example = 'portforwarder.py'
    # [listen on, forward to]
    example_args = ['127.0.0.1:10011', '127.0.0.1:10012']
    if greentest.WIN:
        from subprocess import CREATE_NEW_PROCESS_GROUP
        # Must be in a new process group to use CTRL_C_EVENT, otherwise
        # we get killed too
        start_kwargs = {'creationflags': CREATE_NEW_PROCESS_GROUP}
    def after(self):
        # On Windows the process is terminated abruptly, so any exit code
        # is acceptable; elsewhere it must exit cleanly with 0.
        if greentest.WIN:
            self.assertIsNotNone(self.popen.poll())
        else:
            self.assertEqual(self.popen.poll(), 0)
    def _run_all_tests(self):
        log = []
        def handle(sock, _address):
            # Destination server: record everything forwarded to us.
            while True:
                data = sock.recv(1024)
                print('got %r' % data)
                if not data:
                    break
                log.append(data)
        server = StreamServer(self.example_args[1], handle)
        server.start()
        try:
            conn = socket.create_connection(('127.0.0.1', 10011))
            conn.sendall(b'msg1')
            sleep(0.1)
            # On Windows, SIGTERM actually abruptly terminates the process;
            # it can't be caught. However, CTRL_C_EVENT results in a KeyboardInterrupt
            # being raised, so we can shut down properly.
            self.popen.send_signal(getattr(signal, 'CTRL_C_EVENT', signal.SIGTERM))
            sleep(0.1)
            # msg2 is sent after the shutdown signal; the forwarder must
            # still deliver in-flight data before exiting.
            conn.sendall(b'msg2')
            conn.close()
            with gevent.Timeout(2.1):
                self.popen.wait()
        finally:
            server.close()
        self.assertEqual([b'msg1', b'msg2'], log)
if __name__ == '__main__':
    # Run via gevent's test harness rather than plain unittest.
    greentest.main()
| 2,025 | 28.794118 | 86 | py |
gevent | gevent-master/src/gevent/tests/test__socket_ex.py | import gevent.testing as greentest
from gevent import socket
import errno
import sys
class TestClosedSocket(greentest.TestCase):
    """Sending on a closed socket must raise EBADF (or the Windows analogue)."""
    switch_expected = False
    def test(self):
        sock = socket.socket()
        sock.close()
        try:
            sock.send(b'a', timeout=1)
            self.fail("Should raise socket error")
        except OSError as ex:
            if ex.args[0] != errno.EBADF:
                if sys.platform.startswith('win'):
                    # Windows/Py3 raises "OSError: [WinError 10038] "
                    # which is not standard and not what it does
                    # on Py2.
                    pass
                else:
                    raise
class TestRef(greentest.TestCase):
    """Clearing ``sock.ref`` must propagate to both underlying loop watchers."""
    switch_expected = False
    def test(self):
        # pylint:disable=no-member
        s = socket.socket()
        # Sockets keep the event loop referenced by default.
        self.assertTrue(s.ref)
        s.ref = False
        # The flag must now be clear on the socket and on both watchers.
        for owner in (s, s._read_event, s._write_event):
            self.assertFalse(owner.ref)
        s.close()
if __name__ == '__main__':
    # Run via gevent's test harness rather than plain unittest.
    greentest.main()
| 1,110 | 23.688889 | 69 | py |
gevent | gevent-master/src/gevent/tests/test__makefile_ref.py | from __future__ import print_function
import os
from gevent import monkey; monkey.patch_all()
import socket
import ssl
import threading
import errno
import weakref
import gevent.testing as greentest
from gevent.testing.params import DEFAULT_BIND_ADDR_TUPLE
from gevent.testing.params import DEFAULT_CONNECT
from gevent.testing.sockets import tcp_listener
dirname = os.path.dirname(os.path.abspath(__file__))
# Self-signed key+certificate used by the SSL tests below.
CERTFILE = os.path.join(dirname, '2_7_keycert.pem')
pid = os.getpid()
PY3 = greentest.PY3
PYPY = greentest.PYPY
CPYTHON = not PYPY
PY2 = not PY3
# Acceptable types for a socket fileno; on Python 2 it can be ``long``.
fd_types = int
if PY3:
    long = int
    fd_types = (int, long)
WIN = greentest.WIN
from gevent.testing import get_open_files
try:
    import psutil
except ImportError:
    # psutil is optional; the open-socket assertions degrade without it.
    psutil = None
# wrap_socket() is considered deprecated in 3.9
# pylint:disable=deprecated-method
class Test(greentest.TestCase):
    """Base class providing open/closed file-descriptor assertions."""
    # Extra psutil connection states treated as "open" by _assert_sock_open.
    extra_allowed_open_states = ()
    def tearDown(self):
        self.extra_allowed_open_states = ()
        super(Test, self).tearDown()
    def assert_raises_EBADF(self, func):
        """Assert calling *func* raises EBADF (or the Windows 10038 analogue)."""
        try:
            result = func()
        except OSError as ex:
            # Windows/Py3 raises "OSError: [WinError 10038]"
            if ex.args[0] == errno.EBADF:
                return
            if WIN and ex.args[0] == 10038:
                return
            raise
        raise AssertionError('NOT RAISED EBADF: %r() returned %r' % (func, result))
    if WIN or (PYPY and greentest.LINUX):
        def __assert_fd_open(self, fileno):
            # We can't detect open file descriptors on Windows.
            # On PyPy 3.6-7.3 on Travis CI (linux), for some reason the
            # client file descriptors don't always show as open. Don't know why,
            # was fine in 7.2.
            # On March 23 2020 we had to pin psutil back to a version
            # for PyPy 2 (see setup.py) and this same problem started happening there.
            # PyPy on macOS was unaffected.
            pass
    else:
        def __assert_fd_open(self, fileno):
            assert isinstance(fileno, fd_types)
            open_files = get_open_files()
            if fileno not in open_files:
                raise AssertionError('%r is not open:\n%s' % (fileno, open_files['data']))
    def assert_fd_closed(self, fileno):
        """Assert the raw file descriptor *fileno* is no longer open."""
        assert isinstance(fileno, fd_types), repr(fileno)
        assert fileno > 0, fileno
        # Here, if we're in the process of closing, don't consider it open.
        # This goes into details of psutil
        open_files = get_open_files(count_closing_as_open=False)
        if fileno in open_files:
            raise AssertionError('%r is not closed:\n%s' % (fileno, open_files['data']))
    def _assert_sock_open(self, sock):
        # requires the psutil output
        open_files = get_open_files()
        sockname = sock.getsockname()
        for x in open_files['data']:
            if getattr(x, 'laddr', None) == sockname:
                assert x.status in (psutil.CONN_LISTEN, psutil.CONN_ESTABLISHED) + self.extra_allowed_open_states, x.status
                return
        raise AssertionError("%r is not open:\n%s" % (sock, open_files['data']))
    def assert_open(self, sock, *rest):
        """Assert each argument (socket object or raw fileno) is open."""
        if isinstance(sock, fd_types):
            self.__assert_fd_open(sock)
        else:
            fileno = sock.fileno()
            assert isinstance(fileno, fd_types), fileno
            sockname = sock.getsockname()
            assert isinstance(sockname, tuple), sockname
            if not WIN:
                self.__assert_fd_open(fileno)
            else:
                self._assert_sock_open(sock)
        if rest:
            self.assert_open(rest[0], *rest[1:])
    def assert_closed(self, sock, *rest):
        """Assert each argument (socket object or raw fileno) is closed."""
        if isinstance(sock, fd_types):
            self.assert_fd_closed(sock)
        else:
            # Under Python3, the socket module returns -1 for a fileno
            # of a closed socket; under Py2 it raises
            if PY3:
                self.assertEqual(sock.fileno(), -1)
            else:
                self.assert_raises_EBADF(sock.fileno)
            self.assert_raises_EBADF(sock.getsockname)
            self.assert_raises_EBADF(sock.accept)
        if rest:
            self.assert_closed(rest[0], *rest[1:])
    def make_open_socket(self):
        """Return a bound (and, where needed, listening) verified-open socket."""
        s = socket.socket()
        try:
            s.bind(DEFAULT_BIND_ADDR_TUPLE)
            if WIN or greentest.LINUX:
                # Windows and linux (with psutil) doesn't show as open until
                # we call listen (linux with lsof accepts either)
                s.listen(1)
            self.assert_open(s, s.fileno())
        except:
            s.close()
            s = None
            raise
        return s
# Sometimes its this one, sometimes it's test_ssl. No clue why or how.
@greentest.skipOnAppVeyor("This sometimes times out for no apparent reason.")
class TestSocket(Test):
    """Plain-socket lifetime tests: close() and makefile() interactions."""
    def test_simple_close(self):
        with Closing() as closer:
            s = closer(self.make_open_socket())
            fileno = s.fileno()
            s.close()
            self.assert_closed(s, fileno)
    def test_makefile1(self):
        with Closing() as closer:
            s = closer(self.make_open_socket())
            fileno = s.fileno()
            f = closer(s.makefile())
            self.assert_open(s, fileno)
            # Under python 2, this closes socket wrapper object but not the file descriptor;
            # under python 3, both stay open
            s.close()
            if PY3:
                self.assert_open(s, fileno)
            else:
                self.assert_closed(s)
                self.assert_open(fileno)
            f.close()
            self.assert_closed(s)
            self.assert_closed(fileno)
    def test_makefile2(self):
        with Closing() as closer:
            s = closer(self.make_open_socket())
            fileno = s.fileno()
            self.assert_open(s, fileno)
            f = closer(s.makefile())
            self.assert_open(s)
            self.assert_open(s, fileno)
            f.close()
            # closing fileobject does not close the socket
            self.assert_open(s, fileno)
            s.close()
            self.assert_closed(s, fileno)
    def test_server_simple(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            def connect():
                connector.connect((DEFAULT_CONNECT, port))
            closer.running_task(threading.Thread(target=connect))
            client_socket = closer.accept(listener)
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            self.assert_closed(client_socket)
    def test_server_makefile1(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            def connect():
                connector.connect((DEFAULT_CONNECT, port))
            closer.running_task(threading.Thread(target=connect))
            client_socket = closer.accept(listener)
            fileno = client_socket.fileno()
            f = closer(client_socket.makefile())
            self.assert_open(client_socket, fileno)
            client_socket.close()
            # Under python 2, this closes socket wrapper object but not the file descriptor;
            # under python 3, both stay open
            if PY3:
                self.assert_open(client_socket, fileno)
            else:
                self.assert_closed(client_socket)
                self.assert_open(fileno)
            f.close()
            self.assert_closed(client_socket, fileno)
    def test_server_makefile2(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            def connect():
                connector.connect((DEFAULT_CONNECT, port))
            closer.running_task(threading.Thread(target=connect))
            client_socket = closer.accept(listener)
            fileno = client_socket.fileno()
            f = closer(client_socket.makefile())
            self.assert_open(client_socket, fileno)
            # closing fileobject does not close the socket
            f.close()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            self.assert_closed(client_socket, fileno)
@greentest.skipOnAppVeyor("This sometimes times out for no apparent reason.")
class TestSSL(Test):
    """SSL-wrapped socket lifetime tests mirroring TestSocket."""
    def _ssl_connect_task(self, connector, port, accepted_event):
        """Client side: connect, SSL-wrap, wait for the server, then close."""
        connector.connect((DEFAULT_CONNECT, port))
        try:
            # Note: We get ResourceWarning about 'x'
            # on Python 3 if we don't join the spawned thread
            x = ssl.SSLContext().wrap_socket(connector)
            # Wait to be fully accepted. We could otherwise raise ahead
            # of the server and close ourself before it's ready to read.
            accepted_event.wait()
        except socket.error:
            # Observed on Windows with PyPy2 5.9.0 and libuv:
            # if we don't switch in a timely enough fashion,
            # the server side runs ahead of us and closes
            # our socket first, so this fails.
            pass
        else:
            x.close()
    def _make_ssl_connect_task(self, connector, port):
        """Return an unstarted daemon thread running :meth:`_ssl_connect_task`."""
        accepted_event = threading.Event()
        t = threading.Thread(target=self._ssl_connect_task,
                             args=(connector, port, accepted_event))
        t.daemon = True
        # Stash the event on the thread so tests can release the client.
        t.accepted_event = accepted_event
        return t
    def test_simple_close(self):
        with Closing() as closer:
            s = closer(self.make_open_socket())
            fileno = s.fileno()
            s = closer(ssl.SSLContext().wrap_socket(s))
            fileno = s.fileno()
            self.assert_open(s, fileno)
            s.close()
            self.assert_closed(s, fileno)
    def test_makefile1(self):
        with Closing() as closer:
            raw_s = closer(self.make_open_socket())
            s = closer(ssl.SSLContext().wrap_socket(raw_s))
            fileno = s.fileno()
            self.assert_open(s, fileno)
            f = closer(s.makefile())
            self.assert_open(s, fileno)
            s.close()
            # The makefile() still holds the fd open until both are closed.
            self.assert_open(s, fileno)
            f.close()
            raw_s.close()
            self.assert_closed(s, fileno)
    def test_makefile2(self):
        with Closing() as closer:
            s = closer(self.make_open_socket())
            fileno = s.fileno()
            s = closer(ssl.SSLContext().wrap_socket(s))
            fileno = s.fileno()
            self.assert_open(s, fileno)
            f = closer(s.makefile())
            self.assert_open(s, fileno)
            f.close()
            # closing fileobject does not close the socket
            self.assert_open(s, fileno)
            s.close()
            self.assert_closed(s, fileno)
    def _wrap_socket(self, sock, *, keyfile, certfile, server_side=False):
        """SSL-wrap *sock* with the given key/cert loaded into a fresh context."""
        context = ssl.SSLContext()
        context.load_cert_chain(certfile=certfile, keyfile=keyfile)
        return context.wrap_socket(sock, server_side=server_side)
    def test_server_simple(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            t = self._make_ssl_connect_task(connector, port)
            closer.running_task(t)
            client_socket = closer.accept(listener)
            t.accepted_event.set()
            client_socket = closer(
                self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
                                  server_side=True))
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            self.assert_closed(client_socket, fileno)
    def test_server_makefile1(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            t = self._make_ssl_connect_task(connector, port)
            closer.running_task(t)
            client_socket = closer.accept(listener)
            t.accepted_event.set()
            client_socket = closer(
                self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
                                  server_side=True))
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            f = client_socket.makefile()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            # The makefile() keeps the underlying SSL fd alive.
            self.assert_open(client_socket, fileno)
            f.close()
            self.assert_closed(client_socket, fileno)
    def test_server_makefile2(self):
        with Closing() as closer:
            listener = closer(tcp_listener(backlog=1))
            port = listener.getsockname()[1]
            connector = closer(socket.socket())
            t = self._make_ssl_connect_task(connector, port)
            closer.running_task(t)
            t.accepted_event.set()
            client_socket = closer.accept(listener)
            client_socket = closer(
                self._wrap_socket(client_socket, keyfile=CERTFILE, certfile=CERTFILE,
                                  server_side=True))
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            f = client_socket.makefile()
            self.assert_open(client_socket, fileno)
            # Closing fileobject does not close SSLObject
            f.close()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            self.assert_closed(client_socket, fileno)
    def test_serverssl_makefile1(self):
        raw_listener = tcp_listener(backlog=1)
        fileno = raw_listener.fileno()
        port = raw_listener.getsockname()[1]
        listener = self._wrap_socket(raw_listener, keyfile=CERTFILE, certfile=CERTFILE)
        connector = socket.socket()
        t = self._make_ssl_connect_task(connector, port)
        t.start()
        with CleaningUp(t, listener, raw_listener, connector) as client_socket:
            t.accepted_event.set()
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            f = client_socket.makefile()
            self.assert_open(client_socket, fileno)
            client_socket.close()
            # The makefile() keeps the underlying SSL fd alive.
            self.assert_open(client_socket, fileno)
            f.close()
            self.assert_closed(client_socket, fileno)
    def test_serverssl_makefile2(self):
        raw_listener = tcp_listener(backlog=1)
        port = raw_listener.getsockname()[1]
        listener = self._wrap_socket(raw_listener, keyfile=CERTFILE, certfile=CERTFILE)
        accepted_event = threading.Event()
        def connect(connector=socket.socket()):
            # Client: send one message, then shut down cleanly.
            try:
                connector.connect((DEFAULT_CONNECT, port))
                s = ssl.SSLContext().wrap_socket(connector)
                accepted_event.wait()
                s.sendall(b'test_serverssl_makefile2')
                s.shutdown(socket.SHUT_RDWR)
                s.close()
            finally:
                connector.close()
        t = threading.Thread(target=connect)
        t.daemon = True
        t.start()
        client_socket = None
        with CleaningUp(t, listener, raw_listener) as client_socket:
            accepted_event.set()
            fileno = client_socket.fileno()
            self.assert_open(client_socket, fileno)
            f = client_socket.makefile()
            self.assert_open(client_socket, fileno)
            self.assertEqual(f.read(), 'test_serverssl_makefile2')
            self.assertEqual(f.read(), '')
            # Closing file object does not close the socket.
            f.close()
            if WIN and psutil:
                # Hmm?
                self.extra_allowed_open_states = (psutil.CONN_CLOSE_WAIT,)
            self.assert_open(client_socket, fileno)
            client_socket.close()
            self.assert_closed(client_socket, fileno)
class Closing(object):
    """Context manager tracking sockets/files to close (and a thread to join).

    Objects registered via :meth:`closing` (or calling the instance) are
    closed on exit; a thread registered via :meth:`running_task` is
    joined first.
    """
    def __init__(self, *init):
        self._objects = []
        for i in init:
            self.closing(i)
        self.task = None
    def accept(self, listener):
        """Accept one connection from *listener* and track the client socket."""
        client_socket, _addr = listener.accept()
        return self.closing(client_socket)
    def __enter__(self):
        # With exactly one tracked object, hand it to the ``with`` body
        # directly; otherwise expose the manager itself.
        o = self.objects()
        if len(o) == 1:
            return o[0]
        return self
    if PY2 and CPYTHON:
        # This implementation depends on refcounting
        # for things to close. Eww.
        def closing(self, o):
            self._objects.append(weakref.ref(o))
            return o
        def objects(self):
            return [r() for r in self._objects if r() is not None]
    else:
        def objects(self):
            # PyPy returns an object without __len__...
            return list(reversed(self._objects))
        def closing(self, o):
            self._objects.append(o)
            return o
    __call__ = closing
    def running_task(self, thread):
        """Start *thread* and remember it so ``__exit__`` can join it."""
        assert self.task is None
        self.task = thread
        self.task.start()
        return self.task
    def __exit__(self, t, v, tb):
        # workaround for test_server_makefile1, test_server_makefile2,
        # test_server_simple, test_serverssl_makefile1.
        # On PyPy on Linux, it is important to join the SSL Connect
        # Task FIRST, before closing the sockets. If we do it after
        # (which makes more sense) we hang. It's not clear why, except
        # that it has something to do with context switches. Inserting a call to
        # gevent.sleep(0.1) instead of joining the task has the same
        # effect. If the previous tests hang, then later tests can fail with
        # SSLError: unknown alert type.
        # XXX: Why do those two things happen?
        # On PyPy on macOS, we don't have that problem and can use the
        # more logical order.
        try:
            if self.task is not None:
                self.task.join()
        finally:
            self.task = None
            for o in self.objects():
                try:
                    o.close()
                except Exception: # pylint:disable=broad-except
                    pass
            self._objects = ()
class CleaningUp(Closing):
    """A :class:`Closing` that also owns a listener and an already-running task.

    Entering accepts one client from the listener and yields that socket;
    exiting joins the task, closes every tracked object, and finally
    drops the listener reference.
    """
    def __init__(self, task, listener, *other_sockets):
        Closing.__init__(self, listener, *other_sockets)
        self.task = task
        self.listener = listener
    def __enter__(self):
        # Hand the accepted (and tracked) client socket to the with-body.
        return self.accept(self.listener)
    def __exit__(self, exc_type, exc_value, exc_tb):
        try:
            Closing.__exit__(self, exc_type, exc_value, exc_tb)
        finally:
            self.listener = None
if __name__ == '__main__':
    # Run via gevent's test harness rather than plain unittest.
    greentest.main()
| 19,195 | 33.775362 | 123 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.