desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'SF bug #1486663 -- this used to erroneously raise a TypeError'
| def test_keywords_in_subclass(self):
| SetSubclassWithKeywordArgs(newarg=1)
|
'Helper function to make a list of random numbers'
| def randomlist(self, n):
| return [self.gen.random() for i in xrange(n)]
|
'If UseForeignDTD is passed True and a document without an external
entity reference is parsed, ExternalEntityRefHandler is first called
with None for the public and system ids.'
| def test_use_foreign_dtd(self):
| handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEn... |
'If UseForeignDTD is passed True and a document with an external
entity reference is parsed, ExternalEntityRefHandler is called with
the public and system ids from the document.'
| def test_ignore_use_foreign_dtd(self):
| handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEn... |
'Test multiple targets on the left hand side.'
| def testMultipleLHS(self):
| snippets = ['a, b = 1, 2', '(a, b) = 1, 2', '((a, b), c) = (1, 2), 3']
for s in snippets:
a = transformer.parse(s)
self.assertIsInstance(a, ast.Module)
child1 = a.getChildNodes()[0]
self.assertIsInstance(child1, ast.Stmt)
child2 =... |
'Utility method to verify if two objects are copies of each others.'
| def assert_is_copy(self, obj, objcopy, msg=None):
| if (msg is None):
msg = '{!r} is not a copy of {!r}'.format(obj, objcopy)
self.assertEqual(obj, objcopy, msg=msg)
self.assertIs(type(obj), type(objcopy), msg=msg)
if hasattr(obj, '__dict__'):
self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
self.ass... |
'Make a copy of sys.path'
| def setUp(self):
| super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
self.makefile = None
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._uname = None
os.uname = self._get_uname
self.name = os.name
self.platf... |
'Restore sys.path'
| def tearDown(self):
| sys.path[:] = self.sys_path
if (self.makefile is not None):
os.unlink(self.makefile)
self._cleanup_testfn()
if (self.uname is not None):
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep... |
'Mocks the select.select() call to raise EINTR for first call'
| @contextlib.contextmanager
def mocked_select_module(self):
| old_select = select.select
class MockSelect:
def __init__(self):
self.called = 0
def __call__(self, *args):
self.called += 1
if (self.called == 1):
raise select.error(errno.EINTR, os.strerror(errno.EINTR))
else:
retu... |
'Setup of a temp file to use for testing'
| def setUp(self):
| self.text = ('test_urllib: %s\n' % self.__class__.__name__)
FILE = file(test_support.TESTFN, 'wb')
try:
FILE.write(self.text)
finally:
FILE.close()
self.pathname = test_support.TESTFN
self.returned_obj = urllib.urlopen(('file:%s' % self.pathname))
|
'Shut down the open object'
| def tearDown(self):
| self.returned_obj.close()
os.remove(test_support.TESTFN)
|
'Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file.'
| def createNewTempFile(self, data=''):
| (newFd, newFilePath) = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, 'wb')
newFile.write(data)
newFile.close()
finally:
try:
newFile.close()
except:
pass
return newFilePath
|
'Helper method for testing different input types.
\'given\' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
Test cannot assume anything about order. Docs make no guarantee and
have possible dictionary input.'
| def help_inputtype(self, given, test_type):
| expect_somewhere = ['1st=1', '2nd=2', '3rd=3']
result = urllib.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result, ('testing %s: %s not found in %s' % (test_type, expected, result)))
self.assertEqual(result.count('&'), 2, ("testing %s: expec... |
'Assert the options are what we expected when parsing arguments.
Otherwise, fail with a nicely formatted message.
Keyword arguments:
args -- A list of arguments to parse with OptionParser.
expected_opts -- The options expected.
expected_positional_args -- The positional arguments expected.
Returns the options and posit... | def assertParseOK(self, args, expected_opts, expected_positional_args):
| (options, positional_args) = self.parser.parse_args(args)
optdict = vars(options)
self.assertEqual(optdict, expected_opts, ('\nOptions are %(optdict)s.\nShould be %(expected_opts)s.\nArgs were %(args)s.' % locals()))
self.assertEqual(positional_args, expected_positional_args, ('\nPosit... |
'Assert that the expected exception is raised when calling a
function, and that the right error message is included with
that exception.
Arguments:
func -- the function to call
args -- positional arguments to `func`
kwargs -- keyword arguments to `func`
expected_exception -- exception that should be raised
expected_mes... | def assertRaises(self, func, args, kwargs, expected_exception, expected_message):
| if (args is None):
args = ()
if (kwargs is None):
kwargs = {}
try:
func(*args, **kwargs)
except expected_exception as err:
actual_message = str(err)
if isinstance(expected_message, retype):
self.assertTrue(expected_message.search(actual_message), ("exp... |
'Assert the parser fails with the expected message. Caller
must ensure that self.parser is an InterceptingOptionParser.'
| def assertParseFail(self, cmdline_args, expected_output):
| try:
self.parser.parse_args(cmdline_args)
except InterceptedError as err:
self.assertEqual(err.error_message, expected_output)
else:
self.assertFalse('expected parse failure')
|
'Assert the parser prints the expected output on stdout.'
| def assertOutput(self, cmdline_args, expected_output, expected_status=0, expected_error=None):
| save_stdout = sys.stdout
encoding = getattr(save_stdout, 'encoding', None)
try:
try:
sys.stdout = StringIO()
if encoding:
sys.stdout.encoding = encoding
self.parser.parse_args(cmdline_args)
finally:
output = sys.stdout.getvalue(... |
'Assert that TypeError is raised when executing func.'
| def assertTypeError(self, func, expected_message, *args):
| self.assertRaises(func, args, None, TypeError, expected_message)
|
'doctest monkeypatches linecache to enable inspection'
| def test_proceed_with_fake_filename(self):
| (fn, source) = ('<test>', 'def x(): pass\n')
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if (filename == fn):
return source.splitlines(True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
... |
'Helper for testing that classify_class_attrs finds a bunch of
different kinds of attributes on a given class.'
| def _classify_test(self, newstyle):
| if newstyle:
base = object
else:
class base:
pass
class A(base, ):
def s():
pass
s = staticmethod(s)
def c(cls):
pass
c = classmethod(c)
def getp(self):
pass
p = property(getp)
def m(self)... |
'classify_class_attrs finds static methods, class methods,
properties, normal methods, and data attributes on an old-style
class.'
| def test_classify_oldstyle(self):
| self._classify_test(False)
|
'Just like test_classify_oldstyle, but for a new-style class.'
| def test_classify_newstyle(self):
| self._classify_test(True)
|
'Create a function that returns its locals(), excluding the
autogenerated \'.1\', \'.2\', etc. tuple param names (if any).'
| def makeCallable(self, signature):
| with check_py3k_warnings(('tuple parameter unpacking has been removed', SyntaxWarning), quiet=True):
code = 'lambda %s: dict(i for i in locals().items() if not is_tuplename(i[0]))'
return eval((code % signature), {'is_tuplename': self.is_tuplename})
|
'Check for cases where compressed data is larger than original.'
| @skipUnless(zlib, 'requires zlib')
def test_low_compression(self):
| with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('strfile', '12')
with zipfile.ZipFile(TESTFN2, 'r', zipfile.ZIP_DEFLATED) as zipfp:
with zipfp.open('strfile') as openobj:
self.assertEqual(openobj.read(1), '1')
self.assertEqual(openobj.rea... |
'Test appending to an existing zipfile.'
| def test_append_to_zip_file(self):
| with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr('strfile', self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, 'strfile'])
|
'Test appending to an existing file that is not a zipfile.'
| def test_append_to_non_zip_file(self):
| data = ('I am not a ZipFile!' * 10)
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, 'r') as zipfp... |
'Check that calling ZipFile.write without arcname specified
produces the expected result.'
| def test_write_default_name(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
zipfp.write(TESTFN)
with open(TESTFN, 'r') as fid:
self.assertEqual(zipfp.read(TESTFN), fid.read())
|
'Check that files within a Zip archive can have different
compression options.'
| @skipUnless(zlib, 'requires zlib')
def test_per_file_compression(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)... |
'Check that trying to call write() on a readonly ZipFile object
raises a RuntimeError.'
| def test_write_to_readonly(self):
| with zipfile.ZipFile(TESTFN2, mode='w') as zipfp:
zipfp.writestr('somefile.txt', 'bogus')
with zipfile.ZipFile(TESTFN2, mode='r') as zipfp:
self.assertRaises(RuntimeError, zipfp.write, TESTFN)
|
'Check that the zipfile is closed after the \'with\' block.'
| def test_close(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
for (fpath, fdata) in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertTrue((zipfp.fp is not None), 'zipfp is not open')
self.assertTrue((zipfp.fp is None), 'zipfp is not closed')
with zipfile.ZipFile(TEST... |
'Check that the zipfile is closed if an exception is raised in the
\'with\' block.'
| def test_close_on_exception(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
for (fpath, fdata) in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, 'r') as zipfp2:
raise zipfile.BadZipfile()
except zipfile.BadZipfile:
self.assertTrue((zipfp2.fp is None), 'zipf... |
'Check that is_zipfile() correctly identifies non-zip files.'
| def test_is_zip_erroneous_file(self):
| with open(TESTFN, 'w') as fp:
fp.write('this is not a legal zip file\n')
chk = zipfile.is_zipfile(TESTFN)
self.assertFalse(chk)
with open(TESTFN, 'rb') as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue((not chk))
fp = StringIO()
fp.write('this is ... |
'Check that zipfiles with missing bytes at the end raise BadZipFile.'
| def test_damaged_zipfile(self):
| fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
zipfiledata = fp.getvalue()
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, fp... |
'Check that is_zipfile() correctly identifies zip files.'
| def test_is_zip_valid_file(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
chk = zipfile.is_zipfile(TESTFN)
self.assertTrue(chk)
with open(TESTFN, 'rb') as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue(chk)
fp.seek(0, 0)
z... |
'Verify that testzip() doesn\'t swallow inappropriate exceptions.'
| def test_closed_zip_raises_RuntimeError(self):
| data = StringIO()
with zipfile.ZipFile(data, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
self.assertRaises(RuntimeError, zipf.read, 'foo.txt')
self.assertRaises(RuntimeError, zipf.open, 'foo.txt')
self.assertRaises(RuntimeError, zipf.testzip)
self... |
'Check that bad modes passed to ZipFile constructor are caught.'
| def test_bad_constructor_mode(self):
| self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'q')
|
'Check that bad modes passed to ZipFile.open are caught.'
| def test_bad_open_mode(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipfile.ZipFile(TESTFN, mode='r') as zipf:
zipf.read('foo.txt')
self.assertRaises(RuntimeError, zipf.open, 'foo.txt', 'q')
|
'Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn\'t advance file pointer.'
| def test_read0(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipf.open('foo.txt') as f:
for i in xrange(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(), 'O, for a Mus... |
'Check that attempting to call open() for an item that doesn\'t
exist in the archive raises a RuntimeError.'
| def test_open_non_existent_item(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
self.assertRaises(KeyError, zipf.open, 'foo.txt', 'r')
|
'Check that bad compression methods passed to ZipFile.open are
caught.'
| def test_bad_compression_mode(self):
| self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'w', (-1))
|
'Check that a filename containing a null byte is properly
terminated.'
| def test_null_byte_in_filename(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt\x00qqq', 'O, for a Muse of Fire!')
self.assertEqual(zipf.namelist(), ['foo.txt'])
|
'Check that ZIP internal structure sizes are calculated correctly.'
| def test_struct_sizes(self):
| self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
|
'Check that comments on the archive are handled properly.'
| def test_comments(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
self.assertEqual(zipf.comment, '')
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipfile.ZipFile(TESTFN, mode='r') as zipf:
self.assertEqual(zipf.comment, '')
comment = 'Bravely taking to his feet, ... |
'Tests that files with bad CRCs return their name from testzip.'
| def check_testzip_with_bad_crc(self, compression):
| zipdata = self.zips_with_bad_crc[compression]
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
self.assertEqual('afile', zipf.testzip())
|
'Tests that files with bad CRCs raise a BadZipfile exception when read.'
| def check_read_with_bad_crc(self, compression):
| zipdata = self.zips_with_bad_crc[compression]
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
self.assertRaises(zipfile.BadZipfile, zipf.read, 'afile')
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.ass... |
'If an extra field in the header is less than 4 bytes, skip it.'
| def test_zipfile_with_short_extra_field(self):
| zipdata = 'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00abc\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00abc... |
'$ matches the end of string, and just before the terminating'
| def test_dollar_matches_twice(self):
| pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a#\nb#\n#')
self... |
'BaseHTTPServer method, overridden.'
| def get_request(self):
| (request, client_address) = self.socket.accept()
request.settimeout(10.0)
return (request, client_address)
|
'Stops the webserver if it\'s currently running.'
| def stop(self):
| self._stop = True
self.join()
|
'Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.'
| def handle_request(self, request_handler):
| if (len(self._users) == 0):
return True
if ('Proxy-Authorization' not in request_handler.headers):
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(request_handler.headers['Proxy-Authorization'])
if (auth_dict['username'] in self._u... |
'Create a new DocTest containing the given examples. The
DocTest\'s globals are initialized with a copy of `globs`.'
| def __init__(self, examples, globs, name, filename, lineno, docstring):
| assert (not isinstance(examples, basestring)), 'DocTest no longer accepts str; use DocTestParser instead'
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
|
'Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.'
| def parse(self, string, name='<string>'):
| string = string.expandtabs()
min_indent = self._min_indent(string)
if (min_indent > 0):
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
(charno, lineno) = (0, 0)
for m in self._EXAMPLE_RE.finditer(string):
output.append(string[charno:m.start()])
... |
'Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.'
| def get_doctest(self, string, globs, name, filename, lineno):
| return DocTest(self.get_examples(string, name), globs, name, filename, lineno, string)
|
'Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it\'s most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called "line 1" then.
The optional argumen... | def get_examples(self, string, name='<string>'):
| return [x for x in self.parse(string, name) if isinstance(x, Example)]
|
'Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example\'s source code (with prompts and indentation stripped);
and `want` is the example\'s expected output (with indentation
stripped).
`name` is the string\'s name, and `lineno` is the line numbe... | def _parse_example(self, m, name, lineno):
| indent = len(m.group('indent'))
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ((' ' * indent) + '.'), name, lineno)
source = '\n'.join([sl[(indent + 4):] for sl in source_lines])
want = m.group('w... |
'Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string\'s name, and `lineno` is the line number
where the example starts; both are used for error messages.'
| def _find_options(self, source, name, lineno):
| options = {}
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if ((option[0] not in '+-') or (option[1:] not in OPTIONFLAGS_BY_NAME)):
raise ValueError(('line %r of the... |
'Return the minimum indentation of any non-blank line in `s`'
| def _min_indent(self, s):
| indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if (len(indents) > 0):
return min(indents)
else:
return 0
|
'Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.'
| def _check_prompt_blank(self, lines, indent, name, lineno):
| for (i, line) in enumerate(lines):
if ((len(line) >= (indent + 4)) and (line[(indent + 3)] != ' ')):
raise ValueError(('line %r of the docstring for %s lacks blank after %s: %r' % (((lineno + i) + 1), name, line[indent:(indent + 3)], line)))
|
'Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.'
| def _check_prefix(self, lines, prefix, name, lineno):
| for (i, line) in enumerate(lines):
if (line and (not line.startswith(prefix))):
raise ValueError(('line %r of the docstring for %s has inconsistent leading whitespace: %r' % (((lineno + i) + 1), name, line)))
|
'Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument... | def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, exclude_empty=True):
| self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
|
'Return a list of the DocTests that are defined by the given
object\'s docstring, or by any of its contained objects\'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
co... | def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
| if (name is None):
name = getattr(obj, '__name__', None)
if (name is None):
raise ValueError(("DocTestFinder.find: name must be given when obj.__name__ doesn't exist: %r" % (type(obj),)))
if (module is False):
module = None
elif (module is None)... |
'Return true if the given object is defined in the given
module.'
| def _from_module(self, module, object):
| if (module is None):
return True
elif (inspect.getmodule(object) is not None):
return (module is inspect.getmodule(object))
elif inspect.isfunction(object):
return (module.__dict__ is object.func_globals)
elif inspect.isclass(object):
return (module.__name__ == object.__m... |
'Find tests for the given object and any contained objects, and
add them to `tests`.'
| def _find(self, tests, obj, name, module, source_lines, globs, seen):
| if self._verbose:
print ('Finding tests in %s' % name)
if (id(obj) in seen):
return
seen[id(obj)] = 1
test = self._get_test(obj, name, module, globs, source_lines)
if (test is not None):
tests.append(test)
if (inspect.ismodule(obj) and self._recurse):
for... |
'Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.'
| def _get_test(self, obj, name, module, globs, source_lines):
| if isinstance(obj, basestring):
docstring = obj
else:
try:
if (obj.__doc__ is None):
docstring = ''
else:
docstring = obj.__doc__
if (not isinstance(docstring, basestring)):
docstring = str(docstring)
... |
'Return a line number of the given object\'s docstring. Note:
this method assumes that the object has a docstring.'
| def _find_lineno(self, obj, source_lines):
| lineno = None
if inspect.ismodule(obj):
lineno = 0
if inspect.isclass(obj):
if (source_lines is None):
return None
pat = re.compile(('^\\s*class\\s*%s\\b' % getattr(obj, '__name__', '-')))
for (i, line) in enumerate(source_lines):
if pat.match(line):
... |
'Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg \'verbose\' prints lots of stuff if true,
only failures if false; by default, it\'s true iff \'-v\' is in
sys.argv.
Optional a... | def __init__(self, checker=None, verbose=None, optionflags=0):
| self._checker = (checker or OutputChecker())
if (verbose is None):
verbose = ('-v' in sys.argv)
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
self.tries = 0
self.failures = 0
self._name2ft = {}
self._fakeout = _SpoofOut()
|
'Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)'
| def report_start(self, out, test, example):
| if self._verbose:
if example.want:
out(((('Trying:\n' + _indent(example.source)) + 'Expecting:\n') + _indent(example.want)))
else:
out((('Trying:\n' + _indent(example.source)) + 'Expecting nothing\n'))
|
'Report that the given example ran successfully. (Only
displays a message if verbose=True)'
| def report_success(self, out, test, example, got):
| if self._verbose:
out('ok\n')
|
'Report that the given example failed.'
| def report_failure(self, out, test, example, got):
| out((self._failure_header(test, example) + self._checker.output_difference(example, got, self.optionflags)))
|
'Report that the given example raised an unexpected exception.'
| def report_unexpected_exception(self, out, test, example, exc_info):
| out(((self._failure_header(test, example) + 'Exception raised:\n') + _indent(_exception_traceback(exc_info))))
|
'Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the num... | def __run(self, test, compileflags, out):
| failures = tries = 0
original_optionflags = self.optionflags
(SUCCESS, FAILURE, BOOM) = range(3)
check = self._checker.check_output
for (examplenum, example) in enumerate(test.examples):
quiet = ((self.optionflags & REPORT_ONLY_FIRST_FAILURE) and (failures > 0))
self.optionflags = or... |
'Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.'
| def __record_outcome(self, test, f, t):
| (f2, t2) = self._name2ft.get(test.name, (0, 0))
self._name2ft[test.name] = ((f + f2), (t + t2))
self.failures += f
self.tries += t
|
'Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
t... | def run(self, test, compileflags=None, out=None, clear_globs=True):
| self.test = test
if (compileflags is None):
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if (out is None):
out = save_stdout.write
sys.stdout = self._fakeout
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
sel... |
'Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then th... | def summarize(self, verbose=None):
| if (verbose is None):
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
(name, (f, t)) = x
assert (f <= t)
totalt += t
totalf += f
if (t == 0):
notests.append(name)
... |
'Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for ... | def check_output(self, want, got, optionflags):
| if (got == want):
return True
if (not (optionflags & DONT_ACCEPT_TRUE_FOR_1)):
if ((got, want) == ('True\n', '1\n')):
return True
if ((got, want) == ('False\n', '0\n')):
return True
if (not (optionflags & DONT_ACCEPT_BLANKLINE)):
want = re.sub(('(?m)^%... |
'Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.'
| def output_difference(self, example, got, optionflags):
| want = example.want
if (not (optionflags & DONT_ACCEPT_BLANKLINE)):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
if self._do_a_fancy_diff(want, got, optionflags):
want_lines = want.splitlines(True)
got_lines = got.splitlines(True)
if (optionflags & REPORT_UDIFF):... |
'Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
T... | def debug(self):
| self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags, checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
|
'val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123'
| def __init__(self, val):
| self.val = val
|
'square() -> square TestClass\'s associated value
>>> _TestClass(13).square().get()
169'
| def square(self):
| self.val = (self.val ** 2)
return self
|
'get() -> return TestClass\'s associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42'
| def get(self):
| return self.val
|
'Many methods we can just pass through to the DB object.
(See below)'
| def __getattr__(self, name):
| return getattr(self.db, name)
|
'Some methods we can just pass through to the cursor object. (See below)'
| def __getattr__(self, name):
| return getattr(self.dbc, name)
|
'bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.'
| def __init__(self, filename, dbhome, create=0, truncate=0, mode=384, recover=0, dbflags=0):
| self.db = None
myflags = db.DB_THREAD
if create:
myflags |= db.DB_CREATE
flagsforenv = ((((db.DB_INIT_MPOOL | db.DB_INIT_LOCK) | db.DB_INIT_LOG) | db.DB_INIT_TXN) | dbflags)
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv ... |
'Print the database to stdout for debugging'
| def _db_print(self):
| print '******** Printing raw database for debugging ********'
cur = self.db.cursor()
try:
(key, data) = cur.first()
while 1:
print repr({key: data})
next = cur.next()
if next:
(key, data) = next
else:
... |
'CreateTable(table, columns) - Create a new table in the database.
raises TableDBError if it already exists or for other DB errors.'
| def CreateTable(self, table, columns):
| assert isinstance(columns, list)
txn = None
try:
if contains_metastrings(table):
raise ValueError('bad table name: contains reserved metastrings')
for column in columns:
if contains_metastrings(column):
raise ValueError('bad column ... |
'Return a list of columns in the given table.
[] if the table doesn\'t exist.'
| def ListTableColumns(self, table):
| assert isinstance(table, str)
if contains_metastrings(table):
raise ValueError, 'bad table name: contains reserved metastrings'
columnlist_key = _columns_key(table)
if (not getattr(self.db, 'has_key')(columnlist_key)):
return []
pickledcolumnlist = getattr(self.db, 'ge... |
'Return a list of tables in this database.'
| def ListTables(self):
| pickledtablelist = self.db.get_get(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
return []
|
'CreateOrExtendTable(table, columns)
Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.'
| def CreateOrExtendTable(self, table, columns):
| assert isinstance(columns, list)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
txn = None
try:
columnlist_key = _columns_key(table)
txn = self.env.txn_begin()
oldcolumnlist = pickle.loads(getattr(self.db, 'get_bytes', self.db.get... |
'initialize the self.__tablecolumns dict'
| def __load_column_info(self, table):
| try:
tcolpickles = getattr(self.db, 'get_bytes', self.db.get)(_columns_key(table))
except db.DBNotFoundError:
raise TableDBError, ('unknown table: %r' % (table,))
if (not tcolpickles):
raise TableDBError, ('unknown table: %r' % (table,))
self.__tablecolumns[table] = p... |
'Create a new unique row identifier'
| def __new_rowid(self, table, txn):
| unique = 0
while (not unique):
blist = []
for x in xrange(_rowid_str_len):
blist.append(random.randint(0, 255))
newid = struct.pack(('B' * _rowid_str_len), *blist)
if (sys.version_info[0] >= 3):
newid = newid.decode('iso8859-1')
try:
se... |
'Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.'
| def Insert(self, table, rowdict):
| txn = None
try:
if (not getattr(self.db, 'has_key')(_columns_key(table))):
raise TableDBError, 'unknown table'
if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
for column in rowdict.keys():
if (not self.__tablecolumns[table].c... |
'Modify(table, conditions={}, mappings={}) - Modify items in rows matching \'conditions\' using mapping functions in \'mappings\'
* table - the table name
* conditions - a dictionary keyed on column names containing
a condition callable expecting the data string as an
argument and returning a boolean.
* mappings - a di... | def Modify(self, table, conditions={}, mappings={}):
| try:
matching_rowids = self.__Select(table, [], conditions)
columns = mappings.keys()
for rowid in matching_rowids.keys():
txn = None
try:
for column in columns:
txn = self.env.txn_begin()
try:
... |
'Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions - a dictionary keyed on column names containing
condition functions expecting the data string as an
argument and returning a boolean.'
| def Delete(self, table, conditions={}):
| try:
matching_rowids = self.__Select(table, [], conditions)
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
try:
... |
'Select(table, columns, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns - a list of which column data to return. If
columns is None, all columns will be returned.
* conditions - a dictionary keyed on column names
containing callable conditions expecting the ... | def Select(self, table, columns, conditions={}):
| try:
if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
if (columns is None):
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except db.DBError as dberror:
if (sys.version_info < (2, 6))... |
'__Select() - Used to implement Select and Delete (above)
Returns a dictionary keyed on rowids containing dicts
holding the row data for columns listed in the columns param
that match the given conditions.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
... | def __Select(self, table, columns, conditions):
| if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
if (columns is None):
columns = self.tablecolumns[table]
for column in (columns + conditions.keys()):
if (not self.__tablecolumns[table].count(column)):
raise TableDBError, ('unknown column: %r'... |
'Remove an entire table from the database'
| def Drop(self, table):
| txn = None
try:
txn = self.env.txn_begin()
self.db.delete(_columns_key(table), txn=txn)
cur = self.db.cursor(txn)
table_key = _search_all_data_key(table)
while 1:
try:
(key, data) = cur.set_range(table_key)
except db.DBNotFoundError... |
def mk(self, key):
    """Coerce *key* to the key type this db expects.

    Python 2 databases take str keys as-is; under Python 3 the key is
    encoded to bytes using iso8859-1 (a 1:1 mapping for code points
    0-255, so every byte value round-trips).
    """
    if sys.version_info[0] >= 3:
        return bytes(key, 'iso8859-1')
    return key
'A Recno file that is given a "backing source file" is essentially a
simple ASCII file. Normally each record is delimited by
and so is
just a line in the file, but you can set a different record delimiter
if needed.'
| def test02_WithSource(self):
| homeDir = get_new_environment_path()
self.homeDir = homeDir
source = os.path.join(homeDir, 'test_recno.txt')
if (not os.path.isdir(homeDir)):
os.mkdir(homeDir)
f = open(source, 'w')
f.close()
d = db.DB()
d.set_re_delim(10)
d.set_re_delim('\n')
d.set_re_source(source)
... |
'Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.'
| def verifyStderr(self, method, successRe):
| stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if (not successRe.search(errorOut)):
self.fail(('unexpected stderr output:\n' + errorOut))
if (sys.version_inf... |
'Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.'
| def verifyStderr(self, method, successRe):
| stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if (not successRe.search(errorOut)):
self.fail(('unexpected stderr output:\n' + errorOut))
if (sys.version_inf... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.