desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Assert the parser fails with the expected message. Caller
must ensure that self.parser is an InterceptingOptionParser.'
| def assertParseFail(self, cmdline_args, expected_output):
| try:
self.parser.parse_args(cmdline_args)
except InterceptedError as err:
self.assertEqual(err.error_message, expected_output)
else:
self.assertFalse('expected parse failure')
|
'Assert the parser prints the expected output on stdout.'
| def assertOutput(self, cmdline_args, expected_output, expected_status=0, expected_error=None):
| save_stdout = sys.stdout
encoding = getattr(save_stdout, 'encoding', None)
try:
try:
sys.stdout = StringIO()
if encoding:
sys.stdout.encoding = encoding
self.parser.parse_args(cmdline_args)
finally:
output = sys.stdout.getvalue(... |
'Assert that TypeError is raised when executing func.'
| def assertTypeError(self, func, expected_message, *args):
| self.assertRaises(func, args, None, TypeError, expected_message)
|
'Helper for testing that classify_class_attrs finds a bunch of
different kinds of attributes on a given class.'
| def _classify_test(self, newstyle):
| if newstyle:
base = object
else:
class base:
pass
class A(base, ):
def s():
pass
s = staticmethod(s)
def c(cls):
pass
c = classmethod(c)
def getp(self):
pass
p = property(getp)
def m(self)... |
'classify_class_attrs finds static methods, class methods,
properties, normal methods, and data attributes on an old-style
class.'
| def test_classify_oldstyle(self):
| self._classify_test(False)
|
'Just like test_classify_oldstyle, but for a new-style class.'
| def test_classify_newstyle(self):
| self._classify_test(True)
|
'Create a function that returns its locals(), excluding the
autogenerated \'.1\', \'.2\', etc. tuple param names (if any).'
| def makeCallable(self, signature):
| with check_py3k_warnings(('tuple parameter unpacking has been removed', SyntaxWarning), quiet=True):
code = 'lambda %s: dict(i for i in locals().items() if not is_tuplename(i[0]))'
return eval((code % signature), {'is_tuplename': self.is_tuplename})
|
'Check for cases where compressed data is larger than original.'
| @skipUnless(zlib, 'requires zlib')
def test_low_compression(self):
| with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('strfile', '12')
with zipfile.ZipFile(TESTFN2, 'r', zipfile.ZIP_DEFLATED) as zipfp:
with zipfp.open('strfile') as openobj:
self.assertEqual(openobj.read(1), '1')
self.assertEqual(openobj.rea... |
'Test appending to an existing zipfile.'
| def test_append_to_zip_file(self):
| with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr('strfile', self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, 'strfile'])
|
'Test appending to an existing file that is not a zipfile.'
| def test_append_to_non_zip_file(self):
| data = ('I am not a ZipFile!' * 10)
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, 'r') as zipfp... |
'Check that calling ZipFile.write without arcname specified
produces the expected result.'
| def test_write_default_name(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
zipfp.write(TESTFN)
self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read())
|
'Check that files within a Zip archive can have different
compression options.'
| @skipUnless(zlib, 'requires zlib')
def test_per_file_compression(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)... |
'Check that trying to call write() on a readonly ZipFile object
raises a RuntimeError.'
| def test_write_to_readonly(self):
| with zipfile.ZipFile(TESTFN2, mode='w') as zipfp:
zipfp.writestr('somefile.txt', 'bogus')
with zipfile.ZipFile(TESTFN2, mode='r') as zipfp:
self.assertRaises(RuntimeError, zipfp.write, TESTFN)
|
'Check that the zipfile is closed after the \'with\' block.'
| def test_close(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
for (fpath, fdata) in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertTrue((zipfp.fp is not None), 'zipfp is not open')
self.assertTrue((zipfp.fp is None), 'zipfp is not closed')
with zipfile.ZipFile(TEST... |
'Check that the zipfile is closed if an exception is raised in the
\'with\' block.'
| def test_close_on_exception(self):
| with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
for (fpath, fdata) in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, 'r') as zipfp2:
raise zipfile.BadZipfile()
except zipfile.BadZipfile:
self.assertTrue((zipfp2.fp is None), 'zipf... |
'Check that is_zipfile() correctly identifies non-zip files.'
| def test_is_zip_erroneous_file(self):
| with open(TESTFN, 'w') as fp:
fp.write('this is not a legal zip file\n')
chk = zipfile.is_zipfile(TESTFN)
self.assertFalse(chk)
with open(TESTFN, 'rb') as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue((not chk))
fp = StringIO()
fp.write('this is ... |
'Check that is_zipfile() correctly identifies zip files.'
| def test_is_zip_valid_file(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
chk = zipfile.is_zipfile(TESTFN)
self.assertTrue(chk)
with open(TESTFN, 'rb') as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue(chk)
fp.seek(0, 0)
z... |
'Verify that testzip() doesn\'t swallow inappropriate exceptions.'
| def test_closed_zip_raises_RuntimeError(self):
| data = StringIO()
with zipfile.ZipFile(data, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
self.assertRaises(RuntimeError, zipf.read, 'foo.txt')
self.assertRaises(RuntimeError, zipf.open, 'foo.txt')
self.assertRaises(RuntimeError, zipf.testzip)
self... |
'Check that bad modes passed to ZipFile constructor are caught.'
| def test_bad_constructor_mode(self):
| self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'q')
|
'Check that bad modes passed to ZipFile.open are caught.'
| def test_bad_open_mode(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipfile.ZipFile(TESTFN, mode='r') as zipf:
zipf.read('foo.txt')
self.assertRaises(RuntimeError, zipf.open, 'foo.txt', 'q')
|
'Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn\'t advance file pointer.'
| def test_read0(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipf.open('foo.txt') as f:
for i in xrange(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(), 'O, for a Mus... |
'Check that attempting to call open() for an item that doesn\'t
exist in the archive raises a RuntimeError.'
| def test_open_non_existent_item(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
self.assertRaises(KeyError, zipf.open, 'foo.txt', 'r')
|
'Check that bad compression methods passed to ZipFile.open are
caught.'
| def test_bad_compression_mode(self):
| self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'w', (-1))
|
'Check that a filename containing a null byte is properly
terminated.'
| def test_null_byte_in_filename(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
zipf.writestr('foo.txt\x00qqq', 'O, for a Muse of Fire!')
self.assertEqual(zipf.namelist(), ['foo.txt'])
|
'Check that ZIP internal structure sizes are calculated correctly.'
| def test_struct_sizes(self):
| self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
|
'Check that comments on the archive are handled properly.'
| def test_comments(self):
| with zipfile.ZipFile(TESTFN, mode='w') as zipf:
self.assertEqual(zipf.comment, '')
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
with zipfile.ZipFile(TESTFN, mode='r') as zipf:
self.assertEqual(zipf.comment, '')
comment = 'Bravely taking to his feet, ... |
'Tests that files with bad CRCs return their name from testzip.'
| def check_testzip_with_bad_crc(self, compression):
| zipdata = self.zips_with_bad_crc[compression]
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
self.assertEqual('afile', zipf.testzip())
|
'Tests that files with bad CRCs raise a BadZipfile exception when read.'
| def check_read_with_bad_crc(self, compression):
| zipdata = self.zips_with_bad_crc[compression]
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
self.assertRaises(zipfile.BadZipfile, zipf.read, 'afile')
with zipfile.ZipFile(io.BytesIO(zipdata), mode='r') as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.ass... |
'$ matches the end of string, and just before the terminating'
| def test_dollar_matches_twice(self):
| pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a#\nb#\n#')
self... |
'BaseHTTPServer method, overridden.'
| def get_request(self):
| (request, client_address) = self.socket.accept()
request.settimeout(10.0)
return (request, client_address)
|
'Stops the webserver if it\'s currently running.'
| def stop(self):
| self._stop = True
self.join()
|
'Performs digest authentication on the given HTTP request
handler. Returns True if authentication was successful, False
otherwise.
If no users have been set, then digest auth is effectively
disabled and this method will always return True.'
| def handle_request(self, request_handler):
| if (len(self._users) == 0):
return True
if ('Proxy-Authorization' not in request_handler.headers):
return self._return_auth_challenge(request_handler)
else:
auth_dict = self._create_auth_dict(request_handler.headers['Proxy-Authorization'])
if (auth_dict['username'] in self._u... |
'Create a new DocTest containing the given examples. The
DocTest\'s globals are initialized with a copy of `globs`.'
| def __init__(self, examples, globs, name, filename, lineno, docstring):
| assert (not isinstance(examples, basestring)), 'DocTest no longer accepts str; use DocTestParser instead'
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
|
'Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.'
| def parse(self, string, name='<string>'):
| string = string.expandtabs()
min_indent = self._min_indent(string)
if (min_indent > 0):
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
(charno, lineno) = (0, 0)
for m in self._EXAMPLE_RE.finditer(string):
output.append(string[charno:m.start()])
... |
'Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.'
| def get_doctest(self, string, globs, name, filename, lineno):
| return DocTest(self.get_examples(string, name), globs, name, filename, lineno, string)
|
'Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it\'s most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called "line 1" then.
The optional argumen... | def get_examples(self, string, name='<string>'):
| return [x for x in self.parse(string, name) if isinstance(x, Example)]
|
'Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example\'s source code (with prompts and indentation stripped);
and `want` is the example\'s expected output (with indentation
stripped).
`name` is the string\'s name, and `lineno` is the line numbe... | def _parse_example(self, m, name, lineno):
| indent = len(m.group('indent'))
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ((' ' * indent) + '.'), name, lineno)
source = '\n'.join([sl[(indent + 4):] for sl in source_lines])
want = m.group('w... |
'Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string\'s name, and `lineno` is the line number
where the example starts; both are used for error messages.'
| def _find_options(self, source, name, lineno):
| options = {}
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if ((option[0] not in '+-') or (option[1:] not in OPTIONFLAGS_BY_NAME)):
raise ValueError(('line %r of the... |
'Return the minimum indentation of any non-blank line in `s`'
| def _min_indent(self, s):
| indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if (len(indents) > 0):
return min(indents)
else:
return 0
|
'Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.'
| def _check_prompt_blank(self, lines, indent, name, lineno):
| for (i, line) in enumerate(lines):
if ((len(line) >= (indent + 4)) and (line[(indent + 3)] != ' ')):
raise ValueError(('line %r of the docstring for %s lacks blank after %s: %r' % (((lineno + i) + 1), name, line[indent:(indent + 3)], line)))
|
'Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.'
| def _check_prefix(self, lines, prefix, name, lineno):
| for (i, line) in enumerate(lines):
if (line and (not line.startswith(prefix))):
raise ValueError(('line %r of the docstring for %s has inconsistent leading whitespace: %r' % (((lineno + i) + 1), name, line)))
|
'Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument... | def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, exclude_empty=True):
| self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
|
'Return a list of the DocTests that are defined by the given
object\'s docstring, or by any of its contained objects\'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
co... | def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
| if (name is None):
name = getattr(obj, '__name__', None)
if (name is None):
raise ValueError(("DocTestFinder.find: name must be given when obj.__name__ doesn't exist: %r" % (type(obj),)))
if (module is False):
module = None
elif (module is None)... |
'Return true if the given object is defined in the given
module.'
| def _from_module(self, module, object):
| if (module is None):
return True
elif (inspect.getmodule(object) is not None):
return (module is inspect.getmodule(object))
elif inspect.isfunction(object):
return (module.__dict__ is object.func_globals)
elif inspect.isclass(object):
return (module.__name__ == object.__m... |
'Find tests for the given object and any contained objects, and
add them to `tests`.'
| def _find(self, tests, obj, name, module, source_lines, globs, seen):
| if self._verbose:
print ('Finding tests in %s' % name)
if (id(obj) in seen):
return
seen[id(obj)] = 1
test = self._get_test(obj, name, module, globs, source_lines)
if (test is not None):
tests.append(test)
if (inspect.ismodule(obj) and self._recurse):
for... |
'Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.'
| def _get_test(self, obj, name, module, globs, source_lines):
| if isinstance(obj, basestring):
docstring = obj
else:
try:
if (obj.__doc__ is None):
docstring = ''
else:
docstring = obj.__doc__
if (not isinstance(docstring, basestring)):
docstring = str(docstring)
... |
'Return a line number of the given object\'s docstring. Note:
this method assumes that the object has a docstring.'
| def _find_lineno(self, obj, source_lines):
| lineno = None
if inspect.ismodule(obj):
lineno = 0
if inspect.isclass(obj):
if (source_lines is None):
return None
pat = re.compile(('^\\s*class\\s*%s\\b' % getattr(obj, '__name__', '-')))
for (i, line) in enumerate(source_lines):
if pat.match(line):
... |
'Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg \'verbose\' prints lots of stuff if true,
only failures if false; by default, it\'s true iff \'-v\' is in
sys.argv.
Optional a... | def __init__(self, checker=None, verbose=None, optionflags=0):
| self._checker = (checker or OutputChecker())
if (verbose is None):
verbose = ('-v' in sys.argv)
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
self.tries = 0
self.failures = 0
self._name2ft = {}
self._fakeout = _SpoofOut()
|
'Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)'
| def report_start(self, out, test, example):
| if self._verbose:
if example.want:
out(((('Trying:\n' + _indent(example.source)) + 'Expecting:\n') + _indent(example.want)))
else:
out((('Trying:\n' + _indent(example.source)) + 'Expecting nothing\n'))
|
'Report that the given example ran successfully. (Only
displays a message if verbose=True)'
| def report_success(self, out, test, example, got):
| if self._verbose:
out('ok\n')
|
'Report that the given example failed.'
| def report_failure(self, out, test, example, got):
| out((self._failure_header(test, example) + self._checker.output_difference(example, got, self.optionflags)))
|
'Report that the given example raised an unexpected exception.'
| def report_unexpected_exception(self, out, test, example, exc_info):
| out(((self._failure_header(test, example) + 'Exception raised:\n') + _indent(_exception_traceback(exc_info))))
|
'Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the num... | def __run(self, test, compileflags, out):
| failures = tries = 0
original_optionflags = self.optionflags
(SUCCESS, FAILURE, BOOM) = range(3)
check = self._checker.check_output
for (examplenum, example) in enumerate(test.examples):
quiet = ((self.optionflags & REPORT_ONLY_FIRST_FAILURE) and (failures > 0))
self.optionflags = or... |
'Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.'
| def __record_outcome(self, test, f, t):
| (f2, t2) = self._name2ft.get(test.name, (0, 0))
self._name2ft[test.name] = ((f + f2), (t + t2))
self.failures += f
self.tries += t
|
'Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
t... | def run(self, test, compileflags=None, out=None, clear_globs=True):
| self.test = test
if (compileflags is None):
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if (out is None):
out = save_stdout.write
sys.stdout = self._fakeout
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
sel... |
'Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then th... | def summarize(self, verbose=None):
| if (verbose is None):
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
(name, (f, t)) = x
assert (f <= t)
totalt += t
totalf += f
if (t == 0):
notests.append(name)
... |
'Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for ... | def check_output(self, want, got, optionflags):
| if (got == want):
return True
if (not (optionflags & DONT_ACCEPT_TRUE_FOR_1)):
if ((got, want) == ('True\n', '1\n')):
return True
if ((got, want) == ('False\n', '0\n')):
return True
if (not (optionflags & DONT_ACCEPT_BLANKLINE)):
want = re.sub(('(?m)^%... |
'Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.'
| def output_difference(self, example, got, optionflags):
| want = example.want
if (not (optionflags & DONT_ACCEPT_BLANKLINE)):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
if self._do_a_fancy_diff(want, got, optionflags):
want_lines = want.splitlines(True)
got_lines = got.splitlines(True)
if (optionflags & REPORT_UDIFF):... |
'Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
T... | def debug(self):
| self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags, checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
|
'val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123'
| def __init__(self, val):
| self.val = val
|
'square() -> square TestClass\'s associated value
>>> _TestClass(13).square().get()
169'
| def square(self):
| self.val = (self.val ** 2)
return self
|
'get() -> return TestClass\'s associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42'
| def get(self):
| return self.val
|
'Many methods we can just pass through to the DB object.
(See below)'
| def __getattr__(self, name):
| return getattr(self.db, name)
|
'Some methods we can just pass through to the cursor object. (See below)'
| def __getattr__(self, name):
| return getattr(self.dbc, name)
|
'bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.'
| def __init__(self, filename, dbhome, create=0, truncate=0, mode=384, recover=0, dbflags=0):
| self.db = None
myflags = db.DB_THREAD
if create:
myflags |= db.DB_CREATE
flagsforenv = ((((db.DB_INIT_MPOOL | db.DB_INIT_LOCK) | db.DB_INIT_LOG) | db.DB_INIT_TXN) | dbflags)
try:
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv ... |
'Print the database to stdout for debugging'
| def _db_print(self):
| print '******** Printing raw database for debugging ********'
cur = self.db.cursor()
try:
(key, data) = cur.first()
while 1:
print repr({key: data})
next = cur.next()
if next:
(key, data) = next
else:
... |
'CreateTable(table, columns) - Create a new table in the database.
raises TableDBError if it already exists or for other DB errors.'
| def CreateTable(self, table, columns):
| assert isinstance(columns, list)
txn = None
try:
if contains_metastrings(table):
raise ValueError('bad table name: contains reserved metastrings')
for column in columns:
if contains_metastrings(column):
raise ValueError('bad column ... |
'Return a list of columns in the given table.
[] if the table doesn\'t exist.'
| def ListTableColumns(self, table):
| assert isinstance(table, str)
if contains_metastrings(table):
raise ValueError, 'bad table name: contains reserved metastrings'
columnlist_key = _columns_key(table)
if (not getattr(self.db, 'has_key')(columnlist_key)):
return []
pickledcolumnlist = getattr(self.db, 'ge... |
'Return a list of tables in this database.'
| def ListTables(self):
| pickledtablelist = self.db.get_get(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
return []
|
'CreateOrExtendTable(table, columns)
Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.'
| def CreateOrExtendTable(self, table, columns):
| assert isinstance(columns, list)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
txn = None
try:
columnlist_key = _columns_key(table)
txn = self.env.txn_begin()
oldcolumnlist = pickle.loads(getattr(self.db, 'get_bytes', self.db.get... |
'initialize the self.__tablecolumns dict'
| def __load_column_info(self, table):
| try:
tcolpickles = getattr(self.db, 'get_bytes', self.db.get)(_columns_key(table))
except db.DBNotFoundError:
raise TableDBError, ('unknown table: %r' % (table,))
if (not tcolpickles):
raise TableDBError, ('unknown table: %r' % (table,))
self.__tablecolumns[table] = p... |
'Create a new unique row identifier'
| def __new_rowid(self, table, txn):
| unique = 0
while (not unique):
blist = []
for x in xrange(_rowid_str_len):
blist.append(random.randint(0, 255))
newid = struct.pack(('B' * _rowid_str_len), *blist)
if (sys.version_info[0] >= 3):
newid = newid.decode('iso8859-1')
try:
se... |
'Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.'
| def Insert(self, table, rowdict):
| txn = None
try:
if (not getattr(self.db, 'has_key')(_columns_key(table))):
raise TableDBError, 'unknown table'
if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
for column in rowdict.keys():
if (not self.__tablecolumns[table].c... |
'Modify(table, conditions={}, mappings={}) - Modify items in rows matching \'conditions\' using mapping functions in \'mappings\'
* table - the table name
* conditions - a dictionary keyed on column names containing
a condition callable expecting the data string as an
argument and returning a boolean.
* mappings - a di... | def Modify(self, table, conditions={}, mappings={}):
| try:
matching_rowids = self.__Select(table, [], conditions)
columns = mappings.keys()
for rowid in matching_rowids.keys():
txn = None
try:
for column in columns:
txn = self.env.txn_begin()
try:
... |
'Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions - a dictionary keyed on column names containing
condition functions expecting the data string as an
argument and returning a boolean.'
| def Delete(self, table, conditions={}):
| try:
matching_rowids = self.__Select(table, [], conditions)
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
try:
... |
'Select(table, columns, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns - a list of which column data to return. If
columns is None, all columns will be returned.
* conditions - a dictionary keyed on column names
containing callable conditions expecting the ... | def Select(self, table, columns, conditions={}):
| try:
if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
if (columns is None):
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except db.DBError as dberror:
if (sys.version_info < (2, 6))... |
'__Select() - Used to implement Select and Delete (above)
Returns a dictionary keyed on rowids containing dicts
holding the row data for columns listed in the columns param
that match the given conditions.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
... | def __Select(self, table, columns, conditions):
| if (not (table in self.__tablecolumns)):
self.__load_column_info(table)
if (columns is None):
columns = self.tablecolumns[table]
for column in (columns + conditions.keys()):
if (not self.__tablecolumns[table].count(column)):
raise TableDBError, ('unknown column: %r'... |
'Remove an entire table from the database'
| def Drop(self, table):
| txn = None
try:
txn = self.env.txn_begin()
self.db.delete(_columns_key(table), txn=txn)
cur = self.db.cursor(txn)
table_key = _search_all_data_key(table)
while 1:
try:
(key, data) = cur.set_range(table_key)
except db.DBNotFoundError... |
def mk(self, key):
    """Turn key into an appropriate key type for this db.

    Berkeley DB keys must be bytes on Python 3; Python 2 uses the
    str key unchanged.
    """
    if sys.version_info[0] >= 3:
        return bytes(key, 'iso8859-1')
    return key
'A Recno file that is given a "backing source file" is essentially a
simple ASCII file. Normally each record is delimited by
and so is
just a line in the file, but you can set a different record delimiter
if needed.'
| def test02_WithSource(self):
| homeDir = get_new_environment_path()
self.homeDir = homeDir
source = os.path.join(homeDir, 'test_recno.txt')
if (not os.path.isdir(homeDir)):
os.mkdir(homeDir)
f = open(source, 'w')
f.close()
d = db.DB()
d.set_re_delim(10)
d.set_re_delim('\n')
d.set_re_source(source)
... |
'Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.'
| def verifyStderr(self, method, successRe):
| stdErr = sys.stderr
sys.stderr = StringIO()
try:
method()
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if (not successRe.search(errorOut)):
self.fail(('unexpected stderr output:\n' + errorOut))
if (sys.version_inf... |
'Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; port number
and timeout are optional.'
| def __init__(self, host=None, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
| self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.timeout = timeout
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = ''
self.sb = 0
self.sbdataq = ''
self.option_callback = None
if (host is not None):
... |
def open(self, host, port=0, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
    """Connect to a host.

    The optional second argument is the port number, which
    defaults to the standard telnet port (23).
    Don't try to reopen an already connected instance.
    """
    self.eof = 0
    # A falsy port selects the standard telnet port.
    port = port or TELNET_PORT
    self.host = host
    self.port = port
    self.timeout = timeout
    self.sock = socket.create_connection((host, port), timeout)
def __del__(self):
    """Destructor -- close the connection when the object is reclaimed."""
    self.close()
def msg(self, msg, *args):
    """Print a debug message, when the debug level is > 0.

    If extra arguments are present, they are substituted in the
    message using the standard string formatting operator.
    """
    if (self.debuglevel > 0):
        # Python 2 print statement: the trailing comma keeps the
        # "Telnet(host,port):" prefix on the same line as the message.
        print ('Telnet(%s,%s):' % (self.host, self.port)),
        if args:
            # Lazy %-formatting: only applied when debugging is on.
            print (msg % args)
        else:
            print msg
def set_debuglevel(self, debuglevel):
    """Set the debug level.

    The higher it is, the more debug output you get (on sys.stdout).
    """
    self.debuglevel = debuglevel
def close(self):
    """Close the connection and reset all per-connection state."""
    sock = self.sock
    if sock:
        sock.close()
    # Mark the instance as disconnected / at EOF and drop any
    # half-parsed IAC or subnegotiation state.
    self.sock = 0
    self.eof = 1
    self.iacseq = ''
    self.sb = 0
def get_socket(self):
    """Return the socket object used internally."""
    return self.sock
def fileno(self):
    """Return the fileno() of the socket object used internally."""
    sock = self.sock
    return sock.fileno()
def write(self, buffer):
    """Write a string to the socket, doubling any IAC characters.

    Can block if the connection is blocked. May raise
    socket.error if the connection is closed.
    """
    # Escape the telnet IAC byte by doubling it, per the protocol.
    data = buffer.replace(IAC, IAC + IAC) if IAC in buffer else buffer
    self.msg('send %r', data)
    self.sock.sendall(data)
'Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.'
| def read_until(self, match, timeout=None):
| n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
if (i >= 0):
i = (i + n)
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if (timeout is not None):
s_args = (s_args + (timeout... |
def read_all(self):
    """Read all data until EOF; block until connection closed."""
    self.process_rawq()
    # Keep pulling raw data and cooking it until the peer closes.
    while not self.eof:
        self.fill_rawq()
        self.process_rawq()
    buf, self.cookedq = self.cookedq, ''
    return buf
def read_some(self):
    """Read at least one byte of cooked data unless EOF is hit.

    Return '' if EOF is hit. Block if no data is immediately
    available.
    """
    self.process_rawq()
    # Loop until cooked data arrives or the connection ends
    # (De Morgan form of: not cookedq and not eof).
    while not (self.cookedq or self.eof):
        self.fill_rawq()
        self.process_rawq()
    buf, self.cookedq = self.cookedq, ''
    return buf
def read_very_eager(self):
    """Read everything that's possible without blocking in I/O (eager).

    Raise EOFError if connection closed and no cooked data
    available. Return '' if no cooked data available otherwise.
    Don't block unless in the midst of an IAC sequence.
    """
    self.process_rawq()
    # Drain the socket while bytes are immediately readable.
    while not self.eof and self.sock_avail():
        self.fill_rawq()
        self.process_rawq()
    return self.read_very_lazy()
def read_eager(self):
    """Read readily available data.

    Raise EOFError if connection closed and no cooked data
    available. Return '' if no cooked data available otherwise.
    Don't block unless in the midst of an IAC sequence.
    """
    self.process_rawq()
    # Stop as soon as any cooked data exists, EOF is seen, or the
    # socket has nothing immediately readable.
    while not self.cookedq and not self.eof and self.sock_avail():
        self.fill_rawq()
        self.process_rawq()
    return self.read_very_lazy()
def read_lazy(self):
    """Process and return data that's already in the queues (lazy).

    Raise EOFError if connection closed and no data available.
    Return '' if no cooked data available otherwise. Don't block
    unless in the midst of an IAC sequence.
    """
    self.process_rawq()
    return self.read_very_lazy()
def read_very_lazy(self):
    """Return any data available in the cooked queue (very lazy).

    Raise EOFError if connection closed and no data available.
    Return '' if no cooked data available otherwise. Don't block.
    """
    buf = self.cookedq
    self.cookedq = ''
    # Nothing cooked, nothing raw, and the peer has closed:
    # there is truly no more data, so signal EOF (Python 2 raise syntax).
    if ((not buf) and self.eof and (not self.rawq)):
        raise EOFError, 'telnet connection closed'
    return buf
def read_sb_data(self):
    """Return any data available in the SB ... SE queue.

    Return '' if no SB ... SE available. Should only be called
    after seeing a SB or SE command. When a new SB command is
    found, old unread SB data will be discarded. Don't block.
    """
    data, self.sbdataq = self.sbdataq, ''
    return data
def set_option_negotiation_callback(self, callback):
    """Provide a callback function called after each receipt of a telnet option."""
    self.option_callback = callback
'Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don\'t block unless in
the midst of an IAC sequence.'
| def process_rawq(self):
| buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if (not self.iacseq):
if (c == theNULL):
continue
if (c == '\x11'):
continue
if (c != IAC):
buf[self.sb] = (buf... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.