| id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7,400 | time_format.py | CouchPotato_CouchPotatoServer/libs/pyutil/time_format.py |
# ISO-8601:
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
import calendar, datetime, re, time
def iso_utc_date(now=None, t=time.time):
if now is None:
now = t()
return datetime.datetime.utcfromtimestamp(now).isoformat()[:10]
def iso_utc(now=None, sep=' ', t=time.time, suffix='Z'):
if now is None:
now = t()
return datetime.datetime.utcfromtimestamp(now).isoformat(sep)+suffix
def iso_local(now=None, sep=' ', t=time.time):
if now is None:
now = t()
return datetime.datetime.fromtimestamp(now).isoformat(sep)
def iso_utc_time_to_seconds(isotime, _conversion_re=re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})[T_ ](?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<subsecond>\.\d+)?Z?")):
"""
The inverse of iso_utc().
Real ISO-8601 is "2003-01-08T06:30:59Z". We also accept
"2003-01-08 06:30:59Z" as suggested by RFC 3339. We also accept
"2003-01-08_06:30:59Z". We also accept the trailing 'Z' to be omitted.
"""
m = _conversion_re.match(isotime)
if not m:
raise ValueError, (isotime, "not a complete ISO8601 timestamp")
year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
hour, minute, second = int(m.group('hour')), int(m.group('minute')), int(m.group('second'))
subsecstr = m.group('subsecond')
if subsecstr:
subsecfloat = float(subsecstr)
else:
subsecfloat = 0
return calendar.timegm( (year, month, day, hour, minute, second, 0, 1, 0) ) + subsecfloat
def parse_duration(s):
orig = s
unit = None
DAY = 24*60*60
MONTH = 31*DAY
YEAR = 365*DAY
if s.endswith("s"):
s = s[:-1]
if s.endswith("day"):
unit = DAY
s = s[:-len("day")]
elif s.endswith("month"):
unit = MONTH
s = s[:-len("month")]
elif s.endswith("mo"):
unit = MONTH
s = s[:-len("mo")]
elif s.endswith("year"):
unit = YEAR
s = s[:-len("YEAR")]
else:
raise ValueError("no unit (like day, month, or year) in '%s'" % orig)
s = s.strip()
return int(s) * unit
def parse_date(s):
# return seconds-since-epoch for the UTC midnight that starts the given
# day
return int(iso_utc_time_to_seconds(s + "T00:00:00"))
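# A minimal usage sketch of the helpers above (illustrative; the literal
# strings assume the default sep=' ' and suffix='Z' arguments):
if __name__ == '__main__':
    # iso_utc() and iso_utc_time_to_seconds() are inverses of each other.
    assert iso_utc(1.5) == '1970-01-01 00:00:01.500000Z'
    assert iso_utc_time_to_seconds('1970-01-01 00:00:01.500000Z') == 1.5
    # The parser also accepts 'T' or '_' as the separator and an omitted 'Z'.
    assert iso_utc_time_to_seconds('1970-01-01T00:00:01') == 1.0
    # parse_duration() understands day/month/year suffixes, and parse_date()
    # gives the POSIX timestamp of the UTC midnight that starts the given day.
    assert parse_duration('3 days') == 3 * 24 * 60 * 60
    assert parse_date('1970-01-02') == 24 * 60 * 60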
| 2,299 | Python | .py | 61 | 32.04918 | 195 | 0.605299 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,401 | weakutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/weakutil.py |
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
# from the Python Standard Library
from weakref import ref
# from the pyutil library
from assertutil import precondition
# Thanks to Thomas Wouters, JP Calderone and the authors from the Python Cookbook.
# class WeakMethod copied from The Python Cookbook and simplified.
class WeakMethod:
""" Wraps a function or, more importantly, a bound method, in
a way that allows a bound method's object to be GC'd """
def __init__(self, fn, callback=None):
warnings.warn("deprecated", DeprecationWarning)
precondition(hasattr(fn, 'im_self'), "fn is required to be a bound method.")
self._cleanupcallback = callback
self._obj = ref(fn.im_self, self.call_cleanup_cb)
self._meth = fn.im_func
def __call__(self, *args, **kws):
s = self._obj()
if s:
return self._meth(s, *args,**kws)
def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__, self._obj, self._meth,)
def call_cleanup_cb(self, thedeadweakref):
if self._cleanupcallback is not None:
self._cleanupcallback(self, thedeadweakref)
def factory_function_name_here(o):
if hasattr(o, 'im_self'):
return WeakMethod(o)
else:
return o
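# A minimal usage sketch (illustrative; Counter is a hypothetical class used
# only to show that the wrapped object remains collectable):
if __name__ == '__main__':
    import gc
    class Counter(object):
        def bump(self):
            return 'bumped'
    c = Counter()
    w = WeakMethod(c.bump)      # holds only a weak reference to c
    assert w() == 'bumped'      # target alive: the call goes through
    del c
    gc.collect()
    assert w() is None          # target collected: the call becomes a no-op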
| 1,368 | Python | .py | 32 | 36.9375 | 84 | 0.671192 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,402 | testutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/testutil.py |
import os, signal, time
from twisted.internet import defer, reactor
from twisted.trial import unittest
import repeatable_random
repeatable_random # http://divmod.org/trac/ticket/1499
class SignalMixin:
# This class is necessary for any code which wants to use Processes
# outside the usual reactor.run() environment. It is copied from
# Twisted's twisted.test.test_process . Note that Twisted-8.2.0 uses
# something rather different.
sigchldHandler = None
def setUp(self):
# make sure SIGCHLD handler is installed, as it should be on
# reactor.run(). problem is reactor may not have been run when this
# test runs.
if hasattr(reactor, "_handleSigchld") and hasattr(signal, "SIGCHLD"):
self.sigchldHandler = signal.signal(signal.SIGCHLD,
reactor._handleSigchld)
def tearDown(self):
if self.sigchldHandler:
signal.signal(signal.SIGCHLD, self.sigchldHandler)
class PollMixin:
def poll(self, check_f, pollinterval=0.01):
# Return a Deferred, then call check_f periodically until it returns
# True, at which point the Deferred will fire.. If check_f raises an
# exception, the Deferred will errback.
d = defer.maybeDeferred(self._poll, None, check_f, pollinterval)
return d
def _poll(self, res, check_f, pollinterval):
if check_f():
return True
d = defer.Deferred()
d.addCallback(self._poll, check_f, pollinterval)
reactor.callLater(pollinterval, d.callback, None)
return d
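# A minimal usage sketch (illustrative): a hypothetical trial test case that
# mixes in PollMixin and waits for a flag set by a delayed reactor call.
class ExamplePollTest(unittest.TestCase, PollMixin):
    def test_wait_for_flag(self):
        state = {'done': False}
        reactor.callLater(0.05, state.__setitem__, 'done', True)
        # poll() returns a Deferred that fires once the predicate is true.
        return self.poll(lambda: state['done'])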
class TestMixin(SignalMixin):
def setUp(self, repeatable=False):
"""
        @param repeatable: install the repeatable_randomness hacks to attempt
        to remove access to real randomness and real time.time from the
        code under test
"""
self.repeatable = repeatable
if self.repeatable:
import repeatable_random
repeatable_random.force_repeatability()
if hasattr(time, 'realtime'):
self.teststarttime = time.realtime()
else:
self.teststarttime = time.time()
def tearDown(self):
if self.repeatable:
repeatable_random.restore_non_repeatability()
self.clean_pending(required_to_quiesce=True)
def clean_pending(self, dummy=None, required_to_quiesce=True):
"""
This handy method cleans all pending tasks from the reactor.
When writing a unit test, consider the following question:
Is the code that you are testing required to release control once it
has done its job, so that it is impossible for it to later come around
(with a delayed reactor task) and do anything further?
If so, then trial will usefully test that for you -- if the code under
test leaves any pending tasks on the reactor then trial will fail it.
On the other hand, some code is *not* required to release control -- some
code is allowed to continuously maintain control by rescheduling reactor
tasks in order to do ongoing work. Trial will incorrectly require that
code to clean up all its tasks from the reactor.
Most people think that such code should be amended to have an optional
"shutdown" operation that releases all control, but on the contrary it is
good design for some code to *not* have a shutdown operation, but instead
to have a "crash-only" design in which it recovers from crash on startup.
If the code under test is of the "long-running" kind, which is *not*
required to shutdown cleanly in order to pass tests, then you can simply
call testutil.clean_pending() at the end of the unit test, and trial will
be satisfied.
"""
pending = reactor.getDelayedCalls()
active = bool(pending)
for p in pending:
if p.active():
p.cancel()
else:
print "WEIRDNESS! pending timed call not active!"
if required_to_quiesce and active:
self.fail("Reactor was still active when it was required to be quiescent.")
try:
import win32file
import win32con
def w_make_readonly(path):
win32file.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_READONLY)
def w_make_accessible(path):
win32file.SetFileAttributes(path, win32con.FILE_ATTRIBUTE_NORMAL)
# http://divmod.org/trac/ticket/1499
make_readonly = w_make_readonly
make_accessible = w_make_accessible
except ImportError:
import stat
def make_readonly(path):
os.chmod(path, stat.S_IREAD)
os.chmod(os.path.dirname(path), stat.S_IREAD)
def make_accessible(path):
os.chmod(os.path.dirname(path), stat.S_IWRITE | stat.S_IEXEC | stat.S_IREAD)
os.chmod(path, stat.S_IWRITE | stat.S_IEXEC | stat.S_IREAD)
| 4,955 | Python | .py | 103 | 39.398058 | 87 | 0.672528 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,403 | zlibutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/zlibutil.py |
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Making your zlib experience that much nicer!
Most importantly, this offers protection from "zlib bomb" attacks, where the
original data was maximally compressible, and a naive use of zlib would
consume all of your RAM while trying to decompress it.
"""
import exceptions, string, zlib
from humanreadable import hr
from pyutil.assertutil import precondition
class DecompressError(exceptions.StandardError, zlib.error): pass
class UnsafeDecompressError(DecompressError): pass # This means it would take more memory to decompress than we can spare.
class TooBigError(DecompressError): pass # This means the resulting uncompressed text would exceed the maximum allowed length.
class ZlibError(DecompressError): pass # internal error, probably due to the input not being zlib compressed text
# The smallest limit that you can impose on zlib decompression and still have
# a chance of succeeding at decompression.
# constant memory overhead of zlib (76 KB), plus minbite (128 bytes) times
# maxexpansion (1032) times buffer-copying duplication (2), plus 2063 so as
# to reach the ceiling of div (2*1032)
MINMAXMEM=76*2**10 + 128 * 1032 * 2 + 2063 - 1
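# (Working that arithmetic out: 76*2**10 = 77,824 bytes of fixed overhead,
# 128*1032*2 = 264,192 bytes for one minimum-sized bite and its copy, and
# 2,062 bytes of slack, so MINMAXMEM == 344,078 bytes, roughly 336 KiB.)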
# You should really specify a maxmem which is much higher than MINMAXMEM. If
# maxmem=MINMAXMEM, we will be reduced to decompressing the input in
# 128-byte bites, and furthermore unless the decompressed text is quite small,
# we will be forced to give up and spuriously raise UnsafeDecompressError!
# You really ought to pass a maxmem argument equal to the maximum possible
# memory that your app should ever allocate (for a short-term use).
# I typically set it to 65 MB.
def decompress(zbuf, maxlen=(65 * (2**20)), maxmem=(65 * (2**20))):
"""
Decompress zbuf so that it decompresses to <= maxlen bytes, while using
<= maxmem memory, or else raise an exception. If zbuf contains
uncompressed data an exception will be raised.
This function guards against memory allocation attacks.
@param maxlen the resulting text must not be greater than this
@param maxmem the execution of this function must not use more than this
amount of memory in bytes; The higher this number is (optimally
1032 * maxlen, or even greater), the faster this function can
complete. (Actually I don't fully understand the workings of zlib, so
this function might use a *little* more than this memory, but not a
lot more.) (Also, this function will raise an exception if the amount
of memory required even *approaches* maxmem. Another reason to make
it large.) (Hence the default value which would seem to be
exceedingly large until you realize that it means you can decompress
64 KB chunks of compressiontext at a bite.)
"""
assert isinstance(maxlen, (int, long,)) and maxlen > 0, "maxlen is required to be a real maxlen, geez!"
assert isinstance(maxmem, (int, long,)) and maxmem > 0, "maxmem is required to be a real maxmem, geez!"
assert maxlen <= maxmem, "maxlen is required to be <= maxmem. All data that is included in the return value is counted against maxmem as well as against maxlen, so it is impossible to return a result bigger than maxmem, even if maxlen is bigger than maxmem. See decompress_to_spool() if you want to spool a large text out while limiting the amount of memory used during the process."
lenzbuf = len(zbuf)
offset = 0
decomplen = 0
availmem = maxmem - (76 * 2**10) # zlib can take around 76 KB RAM to do decompression
availmem = availmem / 2 # generating the result string from the intermediate strings will require using the same amount of memory again, briefly. If you care about this kind of thing, then let's rewrite this module in C.
decompstrlist = []
decomp = zlib.decompressobj()
while offset < lenzbuf:
# How much compressedtext can we safely attempt to decompress now without going over `maxmem'? zlib docs say that theoretical maximum for the zlib format would be 1032:1.
lencompbite = availmem / 1032 # XXX TODO: The biggest compression ratio zlib can have for whole files is 1032:1. Unfortunately I don't know if small chunks of compressiontext *within* a file can expand to more than that. I'll assume not... --Zooko 2001-05-12
if lencompbite < 128:
# If we can't safely attempt even a few bytes of compression text, let us give up. Either `maxmem' was too small or this compressiontext is actually a decompression bomb.
raise UnsafeDecompressError, "used up roughly maxmem memory. maxmem: %s, len(zbuf): %s, offset: %s, decomplen: %s, lencompbite: %s" % tuple(map(hr, [maxmem, len(zbuf), offset, decomplen, lencompbite,]))
# I wish the following were a local function like this:
# def proc_decomp_bite(tmpstr, lencompbite=0, decomplen=decomplen, maxlen=maxlen, availmem=availmem, decompstrlist=decompstrlist, offset=offset, zbuf=zbuf):
# ...but we can't conveniently and efficiently update the integer variables like offset in the outer scope. Oh well. --Zooko 2003-06-26
try:
if (offset == 0) and (lencompbite >= lenzbuf):
tmpstr = decomp.decompress(zbuf)
else:
tmpstr = decomp.decompress(zbuf[offset:offset+lencompbite])
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, hr(le), )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > maxlen. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
availmem = availmem - lentmpstr
offset = offset + lencompbite
decompstrlist.append(tmpstr)
tmpstr = ''
try:
tmpstr = decomp.flush()
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, le, )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > maxlen. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
availmem = availmem - lentmpstr
offset = offset + lencompbite
if lentmpstr > 0:
decompstrlist.append(tmpstr)
tmpstr = ''
    if decompstrlist:
        return string.join(decompstrlist, '')
    else:
        # an empty decompstrlist means the input decompressed to the empty string
        return ''
def decompress_to_fileobj(zbuf, fileobj, maxlen=(65 * (2**20)), maxmem=(65 * (2**20))):
"""
Decompress zbuf so that it decompresses to <= maxlen bytes, while using
<= maxmem memory, or else raise an exception. If zbuf contains
uncompressed data an exception will be raised.
This function guards against memory allocation attacks.
Note that this assumes that data written to fileobj still occupies memory,
so such data counts against maxmem as well as against maxlen.
@param maxlen the resulting text must not be greater than this
@param maxmem the execution of this function must not use more than this
amount of memory in bytes; The higher this number is (optimally
1032 * maxlen, or even greater), the faster this function can
complete. (Actually I don't fully understand the workings of zlib, so
this function might use a *little* more than this memory, but not a
lot more.) (Also, this function will raise an exception if the amount
of memory required even *approaches* maxmem. Another reason to make
it large.) (Hence the default value which would seem to be
exceedingly large until you realize that it means you can decompress
64 KB chunks of compressiontext at a bite.)
@param fileobj a file object to which the decompressed text will be written
"""
precondition(hasattr(fileobj, 'write') and callable(fileobj.write), "fileobj is required to have a write() method.", fileobj=fileobj)
precondition(isinstance(maxlen, (int, long,)) and maxlen > 0, "maxlen is required to be a real maxlen, geez!", maxlen=maxlen)
precondition(isinstance(maxmem, (int, long,)) and maxmem > 0, "maxmem is required to be a real maxmem, geez!", maxmem=maxmem)
precondition(maxlen <= maxmem, "maxlen is required to be <= maxmem. All data that is written out to fileobj is counted against maxmem as well as against maxlen, so it is impossible to return a result bigger than maxmem, even if maxlen is bigger than maxmem. See decompress_to_spool() if you want to spool a large text out while limiting the amount of memory used during the process.", maxlen=maxlen, maxmem=maxmem)
lenzbuf = len(zbuf)
offset = 0
decomplen = 0
availmem = maxmem - (76 * 2**10) # zlib can take around 76 KB RAM to do decompression
decomp = zlib.decompressobj()
while offset < lenzbuf:
# How much compressedtext can we safely attempt to decompress now without going over maxmem? zlib docs say that theoretical maximum for the zlib format would be 1032:1.
lencompbite = availmem / 1032 # XXX TODO: The biggest compression ratio zlib can have for whole files is 1032:1. Unfortunately I don't know if small chunks of compressiontext *within* a file can expand to more than that. I'll assume not... --Zooko 2001-05-12
if lencompbite < 128:
# If we can't safely attempt even a few bytes of compression text, let us give up. Either maxmem was too small or this compressiontext is actually a decompression bomb.
raise UnsafeDecompressError, "used up roughly maxmem memory. maxmem: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxmem, len(zbuf), offset, decomplen,]))
# I wish the following were a local function like this:
# def proc_decomp_bite(tmpstr, lencompbite=0, decomplen=decomplen, maxlen=maxlen, availmem=availmem, decompstrlist=decompstrlist, offset=offset, zbuf=zbuf):
# ...but we can't conveniently and efficiently update the integer variables like offset in the outer scope. Oh well. --Zooko 2003-06-26
try:
if (offset == 0) and (lencompbite >= lenzbuf):
tmpstr = decomp.decompress(zbuf)
else:
tmpstr = decomp.decompress(zbuf[offset:offset+lencompbite])
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, le, )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > maxlen. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
availmem = availmem - lentmpstr
offset = offset + lencompbite
fileobj.write(tmpstr)
tmpstr = ''
try:
tmpstr = decomp.flush()
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, le, )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > maxlen. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
availmem = availmem - lentmpstr
offset = offset + lencompbite
fileobj.write(tmpstr)
tmpstr = ''
def decompress_to_spool(zbuf, fileobj, maxlen=(65 * (2**20)), maxmem=(65 * (2**20))):
"""
Decompress zbuf so that it decompresses to <= maxlen bytes, while using
<= maxmem memory, or else raise an exception. If zbuf contains
uncompressed data an exception will be raised.
This function guards against memory allocation attacks.
Note that this assumes that data written to fileobj does *not* continue to
occupy memory, so such data doesn't count against maxmem, although of
course it still counts against maxlen.
@param maxlen the resulting text must not be greater than this
@param maxmem the execution of this function must not use more than this
amount of memory in bytes; The higher this number is (optimally
1032 * maxlen, or even greater), the faster this function can
complete. (Actually I don't fully understand the workings of zlib, so
this function might use a *little* more than this memory, but not a
lot more.) (Also, this function will raise an exception if the amount
of memory required even *approaches* maxmem. Another reason to make
it large.) (Hence the default value which would seem to be
exceedingly large until you realize that it means you can decompress
64 KB chunks of compressiontext at a bite.)
@param fileobj the decompressed text will be written to it
"""
precondition(hasattr(fileobj, 'write') and callable(fileobj.write), "fileobj is required to have a write() method.", fileobj=fileobj)
precondition(isinstance(maxlen, (int, long,)) and maxlen > 0, "maxlen is required to be a real maxlen, geez!", maxlen=maxlen)
precondition(isinstance(maxmem, (int, long,)) and maxmem > 0, "maxmem is required to be a real maxmem, geez!", maxmem=maxmem)
tmpstr = ''
lenzbuf = len(zbuf)
offset = 0
decomplen = 0
availmem = maxmem - (76 * 2**10) # zlib can take around 76 KB RAM to do decompression
decomp = zlib.decompressobj()
while offset < lenzbuf:
# How much compressedtext can we safely attempt to decompress now without going over `maxmem'? zlib docs say that theoretical maximum for the zlib format would be 1032:1.
lencompbite = availmem / 1032 # XXX TODO: The biggest compression ratio zlib can have for whole files is 1032:1. Unfortunately I don't know if small chunks of compressiontext *within* a file can expand to more than that. I'll assume not... --Zooko 2001-05-12
if lencompbite < 128:
# If we can't safely attempt even a few bytes of compression text, let us give up. Either `maxmem' was too small or this compressiontext is actually a decompression bomb.
raise UnsafeDecompressError, "used up roughly `maxmem' memory. maxmem: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxmem, len(zbuf), offset, decomplen,]))
# I wish the following were a local function like this:
# def proc_decomp_bite(tmpstr, lencompbite=0, decomplen=decomplen, maxlen=maxlen, availmem=availmem, decompstrlist=decompstrlist, offset=offset, zbuf=zbuf):
# ...but we can't conveniently and efficiently update the integer variables like offset in the outer scope. Oh well. --Zooko 2003-06-26
try:
if (offset == 0) and (lencompbite >= lenzbuf):
tmpstr = decomp.decompress(zbuf)
else:
tmpstr = decomp.decompress(zbuf[offset:offset+lencompbite])
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, le, )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > `maxlen'. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
offset = offset + lencompbite
fileobj.write(tmpstr)
tmpstr = ''
try:
tmpstr = decomp.flush()
except zlib.error, le:
raise ZlibError, (offset, lencompbite, decomplen, le, )
lentmpstr = len(tmpstr)
decomplen = decomplen + lentmpstr
if decomplen > maxlen:
raise TooBigError, "length of resulting data > `maxlen'. maxlen: %s, len(zbuf): %s, offset: %s, decomplen: %s" % tuple(map(hr, [maxlen, len(zbuf), offset, decomplen,]))
offset = offset + lencompbite
fileobj.write(tmpstr)
tmpstr = ''
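# A minimal usage sketch (illustrative values only): a small, highly
# compressible payload round-trips through decompress(), while a deliberately
# tight maxlen makes the same call fail safely instead of ballooning in memory.
if __name__ == '__main__':
    import cStringIO
    plaintext = '\x00' * (2 * 2**20)           # 2 MiB of zeros compresses to a few KB
    zbuf = zlib.compress(plaintext)
    assert decompress(zbuf) == plaintext       # the default 65 MiB limits are ample
    spool = cStringIO.StringIO()
    decompress_to_spool(zbuf, spool)           # spooled output doesn't count against maxmem
    assert spool.getvalue() == plaintext
    try:
        decompress(zbuf, maxlen=2**10, maxmem=MINMAXMEM)
    except DecompressError:
        pass                                   # TooBigError and UnsafeDecompressError both derive from this
    else:
        raise AssertionError("expected the 1 KB maxlen to be enforced")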
| 15,919 | Python | .py | 228 | 62.802632 | 420 | 0.704241 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,404 | benchutil.py~ | CouchPotato_CouchPotatoServer/libs/pyutil/benchutil.py~ |
# Copyright (c) 2002-2013 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Benchmark a function for its behavior with respect to N.
How to use this module:
1. Define a function which runs the code that you want to benchmark. The
function takes a single argument which is the size of the task (i.e. the "N"
parameter). Pass this function as the first argument to rep_bench(), and N as
the second, e.g.:
>>> from pyutil.benchutil import rep_bench
>>> def fib(N):
... if N <= 1:
... return 1
... else:
... return fib(N-1) + fib(N-2)
...
>>> rep_bench(fib, 25, UNITS_PER_SECOND=1000)
best: 1.968e+00, 3th-best: 1.987e+00, mean: 2.118e+00, 3th-worst: 2.175e+00, worst: 2.503e+00 (of 10)
The output is reporting the number of milliseconds that executing the function
took, divided by N, from ten different invocations of fib(). It reports the
best, worst, M-th best, M-th worst, and mean, where "M" is about one quarter of
the number of invocations (in this case 10).
2. Now run it with different values of N and look for patterns:
>>> for N in 1, 5, 9, 13, 17, 21:
... print "%2d" % N,
... rep_bench(fib, N, UNITS_PER_SECOND=1000000)
...
1 best: 9.537e-01, 3th-best: 9.537e-01, mean: 1.121e+00, 3th-worst: 1.192e+00, worst: 2.146e+00 (of 10)
5 best: 5.722e-01, 3th-best: 6.199e-01, mean: 7.200e-01, 3th-worst: 8.106e-01, worst: 8.106e-01 (of 10)
9 best: 2.437e+00, 3th-best: 2.464e+00, mean: 2.530e+00, 3th-worst: 2.570e+00, worst: 2.676e+00 (of 10)
13 best: 1.154e+01, 3th-best: 1.168e+01, mean: 5.638e+01, 3th-worst: 1.346e+01, worst: 4.478e+02 (of 10)
17 best: 6.230e+01, 3th-best: 6.247e+01, mean: 6.424e+01, 3th-worst: 6.460e+01, worst: 7.294e+01 (of 10)
21 best: 3.376e+02, 3th-best: 3.391e+02, mean: 3.521e+02, 3th-worst: 3.540e+02, worst: 3.963e+02 (of 10)
>>> print_bench_footer(UNITS_PER_SECOND=1000000)
all results are in time units per N
time units per second: 1000000; seconds per time unit: 0.000001
(The pattern here is that as N grows, the time per N grows.)
3. If you need to do some setting up before the code can run, then put the
setting-up code into a separate function so that it won't be included in the
timing measurements. A good way to share state between the setting-up function
and the main function is to make them be methods of the same object, e.g.:
>>> import random
>>> class O:
... def __init__(self):
... self.l = []
... def setup(self, N):
... del self.l[:]
... self.l.extend(range(N))
... random.shuffle(self.l)
... def sort(self, N):
... self.l.sort()
...
>>> o = O()
>>> for N in 1000, 10000, 100000, 1000000:
... print "%7d" % N,
... rep_bench(o.sort, N, o.setup)
...
1000 best: 4.830e+02, 3th-best: 4.950e+02, mean: 5.730e+02, 3th-worst: 5.858e+02, worst: 7.451e+02 (of 10)
10000 best: 6.342e+02, 3th-best: 6.367e+02, mean: 6.678e+02, 3th-worst: 6.851e+02, worst: 7.848e+02 (of 10)
100000 best: 8.309e+02, 3th-best: 8.338e+02, mean: 8.435e+02, 3th-worst: 8.540e+02, worst: 8.559e+02 (of 10)
1000000 best: 1.327e+03, 3th-best: 1.339e+03, mean: 1.349e+03, 3th-worst: 1.357e+03, worst: 1.374e+03 (of 10)
4. Useful fact! rep_bench() returns a dict containing the numbers.
5. Things to fix:
a. I used to have it hooked up to use the "hotshot" profiler on the code being
measured. I recently tried to change it to use the newer cProfile profiler
instead, but I don't understand the interface to cProfiler so it just gives an
exception if you pass profile=True. Please fix this and send me a patch.
b. Wouldn't it be great if this script emitted results in a json format that
was understood by a tool to make pretty interactive explorable graphs? The
pretty graphs could look like those on http://speed.pypy.org/ . Please make
this work and send me a patch!
"""
import cProfile, operator, time
from decimal import Decimal as D
#from pyutil import jsonutil as json
import platform
if 'windows' in platform.system().lower():
clock = time.clock
else:
clock = time.time
from assertutil import _assert
def makeg(func):
def blah(n, func=func):
for i in xrange(n):
func()
return blah
def to_decimal(x):
"""
    See if D(x) returns something. If instead it raises TypeError, x must have been a float, so convert it to Decimal by way of string. (In Python >= 2.7, D(x) does this automatically.)
"""
try:
return D(x)
except TypeError:
return D("%0.54f" % (x,))
def mult(a, b):
"""
If we get TypeError from * (possibly because one is float and the other is Decimal), then promote them both to Decimal.
"""
try:
return a * b
except TypeError:
return to_decimal(a) * to_decimal(b)
def rep_bench(func, n, initfunc=None, MAXREPS=10, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", UNITS_PER_SECOND=1, quiet=False):
"""
Will run the func up to MAXREPS times, but won't start a new run if MAXTIME
(wall-clock time) has already elapsed (unless MAXTIME is None).
@param quiet Don't print anything--just return the results dict.
"""
assert isinstance(n, int), (n, type(n))
startwallclocktime = time.time()
tls = [] # elapsed time in seconds
bmes = []
while ((len(tls) < MAXREPS) or (MAXREPS is None)) and ((MAXTIME is None) or ((time.time() - startwallclocktime) < MAXTIME)):
if initfunc:
initfunc(n)
try:
tl = bench_it(func, n, profile=profile, profresults=profresults)
except BadMeasure, bme:
bmes.append(bme)
else:
tls.append(tl)
if len(tls) == 0:
raise Exception("Couldn't get any measurements within time limits or number-of-attempts limits. Maybe something is wrong with your clock? %s" % (bmes,))
sumtls = reduce(operator.__add__, tls)
mean = sumtls / len(tls)
tls.sort()
worst = tls[-1]
best = tls[0]
_assert(best > worstemptymeasure*MARGINOFERROR, "%s(n=%s) took %0.10f seconds, but we cannot measure times much less than about %0.10f seconds. Try a more time-consuming variant (such as higher n)." % (func, n, best, worstemptymeasure*MARGINOFERROR,))
m = len(tls)/4
if m > 0:
mthbest = tls[m-1]
mthworst = tls[-m]
else:
mthbest = tls[0]
mthworst = tls[-1]
# The +/-0 index is the best/worst, the +/-1 index is the 2nd-best/worst,
# etc, so we use mp1 to name it.
mp1 = m+1
res = {
'worst': mult(worst, UNITS_PER_SECOND)/n,
'best': mult(best, UNITS_PER_SECOND)/n,
'mp1': mp1,
'mth-best': mult(mthbest, UNITS_PER_SECOND)/n,
'mth-worst': mult(mthworst, UNITS_PER_SECOND)/n,
'mean': mult(mean, UNITS_PER_SECOND)/n,
'num': len(tls),
}
if not quiet:
print "best: %(best)#8.03e, %(mp1)3dth-best: %(mth-best)#8.03e, mean: %(mean)#8.03e, %(mp1)3dth-worst: %(mth-worst)#8.03e, worst: %(worst)#8.03e (of %(num)6d)" % res
return res
MARGINOFERROR = 10
worstemptymeasure = 0
class BadMeasure(Exception):
""" Either the clock wrapped (which happens with time.clock()) or
it went backwards (which happens with time.time() on rare
occasions), (or the code being measured completed before a single
clock tick). """
def __init__(self, startt, stopt, clock):
self.startt = startt
self.stopt = stopt
self.clock = clock
def __repr__(self):
return "<%s %s - %s (%s)>" % (self.__class__.__name__, self.startt, self.stopt, self.clock)
def do_nothing(n):
pass
def bench_it(func, n, runtime=0.1, profile=False, profresults="pyutil-benchutil.prof"):
"""
runtime is how many seconds to
"""
if profile:
st = clock()
cProfile.run('func(n)', profresults)
sto = clock()
else:
st = clock()
func(n)
sto = clock()
timeelapsed = sto - st
if timeelapsed <= 0:
        raise BadMeasure(st, sto, clock)
global worstemptymeasure
emsta = clock()
do_nothing(2**32)
emstop = clock()
empty = emstop - emsta
if empty > worstemptymeasure:
worstemptymeasure = empty
return timeelapsed
def bench(func, initfunc=None, TOPXP=21, MAXREPS=5, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", outputjson=False, jsonresultsfname="pyutil-benchutil-results.json", UNITS_PER_SECOND=1):
BSIZES = []
for i in range(TOPXP-6, TOPXP+1, 2):
n = int(2 ** i)
if n < 1:
n = 1
if BSIZES and n <= BSIZES[-1]:
n *= 2
BSIZES.append(n)
res = {}
for BSIZE in BSIZES:
print "N: %7d," % BSIZE,
r = rep_bench(func, BSIZE, initfunc=initfunc, MAXREPS=MAXREPS, MAXTIME=MAXTIME, profile=profile, profresults=profresults, UNITS_PER_SECOND=UNITS_PER_SECOND)
res[BSIZE] = r
#if outputjson:
# write_file(jsonresultsfname, json.dumps(res))
return res
def print_bench_footer(UNITS_PER_SECOND=1):
print "all results are in time units per N"
print "time units per second: %s; seconds per time unit: %s" % (UNITS_PER_SECOND, D(1)/UNITS_PER_SECOND)
| 9,210 | Python | .py | 209 | 39.449761 | 255 | 0.65328 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,405 | mathutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/mathutil.py |
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
A few commonly needed functions.
"""
import math
def div_ceil(n, d):
"""
The smallest integer k such that k*d >= n.
"""
return int((n//d) + (n%d != 0))
def next_multiple(n, k):
"""
The smallest multiple of k which is >= n. Note that if n is 0 then the
answer is 0.
"""
return div_ceil(n, k) * k
def pad_size(n, k):
"""
The smallest number that has to be added to n to equal a multiple of k.
"""
if n%k:
return k - n%k
else:
return 0
def is_power_of_k(n, k):
return k**int(math.log(n, k) + 0.5) == n
def next_power_of_k(n, k):
p = 1
while p < n:
p *= k
return p
def ave(l):
return sum(l) / len(l)
def log_ceil(n, b):
"""
The smallest integer k such that b^k >= n.
log_ceil(n, 2) is the number of bits needed to store any of n values, e.g.
the number of bits needed to store any of 128 possible values is 7.
"""
p = 1
k = 0
while p < n:
p *= b
k += 1
return k
def log_floor(n, b):
"""
The largest integer k such that b^k <= n.
"""
p = 1
k = 0
while p <= n:
p *= b
k += 1
return k - 1
def linear_fit_slope(ps):
"""
Single-independent-variable linear regression -- least squares method.
At least, I *think* this function computes that answer. I no longer
remember where I learned this trick and at the moment I can't prove to
myself that this is correct.
@param ps a sequence of tuples of (x, y)
"""
avex = ave([x for (x, y) in ps])
avey = ave([y for (x, y) in ps])
sxy = sum([ (x - avex) * (y - avey) for (x, y) in ps ])
sxx = sum([ (x - avex) ** 2 for (x, y) in ps ])
if sxx == 0:
return None
return sxy / sxx
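# A quick sanity check (illustrative, using made-up points): data lying exactly
# on y = 2*x + 1 should give a slope of 2, and a degenerate vertical set
# (sxx == 0) should give None.
if __name__ == '__main__':
    assert linear_fit_slope([(0, 1), (1, 3), (2, 5)]) == 2
    assert linear_fit_slope([(1, 1), (1, 2), (1, 3)]) is None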
def permute(l):
"""
Return all possible permutations of l.
@type l: sequence
    @rtype a list of sequences
"""
if len(l) == 1:
return [l,]
res = []
for i in range(len(l)):
l2 = list(l[:])
x = l2.pop(i)
for l3 in permute(l2):
l3.append(x)
res.append(l3)
return res
| 2,257 | Python | .py | 87 | 20.712644 | 78 | 0.559275 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,406 | test_dictutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/deprecated/test_dictutil.py |
#!/usr/bin/env python
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, sys, traceback, unittest
from pyutil.assertutil import _assert
from pyutil import dictutil
class EqButNotIs:
def __init__(self, x):
self.x = x
self.hash = int(random.randrange(0, 2**31))
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.x,)
def __hash__(self):
return self.hash
def __le__(self, other):
return self.x <= other
def __lt__(self, other):
return self.x < other
def __ge__(self, other):
return self.x >= other
def __gt__(self, other):
return self.x > other
def __ne__(self, other):
return self.x != other
def __eq__(self, other):
return self.x == other
class Testy(unittest.TestCase):
def _help_test_empty_dict(self, klass):
d1 = klass()
d2 = klass({})
self.failUnless(d1 == d2, "d1: %r, d2: %r" % (d1, d2,))
self.failUnless(len(d1) == 0)
self.failUnless(len(d2) == 0)
def _help_test_nonempty_dict(self, klass):
d1 = klass({'a': 1, 'b': "eggs", 3: "spam",})
d2 = klass({'a': 1, 'b': "eggs", 3: "spam",})
self.failUnless(d1 == d2)
self.failUnless(len(d1) == 3, "%s, %s" % (len(d1), d1,))
self.failUnless(len(d2) == 3)
def _help_test_eq_but_notis(self, klass):
d = klass({'a': 3, 'b': EqButNotIs(3), 'c': 3})
d.pop('b')
d.clear()
d['a'] = 3
d['b'] = EqButNotIs(3)
d['c'] = 3
d.pop('b')
d.clear()
d['b'] = EqButNotIs(3)
d['a'] = 3
d['c'] = 3
d.pop('b')
d.clear()
d['a'] = EqButNotIs(3)
d['c'] = 3
d['a'] = 3
d.clear()
fake3 = EqButNotIs(3)
fake7 = EqButNotIs(7)
d[fake3] = fake7
d[3] = 7
d[3] = 8
_assert(filter(lambda x: x is 8, d.itervalues()))
_assert(filter(lambda x: x is fake7, d.itervalues()))
_assert(not filter(lambda x: x is 7, d.itervalues())) # The real 7 should have been ejected by the d[3] = 8.
_assert(filter(lambda x: x is fake3, d.iterkeys()))
_assert(filter(lambda x: x is 3, d.iterkeys()))
d[fake3] = 8
d.clear()
d[3] = 7
fake3 = EqButNotIs(3)
fake7 = EqButNotIs(7)
d[fake3] = fake7
d[3] = 8
_assert(filter(lambda x: x is 8, d.itervalues()))
_assert(filter(lambda x: x is fake7, d.itervalues()))
_assert(not filter(lambda x: x is 7, d.itervalues())) # The real 7 should have been ejected by the d[3] = 8.
_assert(filter(lambda x: x is fake3, d.iterkeys()))
_assert(filter(lambda x: x is 3, d.iterkeys()))
d[fake3] = 8
def test_em(self):
for klass in (dictutil.UtilDict, dictutil.NumDict, dictutil.ValueOrderedDict,):
# print "name of class: ", klass
for helper in (self._help_test_empty_dict, self._help_test_nonempty_dict, self._help_test_eq_but_notis,):
# print "name of test func: ", helper
try:
helper(klass)
except:
(etype, evalue, realtb) = sys.exc_info()
traceback.print_exception(etype, evalue, realtb)
self.fail(evalue)
del realtb
def suite():
suite = unittest.makeSuite(Testy, 'test')
return suite
if __name__ == '__main__':
unittest.main()
| 3,612 | Python | .py | 97 | 28.587629 | 117 | 0.529311 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,407 | test_xor.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/deprecated/test_xor.py |
#!/usr/bin/env python
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# portions Copyright (c) 2001 Autonomous Zone Industries
# This file is part of pyutil; see README.rst for licensing terms.
#
import unittest
from pyutil.xor import xor
# unit tests
def _help_test(xf):
assert xf('\000', '\000') == '\000'
assert xf('\001', '\000') == '\001'
assert xf('\001', '\001') == '\000'
assert xf('\000\001', '\000\001') == '\000\000'
assert xf('\100\101', '\000\101') == '\100\000'
class Testy(unittest.TestCase):
def test_em(self):
for xorfunc in (xor.py_xor, xor.py_xor_simple, xor.xor,):
if callable(xorfunc):
# print "testing xorfunc ", xorfunc
_help_test(xorfunc)
| 745 | Python | .py | 20 | 32.25 | 67 | 0.617198 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,408 | test_picklesaver.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/deprecated/test_picklesaver.py |
#!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
            thingie.lazy_save() # Note: it was constructed with a save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
| 1,340 | Python | .py | 28 | 39.857143 | 107 | 0.653374 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,409 | test_assertutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_assertutil.py |
#!/usr/bin/env python
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# portions Copyright (c) 2001 Autonomous Zone Industries
# This file is part of pyutil; see README.rst for licensing terms.
# Python Standard Library modules
import unittest
from pyutil import assertutil
class Testy(unittest.TestCase):
def test_bad_precond(self):
adict=23
try:
assertutil.precondition(isinstance(adict, dict), "adict is required to be a dict.", 23, adict=adict, foo=None)
except AssertionError, le:
self.failUnless(le.args[0] == "precondition: 'adict is required to be a dict.' <type 'str'>, 23 <type 'int'>, foo: None <type 'NoneType'>, 'adict': 23 <type 'int'>")
| 710 | Python | .py | 14 | 45.714286 | 177 | 0.699422 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,410 | test_mathutil.py~ | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_mathutil.py~ |
#!/usr/bin/env python
import unittest
from pyutil import mathutil
from pyutil.assertutil import _assert
class MathUtilTestCase(unittest.TestCase):
def _help_test_is_power_of_k(self, k):
for i in range(2, 40):
_assert(mathutil.is_power_of_k(k**i, k), k, i)
def test_is_power_of_k(self):
for i in range(2, 5):
self._help_test_is_power_of_k(i)
def test_log_ceil(self):
f = mathutil.log_ceil
self.failUnlessEqual(f(1, 2), 0)
self.failUnlessEqual(f(1, 3), 0)
self.failUnlessEqual(f(2, 2), 1)
self.failUnlessEqual(f(2, 3), 1)
self.failUnlessEqual(f(3, 2), 2)
def test_log_floor(self):
f = mathutil.log_floor
self.failUnlessEqual(f(1, 2), 0)
self.failUnlessEqual(f(1, 3), 0)
self.failUnlessEqual(f(2, 2), 1)
self.failUnlessEqual(f(2, 3), 0)
self.failUnlessEqual(f(3, 2), 1)
def test_div_ceil(self):
f = mathutil.div_ceil
self.failUnlessEqual(f(0, 1), 0)
self.failUnlessEqual(f(0, 2), 0)
self.failUnlessEqual(f(0, 3), 0)
self.failUnlessEqual(f(1, 3), 1)
self.failUnlessEqual(f(2, 3), 1)
self.failUnlessEqual(f(3, 3), 1)
self.failUnlessEqual(f(4, 3), 2)
self.failUnlessEqual(f(5, 3), 2)
self.failUnlessEqual(f(6, 3), 2)
self.failUnlessEqual(f(7, 3), 3)
def test_next_multiple(self):
f = mathutil.next_multiple
self.failUnlessEqual(f(5, 1), 5)
self.failUnlessEqual(f(5, 2), 6)
self.failUnlessEqual(f(5, 3), 6)
self.failUnlessEqual(f(5, 4), 8)
self.failUnlessEqual(f(5, 5), 5)
self.failUnlessEqual(f(5, 6), 6)
self.failUnlessEqual(f(32, 1), 32)
self.failUnlessEqual(f(32, 2), 32)
self.failUnlessEqual(f(32, 3), 33)
self.failUnlessEqual(f(32, 4), 32)
self.failUnlessEqual(f(32, 5), 35)
self.failUnlessEqual(f(32, 6), 36)
self.failUnlessEqual(f(32, 7), 35)
self.failUnlessEqual(f(32, 8), 32)
self.failUnlessEqual(f(32, 9), 36)
self.failUnlessEqual(f(32, 10), 40)
self.failUnlessEqual(f(32, 11), 33)
self.failUnlessEqual(f(32, 12), 36)
self.failUnlessEqual(f(32, 13), 39)
self.failUnlessEqual(f(32, 14), 42)
self.failUnlessEqual(f(32, 15), 45)
self.failUnlessEqual(f(32, 16), 32)
self.failUnlessEqual(f(32, 17), 34)
self.failUnlessEqual(f(32, 18), 36)
self.failUnlessEqual(f(32, 589), 589)
def test_pad_size(self):
f = mathutil.pad_size
self.failUnlessEqual(f(0, 4), 0)
self.failUnlessEqual(f(1, 4), 3)
self.failUnlessEqual(f(2, 4), 2)
self.failUnlessEqual(f(3, 4), 1)
self.failUnlessEqual(f(4, 4), 0)
self.failUnlessEqual(f(5, 4), 3)
def test_is_power_of_k_part_2(self):
f = mathutil.is_power_of_k
for i in range(1, 100):
if i in (1, 2, 4, 8, 16, 32, 64):
self.failUnless(f(i, 2), "but %d *is* a power of 2" % i)
else:
self.failIf(f(i, 2), "but %d is *not* a power of 2" % i)
for i in range(1, 100):
if i in (1, 3, 9, 27, 81):
self.failUnless(f(i, 3), "but %d *is* a power of 3" % i)
else:
self.failIf(f(i, 3), "but %d is *not* a power of 3" % i)
def test_next_power_of_k(self):
f = mathutil.next_power_of_k
self.failUnlessEqual(f(0,2), 1)
self.failUnlessEqual(f(1,2), 1)
self.failUnlessEqual(f(2,2), 2)
self.failUnlessEqual(f(3,2), 4)
self.failUnlessEqual(f(4,2), 4)
for i in range(5, 8): self.failUnlessEqual(f(i,2), 8, "%d" % i)
for i in range(9, 16): self.failUnlessEqual(f(i,2), 16, "%d" % i)
for i in range(17, 32): self.failUnlessEqual(f(i,2), 32, "%d" % i)
for i in range(33, 64): self.failUnlessEqual(f(i,2), 64, "%d" % i)
for i in range(65, 100): self.failUnlessEqual(f(i,2), 128, "%d" % i)
self.failUnlessEqual(f(0,3), 1)
self.failUnlessEqual(f(1,3), 1)
self.failUnlessEqual(f(2,3), 3)
self.failUnlessEqual(f(3,3), 3)
for i in range(4, 9): self.failUnlessEqual(f(i,3), 9, "%d" % i)
for i in range(10, 27): self.failUnlessEqual(f(i,3), 27, "%d" % i)
for i in range(28, 81): self.failUnlessEqual(f(i,3), 81, "%d" % i)
for i in range(82, 200): self.failUnlessEqual(f(i,3), 243, "%d" % i)
def test_ave(self):
f = mathutil.ave
self.failUnlessEqual(f([1,2,3]), 2)
self.failUnlessEqual(f([0,0,0,4]), 1)
self.failUnlessAlmostEqual(f([0.0, 1.0, 1.0]), .666666666666)
def failUnlessEqualContents(self, a, b):
self.failUnlessEqual(sorted(a), sorted(b))
def test_permute(self):
f = mathutil.permute
self.failUnlessEqualContents(f([]), [])
self.failUnlessEqualContents(f([1]), [[1]])
self.failUnlessEqualContents(f([1,2]), [[1,2], [2,1]])
self.failUnlessEqualContents(f([1,2,3]),
[[1,2,3], [1,3,2],
[2,1,3], [2,3,1],
[3,1,2], [3,2,1]])
| 5,253 | Python | .py | 120 | 34.175 | 76 | 0.560571 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,411 | test_fileutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_fileutil.py |
import unittest
import os
from pyutil import fileutil
class FileUtil(unittest.TestCase):
def mkdir(self, basedir, path, mode=0777):
fn = os.path.join(basedir, path)
fileutil.make_dirs(fn, mode)
def touch(self, basedir, path, mode=None, data="touch\n"):
fn = os.path.join(basedir, path)
f = open(fn, "w")
f.write(data)
f.close()
if mode is not None:
os.chmod(fn, mode)
def test_du(self):
basedir = "util/FileUtil/test_du"
fileutil.make_dirs(basedir)
d = os.path.join(basedir, "space-consuming")
self.mkdir(d, "a/b")
self.touch(d, "a/b/1.txt", data="a"*10)
self.touch(d, "a/b/2.txt", data="b"*11)
self.mkdir(d, "a/c")
self.touch(d, "a/c/1.txt", data="c"*12)
self.touch(d, "a/c/2.txt", data="d"*13)
used = fileutil.du(basedir)
self.failUnlessEqual(10+11+12+13, used)
| 939 | Python | .py | 26 | 28.384615 | 62 | 0.582781 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,412 | test_verlib.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_verlib.py |
# -*- coding: utf-8 -*-
"""Tests for distutils.version."""
import unittest
import doctest
from pyutil.verlib import NormalizedVersion as V
from pyutil.verlib import IrrationalVersionError
from pyutil.verlib import suggest_normalized_version as suggest
class VersionTestCase(unittest.TestCase):
versions = ((V('1.0'), '1.0'),
(V('1.1'), '1.1'),
(V('1.2.3'), '1.2.3'),
(V('1.2'), '1.2'),
(V('1.2.3a4'), '1.2.3a4'),
(V('1.2c4'), '1.2c4'),
(V('1.2.3.4'), '1.2.3.4'),
(V('1.2.3.4.0b3'), '1.2.3.4b3'),
(V('1.2.0.0.0'), '1.2'),
(V('1.0.dev345'), '1.0.dev345'),
(V('1.0.post456.dev623'), '1.0.post456.dev623'))
def test_basic_versions(self):
for v, s in self.versions:
self.assertEquals(str(v), s)
def test_from_parts(self):
for v, s in self.versions:
v2 = V.from_parts(*v.parts)
self.assertEquals(v, v2)
self.assertEquals(str(v), str(v2))
def test_irrational_versions(self):
irrational = ('1', '1.2a', '1.2.3b', '1.02', '1.2a03',
'1.2a3.04', '1.2.dev.2', '1.2dev', '1.2.dev',
'1.2.dev2.post2', '1.2.post2.dev3.post4')
for s in irrational:
self.assertRaises(IrrationalVersionError, V, s)
def test_comparison(self):
r"""
>>> V('1.2.0') == '1.2'
Traceback (most recent call last):
...
TypeError: cannot compare NormalizedVersion and str
>>> V('1.2.0') == V('1.2')
True
>>> V('1.2.0') == V('1.2.3')
False
>>> V('1.2.0') < V('1.2.3')
True
>>> (V('1.0') > V('1.0b2'))
True
>>> (V('1.0') > V('1.0c2') > V('1.0c1') > V('1.0b2') > V('1.0b1')
... > V('1.0a2') > V('1.0a1'))
True
>>> (V('1.0.0') > V('1.0.0c2') > V('1.0.0c1') > V('1.0.0b2') > V('1.0.0b1')
... > V('1.0.0a2') > V('1.0.0a1'))
True
>>> V('1.0') < V('1.0.post456.dev623')
True
>>> V('1.0.post456.dev623') < V('1.0.post456') < V('1.0.post1234')
True
>>> (V('1.0a1')
... < V('1.0a2.dev456')
... < V('1.0a2')
... < V('1.0a2.1.dev456') # e.g. need to do a quick post release on 1.0a2
... < V('1.0a2.1')
... < V('1.0b1.dev456')
... < V('1.0b2')
... < V('1.0c1.dev456')
... < V('1.0c1')
... < V('1.0.dev7')
... < V('1.0.dev18')
... < V('1.0.dev456')
... < V('1.0.dev1234')
... < V('1.0')
... < V('1.0.post456.dev623') # development version of a post release
... < V('1.0.post456'))
True
"""
# must be a simpler way to call the docstrings
doctest.run_docstring_examples(self.test_comparison, globals(),
name='test_comparison')
def test_suggest_normalized_version(self):
self.assertEquals(suggest('1.0'), '1.0')
self.assertEquals(suggest('1.0-alpha1'), '1.0a1')
self.assertEquals(suggest('1.0c2'), '1.0c2')
self.assertEquals(suggest('walla walla washington'), None)
self.assertEquals(suggest('2.4c1'), '2.4c1')
# from setuptools
self.assertEquals(suggest('0.4a1.r10'), '0.4a1.post10')
self.assertEquals(suggest('0.7a1dev-r66608'), '0.7a1.dev66608')
self.assertEquals(suggest('0.6a9.dev-r41475'), '0.6a9.dev41475')
self.assertEquals(suggest('2.4preview1'), '2.4c1')
self.assertEquals(suggest('2.4pre1') , '2.4c1')
self.assertEquals(suggest('2.1-rc2'), '2.1c2')
# from pypi
self.assertEquals(suggest('0.1dev'), '0.1.dev0')
self.assertEquals(suggest('0.1.dev'), '0.1.dev0')
# we want to be able to parse Twisted
# development versions are like post releases in Twisted
self.assertEquals(suggest('9.0.0+r2363'), '9.0.0.post2363')
# pre-releases are using markers like "pre1"
self.assertEquals(suggest('9.0.0pre1'), '9.0.0c1')
# we want to be able to parse Tcl-TK
        # they use "p1" "p2" for post releases
self.assertEquals(suggest('1.4p1'), '1.4.post1')
| 4,315 | Python | .py | 102 | 32.362745 | 83 | 0.501194 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,413 | test_time_format.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_time_format.py |
#!/usr/bin/env python
"""\
Test time_format.py
"""
import os, time, unittest
from pyutil import time_format, increasing_timer
class TimeUtilTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_iso8601_utc_time(self, timer=increasing_timer.timer):
ts1 = time_format.iso_utc(timer.time() - 20)
ts2 = time_format.iso_utc()
assert ts1 < ts2, "failed: %s < %s" % (ts1, ts2)
ts3 = time_format.iso_utc(timer.time() + 20)
assert ts2 < ts3, "failed: %s < %s" % (ts2, ts3)
def test_iso_utc_time_to_localseconds(self, timer=increasing_timer.timer):
        # test three times of the year so that a DST problem would hopefully be triggered
        t1 = int(timer.time() - 365*24*3600/3)
        iso_utc_t1 = time_format.iso_utc(t1)
        t1_2 = time_format.iso_utc_time_to_seconds(iso_utc_t1)
        assert t1 == t1_2, (t1, t1_2)
        t1 = int(timer.time() - (365*24*3600*2/3))
iso_utc_t1 = time_format.iso_utc(t1)
t1_2 = time_format.iso_utc_time_to_seconds(iso_utc_t1)
self.failUnlessEqual(t1, t1_2)
t1 = int(timer.time())
iso_utc_t1 = time_format.iso_utc(t1)
t1_2 = time_format.iso_utc_time_to_seconds(iso_utc_t1)
self.failUnlessEqual(t1, t1_2)
def test_epoch(self):
return self._help_test_epoch()
def test_epoch_in_London(self):
# Europe/London is a particularly troublesome timezone. Nowadays, its
# offset from GMT is 0. But in 1970, its offset from GMT was 1.
# (Apparently in 1970 Britain had redefined standard time to be GMT+1
# and stayed in standard time all year round, whereas today
# Europe/London standard time is GMT and Europe/London Daylight
# Savings Time is GMT+1.) The current implementation of
# time_format.iso_utc_time_to_seconds() breaks if the timezone is
# Europe/London. (As soon as this unit test is done then I'll change
# that implementation to something that works even in this case...)
origtz = os.environ.get('TZ')
os.environ['TZ'] = "Europe/London"
if hasattr(time, 'tzset'):
time.tzset()
try:
return self._help_test_epoch()
finally:
if origtz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = origtz
if hasattr(time, 'tzset'):
time.tzset()
def _help_test_epoch(self):
origtzname = time.tzname
s = time_format.iso_utc_time_to_seconds("1970-01-01T00:00:01Z")
self.failUnlessEqual(s, 1.0)
s = time_format.iso_utc_time_to_seconds("1970-01-01_00:00:01Z")
self.failUnlessEqual(s, 1.0)
s = time_format.iso_utc_time_to_seconds("1970-01-01 00:00:01Z")
self.failUnlessEqual(s, 1.0)
self.failUnlessEqual(time_format.iso_utc(1.0), "1970-01-01 00:00:01Z")
self.failUnlessEqual(time_format.iso_utc(1.0, sep="_"),
"1970-01-01_00:00:01Z")
now = time.time()
isostr = time_format.iso_utc(now)
timestamp = time_format.iso_utc_time_to_seconds(isostr)
self.failUnlessEqual(int(timestamp), int(now))
def my_time():
return 1.0
self.failUnlessEqual(time_format.iso_utc(t=my_time),
"1970-01-01 00:00:01Z")
self.failUnlessRaises(ValueError,
time_format.iso_utc_time_to_seconds,
"invalid timestring")
s = time_format.iso_utc_time_to_seconds("1970-01-01 00:00:01.500Z")
self.failUnlessEqual(s, 1.5)
# Look for daylight-savings-related errors.
thatmomentinmarch = time_format.iso_utc_time_to_seconds("2009-03-20 21:49:02.226536Z")
self.failUnlessEqual(thatmomentinmarch, 1237585742.226536)
self.failUnlessEqual(origtzname, time.tzname)
| 3,967 | Python | .py | 84 | 37.380952 | 94 | 0.610853 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,414 | test_jsonutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_jsonutil.py |
#!/usr/bin/env python
import unittest
from decimal import Decimal
from pyutil import jsonutil
zero_point_one = Decimal("0.1")
class TestDecimal(unittest.TestCase):
def test_encode(self):
self.failUnlessEqual(jsonutil.dumps(zero_point_one), "0.1")
def test_decode(self):
self.failUnlessEqual(jsonutil.loads("0.1"), zero_point_one)
def test_no_exception_on_convergent_parse_float(self):
self.failUnlessEqual(jsonutil.loads("0.1", parse_float=Decimal), zero_point_one)
| 508 | Python | .py | 12 | 37.833333 | 88 | 0.740816 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,415 | test_mathutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_mathutil.py |
#!/usr/bin/env python
import unittest
from pyutil import mathutil
from pyutil.assertutil import _assert
class MathUtilTestCase(unittest.TestCase):
def _help_test_is_power_of_k(self, k):
for i in range(2, 40):
_assert(mathutil.is_power_of_k(k**i, k), k, i)
def test_is_power_of_k(self):
for i in range(2, 5):
self._help_test_is_power_of_k(i)
def test_log_ceil(self):
f = mathutil.log_ceil
self.failUnlessEqual(f(1, 2), 0)
self.failUnlessEqual(f(1, 3), 0)
self.failUnlessEqual(f(2, 2), 1)
self.failUnlessEqual(f(2, 3), 1)
self.failUnlessEqual(f(3, 2), 2)
def test_log_floor(self):
f = mathutil.log_floor
self.failUnlessEqual(f(1, 2), 0)
self.failUnlessEqual(f(1, 3), 0)
self.failUnlessEqual(f(2, 2), 1)
self.failUnlessEqual(f(2, 3), 0)
self.failUnlessEqual(f(3, 2), 1)
def test_div_ceil(self):
f = mathutil.div_ceil
self.failUnlessEqual(f(0, 1), 0)
self.failUnlessEqual(f(0, 2), 0)
self.failUnlessEqual(f(0, 3), 0)
self.failUnlessEqual(f(1, 3), 1)
self.failUnlessEqual(f(2, 3), 1)
self.failUnlessEqual(f(3, 3), 1)
self.failUnlessEqual(f(4, 3), 2)
self.failUnlessEqual(f(5, 3), 2)
self.failUnlessEqual(f(6, 3), 2)
self.failUnlessEqual(f(7, 3), 3)
self.failUnless(isinstance(f(0.0, 1), int))
self.failUnlessEqual(f(7.0, 3.0), 3)
self.failUnlessEqual(f(7, 3.0), 3)
self.failUnlessEqual(f(7.0, 3), 3)
self.failUnlessEqual(f(6.0, 3.0), 2)
self.failUnlessEqual(f(6.0, 3), 2)
self.failUnlessEqual(f(6, 3.0), 2)
def test_next_multiple(self):
f = mathutil.next_multiple
self.failUnlessEqual(f(5, 1), 5)
self.failUnlessEqual(f(5, 2), 6)
self.failUnlessEqual(f(5, 3), 6)
self.failUnlessEqual(f(5, 4), 8)
self.failUnlessEqual(f(5, 5), 5)
self.failUnlessEqual(f(5, 6), 6)
self.failUnlessEqual(f(32, 1), 32)
self.failUnlessEqual(f(32, 2), 32)
self.failUnlessEqual(f(32, 3), 33)
self.failUnlessEqual(f(32, 4), 32)
self.failUnlessEqual(f(32, 5), 35)
self.failUnlessEqual(f(32, 6), 36)
self.failUnlessEqual(f(32, 7), 35)
self.failUnlessEqual(f(32, 8), 32)
self.failUnlessEqual(f(32, 9), 36)
self.failUnlessEqual(f(32, 10), 40)
self.failUnlessEqual(f(32, 11), 33)
self.failUnlessEqual(f(32, 12), 36)
self.failUnlessEqual(f(32, 13), 39)
self.failUnlessEqual(f(32, 14), 42)
self.failUnlessEqual(f(32, 15), 45)
self.failUnlessEqual(f(32, 16), 32)
self.failUnlessEqual(f(32, 17), 34)
self.failUnlessEqual(f(32, 18), 36)
self.failUnlessEqual(f(32, 589), 589)
def test_pad_size(self):
f = mathutil.pad_size
self.failUnlessEqual(f(0, 4), 0)
self.failUnlessEqual(f(1, 4), 3)
self.failUnlessEqual(f(2, 4), 2)
self.failUnlessEqual(f(3, 4), 1)
self.failUnlessEqual(f(4, 4), 0)
self.failUnlessEqual(f(5, 4), 3)
def test_is_power_of_k_part_2(self):
f = mathutil.is_power_of_k
for i in range(1, 100):
if i in (1, 2, 4, 8, 16, 32, 64):
self.failUnless(f(i, 2), "but %d *is* a power of 2" % i)
else:
self.failIf(f(i, 2), "but %d is *not* a power of 2" % i)
for i in range(1, 100):
if i in (1, 3, 9, 27, 81):
self.failUnless(f(i, 3), "but %d *is* a power of 3" % i)
else:
self.failIf(f(i, 3), "but %d is *not* a power of 3" % i)
def test_next_power_of_k(self):
f = mathutil.next_power_of_k
self.failUnlessEqual(f(0,2), 1)
self.failUnlessEqual(f(1,2), 1)
self.failUnlessEqual(f(2,2), 2)
self.failUnlessEqual(f(3,2), 4)
self.failUnlessEqual(f(4,2), 4)
for i in range(5, 8): self.failUnlessEqual(f(i,2), 8, "%d" % i)
for i in range(9, 16): self.failUnlessEqual(f(i,2), 16, "%d" % i)
for i in range(17, 32): self.failUnlessEqual(f(i,2), 32, "%d" % i)
for i in range(33, 64): self.failUnlessEqual(f(i,2), 64, "%d" % i)
for i in range(65, 100): self.failUnlessEqual(f(i,2), 128, "%d" % i)
self.failUnlessEqual(f(0,3), 1)
self.failUnlessEqual(f(1,3), 1)
self.failUnlessEqual(f(2,3), 3)
self.failUnlessEqual(f(3,3), 3)
for i in range(4, 9): self.failUnlessEqual(f(i,3), 9, "%d" % i)
for i in range(10, 27): self.failUnlessEqual(f(i,3), 27, "%d" % i)
for i in range(28, 81): self.failUnlessEqual(f(i,3), 81, "%d" % i)
for i in range(82, 200): self.failUnlessEqual(f(i,3), 243, "%d" % i)
def test_ave(self):
f = mathutil.ave
self.failUnlessEqual(f([1,2,3]), 2)
self.failUnlessEqual(f([0,0,0,4]), 1)
self.failUnlessAlmostEqual(f([0.0, 1.0, 1.0]), .666666666666)
def failUnlessEqualContents(self, a, b):
self.failUnlessEqual(sorted(a), sorted(b))
def test_permute(self):
f = mathutil.permute
self.failUnlessEqualContents(f([]), [])
self.failUnlessEqualContents(f([1]), [[1]])
self.failUnlessEqualContents(f([1,2]), [[1,2], [2,1]])
self.failUnlessEqualContents(f([1,2,3]),
[[1,2,3], [1,3,2],
[2,1,3], [2,3,1],
[3,1,2], [3,2,1]])
| 5,567
|
Python
|
.py
| 127
| 34.267717
| 76
| 0.561475
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,416
|
test_iputil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_iputil.py
|
#!/usr/bin/env python
try:
from twisted.trial import unittest
unittest # http://divmod.org/trac/ticket/1499
except ImportError, le:
print "Skipping test_iputil since it requires Twisted and Twisted could not be imported: %s" % (le,)
else:
from pyutil import iputil, testutil
import re
DOTTED_QUAD_RE=re.compile("^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")
class ListAddresses(testutil.SignalMixin):
def test_get_local_ip_for(self):
addr = iputil.get_local_ip_for('127.0.0.1')
self.failUnless(DOTTED_QUAD_RE.match(addr))
def test_list_async(self):
try:
from twisted.trial import unittest
unittest # http://divmod.org/trac/ticket/1499
from pyutil import iputil
except ImportError, le:
raise unittest.SkipTest("iputil could not be imported (probably because its dependency, Twisted, is not installed). %s" % (le,))
d = iputil.get_local_addresses_async()
def _check(addresses):
self.failUnless(len(addresses) >= 1) # always have localhost
self.failUnless("127.0.0.1" in addresses, addresses)
d.addCallbacks(_check)
return d
test_list_async.timeout=2
| 1,287
|
Python
|
.py
| 28
| 36.071429
| 145
| 0.620415
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,417
|
test_version_class.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/test_version_class.py
|
import unittest
from pyutil import version_class
V = version_class.Version
class T(unittest.TestCase):
def test_rc_regex_rejects_rc_suffix(self):
self.failUnlessRaises(ValueError, V, '9.9.9rc9')
def test_rc_regex_rejects_trailing_garbage(self):
self.failUnlessRaises(ValueError, V, '9.9.9c9HEYTHISISNTRIGHT')
def test_comparisons(self):
self.failUnless(V('1.0') < V('1.1'))
self.failUnless(V('1.0a1') < V('1.0'))
self.failUnless(V('1.0a1') < V('1.0b1'))
self.failUnless(V('1.0b1') < V('1.0c1'))
self.failUnless(V('1.0a1') < V('1.0a1-r99'))
self.failUnlessEqual(V('1.0a1.post987'), V('1.0a1-r987'))
self.failUnlessEqual(str(V('1.0a1.post999')), '1.0.0a1-r999')
self.failUnlessEqual(str(V('1.0a1-r999')), '1.0.0a1-r999')
self.failIfEqual(V('1.0a1'), V('1.0a1-r987'))
| 870
|
Python
|
.py
| 18
| 41.5
| 71
| 0.634002
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,418
|
test_separators.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_separators.py
|
import textwrap
from unittest import TestCase
from pyutil import jsonutil as json
class TestSeparators(TestCase):
def test_separators(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
[
"blorpie"
] ,
[
"whoops"
] ,
[] ,
"d-shtaeou" ,
"d-nthiouh" ,
"i-vhbjkhnth" ,
{
"nifty" : 87
} ,
{
"field" : "yes" ,
"morefield" : false
}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent=2, sort_keys=True, separators=(' ,', ' : '))
h1 = json.loads(d1)
h2 = json.loads(d2)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(d2, expect)
| 952
|
Python
|
.py
| 34
| 18.382353
| 82
| 0.457143
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,419
|
test_fail.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_fail.py
|
from unittest import TestCase
from pyutil import jsonutil as json
# Fri Dec 30 18:57:26 2005
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'["Illegal backslash escape: \\\'"]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://code.google.com/p/simplejson/issues/detail?id=3
u'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail(TestCase):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
json.loads(doc)
continue
try:
json.loads(doc)
except ValueError:
pass
else:
self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
| 2,908
|
Python
|
.py
| 72
| 34.333333
| 78
| 0.62041
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,420
|
test_speedups.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_speedups.py
|
from twisted.trial.unittest import SkipTest, TestCase
from pyutil.jsonutil import decoder
from pyutil.jsonutil import encoder
class TestSpeedups(TestCase):
def test_scanstring(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(decoder.scanstring.__module__, "simplejson._speedups")
self.assert_(decoder.scanstring is decoder.c_scanstring)
def test_encode_basestring_ascii(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(encoder.encode_basestring_ascii.__module__, "simplejson._speedups")
self.assert_(encoder.encode_basestring_ascii is
encoder.c_encode_basestring_ascii)
| 835
|
Python
|
.py
| 15
| 47.4
| 93
| 0.723378
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,421
|
test_indent.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_indent.py
|
from unittest import TestCase
from pyutil import jsonutil as json
import textwrap
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shtaeou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"field": "yes",
"morefield": false
}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
self.assertEquals(h1, h)
self.assertEquals(h2, h)
self.assertEquals(d2, expect)
| 930
|
Python
|
.py
| 34
| 17.764706
| 82
| 0.458943
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,422
|
test_dump.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_dump.py
|
from unittest import TestCase
from cStringIO import StringIO
from pyutil import jsonutil as json
class TestDump(TestCase):
def test_dump(self):
sio = StringIO()
json.dump({}, sio)
self.assertEquals(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEquals(json.dumps({}), '{}')
| 325
|
Python
|
.py
| 10
| 27.2
| 47
| 0.669872
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,423
|
test_unicode.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_unicode.py
|
from unittest import TestCase
from pyutil import jsonutil as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"%s"' % (u,))
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["%s"]' % (u,))
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
js = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(js), u)
| 1,973
|
Python
|
.py
| 45
| 35.688889
| 77
| 0.600626
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,424
|
test_recursion.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_recursion.py
|
from unittest import TestCase
from pyutil import jsonutil as json
class JSONTestObject:
pass
class RecursiveJSONEncoder(json.JSONEncoder):
recurse = False
def default(self, o):
if o is JSONTestObject:
if self.recurse:
return [JSONTestObject]
else:
return 'JSONTestObject'
        return json.JSONEncoder.default(self, o)
class TestRecursion(TestCase):
def test_listrecursion(self):
x = []
x.append(x)
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on list recursion")
x = []
y = [x]
x.append(y)
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on alternating list recursion")
y = []
x = [y, y]
# ensure that the marker is cleared
json.dumps(x)
def test_dictrecursion(self):
x = {}
x["test"] = x
try:
json.dumps(x)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on dict recursion")
x = {}
{"a": x, "b": x}
# ensure that the marker is cleared
json.dumps(x)
def test_defaultrecursion(self):
enc = RecursiveJSONEncoder()
self.assertEquals(enc.encode(JSONTestObject), '"JSONTestObject"')
enc.recurse = True
try:
enc.encode(JSONTestObject)
except ValueError:
pass
else:
self.fail("didn't raise ValueError on default recursion")
| 1,686
|
Python
|
.py
| 59
| 19.305085
| 78
| 0.556516
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,425
|
test_pass2.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_pass2.py
|
from unittest import TestCase
from pyutil import jsonutil as json
# from http://json.org/JSON_checker/test/pass2.json
JSON = r'''
[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
'''
class TestPass2(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
| 397
|
Python
|
.py
| 12
| 28.916667
| 52
| 0.626632
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,426
|
test_pass1.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_pass1.py
|
from unittest import TestCase
from pyutil import jsonutil as json
# from http://json.org/JSON_checker/test/pass1.json
JSON = r'''
[
"JSON Test Pattern pass1",
{"object with 1 member":["array with 1 element"]},
{},
[],
-42,
true,
false,
null,
{
"integer": 1234567890,
"real": -9876.543210,
"e": 0.123456789e-12,
"E": 1.234567890E+34,
"": 23456789012E666,
"zero": 0,
"one": 1,
"space": " ",
"quote": "\"",
"backslash": "\\",
"controls": "\b\f\n\r\t",
"slash": "/ & \/",
"alpha": "abcdefghijklmnopqrstuvwyz",
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
"digit": "0123456789",
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
"true": true,
"false": false,
"null": null,
"array":[ ],
"object":{ },
"address": "50 St. James Street",
"url": "http://www.JSON.org/",
"comment": "// /* <!-- --",
"# -- --> */": " ",
" s p a c e d " :[1,2 , 3
,
4 , 5 , 6 ,7 ],
"compact": [1,2,3,4,5,6,7],
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
"quotes": "" \u0022 %22 0x22 034 "",
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
: "A key can be any string"
},
0.5 ,98.6
,
99.44
,
1066
,"rosebud"]
'''
class TestPass1(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
self.failUnless("2.3456789012E+676" in json.dumps(res, allow_nan=False))
| 1,814
|
Python
|
.py
| 63
| 22.333333
| 93
| 0.483075
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,427
|
test_encode_basestring_ascii.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_encode_basestring_ascii.py
|
from twisted.trial.unittest import SkipTest, TestCase
from pyutil.jsonutil import encoder
CASES = [
(u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
(u'controls', '"controls"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
(u' s p a c e d ', '" s p a c e d "'),
(u'\U0001d120', '"\\ud834\\udd20"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
('\xce\xb1\xce\xa9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
(u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
def test_py_encode_basestring_ascii(self):
self._test_encode_basestring_ascii(encoder.py_encode_basestring_ascii)
def test_c_encode_basestring_ascii(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self._test_encode_basestring_ascii(encoder.c_encode_basestring_ascii)
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
for input_string, expect in CASES:
result = encode_basestring_ascii(input_string)
self.assertEquals(result, expect)
| 1,806
|
Python
|
.py
| 31
| 52.451613
| 199
| 0.571186
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,428
|
test_decode.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_decode.py
|
import decimal
from unittest import TestCase
from pyutil import jsonutil as json
class TestDecode(TestCase):
def test_decimal(self):
rval = json.loads('1.1', parse_float=decimal.Decimal)
self.assert_(isinstance(rval, decimal.Decimal))
self.assertEquals(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = json.loads('1', parse_int=float)
self.assert_(isinstance(rval, float))
self.assertEquals(rval, 1.0)
| 471
|
Python
|
.py
| 12
| 33.333333
| 61
| 0.695175
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,429
|
test_default.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_default.py
|
from unittest import TestCase
from pyutil import jsonutil as json
class TestDefault(TestCase):
def test_default(self):
self.assertEquals(
json.dumps(type, default=repr),
json.dumps(repr(type)))
| 232
|
Python
|
.py
| 7
| 26.714286
| 43
| 0.695067
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,430
|
test_float.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_float.py
|
import math
from unittest import TestCase
from pyutil import jsonutil as json
class TestFloat(TestCase):
def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100]:
self.assertEquals(float(json.dumps(num)), num)
| 272
|
Python
|
.py
| 7
| 34.142857
| 78
| 0.722433
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,431
|
test_pass3.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/current/json_tests/test_pass3.py
|
from unittest import TestCase
from pyutil import jsonutil as json
# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
"JSON Test Pattern pass3": {
"The outermost value": "must be an object or array.",
"In this test": "It is an object."
}
}
'''
class TestPass3(TestCase):
def test_parse(self):
# test in/out equivalence and parsing
res = json.loads(JSON)
out = json.dumps(res)
self.assertEquals(res, json.loads(out))
| 493
|
Python
|
.py
| 17
| 24.294118
| 61
| 0.655391
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,432
|
test_strutil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/out_of_shape/test_strutil.py
|
#!/usr/bin/env python
# Copyright (c) 2004-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import unittest
from pyutil.assertutil import _assert
from pyutil import strutil
class Teststrutil(unittest.TestCase):
def test_short_input(self):
self.failUnless(strutil.pop_trailing_newlines("\r\n") == "")
self.failUnless(strutil.pop_trailing_newlines("\r") == "")
self.failUnless(strutil.pop_trailing_newlines("x\r\n") == "x")
self.failUnless(strutil.pop_trailing_newlines("x\r") == "x")
def test_split(self):
_assert(strutil.split_on_newlines("x\r\ny") == ["x", "y",], strutil.split_on_newlines("x\r\ny"))
_assert(strutil.split_on_newlines("x\r\ny\r\n") == ["x", "y", '',], strutil.split_on_newlines("x\r\ny\r\n"))
_assert(strutil.split_on_newlines("x\n\ny\n\n") == ["x", '', "y", '', '',], strutil.split_on_newlines("x\n\ny\n\n"))
def test_commonprefix(self):
_assert(strutil.commonprefix(["foo","foobarooo", "foosplat",]) == 'foo', strutil.commonprefix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonprefix(["foo","afoobarooo", "foosplat",]) == '', strutil.commonprefix(["foo","afoobarooo", "foosplat",]))
def test_commonsuffix(self):
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplat",]) == '', strutil.commonsuffix(["foo","foobarooo", "foosplat",]))
_assert(strutil.commonsuffix(["foo","foobarooo", "foosplato",]) == 'o', strutil.commonsuffix(["foo","foobarooo", "foosplato",]))
_assert(strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]) == 'foo', strutil.commonsuffix(["foo","foobarooofoo", "foosplatofoo",]))
| 1,713
|
Python
|
.py
| 23
| 68.304348
| 150
| 0.649436
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,433
|
test_cache.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/out_of_shape/test_cache.py
|
#!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, unittest
from pyutil.assertutil import _assert
from pyutil.humanreadable import hr
from pyutil import memutil
from pyutil import cache
class Bencher:
def __init__(self, klass, MAXREPS=2**8, MAXTIME=5):
print klass
self.klass = klass
self.MAXREPS = MAXREPS
self.MAXTIME = MAXTIME
self.d = {}
self.lrun = None
def _generic_benchmarking_init(self, n):
self.d.clear()
self.lrun = self.klass(maxsize=n)
for i in range(n):
self.d[i] = i
self.lrun[n+i] = n+i
def _benchmark_init(self, n):
MAXSIZE=n/2
d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
assert len(d2) == min(len(self.d), MAXSIZE)
return True
def _benchmark_update(self, n):
MAXSIZE=n/2
d2 = self.klass(maxsize=MAXSIZE)
assert len(d2) == 0
d2.update(self.d)
assert len(d2) == min(len(self.d), MAXSIZE)
return True
def _benchmark_insert(self, n):
MAXSIZE=n/2
d2 = self.klass(maxsize=MAXSIZE)
assert len(d2) == 0
for k, v, in self.d.iteritems():
d2[k] = v
assert len(d2) == min(len(self.d), MAXSIZE)
return True
def _benchmark_init_and_popitem(self, n):
MAXSIZE=n/2
d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
assert len(d2) == min(len(self.d), MAXSIZE)
for i in range(len(d2), 0, -1):
assert len(d2) == i
d2.popitem()
return True
def _benchmark_init_and_has_key_and_del(self, n):
MAXSIZE=n/2
d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
assert len(d2) == min(len(self.d), MAXSIZE)
for k in self.d.iterkeys():
if d2.has_key(k):
del d2[k]
return True
def _benchmark_init_and_remove(self, n):
MAXSIZE=n/2
d2 = self.klass(initialdata=self.d, maxsize=MAXSIZE)
assert len(d2) == min(len(self.d), MAXSIZE)
for k in self.d.iterkeys():
d2.remove(k, strictkey=False)
return True
def bench(self, BSIZES=(128, 250, 2048, 5000, 2**13, 2**20,)):
from pyutil import benchutil
funcs = ("_benchmark_insert", "_benchmark_init_and_has_key_and_del", "_benchmark_init_and_remove", "_benchmark_init_and_popitem", "_benchmark_update", "_benchmark_init",)
max = 0
for func in funcs:
if len(func) > max:
max = len(func)
for func in funcs:
print func + " " * (max + 1 - len(func))
for BSIZE in BSIZES:
f = getattr(self, func)
benchutil.rep_bench(f, BSIZE, self._generic_benchmarking_init, MAXREPS=self.MAXREPS, MAXTIME=self.MAXTIME)
def quick_bench():
Bencher(cache.LRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15, 2**16,))
Bencher(cache.LinkedListLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
Bencher(cache.SmallLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
def slow_bench():
Bencher(cache.LRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(cache.LinkedListLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(cache.SmallLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 17)])
MUCHADDINGSIZE=2**4
MUCHADDINGNUM = 2**4
# The following parameters are for testing for memory leakage.
MIN_SLOPE = 512.0 # If it leaks less than 512.0 bytes per iteration, then it's probably just some kind of noise from the interpreter or something...
SAMPLES = 2**5
# MIN_SLOPE is high because SAMPLES is low, which is because taking a statistically useful number of samples takes too long.
# For a *good* test, turn samples up as high as you can stand (maybe 2**10) and set MIN_SLOPE to about 1.0.
# For a *really* good test, add a variance measure to memutil.measure_mem_leakage(), and only consider it to be leaking if the slope is > 0.1 *and* is a "pretty good" fit for the data.
# MIN_SLOPE = 1.0
# SAMPLES = 2**10
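# A rough sketch of how the tests below use these knobs (an assumption drawn from the
# surrounding comments and assertions, not from memutil's documented API):
#   slope = memutil.measure_mem_leakage(func, SAMPLES, iterspersample=2**0)
#   leaking = slope > MIN_SLOPE   # slope is read as bytes of growth per invocation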
class Testy(unittest.TestCase):
def _test_empty_lookup(self, d) :
self.failUnless(d.get('spam') is None)
def _test_key_error(self, C) :
d = C()
try:
d['spam']
self.fail(d)
except KeyError :
pass
def _test_insert_and_get(self, d) :
d.insert("spam", "eggs")
d["spam2"] = "eggs2"
self.failUnless(d.get("spam") == "eggs", str(d))
self.failUnless(d.get("spam2") == "eggs2")
self.failUnless(d["spam"] == "eggs")
self.failUnless(d["spam2"] == "eggs2")
def _test_insert_and_remove(self, d):
d.insert('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
d['spam'] = "eggs"
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
del d['spam']
self.failUnless(not d.has_key('spam'))
def _test_setdefault(self, d):
d.setdefault('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
def _test_extracted_bound_method(self, d):
insmeth = d.insert
insmeth('spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_extracted_unbound_method(self, d):
insumeth = d.__class__.insert
insumeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_unbound_method(self, C, d):
umeth = C.insert
umeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_clear(self, d):
d[11] = 11
d._assert_invariants()
self.failUnless(len(d) == 1)
d.clear()
d._assert_invariants()
self.failUnless(len(d) == 0)
def _test_update(self, d):
self.failUnless(d._assert_invariants())
d['b'] = 99
self.failUnless(d._assert_invariants())
d2={ 'a': 0, 'b': 1, 'c': 2,}
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('c') == 2)
self.failUnless(d._assert_invariants())
def _test_popitem(self, C):
c = C({"a": 1})
res = c.popitem()
_assert(res == ("a", 1,), C, c, res)
self.failUnless(res == ("a", 1,))
def _test_iterate_items(self, C):
c = C({"a": 1})
i = c.iteritems()
x = i.next()
self.failUnless(x == ("a", 1,))
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_keys(self, C):
c = C({"a": 1})
i = c.iterkeys()
x = i.next()
self.failUnless(x == "a")
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_values(self, C):
c = C({"a": 1})
i = c.itervalues()
x = i.next()
self.failUnless(x == 1)
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_LRU_much_adding_some_removing(self, C):
c = C(maxsize=MUCHADDINGSIZE)
for i in range(MUCHADDINGNUM):
c[i] = i
if (i % 400) == 0:
k = random.choice(c.keys())
del c[k]
for i in range(MUCHADDINGSIZE):
c[i] = i
self.failUnless(len(c) == MUCHADDINGSIZE)
def _test_LRU_1(self, C):
c = C(maxsize=10)
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
def _test_LRU_2(self, C):
c = C(maxsize=10)
c[11] = 11
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
def _test_LRU_3(self, C):
c = C(maxsize=10)
c[11] = 11
c._assert_invariants()
c[11] = 12
c._assert_invariants()
c[11] = 13
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 14
c._assert_invariants()
c[11] = 15
c._assert_invariants()
c[11] = 16
c._assert_invariants()
def _test_LRU_full(self, C):
c = C(maxsize=10)
c._assert_invariants()
for i in xrange(11):
c._assert_invariants()
c[i] = i
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(10 in c.values(), c.values())
self.failUnless(0 not in c.values())
del c[1]
c._assert_invariants()
self.failUnless(1 not in c.values())
self.failUnless(len(c) == 9)
c[11] = 11
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 not in c.values())
self.failUnless(11 in c.values())
del c[11]
c._assert_invariants()
c[11] = 11
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 not in c.values())
self.failUnless(11 in c.values())
c[11] = 11
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 not in c.values())
self.failUnless(11 in c.values())
for i in xrange(200):
c[i] = i
c._assert_invariants()
self.failUnless(199 in c.values())
self.failUnless(190 in c.values())
def _test_LRU_has_key(self, C):
c = C(maxsize=10)
c._assert_invariants()
for i in xrange(11):
c._assert_invariants()
c[i] = i
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(10 in c.values())
self.failUnless(0 not in c.values())
# c.has_key(1) # this touches `1' and makes it fresher so that it will live and `2' will die next time we overfill.
c[1] = 1 # this touches `1' and makes it fresher so that it will live and `2' will die next time we overfill.
c._assert_invariants()
c[99] = 99
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failUnless(not 2 in c.values())
self.failUnless(99 in c.values())
def _test_LRU_not_overfull_on_idempotent_add(self, C):
c = C(maxsize=10)
for i in xrange(11):
c[i] = i
c[1] = "spam"
# Now 1 is the freshest, so 2 is the next one that would be removed *if* we went over limit.
c[3] = "eggs"
self.failUnless(c.has_key(2))
self.failUnless(len(c) == 10)
c._assert_invariants()
def _test_LRU_overflow_on_update(self, C):
d = C(maxsize=10)
self.failUnless(d._assert_invariants())
d2 = {}
for i in range(12):
d2[i] = i
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(len(d) == 10)
def _test_LRU_overflow_on_init(self, C):
d2 = {}
for i in range(12):
d2[i] = i
d = C(d2, maxsize=10)
self.failUnless(d._assert_invariants())
self.failUnless(len(d) == 10)
def _test_em(self):
for klass in (cache.LRUCache, cache.SmallLRUCache,):
for testfunc in (self._test_empty_lookup, self._test_insert_and_get, self._test_insert_and_remove, self._test_extracted_bound_method, self._test_extracted_unbound_method, self._test_clear, self._test_update, self._test_setdefault,):
testfunc(klass())
for testfunc in (self._test_popitem, self._test_iterate_items, self._test_iterate_keys, self._test_iterate_values, self._test_key_error, ):
testfunc(klass)
self._test_unbound_method(klass, klass())
for klass in (cache.LRUCache, cache.SmallLRUCache,):
for testfunc in (self._test_LRU_1, self._test_LRU_2, self._test_LRU_3, self._test_LRU_full, self._test_LRU_has_key, self._test_LRU_not_overfull_on_idempotent_add, self._test_LRU_overflow_on_update, self._test_LRU_overflow_on_init,):
testfunc(klass)
def test_em(self):
self._test_em()
def _mem_test_LRU_much_adding_some_removing(self):
for klass in (cache.LRUCache, cache.SmallLRUCache,):
return self._test_LRU_much_adding_some_removing(klass)
def test_mem_leakage(self):
try:
self._test_mem_leakage()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage # This test takes too long.
def _test_mem_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_mem_leakage_much_adding_some_removing(self):
try:
self._test_mem_leakage_much_adding_some_removing()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage_much_adding_some_removing # This test takes too long.
def _test_mem_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_mem_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self._mem_test_LRU_much_adding_some_removing, "%0.3f" % slope,))
def test_obj_leakage(self):
self._test_obj_leakage()
del test_obj_leakage # This test takes too long.
def _test_obj_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_obj_leakage_much_adding_some_removing(self):
self._test_obj_leakage_much_adding_some_removing()
del test_obj_leakage_much_adding_some_removing # This test takes too long.
def _test_obj_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_obj_leakage(self._mem_test_LRU_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self._mem_test_LRU_much_adding_some_removing, "%0.3f" % slope,))
| 16,980
|
Python
|
.py
| 390
| 34.702564
| 244
| 0.592823
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,434
|
test_odict.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/out_of_shape/test_odict.py
|
#!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, unittest
from pyutil.humanreadable import hr
from pyutil import memutil
from pyutil import odict
class Bencher:
def __init__(self, klass, MAXREPS=2**8, MAXTIME=5):
print klass
self.klass = klass
self.MAXREPS = MAXREPS
self.MAXTIME = MAXTIME
self.d = {}
self.lrun = None
def _generic_benchmarking_init(self, n):
self.d.clear()
self.lrun = self.klass()
for i in range(n):
self.d[i] = i
self.lrun[n+i] = n+i
def _benchmark_init(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
return True
def _benchmark_update(self, n):
d2 = self.klass()
assert len(d2) == 0
d2.update(self.d)
assert len(d2) == len(self.d)
return True
def _benchmark_insert(self, n):
d2 = self.klass()
assert len(d2) == 0
for k, v, in self.d.iteritems():
d2[k] = v
assert len(d2) == len(self.d)
return True
def _benchmark_init_and_popitem(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for i in range(len(d2), 0, -1):
assert len(d2) == i
d2.popitem()
return True
def _benchmark_init_and_has_key_and_del(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for k in self.d.iterkeys():
if d2.has_key(k):
del d2[k]
return True
def _benchmark_init_and_remove(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for k in self.d.iterkeys():
d2.remove(k, strictkey=False)
return True
def bench(self, BSIZES=(128, 250, 2048, 5000, 2**13, 2**20,)):
from pyutil import benchutil
funcs = ("_benchmark_insert", "_benchmark_init_and_has_key_and_del", "_benchmark_init_and_remove", "_benchmark_init_and_popitem", "_benchmark_update", "_benchmark_init",)
max = 0
for func in funcs:
if len(func) > max:
max = len(func)
for func in funcs:
print func + " " * (max + 1 - len(func))
for BSIZE in BSIZES:
f = getattr(self, func)
benchutil.rep_bench(f, BSIZE, self._generic_benchmarking_init, MAXREPS=self.MAXREPS, MAXTIME=self.MAXTIME)
def quick_bench():
Bencher(odict.LRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15, 2**16,))
Bencher(odict.LinkedListLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
Bencher(odict.SmallLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
def slow_bench():
Bencher(odict.LRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(odict.LinkedListLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(odict.SmallLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 17)])
MUCHADDINGSIZE=2**4
# The following parameters are for testing for memory leakage.
MIN_SLOPE = 512.0 # If it leaks less than 512.0 bytes per iteration, then it's probably just some kind of noise from the interpreter or something...
SAMPLES = 2**5
# MIN_SLOPE is high because SAMPLES is low, which is because taking a statistically useful number of samples takes too long.
# For a *good* test, turn samples up as high as you can stand (maybe 2**10) and set MIN_SLOPE to about 1.0.
# For a *really* good test, add a variance measure to memutil.measure_mem_leakage(), and only consider it to be leaking if the slope is > 0.1 *and* is a "pretty good" fit for the data.
# MIN_SLOPE = 1.0
# SAMPLES = 2**10
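# Concretely (illustrative arithmetic only): with SAMPLES = 2**5 and MIN_SLOPE = 512.0,
# a run whose memory grows by about 16 KiB over the 32 samples (roughly 512 bytes per
# iteration) sits right at the threshold, and anything steeper is reported as a leak.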
class Testy(unittest.TestCase):
def _test_empty_lookup(self, d) :
self.failUnless(d.get('spam') is None)
def _test_key_error(self, C) :
d = C()
try:
d['spam']
self.fail(d)
except KeyError :
pass
def _test_insert_and_get_and_items(self, d) :
d.insert("spam", "eggs")
d["spam2"] = "eggs2"
self.failUnless(d.get("spam") == "eggs", str(d))
self.failUnless(d.get("spam2") == "eggs2")
self.failUnless(d["spam"] == "eggs")
self.failUnless(d["spam2"] == "eggs2")
self.failUnlessEqual(d.items(), [("spam", "eggs"), ("spam2", "eggs2")], d)
def _test_move_to_most_recent(self, d) :
d.insert("spam", "eggs")
d["spam2"] = "eggs2"
self.failUnless(d.get("spam") == "eggs", str(d))
self.failUnless(d.get("spam2") == "eggs2")
self.failUnless(d["spam"] == "eggs")
self.failUnless(d["spam2"] == "eggs2")
self.failUnlessEqual(d.items(), [("spam", "eggs"), ("spam2", "eggs2")])
d.move_to_most_recent("spam")
self.failUnlessEqual(d.items(), [("spam2", "eggs2"), ("spam", "eggs")])
def _test_insert_and_remove(self, d):
d.insert('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
self.failUnlessEqual(d.items(), [("spam", "eggs")])
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
d['spam'] = "eggsy"
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggsy")
self.failUnless(d['spam'] == "eggsy")
self.failUnlessEqual(d.items(), [("spam", "eggsy")])
del d['spam']
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
def _test_setdefault(self, d):
d.setdefault('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
self.failUnlessEqual(d.items(), [("spam", "eggs")])
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
def _test_extracted_bound_method(self, d):
insmeth = d.insert
insmeth('spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_extracted_unbound_method(self, d):
insumeth = d.__class__.insert
insumeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_unbound_method(self, C, d):
umeth = C.insert
umeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_clear(self, d):
d[11] = 11
d._assert_invariants()
self.failUnless(len(d) == 1)
d.clear()
d._assert_invariants()
self.failUnless(len(d) == 0)
self.failUnlessEqual(d.items(), [])
def _test_update_from_dict(self, d):
self.failUnless(d._assert_invariants())
d['b'] = 99
self.failUnless(d._assert_invariants())
d2={ 'a': 0, 'b': 1, 'c': 2,}
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('c') == 2)
self.failUnless(d._assert_invariants())
def _test_update_from_odict(self, d):
self.failUnless(d._assert_invariants())
d['b'] = 99
self.failUnless(d._assert_invariants())
d2 = odict.OrderedDict()
d2['a'] = 0
d2['b'] = 1
d2['c'] = 2
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('c') == 2)
self.failUnless(d._assert_invariants())
self.failUnlessEqual(d.items(), [("b", 1), ("a", 0), ("c", 2)])
def _test_popitem(self, C):
c = C({"a": 1})
res = c.popitem()
self.failUnlessEqual(res, ("a", 1,))
c["a"] = 1
c["b"] = 2
res = c.popitem()
self.failUnlessEqual(res, ("b", 2,))
def _test_pop(self, C):
c = C({"a": 1})
res = c.pop()
self.failUnlessEqual(res, "a")
c["a"] = 1
c["b"] = 2
res = c.pop()
self.failUnlessEqual(res, "b")
def _test_iterate_items(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.iteritems()
x = i.next()
self.failUnlessEqual(x, ("a", 1,))
x = i.next()
self.failUnlessEqual(x, ("b", 2,))
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_keys(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.iterkeys()
x = i.next()
self.failUnlessEqual(x, "a")
x = i.next()
self.failUnlessEqual(x, "b")
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_values(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.itervalues()
x = i.next()
self.failUnless(x == 1)
x = i.next()
self.failUnless(x == 2)
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_much_adding_some_removing(self, C):
c = C()
for i in range(MUCHADDINGSIZE):
c[i] = i
if (i % 4) == 0:
k = random.choice(c.keys())
del c[k]
for i in range(MUCHADDINGSIZE):
c[i] = i
self.failUnlessEqual(len(c), MUCHADDINGSIZE)
def _test_1(self, C):
c = C()
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
def _test_2(self, C):
c = C()
c[11] = 11
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
def _test_3(self, C):
c = C()
c[11] = 11
c._assert_invariants()
c[11] = 12
c._assert_invariants()
c[11] = 13
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 14
c._assert_invariants()
c[11] = 15
c._assert_invariants()
c[11] = 16
c._assert_invariants()
def _test_has_key(self, C):
c = C()
c._assert_invariants()
for i in xrange(11):
c._assert_invariants()
c[i] = i
c._assert_invariants()
del c[0]
self.failUnless(len(c) == 10)
self.failUnless(10 in c.values())
self.failUnless(0 not in c.values())
c.has_key(1) # this touches `1' but does not make it fresher so that it will get popped next time we pop.
c[1] = 1 # this touches `1' but does not make it fresher so that it will get popped.
c._assert_invariants()
x = c.pop()
self.failUnlessEqual(x, 10)
c[99] = 99
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failUnless(2 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failIf(10 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failUnless(99 in c.values())
def _test_em(self):
for klass in (odict.OrderedDict,):
for testfunc in (self._test_empty_lookup, self._test_insert_and_get_and_items, self._test_insert_and_remove, self._test_extracted_bound_method, self._test_extracted_unbound_method, self._test_clear, self._test_update_from_dict, self._test_update_from_odict, self._test_setdefault,):
testfunc(klass())
for testfunc in (self._test_pop, self._test_popitem, self._test_iterate_items, self._test_iterate_keys, self._test_iterate_values, self._test_key_error, ):
testfunc(klass)
self._test_unbound_method(klass, klass())
for klass in (odict.OrderedDict,):
for testfunc in (self._test_1, self._test_2, self._test_3, self._test_has_key,):
testfunc(klass)
def test_em(self):
self._test_em()
def _mem_test_much_adding_some_removing(self):
for klass in (odict.LRUCache, odict.SmallLRUCache,):
return self._test_much_adding_some_removing(klass)
def test_mem_leakage(self):
try:
self._test_mem_leakage()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage # This test takes too long.
def _test_mem_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_mem_leakage_much_adding_some_removing(self):
try:
self._test_mem_leakage_much_adding_some_removing()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage_much_adding_some_removing # This test takes too long.
def _test_mem_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_mem_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self._mem_test_much_adding_some_removing, "%0.3f" % slope,))
def test_obj_leakage(self):
self._test_obj_leakage()
del test_obj_leakage # This test takes too long.
def _test_obj_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_obj_leakage_much_adding_some_removing(self):
self._test_obj_leakage_much_adding_some_removing()
del test_obj_leakage_much_adding_some_removing # This test takes too long.
def _test_obj_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_obj_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self._mem_test_much_adding_some_removing, "%0.3f" % slope,))
| 16,600
|
Python
|
.py
| 378
| 35.119048
| 294
| 0.582586
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,435
|
test_zlibutil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/test/out_of_shape/test_zlibutil.py
|
#!/usr/bin/env python
import unittest
from pyutil import randutil
from pyutil import zlibutil
class Accumulator:
def __init__(self):
self.buf = ''
def write(self, str):
self.buf += str
def make_decomp(realdecomp):
def decomp(str, maxlen, maxmem):
d = Accumulator()
realdecomp(str, d, maxlen, maxmem)
return d.buf
return decomp
def genrandstr(strlen):
return randutil.insecurerandstr(strlen)
def genbombstr(strlen):
return '0' * strlen
MAXMEM=65*2**20
class ZlibTestCase(unittest.TestCase):
def _help_test(self, genstring, decomp, strlen):
s = genstring(strlen)
cs = zlibutil.zlib.compress(s)
s2 = decomp(cs, maxlen=strlen, maxmem=strlen*2**3 + zlibutil.MINMAXMEM)
self.failUnless(s == s2)
s2 = decomp(cs, maxlen=strlen, maxmem=strlen*2**6 + zlibutil.MINMAXMEM)
self.failUnless(s == s2)
self.failUnlessRaises(zlibutil.TooBigError, decomp, cs, maxlen=strlen-1, maxmem=strlen*2**3 + zlibutil.MINMAXMEM)
def _help_test_inplace_minmaxmem(self, genstring, decomp, strlen):
s = genstring(strlen)
cs = zlibutil.zlib.compress(s)
s2 = decomp(cs, maxlen=strlen, maxmem=zlibutil.MINMAXMEM)
self.failUnless(s == s2)
self.failUnlessRaises(zlibutil.TooBigError, decomp, cs, maxlen=strlen-1, maxmem=zlibutil.MINMAXMEM)
def _help_test_inplace(self, genstring, decomp, strlen):
# ### XXX self.failUnlessRaises(UnsafeDecompressError, decomp, zlib.compress(genstring(strlen)), maxlen=strlen, maxmem=strlen-1)
s = genstring(strlen)
cs = zlibutil.zlib.compress(s)
s2 = decomp(cs, maxlen=strlen, maxmem=max(strlen*2**3, zlibutil.MINMAXMEM))
self.failUnless(s == s2)
s2 = decomp(cs, maxlen=strlen, maxmem=max(strlen*2**6, zlibutil.MINMAXMEM))
self.failUnless(s == s2)
s2 = decomp(cs, maxlen=strlen, maxmem=max(strlen-1, zlibutil.MINMAXMEM))
self.failUnless(s == s2)
s2 = decomp(cs, maxlen=strlen, maxmem=max(strlen/2, zlibutil.MINMAXMEM))
self.failUnless(s == s2)
self.failUnlessRaises(zlibutil.TooBigError, decomp, cs, maxlen=strlen-1, maxmem=max(strlen*2**3, zlibutil.MINMAXMEM))
def testem(self):
# for strlen in [2**1, 2**2, 2**10, 2**14, 2**21]: # a *real* test ought to include 2**21, which exercises different cases re: maxmem. But it takes too long.
for strlen in [2, 3, 4, 99,]:
# print "strlen: %s\n" % (strlen,)
for decomp in [zlibutil.decompress, make_decomp(zlibutil.decompress_to_fileobj), make_decomp(zlibutil.decompress_to_spool),]:
# print "decomp: %s\n" % (decomp,)
for genstring in [genrandstr, genbombstr,]:
# print "genstring: %s\n" % (genstring,)
self._help_test(genstring, decomp, strlen)
for decomp in [make_decomp(zlibutil.decompress_to_spool),]:
# print "decomp: %s\n" % (decomp,)
for genstring in [genrandstr, genbombstr,]:
# print "genstring: %s\n" % (genstring,)
self._help_test_inplace(genstring, decomp, strlen)
self._help_test_inplace_minmaxmem(genstring, decomp, strlen)
| 3,283
|
Python
|
.py
| 63
| 43.142857
| 166
| 0.642233
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,436
|
bench_json.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/benchmarks/bench_json.py
|
import json
import random
from decimal import Decimal
from pyutil import benchutil, jsonutil, randutil
l = []
s = None
def data_strings(N):
assert isinstance(N, int), (N, type(N))
del l[:]
for i in range(N):
l.append(repr(randutil.insecurerandstr(4)))
global s
s = json.dumps(l)
def data_Decimals(N):
del l[:]
for i in range(N):
l.append(Decimal(str(random.randrange(0, 1000000000)))/random.randrange(1, 1000000000))
global s
s = jsonutil.dumps(l)
def data_floats(N):
del l[:]
for i in range(N):
l.append(float(random.randrange(0, 1000000000))/random.randrange(1, 1000000000))
global s
s = json.dumps(l)
def je(N):
return json.dumps(l)
def ue(N):
return jsonutil.dumps(l)
def jd(N):
return json.loads(s)
def ud(N):
return jsonutil.loads(s)
for i in (data_strings, data_floats, data_Decimals):
for e in (ud, ue, jd, je):
# for e in (ue,):
print "i: %s, e: %s" % (i, e,)
try:
benchutil.bench(e, initfunc=i, TOPXP=5, profile=False)
        except TypeError, err:
            print "skipping due to %s" % (err,)
benchutil.print_bench_footer()
| 1,210
|
Python
|
.py
| 44
| 22.886364
| 95
| 0.644156
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,437
|
bench_xor.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/benchmarks/bench_xor.py
|
#!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import hmac, sys, random
from pyutil.assertutil import _assert
from pyutil.xor import xor
from pyutil import benchfunc
from pyutil import randutil
SFUNCS = [hmac._strxor, xor.py_xor,]
SFNAMES = ["hmac", "pyutil py",]
inputs = {}
def _help_init_string(N):
global inputs
if not inputs.has_key(N):
inputs[N] = [randutil.insecurerandstr(N), randutil.insecurerandstr(N),]
def _help_make_bench_xor(f):
def g(n):
assert inputs.has_key(n)
_assert(isinstance(inputs[n][0], str), "Required to be a string.", inputs[n][0])
assert len(inputs[n][0]) == n
_assert(isinstance(inputs[n][1], str), "Required to be a string.", inputs[n][1])
assert len(inputs[n][1]) == n
for SF in SFUNCS:
assert f(inputs[n][0], inputs[n][1]) == SF(inputs[n][0], inputs[n][1])
return f(inputs[n][0], inputs[n][1])
return g
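# Note: the wrapper built by _help_make_bench_xor() cross-checks f's output against every
# implementation in SFUNCS before returning it, so a wrong xor result aborts the benchmark
# run immediately instead of silently producing timings for a broken function.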
def bench(SETSIZES=[2**x for x in range(0, 22, 3)]):
random.seed(0)
if len(SFUNCS) <= 1: print ""
maxnamel = max(map(len, SFNAMES))
for SETSIZE in SETSIZES:
seed = random.random()
# print "seed: ", seed
random.seed(seed)
i = 0
if len(SFUNCS) > 1: print ""
for FUNC in SFUNCS:
funcname = SFNAMES[i] + " " * (maxnamel - len(SFNAMES[i]))
print "%s" % funcname,
sys.stdout.flush()
benchfunc.rep_bench(_help_make_bench_xor(FUNC), SETSIZE, initfunc=_help_init_string, MAXREPS=2**9, MAXTIME=30)
i = i + 1
bench()
| 1,658
|
Python
|
.py
| 43
| 32.302326
| 122
| 0.613084
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,438
|
xor.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/xor/xor.py
|
# Copyright © 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
What word has three letters and an 'x' in it?
Not that one, silly.
"""
import warnings
import array, operator
from pyutil.assertutil import precondition
def py_xor(str1, str2):
warnings.warn("deprecated", DeprecationWarning)
precondition(len(str1) == len(str2), "str1 and str2 are required to be of the same length.", str1=str1, str2=str2)
if len(str1)%4 == 0:
a1 = array.array('i', str1)
a2 = array.array('i', str2)
for i in range(len(a1)):
a2[i] = a2[i]^a1[i]
elif len(str1)%2 == 0:
a1 = array.array('h', str1)
a2 = array.array('h', str2)
for i in range(len(a1)):
a2[i] = a2[i]^a1[i]
else:
a1 = array.array('c', str1)
a2 = array.array('c', str2)
for i in range(len(a1)):
a2[i] = chr(ord(a2[i])^ord(a1[i]))
return a2.tostring()
def py_xor_simple(str1, str2):
"""
Benchmarks show that this is the same speed as py_xor() for small strings
and much slower for large strings, so don't use it. --Zooko 2002-04-29
"""
warnings.warn("deprecated", DeprecationWarning)
precondition(len(str1) == len(str2), "str1 and str2 are required to be of the same length.", str1=str1, str2=str2)
return ''.join(map(chr, map(operator.__xor__, map(ord, str1), map(ord, str2))))
# Now make "xor.xor()" be the best xor we've got:
xor = py_xor
# for unit tests, see pyutil/test/test_xor.py. For benchmarks, see pyutil/test/bench_xor.py.
| 1,604
|
Python
|
.py
| 39
| 35.641026
| 118
| 0.636774
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,439
|
randcookie.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/randcookie.py
|
#!/usr/bin/env python
import os, sys
import zbase32
def main():
if len(sys.argv) > 1:
l = int(sys.argv[1])
else:
l = 64
bl = (l + 7) / 8
s = zbase32.b2a_l(os.urandom(bl), l)
# insert some hyphens for easier memorization
chs = 3 + (len(s)%8==0)
i = chs
while i < len(s)-1:
s = s[:i] + "-" + s[i:]
i += 1
chs = 7-chs
i += chs
print s
if __name__ == '__main__':
main()
| 463
|
Python
|
.py
| 21
| 16.428571
| 49
| 0.48037
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,440
|
try_decoding.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/try_decoding.py
|
#!/usr/bin/env python
import binascii, codecs, encodings, locale, os, sys, zlib
import argparse
def listcodecs(dir):
names = []
for filename in os.listdir(dir):
if filename[-3:] != '.py':
continue
name = filename[:-3]
# Check whether we've found a true codec
try:
codecs.lookup(name)
except LookupError:
# Codec not found
continue
except Exception:
# Probably an error from importing the codec; still it's
            # a valid codec name
pass
names.append(name)
return names
def listem():
return listcodecs(encodings.__path__[0])
def _canonical_encoding(encoding):
if encoding is None:
encoding = 'utf-8'
encoding = encoding.lower()
if encoding == "cp65001":
encoding = 'utf-8'
elif encoding == "us-ascii" or encoding == "646":
encoding = 'ascii'
# sometimes Python returns an encoding name that it doesn't support for conversion
# fail early if this happens
try:
u"test".encode(encoding)
except (LookupError, AttributeError):
raise AssertionError("The character encoding '%s' is not supported for conversion." % (encoding,))
return encoding
def get_output_encoding():
return _canonical_encoding(sys.stdout.encoding or locale.getpreferredencoding())
def get_argv_encoding():
if sys.platform == 'win32':
# Unicode arguments are not supported on Windows yet; see Tahoe-LAFS tickets #565 and #1074.
return 'ascii'
else:
return get_output_encoding()
output_encoding = get_output_encoding()
argv_encoding = get_argv_encoding()
def type_unicode(argstr):
return argstr.decode(argv_encoding)
def main():
parser = argparse.ArgumentParser(prog="try_decoding", description="Try decoding some bytes with all sorts of different codecs and print out any that decode.")
parser.add_argument('inputfile', help='file to decode or "-" for stdin', type=argparse.FileType('rb'), metavar='INF')
parser.add_argument('-t', '--target', help='unicode string to match against (if any)', type=type_unicode, metavar='T')
parser.add_argument('-a', '--accept-bytes', help='include codecs which return bytes instead of returning unicode (they will be marked with "!!!" in the output)', action='store_true')
args = parser.parse_args()
inb = args.inputfile.read()
for codec in listem():
try:
u = inb.decode(codec)
except (UnicodeDecodeError, IOError, TypeError, IndexError, UnicodeError, ValueError, zlib.error, binascii.Error):
pass
else:
if isinstance(u, unicode):
if args.target:
if args.target != u:
continue
print "%19s" % codec,
print ':',
print u.encode(output_encoding)
else:
if not args.accept_bytes:
continue
print "%19s" % codec,
print "!!! ",
print ':',
print u
if __name__ == "__main__":
main()
| 3,163
|
Python
|
.py
| 79
| 31.43038
| 186
| 0.615911
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,441
|
unsort.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/unsort.py
|
#!/usr/bin/env python
# randomize the lines of stdin or a file
import random, sys
def main():
if len(sys.argv) > 1:
fname = sys.argv[1]
inf = open(fname, 'r')
else:
inf = sys.stdin
lines = inf.readlines()
random.shuffle(lines)
sys.stdout.writelines(lines)
if __name__ == '__main__':
main()
| 343
|
Python
|
.py
| 14
| 19.714286
| 40
| 0.601852
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,442
|
passphrase.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/passphrase.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse, math, random
from pyutil.mathutil import div_ceil
from pkg_resources import resource_stream
def recursive_subset_sum(entropy_needed, wordlists):
# Pick a minimalish set of numbers which sum to at least
# entropy_needed.
# Okay now what's the smallest number of words which will give us
# at least this much entropy?
entropy_of_biggest_wordlist = wordlists[-1][0]
assert isinstance(entropy_of_biggest_wordlist, float), wordlists[-1]
needed_words = div_ceil(entropy_needed, entropy_of_biggest_wordlist)
# How much entropy do we need from each word?
needed_entropy_per_word = entropy_needed / needed_words
# What's the smallest wordlist that offers at least this much
# entropy per word?
for (wlentropy, wl) in wordlists:
if wlentropy >= needed_entropy_per_word:
break
assert wlentropy >= needed_entropy_per_word, (wlentropy, needed_entropy_per_word)
result = [(wlentropy, wl)]
# If we need more, recurse...
if wlentropy < entropy_needed:
rest = recursive_subset_sum(entropy_needed - wlentropy, wordlists)
result.extend(rest)
return result
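# A concrete (hypothetical) example of the arithmetic above: with a wordlist of
# 2048 words, each word contributes log2(2048) = 11 bits, so reaching a 77-bit
# target needs ceil(77 / 11) = 7 words drawn from a list at least that large.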
def gen_passphrase(entropy, allwords):
maxlenwords = []
i = 2 # The smallest set is words of length 1 or 2.
words = [x for x in allwords if len(x) <= i]
maxlenwords.append((math.log(len(words), 2), words))
while len(maxlenwords[-1][1]) < len(allwords):
i += 1
words = [x for x in allwords if len(x) <= i]
maxlenwords.append((math.log(len(words), 2), words))
sr = random.SystemRandom()
passphrase = []
wordlists_to_use = recursive_subset_sum(entropy, maxlenwords)
passphraseentropy = 0.0
for (wle, wl) in wordlists_to_use:
passphrase.append(sr.choice(wl))
passphraseentropy += wle
return (u".".join(passphrase), passphraseentropy)
def main():
parser = argparse.ArgumentParser(prog="chbs", description="Create a random passphrase by picking a few random words.")
parser.add_argument('-d', '--dictionary', help="what file to read a list of words from (or omit this option to use chbs's bundled dictionary)", type=argparse.FileType('rU'), metavar="DICT")
parser.add_argument('bits', help="how many bits of entropy minimum", type=float, metavar="BITS")
args = parser.parse_args()
dicti = args.dictionary
if not dicti:
dicti = resource_stream('pyutil', 'data/wordlist.txt')
allwords = set([x.decode('utf-8').strip().lower() for x in dicti.readlines()])
passphrase, bits = gen_passphrase(args.bits, allwords)
print u"Your new password is: '%s'. It is worth about %s bits." % (passphrase, bits)
| 2,747
|
Python
|
.py
| 55
| 44.218182
| 193
| 0.690691
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,443
|
lines.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/lines.py
|
#!/usr/bin/env python
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
from pyutil import lineutil
import sys
def main():
if len(sys.argv) > 1 and "-s" in sys.argv[1:]:
strip = True
sys.argv.remove("-s")
else:
strip = False
if len(sys.argv) > 1 and "-n" in sys.argv[1:]:
nobak = True
sys.argv.remove("-n")
else:
nobak = False
if len(sys.argv) > 1:
pipe = False
else:
pipe = True
if pipe:
lineutil.lineify_fileobjs(sys.stdin, sys.stdout)
else:
for fn in sys.argv[1:]:
lineutil.lineify_file(fn, strip, nobak)
if __name__ == '__main__':
main()
| 744
|
Python
|
.py
| 27
| 21.481481
| 67
| 0.583333
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,444
|
time_comparisons.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/time_comparisons.py
|
# If you run this file, it will make up a random secret and then crack it
# using timing information from a string comparison function. Maybe--if it
# gets lucky. It takes a long, long time to work.
# So, the thing I need help with is statistics. The way this thing works is
# extremely stupid. Suppose you want to know which function invocation takes
# longer: comparison(secret, guess1) or comparison(secret, guess2)?
# If you can correctly determine that one of them takes longer than the
# other, then (a) you can use that to crack the secret, and (b) this is a
# unit test demonstrating that comparison() is not timing-safe.
# So how does this script do it? Extremely stupidly. First of all, you can't
# reliably measure tiny times, so to measure the time that a function takes,
# we run that function 10,000 times in a row, measure how long that took, and
# divide by 10,000 to estimate how long any one run would have taken.
# Then, we do that 100 times in a row, and take the fastest of 100 runs. (I
# also experimented with taking the mean of 100 runs instead of the fastest.)
# Then, we just say whichever comparison took longer (for its fastest run of
# 100 runs of 10,000 executions per run) is the one we think is a closer
# guess to the secret.
# Now I would *like* to think that there is some kind of statistical analysis
# more sophisticated than "take the slowest of the fastest of 100 runs of
# 10,000 executions". Such improved statistical analysis would hopefully be
# able to answer these two questions:
# 1. Are these two function calls -- comparison(secret, guess1) and
# comparison(secret, guess2) -- drawing from the same distribution or
# different? If you can answer that question, then you've answered the
# question of whether "comparison" is timing-safe or not.
# And, this would also allow the cracker to recover from a false step. If it
# incorrectly decides that the prefix of the secret is ABCX, when the real
# secret is ABCD, then after that every next step it takes will be the
# "drawing from the same distribution" kind -- any difference between ABCXQ
# and ABCXR will be just due to noise, since both are equally far from the
# correct answer, which starts with ABCD. If it could realize that there is
# no real difference between the distributions, then it could back-track and
# recover.
# 2. Given the ability to measure, noisily, the time taken by comparison(),
# how can you most efficiently figure out which guess takes the longest? If
# you can do that more efficiently, you can crack secrets more efficiently.
# The script takes two arguments. The first is how many symbols in the
# secret, and the second is how big the alphabet from which the symbols are
# drawn. To prove that this script can *ever* work, try passing length 5 and
# alphabet size 2. Also try editing the code to let it use sillycomp. That'll
# definitely make it work. If you can improve this script (as per the thing
# above about "needing better statistics") to the degree that it can crack a
# secret with length 32 and alphabet size 256, then that would be awesome.
# See the result of this commandline:
# $ python -c 'import time_comparisons ; time_comparisons.print_measurements()'
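# A minimal, self-contained sketch of the timing estimate described above
# (the name estimate_ns_per_call is made up here and is not part of pyutil):
# run the function many times per sample, keep the fastest of many samples,
# and report a per-call estimate in nanoseconds.
import time
def estimate_ns_per_call(f, inner=10**4, reps=10**2):
    best = None
    for _ in xrange(reps):
        t0 = time.time()
        for _ in xrange(inner):
            f()
        elapsed = time.time() - t0
        if best is None or elapsed < best:
            best = elapsed
    return (best / inner) * 10**9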
from pyutil import benchutil
import hashlib, random, os
from decimal import Decimal
D=Decimal
p1 = 'a'*32
p1a = 'a'*32
p2 = 'a'*31+'b' # close, but no cigar
p3 = 'b'*32 # different in the first byte
def randstr(n, alphabetsize):
alphabet = [ chr(x) for x in range(alphabetsize) ]
return ''.join([random.choice(alphabet) for i in range(n)])
def compare(n, f, a, b):
for i in xrange(n):
f(a, b)
def eqeqcomp(a, b):
return a == b
def sillycomp(a, b):
# This exposes a lot of information in its timing about how many leading bytes match.
for i in range(len(a)):
if a[i] != b[i]:
return False
for i in xrange(2**9):
pass
if len(a) == len(b):
return True
else:
return False
def hashcomp(a, b):
    # Brian Warner invented this for Tahoe-LAFS. It seems like it should be very safe against timing leakage of any kind, because of the inclusion of a new random randkey every time. Note that exposing the value of the hash (i.e. the output of md5(randkey+secret)) is *not* a security problem. You can post that on your web site and let all attackers have it, no problem. (Provided that the value of "randkey" remains secret.)
randkey = os.urandom(32)
return hashlib.md5(randkey+ a).digest() == hashlib.md5(randkey+b).digest()
def xorcomp(a, b):
# This appears to be the most popular timing-insensitive string comparison function. I'm not completely sure it is fully timing-insensitive. (There are all sorts of funny things inside Python, such as caching of integer objects < 100...)
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def print_measurements():
N=10**4
REPS=10**2
print "all times are in nanoseconds per comparison (in scientific notation)"
print
for comparator in [eqeqcomp, hashcomp, xorcomp, sillycomp]:
print "using comparator ", comparator
# for (a, b, desc) in [(p1, p1a, 'same'), (p1, p2, 'close'), (p1, p3, 'far')]:
trials = [(p1, p1a, 'same'), (p1, p2, 'close'), (p1, p3, 'far')]
random.shuffle(trials)
for (a, b, desc) in trials:
print "comparing two strings that are %s to each other" % (desc,)
def f(n):
compare(n, comparator, a, b)
benchutil.rep_bench(f, N, UNITS_PER_SECOND=10**9, MAXREPS=REPS)
print
def try_to_crack_secret(cracker, comparator, secretlen, alphabetsize):
secret = randstr(secretlen, alphabetsize)
def test_guess(x):
return comparator(secret, x)
print "Giving cracker %s a chance to figure out the secret. Don't tell him, but the secret is %s. Whenever he makes a guess, we'll use comparator %s to decide if his guess is right ..." % (cracker, secret.encode('hex'), comparator,)
guess = cracker(test_guess, secretlen, alphabetsize)
print "Cracker %s guessed %r" % (cracker, guess,)
if guess == secret:
print "HE FIGURED IT OUT!? HOW DID HE DO THAT."
else:
print "HAHA. Our secret is safe."
def byte_at_a_time_cracker(test_guess, secretlen, alphabetsize):
# If we were cleverer, we'd add some backtracking behaviour where, if we can't find any x such that ABCx stands out from the crowd as taking longer than all the other ABCy's, then we start to think that we've taken a wrong step and we go back to trying ABy's. Make sense? But we're not that clever. Once we take a step, we don't backtrack.
print
guess=[]
while len(guess) < secretlen:
best_next_byte = None
best_next_byte_time = None
# For each possible byte...
for next_byte in range(alphabetsize):
c = chr(next_byte)
# Construct a guess with our best candidate so far...
candidate_guess = guess[:]
# Plus that byte...
candidate_guess.append(c)
s = ''.join(candidate_guess)
# Plus random bytes...
s += os.urandom(32 - len(s))
# And see how long it takes the test_guess to consider it...
def f(n):
for i in xrange(n):
test_guess(s)
times = benchutil.rep_bench(f, 10**7, MAXREPS=10**3, quiet=True)
fastesttime = times['mean']
print "%s..."%(c.encode('hex'),),
if best_next_byte is None or fastesttime > best_next_byte_time:
print "new candidate for slowest next-char: %s, took: %s" % (c.encode('hex'), fastesttime,),
best_next_byte_time = fastesttime
best_next_byte = c
# Okay we've tried all possible next bytes. Our guess is this one (the one that took longest to be tested by test_guess):
guess.append(best_next_byte)
print "SLOWEST next-char %s! Current guess at secret: %s" % (best_next_byte.encode('hex'), ''.join(guess).encode('hex'),)
guess = ''.join(guess)
print "Our guess for the secret: %r" % (guess,)
return guess
if __name__ == '__main__':
import sys
secretlen = int(sys.argv[1])
alphabetsize = int(sys.argv[2])
if alphabetsize > 256:
raise Exception("We assume we can fit one element of the alphabet into a byte.")
print "secretlen: %d, alphabetsize: %d" % (secretlen, alphabetsize,)
# try_to_crack_secret(byte_at_a_time_cracker, sillycomp, secretlen, alphabetsize)
try_to_crack_secret(byte_at_a_time_cracker, eqeqcomp, secretlen, alphabetsize)
| 8,758
|
Python
|
.py
| 155
| 50.690323
| 425
| 0.688034
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,445
|
tailx.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/tailx.py
|
#!/usr/bin/env python
# output all but the first N lines of a file
# Allen Short and Jp Calderone wrote this coool version:
import itertools, sys
def main():
K = int(sys.argv[1])
if len(sys.argv) > 2:
fname = sys.argv[2]
inf = open(fname, 'r')
else:
inf = sys.stdin
sys.stdout.writelines(itertools.islice(inf, K, None))
if __name__ == '__main__':
main()
# thus replacing my dumb version:
# # from the Python Standard Library
# import sys
#
# i = K
# for l in sys.stdin.readlines():
# if i:
# i -= 1
# else:
# print l,
| 593
|
Python
|
.py
| 24
| 21.583333
| 57
| 0.609236
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,446
|
randfile.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/randfile.py
|
#!/usr/bin/env python
import os, sys
from random import randrange
import argparse
def main():
CHUNKSIZE=2**20
parser = argparse.ArgumentParser(prog="randfile", description="Create a file of pseudorandom bytes (not cryptographically secure).")
parser.add_argument('-b', '--num-bytes', help="how many bytes to write per output file (default 20)", type=int, metavar="BYTES", default=20)
    parser.add_argument('-f', '--output-file-prefix', help="prefix of the name of the output file to create and fill with random bytes (default \"randfile\")", metavar="OUTFILEPRE", default="randfile")
parser.add_argument('-n', '--num-files', help="how many files to write (default 1)", type=int, metavar="FILES", default=1)
parser.add_argument('-F', '--force', help='overwrite any file already present', action='store_true')
parser.add_argument('-p', '--progress', help='write an "x" for every file completed and a "." for every %d bytes' % CHUNKSIZE, action='store_true')
args = parser.parse_args()
for i in xrange(args.num_files):
bytesleft = args.num_bytes
outputfname = args.output_file_prefix + "." + str(i)
if args.force:
f = open(outputfname, "wb")
else:
flags = os.O_WRONLY|os.O_CREAT|os.O_EXCL | (hasattr(os, 'O_BINARY') and os.O_BINARY)
fd = os.open(outputfname, flags)
f = os.fdopen(fd, "wb")
zs = [0]*CHUNKSIZE
ts = [256]*CHUNKSIZE
while bytesleft >= CHUNKSIZE:
f.write(''.join(map(chr, map(randrange, zs, ts))))
bytesleft -= CHUNKSIZE
if args.progress:
sys.stdout.write(".") ; sys.stdout.flush()
zs = [0]*bytesleft
ts = [256]*bytesleft
f.write(''.join(map(chr, map(randrange, zs, ts))))
if args.progress:
sys.stdout.write("x") ; sys.stdout.flush()
if __name__ == "__main__":
main()
| 1,948
|
Python
|
.py
| 36
| 45.527778
| 200
| 0.6248
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,447
|
memdump2dot.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/memdump2dot.py
|
#!/usr/bin/env python
import bindann
bindann.install_exception_handler()
import sys
inf = open(sys.argv[1], "r")
outf = open(sys.argv[1]+".dot", "w")
outf.write("digraph %s {\n" % sys.argv[1].replace(".",""))
def parse_netstring(l, i):
try:
j = l.find(':', i)
if j == -1:
return (None, len(l),)
lenval = int(l[i:j])
val = l[j+1:j+1+lenval]
# skip the comma
assert l[j+1+lenval] == ","
return (val, j+1+lenval+1,)
except Exception, le:
le.args = tuple(le.args + (l, i,))
raise
def parse_ref(l, i):
(attrname, i,) = parse_netstring(l, i)
j = l.find(",", i)
assert j != -1
objid = l[i:j]
return (objid, attrname, j+1,)
def parse_memdump_line(l):
result = []
i = l.find('-')
objid = l[:i]
(objdesc, i,) = parse_netstring(l, i+1)
result.append((objid, objdesc,))
while i != -1 and i < len(l):
(objid, attrname, i,) = parse_ref(l, i)
result.append((objid, attrname,))
return result
for l in inf:
if l[-1] != "\n":
raise "waht the HECK? %r" % l
res = parse_memdump_line(l.strip())
# declare the node
outf.write("\"%s\" [label=\"%s\"];\n" % (res[0][0], res[0][1],))
# declare all the edges
for edge in res[1:]:
if edge[1]:
# a named edge
outf.write("\"%s\" -> \"%s\" [style=bold, label=\"%s\"];\n" % (res[0][0], edge[0], edge[1],))
else:
# an anonymous edge
outf.write("\"%s\" -> \"%s\";\n" % (res[0][0], edge[0]))
outf.write("}")
| 1,589
|
Python
|
.py
| 51
| 24.941176
| 105
| 0.511155
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,448
|
verinfo.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/scripts/verinfo.py
|
#!/usr/bin/env python
import exceptions
class UsageError(exceptions.Exception): pass
import sys
import pkg_resources
def main():
if len(sys.argv) <= 1:
raise UsageError, "USAGE: verinfo DISTRIBUTIONNAME [PACKAGENAME]"
DISTNAME=sys.argv[1]
if len(sys.argv) >= 3:
PACKNAME=sys.argv[2]
else:
PACKNAME=DISTNAME
print "pkg_resources.require('%s') => " % (DISTNAME,),
print pkg_resources.require(DISTNAME)
print "import %s;print %s => " % (PACKNAME, PACKNAME,),
x = __import__(PACKNAME)
print x
print "import %s;print %s.__version__ => " % (PACKNAME, PACKNAME,),
print hasattr(x, '__version__') and x.__version__
if __name__ == "__main__":
main()
| 717
|
Python
|
.py
| 22
| 28.136364
| 73
| 0.638205
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,449
|
debug_stuff.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/debug_stuff.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.tree_index import TreeBasedIndex
import struct
import os
import inspect
from functools import wraps
import json
class DebugTreeBasedIndex(TreeBasedIndex):
def __init__(self, *args, **kwargs):
super(DebugTreeBasedIndex, self).__init__(*args, **kwargs)
def print_tree(self):
print '-----CURRENT TREE-----'
print self.root_flag
if self.root_flag == 'l':
print '---ROOT---'
self._print_leaf_data(self.data_start)
return
else:
print '---ROOT---'
self._print_node_data(self.data_start)
nr_of_el, children_flag = self._read_node_nr_of_elements_and_children_flag(
self.data_start)
nodes = []
for index in range(nr_of_el):
l_pointer, key, r_pointer = self._read_single_node_key(
self.data_start, index)
nodes.append(l_pointer)
nodes.append(r_pointer)
print 'ROOT NODES', nodes
while children_flag == 'n':
self._print_level(nodes, 'n')
new_nodes = []
for node in nodes:
nr_of_el, children_flag = \
self._read_node_nr_of_elements_and_children_flag(node)
for index in range(nr_of_el):
l_pointer, key, r_pointer = self._read_single_node_key(
node, index)
new_nodes.append(l_pointer)
new_nodes.append(r_pointer)
nodes = new_nodes
self._print_level(nodes, 'l')
def _print_level(self, nodes, flag):
print '---NEXT LVL---'
if flag == 'n':
for node in nodes:
self._print_node_data(node)
elif flag == 'l':
for node in nodes:
self._print_leaf_data(node)
def _print_leaf_data(self, leaf_start_position):
print 'printing data of leaf at', leaf_start_position
nr_of_elements = self._read_leaf_nr_of_elements(leaf_start_position)
self.buckets.seek(leaf_start_position)
data = self.buckets.read(self.leaf_heading_size +
nr_of_elements * self.single_leaf_record_size)
leaf = struct.unpack('<' + self.leaf_heading_format +
nr_of_elements * self.single_leaf_record_format, data)
print leaf
print
def _print_node_data(self, node_start_position):
print 'printing data of node at', node_start_position
nr_of_elements = self._read_node_nr_of_elements_and_children_flag(
node_start_position)[0]
self.buckets.seek(node_start_position)
data = self.buckets.read(self.node_heading_size + self.pointer_size
+ nr_of_elements * (self.key_size + self.pointer_size))
node = struct.unpack('<' + self.node_heading_format + self.pointer_format
+ nr_of_elements * (
self.key_format + self.pointer_format),
data)
print node
print
# ------------------>
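# database_step_by_step() monkey-patches every public method of the given
# database object so that each call (method name, arguments and, for inserts,
# the result) is appended as one JSON line to an operation log file;
# database_from_steps() below replays such a log against another database.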
def database_step_by_step(db_obj, path=None):
if not path:
# ugly for multiplatform support....
p = db_obj.path
p1 = os.path.split(p)
p2 = os.path.split(p1[0])
p3 = '_'.join([p2[1], 'operation_logger.log'])
path = os.path.join(os.path.split(p2[0])[0], p3)
f_obj = open(path, 'wb')
__stack = [] # inspect.stack() is not working on pytest etc
def remove_from_stack(name):
for i in range(len(__stack)):
if __stack[-i] == name:
__stack.pop(-i)
def __dumper(f):
@wraps(f)
def __inner(*args, **kwargs):
funct_name = f.__name__
if funct_name == 'count':
name = args[0].__name__
meth_args = (name,) + args[1:]
elif funct_name in ('reindex_index', 'compact_index'):
name = args[0].name
meth_args = (name,) + args[1:]
else:
meth_args = args
kwargs_copy = kwargs.copy()
res = None
__stack.append(funct_name)
if funct_name == 'insert':
try:
res = f(*args, **kwargs)
except:
packed = json.dumps((funct_name,
meth_args, kwargs_copy, None))
f_obj.write('%s\n' % packed)
f_obj.flush()
raise
else:
packed = json.dumps((funct_name,
meth_args, kwargs_copy, res))
f_obj.write('%s\n' % packed)
f_obj.flush()
else:
if funct_name == 'get':
for curr in __stack:
if ('delete' in curr or 'update' in curr) and not curr.startswith('test'):
remove_from_stack(funct_name)
return f(*args, **kwargs)
packed = json.dumps((funct_name, meth_args, kwargs_copy))
f_obj.write('%s\n' % packed)
f_obj.flush()
res = f(*args, **kwargs)
remove_from_stack(funct_name)
return res
return __inner
for meth_name, meth_f in inspect.getmembers(db_obj, predicate=inspect.ismethod):
if not meth_name.startswith('_'):
setattr(db_obj, meth_name, __dumper(meth_f))
setattr(db_obj, 'operation_logger', f_obj)
def database_from_steps(db_obj, path):
# db_obj.insert=lambda data : insert_for_debug(db_obj, data)
with open(path, 'rb') as f_obj:
for current in f_obj:
line = json.loads(current[:-1])
if line[0] == 'count':
obj = getattr(db_obj, line[1][0])
line[1] = [obj] + line[1][1:]
name = line[0]
if name == 'insert':
try:
line[1][0].pop('_rev')
except:
pass
elif name in ('delete', 'update'):
el = db_obj.get('id', line[1][0]['_id'])
line[1][0]['_rev'] = el['_rev']
# print 'FROM STEPS doing', line
meth = getattr(db_obj, line[0], None)
if not meth:
raise Exception("Method = `%s` not found" % line[0])
meth(*line[1], **line[2])
# def insert_for_debug(self, data):
#
# _rev = data['_rev']
#
# if not '_id' in data:
# _id = uuid4().hex
# else:
# _id = data['_id']
# data['_id'] = _id
# try:
# _id = bytes(_id)
# except:
# raise DatabaseException("`_id` must be valid bytes object")
# self._insert_indexes(_id, _rev, data)
# ret = {'_id': _id, '_rev': _rev}
# data.update(ret)
# return ret
| 7,682
|
Python
|
.py
| 188
| 29.207447
| 98
| 0.515995
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,450
|
rr_cache.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/rr_cache.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from random import choice
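# Random-replacement caches: when a cache reaches maxsize, roughly ten percent
# of its entries (at least one) are evicted at random before the new result is
# stored. cache1lvl keys on the wrapped function's first positional argument;
# cache2lvl keys on the first two.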
def cache1lvl(maxsize=100):
def decorating_function(user_function):
cache1lvl = {}
@functools.wraps(user_function)
def wrapper(key, *args, **kwargs):
try:
result = cache1lvl[key]
except KeyError:
if len(cache1lvl) == maxsize:
for i in xrange(maxsize // 10 or 1):
del cache1lvl[choice(cache1lvl.keys())]
cache1lvl[key] = user_function(key, *args, **kwargs)
result = cache1lvl[key]
# result = user_function(obj, key, *args, **kwargs)
return result
def clear():
cache1lvl.clear()
def delete(key):
try:
del cache1lvl[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache1lvl
wrapper.delete = delete
return wrapper
return decorating_function
def cache2lvl(maxsize=100):
def decorating_function(user_function):
cache = {}
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
# return user_function(*args, **kwargs)
try:
result = cache[args[0]][args[1]]
except KeyError:
# print wrapper.cache_size
if wrapper.cache_size == maxsize:
to_delete = maxsize // 10 or 1
for i in xrange(to_delete):
key1 = choice(cache.keys())
key2 = choice(cache[key1].keys())
del cache[key1][key2]
if not cache[key1]:
del cache[key1]
wrapper.cache_size -= to_delete
# print wrapper.cache_size
result = user_function(*args, **kwargs)
try:
cache[args[0]][args[1]] = result
except KeyError:
cache[args[0]] = {args[1]: result}
wrapper.cache_size += 1
return result
def clear():
cache.clear()
wrapper.cache_size = 0
def delete(key, inner_key=None):
if inner_key:
try:
del cache[key][inner_key]
if not cache[key]:
del cache[key]
wrapper.cache_size -= 1
return True
except KeyError:
return False
else:
try:
wrapper.cache_size -= len(cache[key])
del cache[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
wrapper.cache_size = 0
return wrapper
return decorating_function
| 3,673
|
Python
|
.py
| 99
| 25.181818
| 74
| 0.528784
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,451
|
lfu_cache_with_lock.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/lfu_cache_with_lock.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from heapq import nsmallest
from operator import itemgetter
from collections import defaultdict
try:
from collections import Counter
except ImportError:
class Counter(dict):
'Mapping where default values are zero'
def __missing__(self, key):
return 0
def twolvl_iterator(dict):
for k, v in dict.iteritems():
for kk, vv in v.iteritems():
yield k, kk, vv
def create_cache1lvl(lock_obj):
def cache1lvl(maxsize=100):
"""
modified version of http://code.activestate.com/recipes/498245/
"""
def decorating_function(user_function):
cache = {}
use_count = Counter()
lock = lock_obj()
@functools.wraps(user_function)
def wrapper(key, *args, **kwargs):
try:
result = cache[key]
except KeyError:
with lock:
if len(cache) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
use_count.iteritems(),
key=itemgetter(1)):
del cache[k], use_count[k]
cache[key] = user_function(key, *args, **kwargs)
result = cache[key]
use_count[key] += 1
else:
with lock:
use_count[key] += 1
return result
def clear():
cache.clear()
use_count.clear()
def delete(key):
try:
del cache[key]
del use_count[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
return wrapper
return decorating_function
return cache1lvl
def create_cache2lvl(lock_obj):
def cache2lvl(maxsize=100):
"""
modified version of http://code.activestate.com/recipes/498245/
"""
def decorating_function(user_function):
cache = {}
use_count = defaultdict(Counter)
lock = lock_obj()
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
try:
result = cache[args[0]][args[1]]
except KeyError:
with lock:
if wrapper.cache_size == maxsize:
to_delete = maxsize / 10 or 1
for k1, k2, v in nsmallest(to_delete,
twolvl_iterator(
use_count),
key=itemgetter(2)):
del cache[k1][k2], use_count[k1][k2]
if not cache[k1]:
del cache[k1]
del use_count[k1]
wrapper.cache_size -= to_delete
result = user_function(*args, **kwargs)
try:
cache[args[0]][args[1]] = result
except KeyError:
cache[args[0]] = {args[1]: result}
use_count[args[0]][args[1]] += 1
wrapper.cache_size += 1
else:
use_count[args[0]][args[1]] += 1
return result
def clear():
cache.clear()
use_count.clear()
def delete(key, *args):
if args:
try:
del cache[key][args[0]]
del use_count[key][args[0]]
if not cache[key]:
del cache[key]
del use_count[key]
wrapper.cache_size -= 1
return True
except KeyError:
return False
else:
try:
wrapper.cache_size -= len(cache[key])
del cache[key]
del use_count[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
wrapper.cache_size = 0
return wrapper
return decorating_function
return cache2lvl
| 5,486
|
Python
|
.py
| 140
| 22.65
| 74
| 0.456832
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,452
|
lfu_cache.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/lfu_cache.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from heapq import nsmallest
from operator import itemgetter
from collections import defaultdict
try:
from collections import Counter
except ImportError:
class Counter(dict):
'Mapping where default values are zero'
def __missing__(self, key):
return 0
def cache1lvl(maxsize=100):
"""
modified version of http://code.activestate.com/recipes/498245/
"""
def decorating_function(user_function):
cache = {}
use_count = Counter()
@functools.wraps(user_function)
def wrapper(key, *args, **kwargs):
try:
result = cache[key]
except KeyError:
if len(cache) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
use_count.iteritems(),
key=itemgetter(1)):
del cache[k], use_count[k]
cache[key] = user_function(key, *args, **kwargs)
result = cache[key]
# result = user_function(obj, key, *args, **kwargs)
finally:
use_count[key] += 1
return result
def clear():
cache.clear()
use_count.clear()
def delete(key):
try:
del cache[key]
del use_count[key]
except KeyError:
return False
else:
return True
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
return wrapper
return decorating_function
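# Illustrative use of the 1-level LFU cache (read_block and expensive_lookup
# below are made-up names, not part of CodernityDB): results are memoised per
# first positional argument, and the least-frequently-used tenth of the
# entries is evicted once the cache is full.
#
#     @cache1lvl(maxsize=100)
#     def read_block(key):
#         return expensive_lookup(key)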
def twolvl_iterator(dict):
for k, v in dict.iteritems():
for kk, vv in v.iteritems():
yield k, kk, vv
def cache2lvl(maxsize=100):
"""
modified version of http://code.activestate.com/recipes/498245/
"""
def decorating_function(user_function):
cache = {}
use_count = defaultdict(Counter)
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
# return user_function(*args, **kwargs)
try:
result = cache[args[0]][args[1]]
except KeyError:
if wrapper.cache_size == maxsize:
to_delete = maxsize // 10 or 1
for k1, k2, v in nsmallest(to_delete,
twolvl_iterator(use_count),
key=itemgetter(2)):
del cache[k1][k2], use_count[k1][k2]
if not cache[k1]:
del cache[k1]
del use_count[k1]
wrapper.cache_size -= to_delete
result = user_function(*args, **kwargs)
try:
cache[args[0]][args[1]] = result
except KeyError:
cache[args[0]] = {args[1]: result}
wrapper.cache_size += 1
finally:
use_count[args[0]][args[1]] += 1
return result
def clear():
cache.clear()
use_count.clear()
def delete(key, inner_key=None):
if inner_key is not None:
try:
del cache[key][inner_key]
del use_count[key][inner_key]
if not cache[key]:
del cache[key]
del use_count[key]
wrapper.cache_size -= 1
except KeyError:
return False
else:
return True
else:
try:
wrapper.cache_size -= len(cache[key])
del cache[key]
del use_count[key]
except KeyError:
return False
else:
return True
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
wrapper.cache_size = 0
return wrapper
return decorating_function
| 4,799
|
Python
|
.py
| 133
| 23.421053
| 74
| 0.512798
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,453
|
database_gevent.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/database_gevent.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gevent.lock import RLock
from CodernityDB.env import cdb_environment
cdb_environment['mode'] = "gevent"
cdb_environment['rlock_obj'] = RLock
# from CodernityDB.database import Database
from CodernityDB.database_safe_shared import SafeDatabase
class GeventDatabase(SafeDatabase):
pass
| 949
|
Python
|
.py
| 24
| 38.083333
| 74
| 0.784314
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,454
|
rr_cache_with_lock.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/rr_cache_with_lock.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from random import choice
def create_cache1lvl(lock_obj):
def cache1lvl(maxsize=100):
def decorating_function(user_function):
cache = {}
lock = lock_obj()
@functools.wraps(user_function)
def wrapper(key, *args, **kwargs):
try:
result = cache[key]
except KeyError:
with lock:
if len(cache) == maxsize:
for i in xrange(maxsize // 10 or 1):
del cache[choice(cache.keys())]
cache[key] = user_function(key, *args, **kwargs)
result = cache[key]
return result
def clear():
cache.clear()
def delete(key):
try:
del cache[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
return wrapper
return decorating_function
return cache1lvl
def create_cache2lvl(lock_obj):
def cache2lvl(maxsize=100):
def decorating_function(user_function):
cache = {}
lock = lock_obj()
@functools.wraps(user_function)
def wrapper(*args, **kwargs):
try:
result = cache[args[0]][args[1]]
except KeyError:
with lock:
if wrapper.cache_size == maxsize:
to_delete = maxsize // 10 or 1
for i in xrange(to_delete):
key1 = choice(cache.keys())
key2 = choice(cache[key1].keys())
del cache[key1][key2]
if not cache[key1]:
del cache[key1]
wrapper.cache_size -= to_delete
result = user_function(*args, **kwargs)
try:
cache[args[0]][args[1]] = result
except KeyError:
cache[args[0]] = {args[1]: result}
wrapper.cache_size += 1
return result
def clear():
cache.clear()
wrapper.cache_size = 0
def delete(key, *args):
if args:
try:
del cache[key][args[0]]
if not cache[key]:
del cache[key]
wrapper.cache_size -= 1
return True
except KeyError:
return False
else:
try:
wrapper.cache_size -= len(cache[key])
del cache[key]
return True
except KeyError:
return False
wrapper.clear = clear
wrapper.cache = cache
wrapper.delete = delete
wrapper.cache_size = 0
return wrapper
return decorating_function
return cache2lvl
| 4,032
|
Python
|
.py
| 103
| 23.174757
| 74
| 0.470498
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,455
|
database_super_thread_safe.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/database_super_thread_safe.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import RLock
from CodernityDB.env import cdb_environment
cdb_environment['mode'] = "threads"
cdb_environment['rlock_obj'] = RLock
from database import Database
from functools import wraps
from types import FunctionType, MethodType
from CodernityDB.database_safe_shared import th_safe_gen
class SuperLock(type):
@staticmethod
def wrapper(f):
@wraps(f)
def _inner(*args, **kwargs):
db = args[0]
with db.super_lock:
# print '=>', f.__name__, repr(args[1:])
res = f(*args, **kwargs)
# if db.opened:
# db.flush()
# print '<=', f.__name__, repr(args[1:])
return res
return _inner
def __new__(cls, classname, bases, attr):
new_attr = {}
for base in bases:
for b_attr in dir(base):
a = getattr(base, b_attr, None)
if isinstance(a, MethodType) and not b_attr.startswith('_'):
if b_attr == 'flush' or b_attr == 'flush_indexes':
pass
else:
# setattr(base, b_attr, SuperLock.wrapper(a))
new_attr[b_attr] = SuperLock.wrapper(a)
for attr_name, attr_value in attr.iteritems():
if isinstance(attr_value, FunctionType) and not attr_name.startswith('_'):
attr_value = SuperLock.wrapper(attr_value)
new_attr[attr_name] = attr_value
new_attr['super_lock'] = RLock()
return type.__new__(cls, classname, bases, new_attr)
class SuperThreadSafeDatabase(Database):
"""
    Thread-safe version that allows only a single thread to use the db at a time.
    It adds the same lock to all public methods, so only one operation can be
    performed at any given time. This is a completely different implementation
    from the ThreadSafe version (the one without the "super" word).
"""
__metaclass__ = SuperLock
def __init__(self, *args, **kwargs):
super(SuperThreadSafeDatabase, self).__init__(*args, **kwargs)
def __patch_index_gens(self, name):
ind = self.indexes_names[name]
for c in ('all', 'get_many'):
m = getattr(ind, c)
if getattr(ind, c + "_orig", None):
return
m_fixed = th_safe_gen.wrapper(m, name, c, self.super_lock)
setattr(ind, c, m_fixed)
setattr(ind, c + '_orig', m)
def open(self, *args, **kwargs):
res = super(SuperThreadSafeDatabase, self).open(*args, **kwargs)
for name in self.indexes_names.iterkeys():
self.__patch_index_gens(name)
return res
def create(self, *args, **kwargs):
res = super(SuperThreadSafeDatabase, self).create(*args, **kwargs)
for name in self.indexes_names.iterkeys():
self.__patch_index_gens(name)
return res
def add_index(self, *args, **kwargs):
res = super(SuperThreadSafeDatabase, self).add_index(*args, **kwargs)
self.__patch_index_gens(res)
return res
def edit_index(self, *args, **kwargs):
res = super(SuperThreadSafeDatabase, self).edit_index(*args, **kwargs)
self.__patch_index_gens(res)
return res
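# Minimal usage sketch (the path and record below are illustrative only): the
# class is a drop-in replacement for Database, with every public call
# serialised on a single re-entrant lock, so one instance can be shared
# between threads.
#
#     from CodernityDB.database_super_thread_safe import SuperThreadSafeDatabase
#     db = SuperThreadSafeDatabase('/tmp/example_db')
#     db.create()
#     db.insert(dict(name='x'))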
| 3,903
|
Python
|
.py
| 92
| 34.641304
| 86
| 0.614614
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,456
|
tree_index.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/tree_index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from index import Index, IndexException, DocIdNotFound, ElemNotFound
import struct
import marshal
import os
import io
import shutil
from storage import IU_Storage
# from ipdb import set_trace
from CodernityDB.env import cdb_environment
from CodernityDB.index import TryReindexException
if cdb_environment.get('rlock_obj'):
from CodernityDB import patch
patch.patch_cache_rr(cdb_environment['rlock_obj'])
from CodernityDB.rr_cache import cache1lvl, cache2lvl
tree_buffer_size = io.DEFAULT_BUFFER_SIZE
cdb_environment['tree_buffer_size'] = tree_buffer_size
MODE_FIRST = 0
MODE_LAST = 1
MOVE_BUFFER_PREV = 0
MOVE_BUFFER_NEXT = 1
class NodeCapacityException(IndexException):
pass
class IU_TreeBasedIndex(Index):
custom_header = 'from CodernityDB.tree_index import TreeBasedIndex'
def __init__(self, db_path, name, key_format='32s', pointer_format='I',
meta_format='32sIIc', node_capacity=10, storage_class=None):
if node_capacity < 3:
raise NodeCapacityException
super(IU_TreeBasedIndex, self).__init__(db_path, name)
self.data_start = self._start_ind + 1
self.node_capacity = node_capacity
self.flag_format = 'c'
self.elements_counter_format = 'h'
self.pointer_format = pointer_format
self.key_format = key_format
self.meta_format = meta_format
self._count_props()
if not storage_class:
storage_class = IU_Storage
if storage_class and not isinstance(storage_class, basestring):
storage_class = storage_class.__name__
self.storage_class = storage_class
self.storage = None
cache = cache1lvl(100)
twolvl_cache = cache2lvl(150)
self._find_key = cache(self._find_key)
self._match_doc_id = cache(self._match_doc_id)
# self._read_single_leaf_record =
# twolvl_cache(self._read_single_leaf_record)
self._find_key_in_leaf = twolvl_cache(self._find_key_in_leaf)
self._read_single_node_key = twolvl_cache(self._read_single_node_key)
self._find_first_key_occurence_in_node = twolvl_cache(
self._find_first_key_occurence_in_node)
self._find_last_key_occurence_in_node = twolvl_cache(
self._find_last_key_occurence_in_node)
self._read_leaf_nr_of_elements = cache(self._read_leaf_nr_of_elements)
self._read_leaf_neighbours = cache(self._read_leaf_neighbours)
self._read_leaf_nr_of_elements_and_neighbours = cache(
self._read_leaf_nr_of_elements_and_neighbours)
self._read_node_nr_of_elements_and_children_flag = cache(
self._read_node_nr_of_elements_and_children_flag)
def _count_props(self):
"""
Counts dynamic properties for tree, such as all complex formats
"""
self.single_leaf_record_format = self.key_format + self.meta_format
self.single_node_record_format = self.pointer_format + \
self.key_format + self.pointer_format
self.node_format = self.elements_counter_format + self.flag_format\
+ self.pointer_format + (self.key_format +
self.pointer_format) * self.node_capacity
self.leaf_format = self.elements_counter_format + self.pointer_format * 2\
+ (self.single_leaf_record_format) * self.node_capacity
self.leaf_heading_format = self.elements_counter_format + \
self.pointer_format * 2
self.node_heading_format = self.elements_counter_format + \
self.flag_format
self.key_size = struct.calcsize('<' + self.key_format)
self.meta_size = struct.calcsize('<' + self.meta_format)
self.single_leaf_record_size = struct.calcsize('<' + self.
single_leaf_record_format)
self.single_node_record_size = struct.calcsize('<' + self.
single_node_record_format)
self.node_size = struct.calcsize('<' + self.node_format)
self.leaf_size = struct.calcsize('<' + self.leaf_format)
self.flag_size = struct.calcsize('<' + self.flag_format)
self.elements_counter_size = struct.calcsize('<' + self.
elements_counter_format)
self.pointer_size = struct.calcsize('<' + self.pointer_format)
self.leaf_heading_size = struct.calcsize(
'<' + self.leaf_heading_format)
self.node_heading_size = struct.calcsize(
'<' + self.node_heading_format)
def create_index(self):
if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException('Already exists')
with io.open(os.path.join(self.db_path, self.name + "_buck"), 'w+b') as f:
props = dict(name=self.name,
flag_format=self.flag_format,
pointer_format=self.pointer_format,
elements_counter_format=self.elements_counter_format,
node_capacity=self.node_capacity,
key_format=self.key_format,
meta_format=self.meta_format,
version=self.__version__,
storage_class=self.storage_class)
f.write(marshal.dumps(props))
self.buckets = io.open(os.path.join(self.db_path, self.name +
"_buck"), 'r+b', buffering=0)
self._create_storage()
self.buckets.seek(self._start_ind)
self.buckets.write(struct.pack('<c', 'l'))
self._insert_empty_root()
self.root_flag = 'l'
def destroy(self):
super(IU_TreeBasedIndex, self).destroy()
self._clear_cache()
def open_index(self):
if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException("Doesn't exists")
self.buckets = io.open(
os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
self.buckets.seek(self._start_ind)
self.root_flag = struct.unpack('<c', self.buckets.read(1))[0]
self._fix_params()
self._open_storage()
def _insert_empty_root(self):
self.buckets.seek(self.data_start)
root = struct.pack('<' + self.leaf_heading_format,
0,
0,
0)
root += self.single_leaf_record_size * self.node_capacity * '\x00'
self.buckets.write(root)
self.flush()
def insert(self, doc_id, key, start, size, status='o'):
nodes_stack, indexes = self._find_leaf_to_insert(key)
self._insert_new_record_into_leaf(nodes_stack.pop(),
key,
doc_id,
start,
size,
status,
nodes_stack,
indexes)
self._match_doc_id.delete(doc_id)
def _read_leaf_nr_of_elements_and_neighbours(self, leaf_start):
self.buckets.seek(leaf_start)
data = self.buckets.read(
self.elements_counter_size + 2 * self.pointer_size)
nr_of_elements, prev_l, next_l = struct.unpack(
'<' + self.elements_counter_format + 2 * self.pointer_format,
data)
return nr_of_elements, prev_l, next_l
def _read_node_nr_of_elements_and_children_flag(self, start):
self.buckets.seek(start)
data = self.buckets.read(self.elements_counter_size + self.flag_size)
nr_of_elements, children_flag = struct.unpack(
'<' + self.elements_counter_format + self.flag_format,
data)
return nr_of_elements, children_flag
def _read_leaf_nr_of_elements(self, start):
self.buckets.seek(start)
data = self.buckets.read(self.elements_counter_size)
nr_of_elements = struct.unpack(
'<' + self.elements_counter_format, data)
return nr_of_elements[0]
def _read_single_node_key(self, node_start, key_index):
self.buckets.seek(self._calculate_key_position(
node_start, key_index, 'n'))
data = self.buckets.read(self.single_node_record_size)
flag_left, key, pointer_right = struct.unpack(
'<' + self.single_node_record_format, data)
return flag_left, key, pointer_right
def _read_single_leaf_record(self, leaf_start, key_index):
self.buckets.seek(self._calculate_key_position(
leaf_start, key_index, 'l'))
data = self.buckets.read(self.single_leaf_record_size)
key, doc_id, start, size, status = struct.unpack('<' + self.
single_leaf_record_format, data)
return key, doc_id, start, size, status
def _calculate_key_position(self, start, key_index, flag):
"""
Calculates position of key in buckets file
"""
if flag == 'l':
return start + self.leaf_heading_size + key_index * self.single_leaf_record_size
elif flag == 'n':
# returns start position of flag before key[key_index]
return start + self.node_heading_size + key_index * (self.pointer_size + self.key_size)
def _match_doc_id(self, doc_id, key, element_index, leaf_start, nr_of_elements):
curr_key_index = element_index + 1
curr_leaf_start = leaf_start
next_leaf = self._read_leaf_neighbours(leaf_start)[1]
while True:
if curr_key_index < nr_of_elements:
curr_key, curr_doc_id, curr_start, curr_size,\
curr_status = self._read_single_leaf_record(
curr_leaf_start, curr_key_index)
if key != curr_key:
                    # shouldn't happen, crashes earlier on id index
raise DocIdNotFound
elif doc_id == curr_doc_id and curr_status != 'd':
return curr_leaf_start, nr_of_elements, curr_key_index
else:
curr_key_index = curr_key_index + 1
else: # there are no more elements in current leaf, must jump to next
if not next_leaf: # end of leaf linked list
                    # shouldn't happen, crashes earlier on id index
raise DocIdNotFound
else:
curr_leaf_start = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
curr_key_index = 0
def _find_existing(self, key, element_index, leaf_start, nr_of_elements):
curr_key_index = element_index + 1
curr_leaf_start = leaf_start
next_leaf = self._read_leaf_neighbours(leaf_start)[1]
while True:
if curr_key_index < nr_of_elements:
curr_key, curr_doc_id, curr_start, curr_size,\
curr_status = self._read_single_leaf_record(
curr_leaf_start, curr_key_index)
if key != curr_key:
raise ElemNotFound
elif curr_status != 'd':
return curr_leaf_start, nr_of_elements, curr_key_index
else:
curr_key_index = curr_key_index + 1
else: # there are no more elements in current leaf, must jump to next
if not next_leaf: # end of leaf linked list
raise ElemNotFound
else:
curr_leaf_start = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
curr_key_index = 0
def _update_element(self, leaf_start, key_index, new_data):
self.buckets.seek(self._calculate_key_position(leaf_start, key_index, 'l')
+ self.key_size)
self.buckets.write(struct.pack('<' + self.meta_format,
*new_data))
# self._read_single_leaf_record.delete(leaf_start_position, key_index)
def _delete_element(self, leaf_start, key_index):
self.buckets.seek(self._calculate_key_position(leaf_start, key_index, 'l')
+ self.single_leaf_record_size - 1)
self.buckets.write(struct.pack('<c', 'd'))
# self._read_single_leaf_record.delete(leaf_start_position, key_index)
def _leaf_linear_key_search(self, key, start, start_index, end_index):
self.buckets.seek(start)
data = self.buckets.read(
(end_index - start_index + 1) * self.single_leaf_record_size)
curr_key = struct.unpack(
'<' + self.key_format, data[:self.key_size])[0]
data = data[self.single_leaf_record_size:]
curr_index = 0
while curr_key != key:
curr_index += 1
curr_key = struct.unpack(
'<' + self.key_format, data[:self.key_size])[0]
data = data[self.single_leaf_record_size:]
return start_index + curr_index
def _node_linear_key_search(self, key, start, start_index, end_index):
self.buckets.seek(start + self.pointer_size)
data = self.buckets.read((end_index - start_index + 1) * (
self.key_size + self.pointer_size))
curr_key = struct.unpack(
'<' + self.key_format, data[:self.key_size])[0]
data = data[self.key_size + self.pointer_size:]
curr_index = 0
while curr_key != key:
curr_index += 1
curr_key = struct.unpack(
'<' + self.key_format, data[:self.key_size])[0]
data = data[self.key_size + self.pointer_size:]
return start_index + curr_index
def _next_buffer(self, buffer_start, buffer_end):
return buffer_end, buffer_end + tree_buffer_size
def _prev_buffer(self, buffer_start, buffer_end):
return buffer_start - tree_buffer_size, buffer_start
def _choose_next_candidate_index_in_leaf(self, leaf_start, candidate_start, buffer_start, buffer_end, imin, imax):
if buffer_start > candidate_start:
move_buffer = MOVE_BUFFER_PREV
elif buffer_end < candidate_start + self.single_leaf_record_size:
move_buffer = MOVE_BUFFER_NEXT
else:
move_buffer = None
return self._calculate_key_position(leaf_start, (imin + imax) / 2, 'l'), (imin + imax) / 2, move_buffer
def _choose_next_candidate_index_in_node(self, node_start, candidate_start, buffer_start, buffer_end, imin, imax):
if buffer_start > candidate_start:
move_buffer = MOVE_BUFFER_PREV
        elif buffer_end < candidate_start + self.single_node_record_size:
            move_buffer = MOVE_BUFFER_NEXT
else:
move_buffer = None
return self._calculate_key_position(node_start, (imin + imax) / 2, 'n'), (imin + imax) / 2, move_buffer
def _find_key_in_leaf(self, leaf_start, key, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_leaf_with_one_element(key, leaf_start)[-5:]
else:
return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements)[-5:]
def _find_key_in_leaf_for_update(self, key, doc_id, leaf_start, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_leaf_with_one_element(key, leaf_start, doc_id=doc_id)
else:
return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, doc_id=doc_id)
def _find_index_of_first_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST, return_closest=True)[:2]
else:
return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, return_closest=True)[:2]
def _find_index_of_last_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_LAST, return_closest=True)[:2]
else:
return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_LAST, return_closest=True)[:2]
def _find_index_of_first_key_equal(self, key, leaf_start, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST)[:2]
else:
return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST)[:2]
def _find_key_in_leaf_with_one_element(self, key, leaf_start, doc_id=None, mode=None, return_closest=False):
curr_key, curr_doc_id, curr_start, curr_size,\
curr_status = self._read_single_leaf_record(leaf_start, 0)
if key != curr_key:
if return_closest and curr_status != 'd':
return leaf_start, 0
else:
raise ElemNotFound
else:
if curr_status == 'd':
raise ElemNotFound
elif doc_id is not None and doc_id != curr_doc_id:
                # shouldn't happen, crashes earlier on the id index
raise DocIdNotFound
else:
return leaf_start, 0, curr_doc_id, curr_key, curr_start, curr_size, curr_status
def _find_key_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements, doc_id=None, mode=None, return_closest=False):
"""
        Binary search implementation used by all the get functions; see the illustrative sketch after this method.
"""
imin, imax = 0, nr_of_elements - 1
buffer_start, buffer_end = self._set_buffer_limits()
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
self._calculate_key_position(leaf_start,
(imin + imax) / 2,
'l'),
buffer_start,
buffer_end,
imin, imax)
        while imax > imin:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
candidate_index)
candidate_start = self._calculate_key_position(
leaf_start, candidate_index, 'l')
if key < curr_key:
if move_buffer == MOVE_BUFFER_PREV:
buffer_start, buffer_end = self._prev_buffer(
buffer_start, buffer_end)
else: # if next chosen element is in current buffer, abort moving to other
                    move_buffer = None
imax = candidate_index - 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
elif key == curr_key:
if mode == MODE_LAST:
if move_buffer == MOVE_BUFFER_NEXT:
buffer_start, buffer_end = self._next_buffer(
buffer_start, buffer_end)
else:
                        move_buffer = None
imin = candidate_index + 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
else:
if curr_status == 'o':
break
else:
if move_buffer == MOVE_BUFFER_PREV:
buffer_start, buffer_end = self._prev_buffer(
buffer_start, buffer_end)
else:
                            move_buffer = None
imax = candidate_index
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
else:
if move_buffer == MOVE_BUFFER_NEXT:
buffer_start, buffer_end = self._next_buffer(
buffer_start, buffer_end)
else:
                    move_buffer = None
imin = candidate_index + 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
if imax > imin:
chosen_key_position = candidate_index
else:
chosen_key_position = imax
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
chosen_key_position)
if key != curr_key:
if return_closest: # useful for find all bigger/smaller methods
return leaf_start, chosen_key_position
else:
raise ElemNotFound
if doc_id and doc_id == curr_doc_id and curr_status == 'o':
return leaf_start, chosen_key_position, curr_doc_id, curr_key, curr_start, curr_size, curr_status
else:
if mode == MODE_FIRST and imin < chosen_key_position: # check if there isn't any element with equal key before chosen one
matching_record_index = self._leaf_linear_key_search(key,
self._calculate_key_position(leaf_start,
imin,
'l'),
imin,
chosen_key_position)
else:
matching_record_index = chosen_key_position
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
matching_record_index)
if curr_status == 'd' and not return_closest:
leaf_start, nr_of_elements, matching_record_index = self._find_existing(key,
matching_record_index,
leaf_start,
nr_of_elements)
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
matching_record_index)
if doc_id is not None and doc_id != curr_doc_id:
leaf_start, nr_of_elements, matching_record_index = self._match_doc_id(doc_id,
key,
matching_record_index,
leaf_start,
nr_of_elements)
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
matching_record_index)
return leaf_start, matching_record_index, curr_doc_id, curr_key, curr_start, curr_size, curr_status
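    # --- Illustrative sketch (editorial addition): this helper is not part of the
    # original index and is never called by it.  It shows, on an in-memory string
    # of struct-packed fixed-size records, the core idea of the method above:
    # binary search over records addressed by "index * record size", without the
    # read-buffer tracking, doc_id matching and deleted-status handling the real
    # method needs on the bucket file.
    @staticmethod
    def _sketch_binary_search_packed_records(data, wanted_key, key_format='<i'):
        import struct
        record_size = struct.calcsize(key_format)
        imin, imax = 0, len(data) // record_size - 1
        while imin <= imax:
            mid = (imin + imax) // 2
            curr_key = struct.unpack(
                key_format, data[mid * record_size:(mid + 1) * record_size])[0]
            if curr_key == wanted_key:
                return mid  # index of the matching record
            elif curr_key < wanted_key:
                imin = mid + 1
            else:
                imax = mid - 1
        return -1  # no such key, analogous to raising ElemNotFound
    # e.g. for data = struct.pack('<5i', 1, 3, 5, 7, 9) and wanted_key = 7 this returns 3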
def _find_place_in_leaf(self, key, leaf_start, nr_of_elements):
if nr_of_elements == 1:
return self._find_place_in_leaf_with_one_element(key, leaf_start)
else:
return self._find_place_in_leaf_using_binary_search(key, leaf_start, nr_of_elements)
def _find_place_in_leaf_with_one_element(self, key, leaf_start):
curr_key, curr_doc_id, curr_start, curr_size,\
curr_status = self._read_single_leaf_record(leaf_start, 0)
if curr_status == 'd':
return leaf_start, 0, 0, False, True # leaf start, index of new key position, nr of rec to rewrite, full_leaf flag, on_deleted flag
else:
if key < curr_key:
return leaf_start, 0, 1, False, False
else:
return leaf_start, 1, 0, False, False
def _find_place_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements):
"""
        Binary search implementation used by the insert function; see the illustrative sketch after this method.
"""
imin, imax = 0, nr_of_elements - 1
buffer_start, buffer_end = self._set_buffer_limits()
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
self._calculate_key_position(leaf_start,
(imin + imax) / 2,
'l'),
buffer_start,
buffer_end,
imin, imax)
        while imax > imin:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
candidate_index)
candidate_start = self._calculate_key_position(
leaf_start, candidate_index, 'l')
if key < curr_key:
if move_buffer == MOVE_BUFFER_PREV:
buffer_start, buffer_end = self._prev_buffer(
buffer_start, buffer_end)
else: # if next chosen element is in current buffer, abort moving to other
                    move_buffer = None
imax = candidate_index - 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
else:
if move_buffer == MOVE_BUFFER_NEXT:
buffer_start, buffer_end = self._next_buffer(
buffer_start, buffer_end)
else:
                    move_buffer = None
imin = candidate_index + 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
if imax < imin and imin < nr_of_elements:
chosen_key_position = imin
else:
chosen_key_position = imax
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
chosen_key_position)
if curr_status == 'd':
return leaf_start, chosen_key_position, 0, False, True
elif key < curr_key:
if chosen_key_position > 0:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
chosen_key_position - 1)
                if curr_status == 'd':
return leaf_start, chosen_key_position - 1, 0, False, True
else:
return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False
else:
return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False
else:
if chosen_key_position < nr_of_elements - 1:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start,
chosen_key_position + 1)
                if curr_status == 'd':
return leaf_start, chosen_key_position + 1, 0, False, True
else:
return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, (nr_of_elements == self.node_capacity), False
else:
return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, (nr_of_elements == self.node_capacity), False
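    # --- Illustrative sketch (editorial addition, never called by the index).
    # The method above does directly on the bucket file what bisect does for an
    # in-memory sorted list: find the position at which a new key has to be
    # inserted so the keys stay sorted, plus how many records must be shifted.
    # Deleted-slot reuse and the exact full-leaf bookkeeping are left out here.
    @staticmethod
    def _sketch_find_insert_position(sorted_keys, new_key, capacity):
        from bisect import bisect_right
        position = bisect_right(sorted_keys, new_key)
        nr_to_shift = len(sorted_keys) - position  # records that must move right
        is_full = len(sorted_keys) >= capacity
        return position, nr_to_shift, is_full
    # e.g. for sorted_keys = [1, 3, 5], new_key = 4, capacity = 4 this returns (2, 1, False)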
def _set_buffer_limits(self):
pos = self.buckets.tell()
buffer_start = pos - (pos % tree_buffer_size)
return buffer_start, (buffer_start + tree_buffer_size)
def _find_first_key_occurence_in_node(self, node_start, key, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_FIRST)
else:
return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_FIRST)
def _find_last_key_occurence_in_node(self, node_start, key, nr_of_elements):
if nr_of_elements == 1:
return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_LAST)
else:
return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_LAST)
def _find_key_in_node_with_one_element(self, key, node_start, mode=None):
l_pointer, curr_key, r_pointer = self._read_single_node_key(
node_start, 0)
if key < curr_key:
return 0, l_pointer
elif key > curr_key:
return 0, r_pointer
else:
if mode == MODE_FIRST:
return 0, l_pointer
elif mode == MODE_LAST:
return 0, r_pointer
else:
raise Exception('Invalid mode declared: set first/last')
def _find_key_in_node_using_binary_search(self, key, node_start, nr_of_elements, mode=None):
imin, imax = 0, nr_of_elements - 1
buffer_start, buffer_end = self._set_buffer_limits()
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start,
self._calculate_key_position(node_start,
(imin + imax) / 2,
'n'),
buffer_start,
buffer_end,
imin, imax)
        while imax > imin:
l_pointer, curr_key, r_pointer = self._read_single_node_key(
node_start, candidate_index)
candidate_start = self._calculate_key_position(
node_start, candidate_index, 'n')
if key < curr_key:
if move_buffer == MOVE_BUFFER_PREV:
buffer_start, buffer_end = self._prev_buffer(
buffer_start, buffer_end)
else: # if next chosen element is in current buffer, abort moving to other
                    move_buffer = None
imax = candidate_index - 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
elif key == curr_key:
if mode == MODE_LAST:
if move_buffer == MOVE_BUFFER_NEXT:
buffer_start, buffer_end = self._next_buffer(
buffer_start, buffer_end)
else:
                        move_buffer = None
imin = candidate_index + 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
else:
break
else:
if move_buffer == MOVE_BUFFER_NEXT:
buffer_start, buffer_end = self._next_buffer(
buffer_start, buffer_end)
else:
                    move_buffer = None
imin = candidate_index + 1
candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start,
candidate_start,
buffer_start,
buffer_end,
imin, imax)
if imax > imin:
chosen_key_position = candidate_index
elif imax < imin and imin < nr_of_elements:
chosen_key_position = imin
else:
chosen_key_position = imax
l_pointer, curr_key, r_pointer = self._read_single_node_key(
node_start, chosen_key_position)
        if mode == MODE_FIRST and imin < chosen_key_position:  # check whether there are any elements with an equal key before the chosen one
matching_record_index = self._node_linear_key_search(key,
self._calculate_key_position(node_start,
imin,
'n'),
imin,
chosen_key_position)
else:
matching_record_index = chosen_key_position
l_pointer, curr_key, r_pointer = self._read_single_node_key(
node_start, matching_record_index)
if key < curr_key:
return matching_record_index, l_pointer
elif key > curr_key:
return matching_record_index, r_pointer
else:
if mode == MODE_FIRST:
return matching_record_index, l_pointer
elif mode == MODE_LAST:
return matching_record_index, r_pointer
else:
raise Exception('Invalid mode declared: first/last')
def _update_leaf_ready_data(self, leaf_start, start_index, new_nr_of_elements, records_to_rewrite):
self.buckets.seek(leaf_start)
self.buckets.write(struct.pack('<h', new_nr_of_elements))
start_position = self._calculate_key_position(
leaf_start, start_index, 'l')
self.buckets.seek(start_position)
self.buckets.write(
struct.pack(
'<' + (new_nr_of_elements - start_index) *
self.single_leaf_record_format,
*records_to_rewrite))
# self._read_single_leaf_record.delete(leaf_start)
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
def _update_leaf(self, leaf_start, new_record_position, nr_of_elements,
nr_of_records_to_rewrite, on_deleted, new_key,
new_doc_id, new_start, new_size, new_status):
if nr_of_records_to_rewrite == 0: # just write at set position
self.buckets.seek(self._calculate_key_position(
leaf_start, new_record_position, 'l'))
self.buckets.write(
struct.pack('<' + self.single_leaf_record_format,
new_key,
new_doc_id,
new_start,
new_size,
new_status))
self.flush()
else: # must read all elems after new one, and rewrite them after new
start = self._calculate_key_position(
leaf_start, new_record_position, 'l')
self.buckets.seek(start)
data = self.buckets.read(nr_of_records_to_rewrite *
self.single_leaf_record_size)
records_to_rewrite = struct.unpack('<' + nr_of_records_to_rewrite *
self.single_leaf_record_format, data)
curr_index = 0
records_to_rewrite = list(records_to_rewrite)
for status in records_to_rewrite[4::5]: # don't write back deleted records, deleting them from list
if status != 'o':
del records_to_rewrite[curr_index * 5:curr_index * 5 + 5]
nr_of_records_to_rewrite -= 1
nr_of_elements -= 1
else:
curr_index += 1
self.buckets.seek(start)
self.buckets.write(
struct.pack(
'<' + (nr_of_records_to_rewrite +
1) * self.single_leaf_record_format,
new_key,
new_doc_id,
new_start,
new_size,
new_status,
*tuple(records_to_rewrite)))
self.flush()
self.buckets.seek(leaf_start)
if not on_deleted: # when new record replaced deleted one, nr of leaf elements stays the same
self.buckets.write(struct.pack('<h', nr_of_elements + 1))
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
self._find_key_in_leaf.delete(leaf_start)
# self._read_single_leaf_record.delete(leaf_start)
def _read_leaf_neighbours(self, leaf_start):
self.buckets.seek(leaf_start + self.elements_counter_size)
        neighbours_data = self.buckets.read(2 * self.pointer_size)
        prev_l, next_l = struct.unpack(
            '<' + 2 * self.pointer_format, neighbours_data)
return prev_l, next_l
def _update_leaf_size_and_pointers(self, leaf_start, new_size, new_prev, new_next):
self.buckets.seek(leaf_start)
self.buckets.write(
struct.pack(
'<' + self.elements_counter_format + 2 * self.pointer_format,
new_size,
new_prev,
new_next))
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_neighbours.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
def _update_leaf_prev_pointer(self, leaf_start, pointer):
self.buckets.seek(leaf_start + self.elements_counter_size)
self.buckets.write(struct.pack('<' + self.pointer_format,
pointer))
self._read_leaf_neighbours.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
def _update_size(self, start, new_size):
self.buckets.seek(start)
self.buckets.write(struct.pack('<' + self.elements_counter_format,
new_size))
self._read_leaf_nr_of_elements.delete(start)
self._read_leaf_nr_of_elements_and_neighbours.delete(start)
def _create_new_root_from_leaf(self, leaf_start, nr_of_records_to_rewrite, new_leaf_size, old_leaf_size, half_size, new_data):
blanks = (self.node_capacity - new_leaf_size) * \
self.single_leaf_record_size * '\x00'
left_leaf_start_position = self.data_start + self.node_size
right_leaf_start_position = self.data_start + \
self.node_size + self.leaf_size
self.buckets.seek(self.data_start + self.leaf_heading_size)
# read old root
data = self.buckets.read(
self.single_leaf_record_size * self.node_capacity)
leaf_data = struct.unpack('<' + self.
single_leaf_record_format * self.node_capacity, data)
        # remove deleted records; if that succeeded, abort splitting
if self._update_if_has_deleted(self.data_start, leaf_data, 0, new_data):
return None
# find out key which goes to parent node
if nr_of_records_to_rewrite > new_leaf_size - 1:
key_moved_to_parent_node = leaf_data[(old_leaf_size - 1) * 5]
elif nr_of_records_to_rewrite == new_leaf_size - 1:
key_moved_to_parent_node = new_data[0]
else:
key_moved_to_parent_node = leaf_data[old_leaf_size * 5]
data_to_write = self._prepare_new_root_data(key_moved_to_parent_node,
left_leaf_start_position,
right_leaf_start_position,
'l')
if nr_of_records_to_rewrite > half_size:
# key goes to first half
# prepare left leaf data
left_leaf_data = struct.pack('<' + self.leaf_heading_format + self.single_leaf_record_format
* (self.node_capacity - nr_of_records_to_rewrite),
old_leaf_size,
0,
right_leaf_start_position,
*leaf_data[:-nr_of_records_to_rewrite * 5])
left_leaf_data += struct.pack(
'<' + self.single_leaf_record_format * (
nr_of_records_to_rewrite - new_leaf_size + 1),
new_data[0],
new_data[1],
new_data[2],
new_data[3],
new_data[4],
*leaf_data[-nr_of_records_to_rewrite * 5:(old_leaf_size - 1) * 5])
# prepare right leaf_data
right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format +
self.single_leaf_record_format *
new_leaf_size,
new_leaf_size,
left_leaf_start_position,
0,
*leaf_data[-new_leaf_size * 5:])
else:
# key goes to second half
if nr_of_records_to_rewrite:
records_before = leaf_data[old_leaf_size *
5:-nr_of_records_to_rewrite * 5]
records_after = leaf_data[-nr_of_records_to_rewrite * 5:]
else:
records_before = leaf_data[old_leaf_size * 5:]
records_after = []
left_leaf_data = struct.pack(
'<' + self.leaf_heading_format +
self.single_leaf_record_format * old_leaf_size,
old_leaf_size,
0,
right_leaf_start_position,
*leaf_data[:old_leaf_size * 5])
# prepare right leaf_data
right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format +
self.single_leaf_record_format * (new_leaf_size -
nr_of_records_to_rewrite - 1),
new_leaf_size,
left_leaf_start_position,
0,
*records_before)
right_leaf_data += struct.pack(
'<' + self.single_leaf_record_format * (
nr_of_records_to_rewrite + 1),
new_data[0],
new_data[1],
new_data[2],
new_data[3],
new_data[4],
*records_after)
left_leaf_data += (self.node_capacity -
old_leaf_size) * self.single_leaf_record_size * '\x00'
right_leaf_data += blanks
data_to_write += left_leaf_data
data_to_write += right_leaf_data
self.buckets.seek(self._start_ind)
self.buckets.write(struct.pack('<c', 'n') + data_to_write)
self.root_flag = 'n'
# self._read_single_leaf_record.delete(leaf_start)
self._find_key_in_leaf.delete(leaf_start)
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
self._read_leaf_neighbours.delete(leaf_start)
return None
def _split_leaf(
self, leaf_start, nr_of_records_to_rewrite, new_key, new_doc_id, new_start, new_size, new_status,
create_new_root=False):
"""
        Splits a full leaf into two separate ones: the first half of the records stays at the old position,
        the second half is written as a new leaf at the end of the file. See the illustrative sketch after this method.
"""
half_size = self.node_capacity / 2
if self.node_capacity % 2 == 0:
old_leaf_size = half_size
new_leaf_size = half_size + 1
else:
old_leaf_size = new_leaf_size = half_size + 1
if create_new_root: # leaf is a root
new_data = [new_key, new_doc_id, new_start, new_size, new_status]
self._create_new_root_from_leaf(leaf_start, nr_of_records_to_rewrite, new_leaf_size, old_leaf_size, half_size, new_data)
else:
blanks = (self.node_capacity - new_leaf_size) * \
self.single_leaf_record_size * '\x00'
prev_l, next_l = self._read_leaf_neighbours(leaf_start)
if nr_of_records_to_rewrite > half_size: # insert key into first half of leaf
self.buckets.seek(self._calculate_key_position(leaf_start,
self.node_capacity - nr_of_records_to_rewrite,
'l'))
# read all records with key>new_key
data = self.buckets.read(
nr_of_records_to_rewrite * self.single_leaf_record_size)
records_to_rewrite = struct.unpack(
'<' + nr_of_records_to_rewrite * self.single_leaf_record_format, data)
                # remove deleted records; if that succeeded, abort splitting
if self._update_if_has_deleted(leaf_start,
records_to_rewrite,
self.node_capacity -
nr_of_records_to_rewrite,
[new_key, new_doc_id, new_start, new_size, new_status]):
return None
key_moved_to_parent_node = records_to_rewrite[
-new_leaf_size * 5]
# write new leaf at end of file
self.buckets.seek(0, 2) # end of file
new_leaf_start = self.buckets.tell()
# prepare new leaf_data
new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format +
self.single_leaf_record_format *
new_leaf_size,
new_leaf_size,
leaf_start,
next_l,
*records_to_rewrite[-new_leaf_size * 5:])
new_leaf += blanks
# write new leaf
self.buckets.write(new_leaf)
# update old leaf heading
self._update_leaf_size_and_pointers(leaf_start,
old_leaf_size,
prev_l,
new_leaf_start)
# seek position of new key in first half
self.buckets.seek(self._calculate_key_position(leaf_start,
self.node_capacity - nr_of_records_to_rewrite,
'l'))
# write new key and keys after
self.buckets.write(
struct.pack(
'<' + self.single_leaf_record_format *
(nr_of_records_to_rewrite - new_leaf_size + 1),
new_key,
new_doc_id,
new_start,
new_size,
'o',
*records_to_rewrite[:-new_leaf_size * 5]))
if next_l: # when next_l is 0 there is no next leaf to update, avoids writing data at 0 position of file
self._update_leaf_prev_pointer(
next_l, new_leaf_start)
# self._read_single_leaf_record.delete(leaf_start)
self._find_key_in_leaf.delete(leaf_start)
return new_leaf_start, key_moved_to_parent_node
            else:  # key goes into the second half of the leaf
# seek half of the leaf
self.buckets.seek(self._calculate_key_position(
leaf_start, old_leaf_size, 'l'))
data = self.buckets.read(
self.single_leaf_record_size * (new_leaf_size - 1))
records_to_rewrite = struct.unpack('<' + (new_leaf_size - 1) *
self.single_leaf_record_format, data)
                # remove deleted records; if that succeeded, abort splitting
if self._update_if_has_deleted(leaf_start,
records_to_rewrite,
old_leaf_size,
[new_key, new_doc_id, new_start, new_size, new_status]):
return None
key_moved_to_parent_node = records_to_rewrite[
-(new_leaf_size - 1) * 5]
if key_moved_to_parent_node > new_key:
key_moved_to_parent_node = new_key
self.buckets.seek(0, 2) # end of file
new_leaf_start = self.buckets.tell()
# prepare new leaf data
index_of_records_split = nr_of_records_to_rewrite * 5
if index_of_records_split:
records_before = records_to_rewrite[
:-index_of_records_split]
records_after = records_to_rewrite[
-index_of_records_split:]
else:
records_before = records_to_rewrite
records_after = []
new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format
+ self.single_leaf_record_format * (new_leaf_size -
nr_of_records_to_rewrite - 1),
new_leaf_size,
leaf_start,
next_l,
*records_before)
new_leaf += struct.pack(
'<' + self.single_leaf_record_format *
(nr_of_records_to_rewrite + 1),
new_key,
new_doc_id,
new_start,
new_size,
'o',
*records_after)
new_leaf += blanks
self.buckets.write(new_leaf)
self._update_leaf_size_and_pointers(leaf_start,
old_leaf_size,
prev_l,
new_leaf_start)
                if next_l:  # when next_l is 0 there is no next leaf to update, avoids writing data at position 0 of the file
self._update_leaf_prev_pointer(
next_l, new_leaf_start)
# self._read_single_leaf_record.delete(leaf_start)
self._find_key_in_leaf.delete(leaf_start)
return new_leaf_start, key_moved_to_parent_node
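    # --- Illustrative sketch (editorial addition, never called by the index).
    # The essence of the leaf split above, on a plain Python list instead of the
    # bucket file: the overfull, sorted record list is cut in half, the right
    # half becomes a new leaf, the first key of the right half is promoted to
    # the parent node, and the two leaves stay chained together as neighbours.
    @staticmethod
    def _sketch_split_leaf(records):
        # records: (key, value) tuples sorted by key, one record too many
        half = (len(records) + 1) // 2
        left_leaf = {'records': records[:half], 'prev': None, 'next': None}
        right_leaf = {'records': records[half:], 'prev': None, 'next': None}
        left_leaf['next'] = right_leaf
        right_leaf['prev'] = left_leaf
        key_moved_to_parent = right_leaf['records'][0][0]
        return left_leaf, right_leaf, key_moved_to_parent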
def _update_if_has_deleted(self, leaf_start, records_to_rewrite, start_position, new_record_data):
"""
        Checks whether there are any deleted elements in the data to rewrite and prevents them from being
        written back. See the illustrative sketch after this method.
"""
curr_index = 0
nr_of_elements = self.node_capacity
records_to_rewrite = list(records_to_rewrite)
for status in records_to_rewrite[4::5]: # remove deleted from list
if status != 'o':
del records_to_rewrite[curr_index * 5:curr_index * 5 + 5]
nr_of_elements -= 1
else:
curr_index += 1
        # if any records were deleted we don't have to split, just update the leaf
if nr_of_elements < self.node_capacity:
data_split_index = 0
for key in records_to_rewrite[0::5]:
if key > new_record_data[0]:
break
else:
data_split_index += 1
records_to_rewrite = records_to_rewrite[:data_split_index * 5]\
+ new_record_data\
+ records_to_rewrite[data_split_index * 5:]
self._update_leaf_ready_data(leaf_start,
start_position,
nr_of_elements + 1,
                                         records_to_rewrite)
return True
        else:  # did not find any deleted records in the leaf
return False
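    # --- Illustrative sketch (editorial addition, never called by the index).
    # The same idea as the method above, on a flat list: records whose status is
    # 'd' are dropped, and if that actually freed space the new record is simply
    # slotted in at its sorted position instead of splitting the leaf.
    @staticmethod
    def _sketch_purge_deleted_and_insert(records, new_record, capacity):
        # records and new_record are (key, doc_id, start, size, status) tuples
        alive = [rec for rec in records if rec[4] == 'o']
        if len(alive) >= capacity:
            return None  # nothing useful was deleted, the caller has to split
        position = 0
        while position < len(alive) and alive[position][0] <= new_record[0]:
            position += 1
        return alive[:position] + [new_record] + alive[position:]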
def _prepare_new_root_data(self, root_key, left_pointer, right_pointer, children_flag='n'):
new_root = struct.pack(
'<' + self.node_heading_format + self.single_node_record_format,
1,
children_flag,
left_pointer,
root_key,
right_pointer)
new_root += (self.key_size + self.pointer_size) * (self.
node_capacity - 1) * '\x00'
return new_root
def _create_new_root_from_node(self, node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer):
# reading second half of node
self.buckets.seek(self.data_start + self.node_heading_size)
# read all keys with key>new_key
data = self.buckets.read(self.pointer_size + self.
node_capacity * (self.key_size + self.pointer_size))
old_node_data = struct.unpack('<' + self.pointer_format + self.node_capacity *
(self.key_format + self.pointer_format), data)
self.buckets.seek(0, 2) # end of file
new_node_start = self.buckets.tell()
if nr_of_keys_to_rewrite == new_node_size:
key_moved_to_root = new_key
# prepare new nodes data
left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
old_node_size * (self.
key_format + self.pointer_format),
old_node_size,
children_flag,
*old_node_data[:old_node_size * 2 + 1])
right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
new_node_size * (self.
key_format + self.pointer_format),
new_node_size,
children_flag,
new_pointer,
*old_node_data[old_node_size * 2 + 1:])
elif nr_of_keys_to_rewrite > new_node_size:
key_moved_to_root = old_node_data[old_node_size * 2 - 1]
# prepare new nodes data
if nr_of_keys_to_rewrite == self.node_capacity:
keys_before = old_node_data[:1]
keys_after = old_node_data[1:old_node_size * 2 - 1]
else:
keys_before = old_node_data[:-nr_of_keys_to_rewrite * 2]
keys_after = old_node_data[-(
nr_of_keys_to_rewrite) * 2:old_node_size * 2 - 1]
left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
(self.node_capacity - nr_of_keys_to_rewrite) * (self.
key_format + self.pointer_format),
old_node_size,
children_flag,
*keys_before)
left_node += struct.pack(
'<' + (self.key_format + self.pointer_format) *
(nr_of_keys_to_rewrite - new_node_size),
new_key,
new_pointer,
*keys_after)
right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
new_node_size * (self.
key_format + self.pointer_format),
new_node_size,
children_flag,
*old_node_data[old_node_size * 2:])
else:
            # inserting key into second half of node and creating new root
key_moved_to_root = old_node_data[old_node_size * 2 + 1]
# prepare new nodes data
left_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
old_node_size * (self.
key_format + self.pointer_format),
old_node_size,
children_flag,
*old_node_data[:old_node_size * 2 + 1])
if nr_of_keys_to_rewrite:
keys_before = old_node_data[(old_node_size +
1) * 2:-nr_of_keys_to_rewrite * 2]
keys_after = old_node_data[-nr_of_keys_to_rewrite * 2:]
else:
keys_before = old_node_data[(old_node_size + 1) * 2:]
keys_after = []
right_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
(new_node_size - nr_of_keys_to_rewrite - 1) * (self.
key_format + self.pointer_format),
new_node_size,
children_flag,
*keys_before)
right_node += struct.pack(
'<' + (nr_of_keys_to_rewrite + 1) *
(self.key_format + self.pointer_format),
new_key,
new_pointer,
*keys_after)
new_root = self._prepare_new_root_data(key_moved_to_root,
new_node_start,
new_node_start + self.node_size)
left_node += (self.node_capacity - old_node_size) * \
(self.key_size + self.pointer_size) * '\x00'
# adding blanks after new node
right_node += (self.node_capacity - new_node_size) * \
(self.key_size + self.pointer_size) * '\x00'
self.buckets.seek(0, 2)
self.buckets.write(left_node + right_node)
self.buckets.seek(self.data_start)
self.buckets.write(new_root)
self._read_single_node_key.delete(node_start)
self._read_node_nr_of_elements_and_children_flag.delete(node_start)
return None
def _split_node(self, node_start, nr_of_keys_to_rewrite, new_key, new_pointer, children_flag, create_new_root=False):
"""
        Splits a full node into two separate ones: the first half of the keys stays at the old position,
        the second half is written as a new node at the end of the file. See the illustrative sketch after this method.
"""
half_size = self.node_capacity / 2
if self.node_capacity % 2 == 0:
old_node_size = new_node_size = half_size
else:
old_node_size = half_size
new_node_size = half_size + 1
if create_new_root:
self._create_new_root_from_node(node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer)
else:
blanks = (self.node_capacity - new_node_size) * (
self.key_size + self.pointer_size) * '\x00'
if nr_of_keys_to_rewrite == new_node_size: # insert key into first half of node
# reading second half of node
self.buckets.seek(self._calculate_key_position(node_start,
old_node_size,
'n') + self.pointer_size)
# read all keys with key>new_key
data = self.buckets.read(nr_of_keys_to_rewrite *
(self.key_size + self.pointer_size))
old_node_data = struct.unpack('<' + nr_of_keys_to_rewrite *
(self.key_format + self.pointer_format), data)
# write new node at end of file
self.buckets.seek(0, 2)
new_node_start = self.buckets.tell()
# prepare new node_data
new_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
(self.key_format +
self.pointer_format) * new_node_size,
new_node_size,
children_flag,
new_pointer,
*old_node_data)
new_node += blanks
# write new node
self.buckets.write(new_node)
# update old node data
self._update_size(
node_start, old_node_size)
self._read_single_node_key.delete(node_start)
self._read_node_nr_of_elements_and_children_flag.delete(
node_start)
return new_node_start, new_key
elif nr_of_keys_to_rewrite > half_size: # insert key into first half of node
# seek for first key to rewrite
self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n')
+ self.pointer_size)
# read all keys with key>new_key
data = self.buckets.read(
nr_of_keys_to_rewrite * (self.key_size + self.pointer_size))
old_node_data = struct.unpack(
'<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data)
key_moved_to_parent_node = old_node_data[-(
new_node_size + 1) * 2]
self.buckets.seek(0, 2)
new_node_start = self.buckets.tell()
# prepare new node_data
new_node = struct.pack('<' + self.node_heading_format +
self.pointer_format + (self.key_format +
self.pointer_format) * new_node_size,
new_node_size,
children_flag,
old_node_data[-new_node_size * 2 - 1],
*old_node_data[-new_node_size * 2:])
new_node += blanks
# write new node
self.buckets.write(new_node)
self._update_size(
node_start, old_node_size)
# seek position of new key in first half
self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n')
+ self.pointer_size)
# write new key and keys after
self.buckets.write(
struct.pack(
'<' + (self.key_format + self.pointer_format) *
(nr_of_keys_to_rewrite - new_node_size),
new_key,
new_pointer,
*old_node_data[:-(new_node_size + 1) * 2]))
self._read_single_node_key.delete(node_start)
self._read_node_nr_of_elements_and_children_flag.delete(
node_start)
return new_node_start, key_moved_to_parent_node
else: # key goes into second half
# reading second half of node
self.buckets.seek(self._calculate_key_position(node_start,
old_node_size,
'n')
+ self.pointer_size)
data = self.buckets.read(
new_node_size * (self.key_size + self.pointer_size))
old_node_data = struct.unpack('<' + new_node_size *
(self.key_format + self.pointer_format), data)
# find key which goes to parent node
key_moved_to_parent_node = old_node_data[0]
self.buckets.seek(0, 2) # end of file
new_node_start = self.buckets.tell()
index_of_records_split = nr_of_keys_to_rewrite * 2
# prepare new node_data
first_leaf_pointer = old_node_data[1]
old_node_data = old_node_data[2:]
if index_of_records_split:
keys_before = old_node_data[:-index_of_records_split]
keys_after = old_node_data[-index_of_records_split:]
else:
keys_before = old_node_data
keys_after = []
new_node = struct.pack('<' + self.node_heading_format + self.pointer_format +
(self.key_format + self.pointer_format) *
(new_node_size -
nr_of_keys_to_rewrite - 1),
new_node_size,
children_flag,
first_leaf_pointer,
*keys_before)
new_node += struct.pack('<' + (self.key_format + self.pointer_format) *
(nr_of_keys_to_rewrite + 1),
new_key,
new_pointer,
*keys_after)
new_node += blanks
# write new node
self.buckets.write(new_node)
self._update_size(node_start, old_node_size)
self._read_single_node_key.delete(node_start)
self._read_node_nr_of_elements_and_children_flag.delete(
node_start)
return new_node_start, key_moved_to_parent_node
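    # --- Illustrative sketch (editorial addition, never called by the index).
    # An internal-node split on plain lists: unlike a leaf split, the middle key
    # is *moved* up to the parent and appears in neither half; only the child
    # pointers around it are redistributed between the two new nodes.
    @staticmethod
    def _sketch_split_node(keys, children):
        # keys: sorted separator keys; children: len(keys) + 1 child pointers
        middle = len(keys) // 2
        key_moved_to_parent = keys[middle]
        left_node = {'keys': keys[:middle], 'children': children[:middle + 1]}
        right_node = {'keys': keys[middle + 1:], 'children': children[middle + 1:]}
        return left_node, right_node, key_moved_to_parent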
def insert_first_record_into_leaf(self, leaf_start, key, doc_id, start, size, status):
self.buckets.seek(leaf_start)
self.buckets.write(struct.pack('<' + self.elements_counter_format,
1))
self.buckets.seek(leaf_start + self.leaf_heading_size)
self.buckets.write(struct.pack('<' + self.single_leaf_record_format,
key,
doc_id,
start,
size,
status))
# self._read_single_leaf_record.delete(leaf_start)
self._find_key_in_leaf.delete(leaf_start)
self._read_leaf_nr_of_elements.delete(leaf_start)
self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start)
def _insert_new_record_into_leaf(self, leaf_start, key, doc_id, start, size, status, nodes_stack, indexes):
nr_of_elements = self._read_leaf_nr_of_elements(leaf_start)
if nr_of_elements == 0:
self.insert_first_record_into_leaf(
leaf_start, key, doc_id, start, size, status)
return
leaf_start, new_record_position, nr_of_records_to_rewrite, full_leaf, on_deleted\
= self._find_place_in_leaf(key, leaf_start, nr_of_elements)
if full_leaf:
try: # check if leaf has parent node
leaf_parent_pointer = nodes_stack.pop()
except IndexError: # leaf is a root
leaf_parent_pointer = 0
split_data = self._split_leaf(leaf_start,
nr_of_records_to_rewrite,
key,
doc_id,
start,
size,
status,
create_new_root=(False if leaf_parent_pointer else True))
            if split_data is not None:  # if None, the split created a new root or was replaced by _update_if_has_deleted
new_leaf_start_position, key_moved_to_parent_node = split_data
self._insert_new_key_into_node(leaf_parent_pointer,
key_moved_to_parent_node,
leaf_start,
new_leaf_start_position,
nodes_stack,
indexes)
else: # there is a place for record in leaf
self.buckets.seek(leaf_start)
self._update_leaf(
leaf_start, new_record_position, nr_of_elements, nr_of_records_to_rewrite,
on_deleted, key, doc_id, start, size, status)
def _update_node(self, new_key_position, nr_of_keys_to_rewrite, new_key, new_pointer):
if nr_of_keys_to_rewrite == 0:
self.buckets.seek(new_key_position)
self.buckets.write(
struct.pack('<' + self.key_format + self.pointer_format,
new_key,
new_pointer))
self.flush()
else:
self.buckets.seek(new_key_position)
data = self.buckets.read(nr_of_keys_to_rewrite * (
self.key_size + self.pointer_size))
keys_to_rewrite = struct.unpack(
'<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data)
self.buckets.seek(new_key_position)
self.buckets.write(
struct.pack(
'<' + (nr_of_keys_to_rewrite + 1) *
(self.key_format + self.pointer_format),
new_key,
new_pointer,
*keys_to_rewrite))
self.flush()
def _insert_new_key_into_node(self, node_start, new_key, old_half_start, new_half_start, nodes_stack, indexes):
parent_key_index = indexes.pop()
nr_of_elements, children_flag = self._read_node_nr_of_elements_and_children_flag(node_start)
parent_prev_pointer = self._read_single_node_key(
node_start, parent_key_index)[0]
        if parent_prev_pointer == old_half_start:  # the split child was on the left side of its parent key, the new key must be written before it
new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index, 'n')
nr_of_keys_to_rewrite = nr_of_elements - parent_key_index
        else:  # the split child was on the right side of its parent key, the new key must be written after it
new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index + 1, 'n')
nr_of_keys_to_rewrite = nr_of_elements - (parent_key_index + 1)
if nr_of_elements == self.node_capacity:
try: # check if node has parent
node_parent_pointer = nodes_stack.pop()
except IndexError: # node is a root
node_parent_pointer = 0
new_data = self._split_node(node_start,
nr_of_keys_to_rewrite,
new_key,
new_half_start,
children_flag,
create_new_root=(False if node_parent_pointer else True))
if new_data: # if not new_data, new root has been created
new_node_start_position, key_moved_to_parent_node = new_data
self._insert_new_key_into_node(node_parent_pointer,
key_moved_to_parent_node,
node_start,
new_node_start_position,
nodes_stack,
indexes)
self._find_first_key_occurence_in_node.delete(node_start)
self._find_last_key_occurence_in_node.delete(node_start)
        else:  # there is an empty slot for the new key in the node
self._update_size(node_start, nr_of_elements + 1)
self._update_node(new_key_position,
nr_of_keys_to_rewrite,
new_key,
new_half_start)
self._find_first_key_occurence_in_node.delete(node_start)
self._find_last_key_occurence_in_node.delete(node_start)
self._read_single_node_key.delete(node_start)
self._read_node_nr_of_elements_and_children_flag.delete(node_start)
def _find_leaf_to_insert(self, key):
"""
        Traverses the tree in search of the leaf to insert into, remembering the parent nodes along the path,
        and looks for the last occurrence of the key if it is already in the tree. See the illustrative sketch after this method.
"""
nodes_stack = [self.data_start]
if self.root_flag == 'l':
return nodes_stack, []
else:
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start)
curr_index, curr_pointer = self._find_last_key_occurence_in_node(
self.data_start, key, nr_of_elements)
nodes_stack.append(curr_pointer)
indexes = [curr_index]
while(curr_child_flag == 'n'):
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_pointer)
curr_index, curr_pointer = self._find_last_key_occurence_in_node(curr_pointer, key, nr_of_elements)
nodes_stack.append(curr_pointer)
indexes.append(curr_index)
return nodes_stack, indexes
        # nodes_stack contains the start addresses of the nodes directly above the leaf with the key;
        # indexes holds the matching key positions adjacent to those nodes_stack values (used as pointers),
        # required when inserting new keys into upper tree levels
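    # --- Illustrative sketch (editorial addition, never called by the index).
    # The traversal above, on an in-memory tree of dicts: walk from the root
    # towards a leaf while remembering every visited node and which child slot
    # was followed, so that a later split can push promoted keys back up along
    # exactly that path (the nodes_stack / indexes pair used by the real code).
    @staticmethod
    def _sketch_descend_remembering_path(root, key):
        # an internal node is {'keys': [...], 'children': [...]}, a leaf is {'records': [...]}
        nodes_stack, indexes = [], []
        node = root
        while 'children' in node:
            child_index = 0
            while child_index < len(node['keys']) and key >= node['keys'][child_index]:
                child_index += 1  # last occurrence: step right past equal keys
            nodes_stack.append(node)
            indexes.append(child_index)
            node = node['children'][child_index]
        return node, nodes_stack, indexes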
def _find_leaf_with_last_key_occurence(self, key):
if self.root_flag == 'l':
return self.data_start
else:
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start)
curr_position = self._find_last_key_occurence_in_node(
self.data_start, key, nr_of_elements)[1]
while(curr_child_flag == 'n'):
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position)
curr_position = self._find_last_key_occurence_in_node(
curr_position, key, nr_of_elements)[1]
return curr_position
def _find_leaf_with_first_key_occurence(self, key):
if self.root_flag == 'l':
return self.data_start
else:
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start)
curr_position = self._find_first_key_occurence_in_node(
self.data_start, key, nr_of_elements)[1]
while(curr_child_flag == 'n'):
nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position)
curr_position = self._find_first_key_occurence_in_node(
curr_position, key, nr_of_elements)[1]
return curr_position
def _find_key(self, key):
containing_leaf_start = self._find_leaf_with_first_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start)
try:
doc_id, l_key, start, size, status = self._find_key_in_leaf(
containing_leaf_start, key, nr_of_elements)
except ElemNotFound:
if next_leaf:
nr_of_elements = self._read_leaf_nr_of_elements(next_leaf)
else:
raise ElemNotFound
doc_id, l_key, start, size, status = self._find_key_in_leaf(
next_leaf, key, nr_of_elements)
return doc_id, l_key, start, size, status
def _find_key_to_update(self, key, doc_id):
"""
        Searches the tree for a record that matches not only the given key but also the doc_id.
"""
containing_leaf_start = self._find_leaf_with_first_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start)
try:
leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key,
doc_id,
containing_leaf_start,
nr_of_elements)
except ElemNotFound:
if next_leaf:
nr_of_elements = self._read_leaf_nr_of_elements(next_leaf)
else:
raise TryReindexException()
try:
leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key,
doc_id,
next_leaf,
nr_of_elements)
except ElemNotFound:
raise TryReindexException()
return leaf_start, record_index, doc_id, l_key, start, size, status
def update(self, doc_id, key, u_start=0, u_size=0, u_status='o'):
containing_leaf_start, element_index, old_doc_id, old_key, old_start, old_size, old_status = self._find_key_to_update(key, doc_id)
if u_start:
old_start = u_start
if u_size:
old_size = u_size
if u_status:
old_status = u_status
new_data = (old_doc_id, old_start, old_size, old_status)
self._update_element(containing_leaf_start, element_index, new_data)
self._find_key.delete(key)
self._match_doc_id.delete(doc_id)
self._find_key_in_leaf.delete(containing_leaf_start, key)
return True
def delete(self, doc_id, key, start=0, size=0):
containing_leaf_start, element_index = self._find_key_to_update(
key, doc_id)[:2]
self._delete_element(containing_leaf_start, element_index)
self._find_key.delete(key)
self._match_doc_id.delete(doc_id)
self._find_key_in_leaf.delete(containing_leaf_start, key)
return True
def _find_key_many(self, key, limit=1, offset=0):
leaf_with_key = self._find_leaf_with_first_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
try:
leaf_with_key, key_index = self._find_index_of_first_key_equal(
key, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
except ElemNotFound:
leaf_with_key = next_leaf
key_index = 0
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
while offset:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if key == curr_key:
if status != 'd':
offset -= 1
key_index += 1
else:
return
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
while limit:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if key == curr_key:
if status != 'd':
yield doc_id, start, size, status
limit -= 1
key_index += 1
else:
return
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
def _find_key_smaller(self, key, limit=1, offset=0):
leaf_with_key = self._find_leaf_with_first_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
if curr_key >= key:
key_index -= 1
while offset:
if key_index >= 0:
key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
offset -= 1
key_index -= 1
else:
if prev_leaf:
leaf_with_key = prev_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
key_index = nr_of_elements - 1
else:
return
while limit:
if key_index >= 0:
key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
yield doc_id, key, start, size, status
limit -= 1
key_index -= 1
else:
if prev_leaf:
leaf_with_key = prev_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
key_index = nr_of_elements - 1
else:
return
def _find_key_equal_and_smaller(self, key, limit=1, offset=0):
leaf_with_key = self._find_leaf_with_last_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
try:
leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
except ElemNotFound:
leaf_with_key = prev_leaf
key_index = self._read_leaf_nr_of_elements_and_neighbours(
leaf_with_key)[0]
curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
if curr_key > key:
key_index -= 1
while offset:
if key_index >= 0:
key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
offset -= 1
key_index -= 1
else:
if prev_leaf:
leaf_with_key = prev_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
key_index = nr_of_elements - 1
else:
return
while limit:
if key_index >= 0:
key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
yield doc_id, key, start, size, status
limit -= 1
key_index -= 1
else:
if prev_leaf:
leaf_with_key = prev_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf)
key_index = nr_of_elements - 1
else:
return
def _find_key_bigger(self, key, limit=1, offset=0):
leaf_with_key = self._find_leaf_with_last_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
try:
leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
except ElemNotFound:
key_index = 0
curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
if curr_key <= key:
key_index += 1
while offset:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
offset -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
while limit:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
yield doc_id, curr_key, start, size, status
limit -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
def _find_key_equal_and_bigger(self, key, limit=1, offset=0):
leaf_with_key = self._find_leaf_with_first_key_occurence(key)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0]
if curr_key < key:
key_index += 1
while offset:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
offset -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
while limit:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_with_key, key_index)
if status != 'd':
yield doc_id, curr_key, start, size, status
limit -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
def _find_key_between(self, start, end, limit, offset, inclusive_start, inclusive_end):
"""
        Returns a generator yielding all keys within the given interval. See the illustrative sketch after this method.
"""
if inclusive_start:
leaf_with_key = self._find_leaf_with_first_key_occurence(start)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
curr_key = self._read_single_leaf_record(
leaf_with_key, key_index)[0]
if curr_key < start:
key_index += 1
else:
leaf_with_key = self._find_leaf_with_last_key_occurence(start)
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key)
leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements)
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
if curr_key <= start:
key_index += 1
while offset:
if key_index < nr_of_elements:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
if curr_status != 'd':
offset -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
while limit:
if key_index < nr_of_elements:
curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index)
if curr_key > end or (curr_key == end and not inclusive_end):
return
elif curr_status != 'd':
yield curr_doc_id, curr_key, curr_start, curr_size, curr_status
limit -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_with_key = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
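    # --- Illustrative sketch (editorial addition, never called by the index).
    # The offset / limit / inclusive-bounds handling of the generator above,
    # applied to a plain sorted iterable of (key, value) pairs instead of the
    # on-disk leaf chain; limit=-1 means "no limit", exactly as in the real code.
    @staticmethod
    def _sketch_range_query(sorted_items, start, end, limit=-1, offset=0,
                            inclusive_start=True, inclusive_end=True):
        for key, value in sorted_items:
            if key < start or (key == start and not inclusive_start):
                continue
            if key > end or (key == end and not inclusive_end):
                return
            if offset:
                offset -= 1
                continue
            if not limit:
                return
            yield key, value
            limit -= 1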
def get(self, key):
return self._find_key(self.make_key(key))
def get_many(self, key, limit=1, offset=0):
return self._find_key_many(self.make_key(key), limit, offset)
def get_between(self, start, end, limit=1, offset=0, inclusive_start=True, inclusive_end=True):
if start is None:
end = self.make_key(end)
if inclusive_end:
return self._find_key_equal_and_smaller(end, limit, offset)
else:
return self._find_key_smaller(end, limit, offset)
elif end is None:
start = self.make_key(start)
if inclusive_start:
return self._find_key_equal_and_bigger(start, limit, offset)
else:
return self._find_key_bigger(start, limit, offset)
else:
start = self.make_key(start)
end = self.make_key(end)
return self._find_key_between(start, end, limit, offset, inclusive_start, inclusive_end)
def all(self, limit=-1, offset=0):
"""
        Traverses the linked list of all tree leaves and returns a generator yielding every element stored in the index.
        See the illustrative sketch after this method.
"""
if self.root_flag == 'n':
leaf_start = self.data_start + self.node_size
else:
leaf_start = self.data_start
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_start)
key_index = 0
while offset:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_start, key_index)
if status != 'd':
offset -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_start = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
while limit:
if key_index < nr_of_elements:
curr_key, doc_id, start, size, status = self._read_single_leaf_record(
leaf_start, key_index)
if status != 'd':
yield doc_id, curr_key, start, size, status
limit -= 1
key_index += 1
else:
key_index = 0
if next_leaf:
leaf_start = next_leaf
nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf)
else:
return
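    # --- Illustrative sketch (editorial addition, never called by the index).
    # The leaf-chain walk done by all(): every leaf holds its records plus a
    # reference to the next leaf, so a full scan never has to touch the internal
    # nodes of the tree at all; records marked 'd' (deleted) are skipped.
    @staticmethod
    def _sketch_iterate_leaf_chain(first_leaf):
        # a leaf is {'records': [(key, value, status), ...], 'next': leaf_or_None}
        leaf = first_leaf
        while leaf is not None:
            for key, value, status in leaf['records']:
                if status != 'd':
                    yield key, value
            leaf = leaf['next']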
def make_key(self, key):
raise NotImplementedError()
def make_key_value(self, data):
raise NotImplementedError()
def _open_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.open()
def _create_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.create()
def compact(self, node_capacity=0):
if not node_capacity:
node_capacity = self.node_capacity
compact_ind = self.__class__(
self.db_path, self.name + '_compact', node_capacity=node_capacity)
compact_ind.create_index()
gen = self.all()
while True:
try:
doc_id, key, start, size, status = gen.next()
except StopIteration:
break
self.storage._f.seek(start)
value = self.storage._f.read(size)
start_ = compact_ind.storage._f.tell()
compact_ind.storage._f.write(value)
compact_ind.insert(doc_id, key, start_, size, status)
compact_ind.close_index()
original_name = self.name
# os.unlink(os.path.join(self.db_path, self.name + "_buck"))
self.close_index()
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_buck"), os.path.join(self.db_path, self.name + "_buck"))
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_stor"), os.path.join(self.db_path, self.name + "_stor"))
# self.name = original_name
self.open_index() # reload...
self.name = original_name
self._save_params(dict(name=original_name))
self._fix_params()
self._clear_cache()
return True
def _fix_params(self):
super(IU_TreeBasedIndex, self)._fix_params()
self._count_props()
def _clear_cache(self):
self._find_key.clear()
self._match_doc_id.clear()
# self._read_single_leaf_record.clear()
self._find_key_in_leaf.clear()
self._read_single_node_key.clear()
self._find_first_key_occurence_in_node.clear()
self._find_last_key_occurence_in_node.clear()
self._read_leaf_nr_of_elements.clear()
self._read_leaf_neighbours.clear()
self._read_leaf_nr_of_elements_and_neighbours.clear()
self._read_node_nr_of_elements_and_children_flag.clear()
def close_index(self):
super(IU_TreeBasedIndex, self).close_index()
self._clear_cache()
class IU_MultiTreeBasedIndex(IU_TreeBasedIndex):
"""
    Class that allows indexing more than one key per database record.
    It performs very well on GET/INSERT, but it's not optimized for
    UPDATE operations (it will always re-add everything).
"""
def __init__(self, *args, **kwargs):
super(IU_MultiTreeBasedIndex, self).__init__(*args, **kwargs)
def insert(self, doc_id, key, start, size, status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
ins = super(IU_MultiTreeBasedIndex, self).insert
for curr_key in key:
ins(doc_id, curr_key, start, size, status)
return True
def update(self, doc_id, key, u_start, u_size, u_status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
upd = super(IU_MultiTreeBasedIndex, self).update
for curr_key in key:
upd(doc_id, curr_key, u_start, u_size, u_status)
def delete(self, doc_id, key, start=0, size=0):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
delete = super(IU_MultiTreeBasedIndex, self).delete
for curr_key in key:
delete(doc_id, curr_key, start, size)
def get(self, key):
return super(IU_MultiTreeBasedIndex, self).get(key)
def make_key_value(self, data):
raise NotImplementedError()
# classes for public use, done in this way because of
# generation static files with indexes (_index directory)
class TreeBasedIndex(IU_TreeBasedIndex):
pass
class MultiTreeBasedIndex(IU_MultiTreeBasedIndex):
"""
    It allows indexing more than one key per record (e.g. prefix/infix/suffix search mechanisms).
    This class is designed to be used in custom indexes.
"""
pass
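# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how MultiTreeBasedIndex might be subclassed for prefix
# search. The class name, the 'name' document field and the 16-byte key width
# are assumptions made for illustration only.
class ExamplePrefixIndex(MultiTreeBasedIndex):
    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '16s'
        super(ExamplePrefixIndex, self).__init__(*args, **kwargs)
    def make_key_value(self, data):
        name = data.get('name')
        if name is None:
            return None  # documents without a 'name' field are not indexed here
        name = name[:16].lower()
        # index every prefix of the value, padded to the fixed key width
        keys = set(name[:i].rjust(16, '_') for i in xrange(1, len(name) + 1))
        return keys, None
    def make_key(self, key):
        return key[:16].lower().rjust(16, '_')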
| 107,535
|
Python
|
.py
| 1,904
| 35.946429
| 151
| 0.488193
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,457
|
sharded_index.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/sharded_index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.index import Index
# from CodernityDB.env import cdb_environment
# import warnings
class ShardedIndex(Index):
def __init__(self, db_path, name, *args, **kwargs):
"""
        There are 3 additional parameters. You have to hardcode them in your custom class; **never** pass them directly.
        :param int sh_nums: how many shards there should be
        :param class ind_class: Index class to use (HashIndex or your custom one)
        :param bool use_make_keys: if True, `make_key` and `make_key_value` will be overridden with those from the first shard
        The remaining parameters are passed straight to the `ind_class` shards.
"""
super(ShardedIndex, self).__init__(db_path, name)
try:
self.sh_nums = kwargs.pop('sh_nums')
except KeyError:
self.sh_nums = 5
try:
ind_class = kwargs.pop('ind_class')
except KeyError:
raise Exception("ind_class must be given")
else:
# if not isinstance(ind_class, basestring):
# ind_class = ind_class.__name__
self.ind_class = ind_class
if 'use_make_keys' in kwargs:
self.use_make_keys = kwargs.pop('use_make_keys')
else:
self.use_make_keys = False
self._set_shard_datas(*args, **kwargs)
self.patchers = [] # database object patchers
def _set_shard_datas(self, *args, **kwargs):
self.shards = {}
self.shards_r = {}
# ind_class = globals()[self.ind_class]
ind_class = self.ind_class
i = 0
for sh_name in [self.name + str(x) for x in xrange(self.sh_nums)]:
# dict is better than list in that case
self.shards[i] = ind_class(self.db_path, sh_name, *args, **kwargs)
self.shards_r['%02x' % i] = self.shards[i]
self.shards_r[i] = self.shards[i]
i += 1
if not self.use_make_keys:
self.make_key = self.shards[0].make_key
self.make_key_value = self.shards[0].make_key_value
self.last_used = 0
@property
def storage(self):
st = self.shards[self.last_used].storage
return st
def __getattr__(self, name):
return getattr(self.shards[self.last_used], name)
def open_index(self):
for curr in self.shards.itervalues():
curr.open_index()
def create_index(self):
for curr in self.shards.itervalues():
curr.create_index()
def destroy(self):
for curr in self.shards.itervalues():
curr.destroy()
def compact(self):
for curr in self.shards.itervalues():
curr.compact()
def reindex(self):
for curr in self.shards.itervalues():
curr.reindex()
def all(self, *args, **kwargs):
for curr in self.shards.itervalues():
for now in curr.all(*args, **kwargs):
yield now
def get_many(self, *args, **kwargs):
for curr in self.shards.itervalues():
for now in curr.get_many(*args, **kwargs):
yield now
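# --- Illustrative sketch (not part of the original module) ---
# A minimal example of hardcoding the three extra parameters described in the
# docstring above. The class name, the shard count of 10 and the hypothetical
# calculate_shard() helper are assumptions for illustration only; a complete
# subclass would also route insert/get/update/delete calls to the chosen shard.
class ExampleShardedIndex(ShardedIndex):
    def __init__(self, db_path, name, *args, **kwargs):
        # hardcode the sharding parameters instead of passing them from outside
        kwargs['sh_nums'] = 10
        kwargs['ind_class'] = Index  # placeholder; use a concrete index class here
        super(ExampleShardedIndex, self).__init__(db_path, name, *args, **kwargs)
    def calculate_shard(self, key):
        # hypothetical helper: pick a shard number for a given key
        return hash(key) % self.sh_nums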
| 3,765
|
Python
|
.py
| 92
| 32.880435
| 122
| 0.617301
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,458
|
database_safe_shared.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/database_safe_shared.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.env import cdb_environment
from CodernityDB.database import PreconditionsException, RevConflict, Database
# from database import Database
from collections import defaultdict
from functools import wraps
from types import MethodType
class th_safe_gen:
def __init__(self, name, gen, l=None):
self.lock = l
self.__gen = gen
self.name = name
def __iter__(self):
return self
def next(self):
with self.lock:
return self.__gen.next()
@staticmethod
def wrapper(method, index_name, meth_name, l=None):
@wraps(method)
def _inner(*args, **kwargs):
res = method(*args, **kwargs)
return th_safe_gen(index_name + "_" + meth_name, res, l)
return _inner
def safe_wrapper(method, lock):
@wraps(method)
def _inner(*args, **kwargs):
with lock:
return method(*args, **kwargs)
return _inner
class SafeDatabase(Database):
def __init__(self, path, *args, **kwargs):
super(SafeDatabase, self).__init__(path, *args, **kwargs)
self.indexes_locks = defaultdict(cdb_environment['rlock_obj'])
self.close_open_lock = cdb_environment['rlock_obj']()
self.main_lock = cdb_environment['rlock_obj']()
self.id_revs = {}
def __patch_index_gens(self, name):
ind = self.indexes_names[name]
for c in ('all', 'get_many'):
m = getattr(ind, c)
if getattr(ind, c + "_orig", None):
return
m_fixed = th_safe_gen.wrapper(m, name, c, self.indexes_locks[name])
setattr(ind, c, m_fixed)
setattr(ind, c + '_orig', m)
def __patch_index_methods(self, name):
ind = self.indexes_names[name]
lock = self.indexes_locks[name]
for curr in dir(ind):
meth = getattr(ind, curr)
if not curr.startswith('_') and isinstance(meth, MethodType):
setattr(ind, curr, safe_wrapper(meth, lock))
stor = ind.storage
for curr in dir(stor):
meth = getattr(stor, curr)
if not curr.startswith('_') and isinstance(meth, MethodType):
setattr(stor, curr, safe_wrapper(meth, lock))
def __patch_index(self, name):
self.__patch_index_methods(name)
self.__patch_index_gens(name)
def initialize(self, *args, **kwargs):
with self.close_open_lock:
res = super(SafeDatabase, self).initialize(*args, **kwargs)
for name in self.indexes_names.iterkeys():
self.indexes_locks[name] = cdb_environment['rlock_obj']()
return res
def open(self, *args, **kwargs):
with self.close_open_lock:
res = super(SafeDatabase, self).open(*args, **kwargs)
for name in self.indexes_names.iterkeys():
self.indexes_locks[name] = cdb_environment['rlock_obj']()
self.__patch_index(name)
return res
def create(self, *args, **kwargs):
with self.close_open_lock:
res = super(SafeDatabase, self).create(*args, **kwargs)
for name in self.indexes_names.iterkeys():
self.indexes_locks[name] = cdb_environment['rlock_obj']()
self.__patch_index(name)
return res
def close(self):
with self.close_open_lock:
return super(SafeDatabase, self).close()
def destroy(self):
with self.close_open_lock:
return super(SafeDatabase, self).destroy()
def add_index(self, *args, **kwargs):
with self.main_lock:
res = super(SafeDatabase, self).add_index(*args, **kwargs)
if self.opened:
self.indexes_locks[res] = cdb_environment['rlock_obj']()
self.__patch_index(res)
return res
def _single_update_index(self, index, data, db_data, doc_id):
with self.indexes_locks[index.name]:
super(SafeDatabase, self)._single_update_index(
index, data, db_data, doc_id)
def _single_delete_index(self, index, data, doc_id, old_data):
with self.indexes_locks[index.name]:
super(SafeDatabase, self)._single_delete_index(
index, data, doc_id, old_data)
def edit_index(self, *args, **kwargs):
with self.main_lock:
res = super(SafeDatabase, self).edit_index(*args, **kwargs)
if self.opened:
self.indexes_locks[res] = cdb_environment['rlock_obj']()
self.__patch_index(res)
return res
def set_indexes(self, *args, **kwargs):
try:
self.main_lock.acquire()
super(SafeDatabase, self).set_indexes(*args, **kwargs)
finally:
self.main_lock.release()
def reindex_index(self, index, *args, **kwargs):
if isinstance(index, basestring):
if not index in self.indexes_names:
raise PreconditionsException("No index named %s" % index)
index = self.indexes_names[index]
key = index.name + "reind"
self.main_lock.acquire()
if key in self.indexes_locks:
lock = self.indexes_locks[index.name + "reind"]
else:
self.indexes_locks[index.name +
"reind"] = cdb_environment['rlock_obj']()
lock = self.indexes_locks[index.name + "reind"]
self.main_lock.release()
try:
lock.acquire()
super(SafeDatabase, self).reindex_index(
index, *args, **kwargs)
finally:
lock.release()
def flush(self):
try:
self.main_lock.acquire()
super(SafeDatabase, self).flush()
finally:
self.main_lock.release()
def fsync(self):
try:
self.main_lock.acquire()
super(SafeDatabase, self).fsync()
finally:
self.main_lock.release()
def _update_id_index(self, _rev, data):
with self.indexes_locks['id']:
return super(SafeDatabase, self)._update_id_index(_rev, data)
def _delete_id_index(self, _id, _rev, data):
with self.indexes_locks['id']:
return super(SafeDatabase, self)._delete_id_index(_id, _rev, data)
def _update_indexes(self, _rev, data):
_id, new_rev, db_data = self._update_id_index(_rev, data)
with self.main_lock:
self.id_revs[_id] = new_rev
for index in self.indexes[1:]:
with self.main_lock:
curr_rev = self.id_revs.get(_id) # get last _id, _rev
if curr_rev != new_rev:
                    break  # a newer update is on the way, stop the current one
self._single_update_index(index, data, db_data, _id)
with self.main_lock:
if self.id_revs[_id] == new_rev:
del self.id_revs[_id]
return _id, new_rev
def _delete_indexes(self, _id, _rev, data):
old_data = self.get('id', _id)
if old_data['_rev'] != _rev:
raise RevConflict()
with self.main_lock:
self.id_revs[_id] = _rev
for index in self.indexes[1:]:
self._single_delete_index(index, data, _id, old_data)
self._delete_id_index(_id, _rev, data)
with self.main_lock:
if self.id_revs[_id] == _rev:
del self.id_revs[_id]
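# --- Illustrative sketch (not part of the original module) ---
# A minimal example of concurrent use, assuming the thread-safe wrapper
# (CodernityDB.database_thread_safe.ThreadSafeDatabase), which sets up
# cdb_environment['rlock_obj'] before building on this class. The database
# path and the worker logic are assumptions for illustration only.
if __name__ == '__main__':
    from threading import Thread
    from CodernityDB.database_thread_safe import ThreadSafeDatabase
    db = ThreadSafeDatabase('/tmp/safe_shared_demo')
    db.create()
    def worker(n):
        # each thread inserts a few documents concurrently
        for i in xrange(10):
            db.insert(dict(worker=n, value=i))
    threads = [Thread(target=worker, args=(n,)) for n in xrange(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    db.close()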
| 8,092
|
Python
|
.py
| 194
| 31.948454
| 79
| 0.586777
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,459
|
hash_index.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/hash_index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.index import (Index,
IndexException,
DocIdNotFound,
ElemNotFound,
TryReindexException,
IndexPreconditionsException)
import os
import marshal
import io
import struct
import shutil
from CodernityDB.storage import IU_Storage, DummyStorage
from CodernityDB.env import cdb_environment
if cdb_environment.get('rlock_obj'):
from CodernityDB import patch
patch.patch_cache_rr(cdb_environment['rlock_obj'])
from CodernityDB.rr_cache import cache1lvl
from CodernityDB.misc import random_hex_32
try:
from CodernityDB import __version__
except ImportError:
from __init__ import __version__
class IU_HashIndex(Index):
"""
    This class is for internal use only; if you want to use a hash index, subclass :py:class:`HashIndex` instead of this one.
    That design is because the main index logic should always live in the database, not in custom user indexes.
"""
def __init__(self, db_path, name, entry_line_format='<32s{key}IIcI', hash_lim=0xfffff, storage_class=None, key_format='c'):
"""
        The index is capable of resolving conflicts by `separate chaining`
:param db_path: database path
:type db_path: string
:param name: index name
:type name: ascii string
        :param entry_line_format: entry line format; the `key_format` parameter value will replace `{key}` if present.
        :type entry_line_format: string (32s{key}IIcI by default) {doc_id}{hash_key}{start}{size}{status}{next}
        :param hash_lim: maximum number of hash function results (remember the birthday problem), counted from 0
        :type hash_lim: integer
        :param storage_class: Storage class; by default the standard :py:class:`CodernityDB.storage.Storage` will be used (if given as a string, it has to be accessible via globals()[storage_class])
        :type storage_class: a CodernityDB.storage.Storage subclass, its name as a string, or None
        :param key_format: an index key format
"""
if key_format and '{key}' in entry_line_format:
entry_line_format = entry_line_format.replace('{key}', key_format)
super(IU_HashIndex, self).__init__(db_path, name)
self.hash_lim = hash_lim
if not storage_class:
storage_class = IU_Storage
if storage_class and not isinstance(storage_class, basestring):
storage_class = storage_class.__name__
self.storage_class = storage_class
self.storage = None
self.bucket_line_format = "<I"
self.bucket_line_size = struct.calcsize(self.bucket_line_format)
self.entry_line_format = entry_line_format
self.entry_line_size = struct.calcsize(self.entry_line_format)
cache = cache1lvl(100)
self._find_key = cache(self._find_key)
self._locate_doc_id = cache(self._locate_doc_id)
self.bucket_struct = struct.Struct(self.bucket_line_format)
self.entry_struct = struct.Struct(self.entry_line_format)
self.data_start = (
self.hash_lim + 1) * self.bucket_line_size + self._start_ind + 2
def _fix_params(self):
super(IU_HashIndex, self)._fix_params()
self.bucket_line_size = struct.calcsize(self.bucket_line_format)
self.entry_line_size = struct.calcsize(self.entry_line_format)
self.bucket_struct = struct.Struct(self.bucket_line_format)
self.entry_struct = struct.Struct(self.entry_line_format)
self.data_start = (
self.hash_lim + 1) * self.bucket_line_size + self._start_ind + 2
def open_index(self):
if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException("Doesn't exists")
self.buckets = io.open(
os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
self._fix_params()
self._open_storage()
def create_index(self):
if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException('Already exists')
with io.open(os.path.join(self.db_path, self.name + "_buck"), 'w+b') as f:
props = dict(name=self.name,
bucket_line_format=self.bucket_line_format,
entry_line_format=self.entry_line_format,
hash_lim=self.hash_lim,
version=self.__version__,
storage_class=self.storage_class)
f.write(marshal.dumps(props))
self.buckets = io.open(
os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
self._create_storage()
def destroy(self):
super(IU_HashIndex, self).destroy()
self._clear_cache()
def _open_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.open()
def _create_storage(self):
s = globals()[self.storage_class]
if not self.storage:
self.storage = s(self.db_path, self.name)
self.storage.create()
# def close_index(self):
# self.buckets.flush()
# self.buckets.close()
# self.storage.close()
# @lfu_cache(100)
def _find_key(self, key):
"""
Find the key position
:param key: the key to find
"""
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
if not location:
return None, None, 0, 0, 'u'
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
if status == 'd': # when first record from many is deleted
while True:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, _next)
if status != 'd':
break
return doc_id, l_key, start, size, status
else:
return None, None, 0, 0, 'u'
def _find_key_many(self, key, limit=1, offset=0):
location = None
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
while offset:
if not location:
break
try:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
except IndexException:
break
else:
if status != 'd':
if l_key == key: # in case of hash function conflicts
offset -= 1
location = _next
while limit:
if not location:
break
try:
found_at, doc_id, l_key, start, size, status, _next = self._locate_key(
key, location)
except IndexException:
break
else:
if status != 'd':
if l_key == key: # in case of hash function conflicts
yield doc_id, start, size, status
limit -= 1
location = _next
def _calculate_position(self, key):
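        # mask the key's hash with hash_lim to pick a bucket slot; each slot is
        # bucket_line_size bytes and the slots start right after the index header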
return abs(hash(key) & self.hash_lim) * self.bucket_line_size + self._start_ind
# TODO add cache!
def _locate_key(self, key, start):
"""
        Locate the position of the key; it will iterate using the `next` field in the record
        until the required key is found.
:param key: the key to locate
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
try:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
except struct.error:
raise ElemNotFound(
"Not found") # not found but might be also broken
if l_key == key:
break
else:
if not _next:
# not found
raise ElemNotFound("Not found")
else:
location = _next # go to next record
return location, doc_id, l_key, start, size, status, _next
# @lfu_cache(100)
def _locate_doc_id(self, doc_id, key, start):
"""
        Locate the position of the doc_id; it will iterate using the `next` field in the record
        until the required doc_id is found.
:param doc_id: the doc_id to locate
:param key: key value
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
try:
l_doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
except:
raise DocIdNotFound(
"Doc_id '%s' for '%s' not found" % (doc_id, key))
if l_doc_id == doc_id and l_key == key: # added for consistency
break
else:
if not _next:
# not found
raise DocIdNotFound(
"Doc_id '%s' for '%s' not found" % (doc_id, key))
else:
location = _next # go to next record
return location, doc_id, l_key, start, size, status, _next
def _find_place(self, start):
"""
        Find a place to put the key. It will iterate using the `next` field in the record until
        an empty `next` is found.
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
if not _next or status == 'd':
return self.buckets.tell() - self.entry_line_size, doc_id, l_key, start, size, status, _next
else:
location = _next # go to next record
def update(self, doc_id, key, u_start=0, u_size=0, u_status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# test if it's unique or not really unique hash
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
raise ElemNotFound("Location '%s' not found" % doc_id)
found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location)
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
u_start,
u_size,
u_status,
_next))
self.flush()
self._find_key.delete(key)
self._locate_doc_id.delete(doc_id)
return True
def insert(self, doc_id, key, start, size, status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# conflict occurs?
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
location = 0
if location:
# last key with that hash
try:
found_at, _doc_id, _key, _start, _size, _status, _next = self._locate_doc_id(doc_id, key, location)
except DocIdNotFound:
found_at, _doc_id, _key, _start, _size, _status, _next = self._find_place(location)
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
_next))
# self.flush()
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(_doc_id,
_key,
_start,
_size,
_status,
wrote_at))
else:
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
_next))
self.flush()
self._locate_doc_id.delete(doc_id)
self._find_key.delete(_key)
# self._find_key.delete(key)
# self._locate_key.delete(_key)
return True
# raise NotImplementedError
else:
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
status,
0))
# self.flush()
self._find_key.delete(key)
self.buckets.seek(start_position)
self.buckets.write(self.bucket_struct.pack(wrote_at))
self.flush()
return True
def get(self, key):
return self._find_key(self.make_key(key))
def get_many(self, key, limit=1, offset=0):
return self._find_key_many(self.make_key(key), limit, offset)
def all(self, limit=-1, offset=0):
self.buckets.seek(self.data_start)
while offset:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, key, start, size, status, _next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
offset -= 1
while limit:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, key, start, size, status, _next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
yield doc_id, key, start, size, status
limit -= 1
def _fix_link(self, key, pos_prev, pos_next):
# CHECKIT why I need that hack
if pos_prev >= self.data_start:
self.buckets.seek(pos_prev)
data = self.buckets.read(self.entry_line_size)
if data:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
self.buckets.seek(pos_prev)
self.buckets.write(self.entry_struct.pack(doc_id,
l_key,
start,
size,
status,
pos_next))
self.flush()
if pos_next:
self.buckets.seek(pos_next)
data = self.buckets.read(self.entry_line_size)
if data:
doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data)
self.buckets.seek(pos_next)
self.buckets.write(self.entry_struct.pack(doc_id,
l_key,
start,
size,
status,
_next))
self.flush()
return
def delete(self, doc_id, key, start=0, size=0):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
# case happens when trying to delete element with new index key in data
# after adding new index to database without reindex
raise TryReindexException()
found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location)
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(doc_id,
key,
start,
size,
'd',
_next))
self.flush()
# self._fix_link(_key, _prev, _next)
self._find_key.delete(key)
self._locate_doc_id.delete(doc_id)
return True
def compact(self, hash_lim=None):
if not hash_lim:
hash_lim = self.hash_lim
compact_ind = self.__class__(
self.db_path, self.name + '_compact', hash_lim=hash_lim)
compact_ind.create_index()
gen = self.all()
while True:
try:
doc_id, key, start, size, status = gen.next()
except StopIteration:
break
self.storage._f.seek(start)
value = self.storage._f.read(size)
start_ = compact_ind.storage._f.tell()
compact_ind.storage._f.write(value)
compact_ind.insert(doc_id, key, start_, size, status)
compact_ind.close_index()
original_name = self.name
# os.unlink(os.path.join(self.db_path, self.name + "_buck"))
self.close_index()
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_buck"), os.path.join(self.db_path, self.name + "_buck"))
shutil.move(os.path.join(compact_ind.db_path, compact_ind.
name + "_stor"), os.path.join(self.db_path, self.name + "_stor"))
# self.name = original_name
self.open_index() # reload...
self.name = original_name
self._save_params(dict(name=original_name))
self._fix_params()
self._clear_cache()
return True
def make_key(self, key):
return key
def make_key_value(self, data):
return '1', data
def _clear_cache(self):
self._find_key.clear()
self._locate_doc_id.clear()
def close_index(self):
super(IU_HashIndex, self).close_index()
self._clear_cache()
class IU_UniqueHashIndex(IU_HashIndex):
"""
    Index for *unique* keys! Designed to be an **id** index.
    This class is for internal use only; if you want to use a unique hash index, subclass :py:class:`UniqueHashIndex` instead of this one.
    That design is because the main index logic should always live in the database, not in custom user indexes.
"""
def __init__(self, db_path, name, entry_line_format="<32s8sIIcI", *args, **kwargs):
if 'key' in kwargs:
raise IndexPreconditionsException(
"UniqueHashIndex doesn't accept key parameter'")
super(IU_UniqueHashIndex, self).__init__(db_path, name,
entry_line_format, *args, **kwargs)
self.create_key = random_hex_32 # : set the function to create random key when no _id given
# self.entry_struct=struct.Struct(entry_line_format)
# @lfu_cache(100)
def _find_key(self, key):
"""
Find the key position
:param key: the key to find
"""
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
found_at, l_key, rev, start, size, status, _next = self._locate_key(
key, location)
return l_key, rev, start, size, status
else:
return None, None, 0, 0, 'u'
def _find_key_many(self, *args, **kwargs):
raise NotImplementedError()
def _find_place(self, start, key):
"""
        Find a place to put the key. It will iterate using the `next` field in the record until
        an empty `next` is found.
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
l_key, rev, start, size, status, _next = self.entry_struct.unpack(
data)
if l_key == key:
raise IndexException("The '%s' key already exists" % key)
if not _next or status == 'd':
return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next
else:
location = _next # go to next record
# @lfu_cache(100)
def _locate_key(self, key, start):
"""
        Locate the position of the key; it will iterate using the `next` field in the record
        until the required key is found.
:param key: the key to locate
:param start: position to start from
"""
location = start
while True:
self.buckets.seek(location)
data = self.buckets.read(self.entry_line_size)
# todo, maybe partial read there...
try:
l_key, rev, start, size, status, _next = self.entry_struct.unpack(data)
except struct.error:
raise ElemNotFound("Location '%s' not found" % key)
if l_key == key:
break
else:
if not _next:
# not found
raise ElemNotFound("Location '%s' not found" % key)
else:
location = _next # go to next record
return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next
def update(self, key, rev, u_start=0, u_size=0, u_status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# test if it's unique or not really unique hash
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
raise ElemNotFound("Location '%s' not found" % key)
found_at, _key, _rev, start, size, status, _next = self._locate_key(
key, location)
if u_start == 0:
u_start = start
if u_size == 0:
u_size = size
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(key,
rev,
u_start,
u_size,
u_status,
_next))
self.flush()
self._find_key.delete(key)
return True
def insert(self, key, rev, start, size, status='o'):
start_position = self._calculate_position(key)
self.buckets.seek(start_position)
curr_data = self.buckets.read(self.bucket_line_size)
# conflict occurs?
if curr_data:
location = self.bucket_struct.unpack(curr_data)[0]
else:
location = 0
if location:
# last key with that hash
found_at, _key, _rev, _start, _size, _status, _next = self._find_place(
location, key)
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(key,
rev,
start,
size,
status,
_next))
# self.flush()
self.buckets.seek(found_at)
self.buckets.write(self.entry_struct.pack(_key,
_rev,
_start,
_size,
_status,
wrote_at))
self.flush()
self._find_key.delete(_key)
# self._locate_key.delete(_key)
return True
# raise NotImplementedError
else:
self.buckets.seek(0, 2)
wrote_at = self.buckets.tell()
# check if position is bigger than all hash entries...
if wrote_at < self.data_start:
self.buckets.seek(self.data_start)
wrote_at = self.buckets.tell()
self.buckets.write(self.entry_struct.pack(key,
rev,
start,
size,
status,
0))
# self.flush()
self.buckets.seek(start_position)
self.buckets.write(self.bucket_struct.pack(wrote_at))
self.flush()
self._find_key.delete(key)
return True
def all(self, limit=-1, offset=0):
self.buckets.seek(self.data_start)
while offset:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
offset -= 1
while limit:
curr_data = self.buckets.read(self.entry_line_size)
if not curr_data:
break
try:
doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data)
except IndexException:
break
else:
if status != 'd':
yield doc_id, rev, start, size, status
limit -= 1
def get_many(self, *args, **kwargs):
raise NotImplementedError()
def delete(self, key, start=0, size=0):
self.update(key, '00000000', start, size, 'd')
def make_key_value(self, data):
_id = data['_id']
try:
_id = bytes(data['_id'])
except:
            raise IndexPreconditionsException(
                "_id must be a valid string/bytes object")
        if len(_id) != 32:
            raise IndexPreconditionsException("Invalid _id length")
del data['_id']
del data['_rev']
return _id, data
def destroy(self):
Index.destroy(self)
self._clear_cache()
def _clear_cache(self):
self._find_key.clear()
def insert_with_storage(self, _id, _rev, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.insert(_id, _rev, start, size)
def update_with_storage(self, _id, _rev, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.update(_id, _rev, start, size)
class DummyHashIndex(IU_HashIndex):
def __init__(self, db_path, name, entry_line_format="<32s4sIIcI", *args, **kwargs):
super(DummyHashIndex, self).__init__(db_path, name,
entry_line_format, *args, **kwargs)
self.create_key = random_hex_32 # : set the function to create random key when no _id given
# self.entry_struct=struct.Struct(entry_line_format)
def update(self, *args, **kwargs):
return True
def insert(self, *args, **kwargs):
return True
def all(self, *args, **kwargs):
raise StopIteration
def get(self, *args, **kwargs):
raise ElemNotFound
def get_many(self, *args, **kwargs):
raise StopIteration
def delete(self, *args, **kwargs):
pass
def make_key_value(self, data):
return '1', {'_': 1}
def destroy(self):
pass
def _clear_cache(self):
pass
def _open_storage(self):
if not self.storage:
self.storage = DummyStorage()
self.storage.open()
def _create_storage(self):
if not self.storage:
self.storage = DummyStorage()
self.storage.create()
class IU_MultiHashIndex(IU_HashIndex):
"""
    Class that allows indexing more than one key per database record.
    It performs very well on GET/INSERT, but it's not optimized for
    UPDATE operations (it will always re-add everything).
"""
def __init__(self, *args, **kwargs):
super(IU_MultiHashIndex, self).__init__(*args, **kwargs)
def insert(self, doc_id, key, start, size, status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
ins = super(IU_MultiHashIndex, self).insert
for curr_key in key:
ins(doc_id, curr_key, start, size, status)
return True
def update(self, doc_id, key, u_start, u_size, u_status='o'):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
upd = super(IU_MultiHashIndex, self).update
for curr_key in key:
upd(doc_id, curr_key, u_start, u_size, u_status)
def delete(self, doc_id, key, start=0, size=0):
if isinstance(key, (list, tuple)):
key = set(key)
elif not isinstance(key, set):
key = set([key])
delete = super(IU_MultiHashIndex, self).delete
for curr_key in key:
delete(doc_id, curr_key, start, size)
def get(self, key):
return super(IU_MultiHashIndex, self).get(key)
def make_key_value(self, data):
raise NotImplementedError()
# classes for public use, done in this way because of
# generation static files with indexes (_index directory)
class HashIndex(IU_HashIndex):
"""
That class is designed to be used in custom indexes.
"""
pass
class UniqueHashIndex(IU_UniqueHashIndex):
"""
    That class is designed to be used in custom indexes. It's designed to be the **id** index.
"""
pass
class MultiHashIndex(IU_MultiHashIndex):
"""
That class is designed to be used in custom indexes.
"""
| 33,813
|
Python
|
.py
| 769
| 29.462939
| 176
| 0.51623
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,460
|
__init__.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.5.0'
__license__ = "Apache 2.0"
| 700
|
Python
|
.py
| 18
| 37.777778
| 74
| 0.747059
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,461
|
storage.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/storage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
import shutil
import marshal
import io
try:
from CodernityDB import __version__
except ImportError:
from __init__ import __version__
class StorageException(Exception):
pass
class DummyStorage(object):
"""
Storage mostly used to fake real storage
"""
def create(self, *args, **kwargs):
pass
def open(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
def data_from(self, *args, **kwargs):
pass
def data_to(self, *args, **kwargs):
pass
def save(self, *args, **kwargs):
return 0, 0
def insert(self, *args, **kwargs):
return self.save(*args, **kwargs)
def update(self, *args, **kwargs):
return 0, 0
def get(self, *args, **kwargs):
return None
# def compact(self, *args, **kwargs):
# pass
def fsync(self, *args, **kwargs):
pass
def flush(self, *args, **kwargs):
pass
class IU_Storage(object):
__version__ = __version__
def __init__(self, db_path, name='main'):
self.db_path = db_path
self.name = name
self._header_size = 100
def create(self):
if os.path.exists(os.path.join(self.db_path, self.name + "_stor")):
raise IOError("Storage already exists!")
with io.open(os.path.join(self.db_path, self.name + "_stor"), 'wb') as f:
f.write(struct.pack("10s90s", self.__version__, '|||||'))
f.close()
self._f = io.open(os.path.join(
self.db_path, self.name + "_stor"), 'r+b', buffering=0)
self.flush()
self._f.seek(0, 2)
def open(self):
if not os.path.exists(os.path.join(self.db_path, self.name + "_stor")):
raise IOError("Storage doesn't exists!")
self._f = io.open(os.path.join(
self.db_path, self.name + "_stor"), 'r+b', buffering=0)
self.flush()
self._f.seek(0, 2)
def destroy(self):
os.unlink(os.path.join(self.db_path, self.name + '_stor'))
def close(self):
self._f.close()
# self.flush()
# self.fsync()
def data_from(self, data):
return marshal.loads(data)
def data_to(self, data):
return marshal.dumps(data)
def save(self, data):
s_data = self.data_to(data)
self._f.seek(0, 2)
start = self._f.tell()
size = len(s_data)
self._f.write(s_data)
self.flush()
return start, size
def insert(self, data):
return self.save(data)
def update(self, data):
return self.save(data)
def get(self, start, size, status='c'):
if status == 'd':
return None
else:
self._f.seek(start)
return self.data_from(self._f.read(size))
def flush(self):
self._f.flush()
def fsync(self):
os.fsync(self._f.fileno())
# classes for public use, done in this way because of
# generation static files with indexes (_index directory)
class Storage(IU_Storage):
pass
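# --- Illustrative sketch (not part of the original module) ---
# A minimal example of the save/get round trip: data is marshal-encoded,
# appended to the '<name>_stor' file, and retrieved later by (start, size).
# The temporary directory and the sample document are assumptions for
# illustration only.
if __name__ == '__main__':
    import tempfile
    demo_dir = tempfile.mkdtemp()
    stor = Storage(demo_dir, 'demo')
    stor.create()
    start, size = stor.save({'answer': 42})
    assert stor.get(start, size) == {'answer': 42}
    stor.close()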
| 3,736
|
Python
|
.py
| 114
| 26.342105
| 81
| 0.608708
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,462
|
patch.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/patch.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.misc import NONE
def __patch(obj, name, new):
n = NONE()
orig = getattr(obj, name, n)
if orig is not n:
if orig == new:
raise Exception("Shouldn't happen, new and orig are the same")
setattr(obj, name, new)
return
def patch_cache_lfu(lock_obj):
"""
    Patches the cache mechanism to be thread safe (gevent ones also)
    .. note::
        It's an internal CodernityDB mechanism, it will be called when needed
"""
import lfu_cache
import lfu_cache_with_lock
lfu_lock1lvl = lfu_cache_with_lock.create_cache1lvl(lock_obj)
lfu_lock2lvl = lfu_cache_with_lock.create_cache2lvl(lock_obj)
__patch(lfu_cache, 'cache1lvl', lfu_lock1lvl)
__patch(lfu_cache, 'cache2lvl', lfu_lock2lvl)
def patch_cache_rr(lock_obj):
"""
    Patches the cache mechanism to be thread safe (gevent ones also)
    .. note::
        It's an internal CodernityDB mechanism, it will be called when needed
"""
import rr_cache
import rr_cache_with_lock
rr_lock1lvl = rr_cache_with_lock.create_cache1lvl(lock_obj)
rr_lock2lvl = rr_cache_with_lock.create_cache2lvl(lock_obj)
__patch(rr_cache, 'cache1lvl', rr_lock1lvl)
__patch(rr_cache, 'cache2lvl', rr_lock2lvl)
def patch_flush_fsync(db_obj):
"""
Will always execute index.fsync after index.flush.
.. note::
        It's for advanced users; use it when you understand the difference between `flush` and `fsync`, and when you definitely need it.
        It's important to call it **AFTER** the database has all its indexes etc. (after db.create or db.open)
Example usage::
...
db = Database('/tmp/patch_demo')
db.create()
patch_flush_fsync(db)
...
"""
def always_fsync(ind_obj):
def _inner():
ind_obj.orig_flush()
ind_obj.fsync()
return _inner
for index in db_obj.indexes:
setattr(index, 'orig_flush', index.flush)
setattr(index, 'flush', always_fsync(index))
setattr(db_obj, 'orig_flush', db_obj.flush)
setattr(db_obj, 'flush', always_fsync(db_obj))
return
| 2,770
|
Python
|
.py
| 73
| 32.684932
| 130
| 0.679146
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,463
|
index.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/index.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import marshal
import struct
import shutil
from CodernityDB.storage import IU_Storage, DummyStorage
try:
from CodernityDB import __version__
except ImportError:
from __init__ import __version__
import io
class IndexException(Exception):
pass
class IndexNotFoundException(IndexException):
pass
class ReindexException(IndexException):
pass
class TryReindexException(ReindexException):
pass
class ElemNotFound(IndexException):
pass
class DocIdNotFound(ElemNotFound):
pass
class IndexConflict(IndexException):
pass
class IndexPreconditionsException(IndexException):
pass
class Index(object):
__version__ = __version__
custom_header = "" # : use it for imports required by your index
def __init__(self,
db_path,
name):
self.name = name
self._start_ind = 500
self.db_path = db_path
def open_index(self):
if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
raise IndexException("Doesn't exists")
self.buckets = io.open(
os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
self._fix_params()
self._open_storage()
def _close(self):
self.buckets.close()
self.storage.close()
def close_index(self):
self.flush()
self.fsync()
self._close()
def create_index(self):
raise NotImplementedError()
def _fix_params(self):
self.buckets.seek(0)
props = marshal.loads(self.buckets.read(self._start_ind))
for k, v in props.iteritems():
self.__dict__[k] = v
self.buckets.seek(0, 2)
def _save_params(self, in_params={}):
self.buckets.seek(0)
props = marshal.loads(self.buckets.read(self._start_ind))
props.update(in_params)
self.buckets.seek(0)
data = marshal.dumps(props)
if len(data) > self._start_ind:
raise IndexException("To big props")
self.buckets.write(data)
self.flush()
self.buckets.seek(0, 2)
self.__dict__.update(props)
def _open_storage(self, *args, **kwargs):
pass
def _create_storage(self, *args, **kwargs):
pass
def _destroy_storage(self, *args, **kwargs):
self.storage.destroy()
def _find_key(self, key):
raise NotImplementedError()
def update(self, doc_id, key, start, size):
raise NotImplementedError()
def insert(self, doc_id, key, start, size):
raise NotImplementedError()
def get(self, key):
raise NotImplementedError()
def get_many(self, key, start_from=None, limit=0):
raise NotImplementedError()
def all(self, start_pos):
raise NotImplementedError()
def delete(self, key, start, size):
raise NotImplementedError()
def make_key_value(self, data):
raise NotImplementedError()
def make_key(self, data):
raise NotImplementedError()
def compact(self, *args, **kwargs):
raise NotImplementedError()
def destroy(self, *args, **kwargs):
self._close()
bucket_file = os.path.join(self.db_path, self.name + '_buck')
os.unlink(bucket_file)
self._destroy_storage()
self._find_key.clear()
def flush(self):
try:
self.buckets.flush()
self.storage.flush()
except:
pass
def fsync(self):
try:
os.fsync(self.buckets.fileno())
self.storage.fsync()
except:
pass
def update_with_storage(self, doc_id, key, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.update(doc_id, key, start, size)
def insert_with_storage(self, doc_id, key, value):
if value:
start, size = self.storage.insert(value)
else:
start = 1
size = 0
return self.insert(doc_id, key, start, size)
| 4,746
|
Python
|
.py
| 143
| 26.132867
| 80
| 0.634586
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,464
|
migrate.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/migrate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.database import Database
import shutil
import os
def migrate(source, destination):
"""
Very basic for now
"""
dbs = Database(source)
dbt = Database(destination)
dbs.open()
dbt.create()
dbt.close()
for curr in os.listdir(os.path.join(dbs.path, '_indexes')):
if curr != '00id.py':
shutil.copyfile(os.path.join(dbs.path, '_indexes', curr),
os.path.join(dbt.path, '_indexes', curr))
dbt.open()
for c in dbs.all('id'):
del c['_rev']
dbt.insert(c)
return True
if __name__ == '__main__':
import sys
migrate(sys.argv[1], sys.argv[2])
| 1,317
|
Python
|
.py
| 40
| 28.8
| 74
| 0.675314
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,465
|
env.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/env.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
It's the CodernityDB environment.
Handles internal information.
"""
cdb_environment = {
'mode': 'normal'
}
| 764
|
Python
|
.py
| 23
| 31.956522
| 74
| 0.756428
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,466
|
indexcreator.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/indexcreator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import tokenize
import token
import uuid
class IndexCreatorException(Exception):
def __init__(self, ex, line=None):
self.ex = ex
self.line = line
def __str__(self):
if self.line:
return repr(self.ex + "(in line: %d)" % self.line)
return repr(self.ex)
class IndexCreatorFunctionException(IndexCreatorException):
pass
class IndexCreatorValueException(IndexCreatorException):
pass
class Parser(object):
def __init__(self):
pass
def parse(self, data, name=None):
if not name:
self.name = "_" + uuid.uuid4().hex
else:
self.name = name
self.ind = 0
self.stage = 0
self.logic = ['and', 'or', 'in']
self.logic2 = ['&', '|']
self.allowed_props = {'TreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'],
'HashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'],
'MultiHashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'],
'MultiTreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format']
}
self.funcs = {'md5': (['md5'], ['.digest()']),
'len': (['len'], []),
'str': (['str'], []),
'fix_r': (['self.fix_r'], []),
'prefix': (['self.prefix'], []),
'infix': (['self.infix'], []),
'suffix': (['self.suffix'], [])
}
self.handle_int_imports = {'infix': "from itertools import izip\n"}
self.funcs_with_body = {'fix_r':
(""" def fix_r(self,s,l):
e = len(s)
if e == l:
return s
elif e > l:
return s[:l]
else:
return s.rjust(l,'_')\n""", False),
'prefix':
(""" def prefix(self,s,m,l,f):
t = len(s)
if m < 1:
m = 1
o = set()
if t > l:
s = s[:l]
t = l
while m <= t:
o.add(s.rjust(f,'_'))
s = s[:-1]
t -= 1
return o\n""", False),
'suffix':
(""" def suffix(self,s,m,l,f):
t = len(s)
if m < 1:
m = 1
o = set()
if t > l:
s = s[t-l:]
t = len(s)
while m <= t:
o.add(s.rjust(f,'_'))
s = s[1:]
t -= 1
return o\n""", False),
'infix':
(""" def infix(self,s,m,l,f):
t = len(s)
o = set()
for x in xrange(m - 1, l):
t = (s, )
for y in xrange(0, x):
t += (s[y + 1:],)
o.update(set(''.join(x).rjust(f, '_').lower() for x in izip(*t)))
return o\n""", False)}
self.none = ['None', 'none', 'null']
self.props_assign = ['=', ':']
self.all_adj_num_comp = {token.NUMBER: (
token.NUMBER, token.NAME, '-', '('),
token.NAME: (token.NUMBER, token.NAME, '-', '('),
')': (token.NUMBER, token.NAME, '-', '(')
}
self.all_adj_num_op = {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, '('),
')': (token.NUMBER, token.NAME, '(')
}
self.allowed_adjacent = {
"<=": self.all_adj_num_comp,
">=": self.all_adj_num_comp,
">": self.all_adj_num_comp,
"<": self.all_adj_num_comp,
"==": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
token.STRING: (token.NAME, token.STRING, '('),
')': (token.NUMBER, token.NAME, token.STRING, '('),
']': (token.NUMBER, token.NAME, token.STRING, '(')
},
"+": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '('),
token.STRING: (token.NAME, token.STRING, '('),
')': (token.NUMBER, token.NAME, token.STRING, '('),
']': (token.NUMBER, token.NAME, token.STRING, '(')
},
"-": {token.NUMBER: (token.NUMBER, token.NAME, '('),
token.NAME: (token.NUMBER, token.NAME, '('),
')': (token.NUMBER, token.NAME, '('),
'<': (token.NUMBER, token.NAME, '('),
'>': (token.NUMBER, token.NAME, '('),
'<=': (token.NUMBER, token.NAME, '('),
'>=': (token.NUMBER, token.NAME, '('),
'==': (token.NUMBER, token.NAME, '('),
']': (token.NUMBER, token.NAME, '(')
},
"*": self.all_adj_num_op,
"/": self.all_adj_num_op,
"%": self.all_adj_num_op,
",": {token.NUMBER: (token.NUMBER, token.NAME, token.STRING, '{', '[', '('),
token.NAME: (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
token.STRING: (token.NAME, token.STRING, token.NUMBER, '(', '{', '['),
')': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
']': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['),
'}': (token.NUMBER, token.NAME, token.STRING, '(', '{', '[')
}
}
def is_num(s):
m = re.search('[^0-9*()+\-\s/]+', s)
return not m
def is_string(s):
m = re.search('\s*(?P<a>[\'\"]+).*?(?P=a)\s*', s)
return m
data = re.split('make_key_value\:', data)
if len(data) < 2:
raise IndexCreatorFunctionException(
"Couldn't find a definition of make_key_value function!\n")
spl1 = re.split('make_key\:', data[0])
spl2 = re.split('make_key\:', data[1])
self.funcs_rev = False
if len(spl1) > 1:
data = [spl1[0]] + [data[1]] + [spl1[1]]
self.funcs_rev = True
elif len(spl2) > 1:
data = [data[0]] + spl2
else:
data.append("key")
if data[1] == re.search('\s*', data[1], re.S | re.M).group(0):
raise IndexCreatorFunctionException("Empty function body ",
len(re.split('\n', data[0])) + (len(re.split('\n', data[2])) if self.funcs_rev else 1) - 1)
if data[2] == re.search('\s*', data[2], re.S | re.M).group(0):
raise IndexCreatorFunctionException("Empty function body ",
len(re.split('\n', data[0])) + (1 if self.funcs_rev else len(re.split('\n', data[1]))) - 1)
if data[0] == re.search('\s*', data[0], re.S | re.M).group(0):
raise IndexCreatorValueException("You didn't set any properity or you set them not at the begining of the code\n")
data = [re.split(
'\n', data[0]), re.split('\n', data[1]), re.split('\n', data[2])]
self.cnt_lines = (len(data[0]), len(data[1]), len(data[2]))
ind = 0
self.predata = data
self.data = [[], [], []]
for i, v in enumerate(self.predata[0]):
for k, w in enumerate(self.predata[0][i]):
if self.predata[0][i][k] in self.props_assign:
if not is_num(self.predata[0][i][k + 1:]) and self.predata[0][i].strip()[:4] != 'type' and self.predata[0][i].strip()[:4] != 'name':
s = self.predata[0][i][k + 1:]
self.predata[0][i] = self.predata[0][i][:k + 1]
m = re.search('\s+', s.strip())
if not is_string(s) and not m:
s = "'" + s.strip() + "'"
self.predata[0][i] += s
break
for n, i in enumerate(self.predata):
for k in i:
k = k.strip()
if k:
self.data[ind].append(k)
self.check_enclosures(k, n)
ind += 1
return self.parse_ex()
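    # --- Illustrative sketch (not part of the original module) ---
    # An example of the kind of definition text parse() accepts: property
    # assignments first, then the make_key_value and make_key bodies. The
    # concrete property values and the 'a' field are assumptions made for
    # illustration only.
    #
    #     name = by_a_md5
    #     type = HashIndex
    #     key_format = 16s
    #     make_key_value:
    #     md5(a), None
    #     make_key:
    #     md5(key)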
def readline(self, stage):
def foo():
if len(self.data[stage]) <= self.ind:
self.ind = 0
return ""
else:
self.ind += 1
return self.data[stage][self.ind - 1]
return foo
def add(self, l, i):
def add_aux(*args):
# print args,self.ind
if len(l[i]) < self.ind:
l[i].append([])
l[i][self.ind - 1].append(args)
return add_aux
def parse_ex(self):
self.index_name = ""
self.index_type = ""
self.curLine = -1
self.con = -1
self.brackets = -1
self.curFunc = None
self.colons = 0
self.line_cons = ([], [], [])
self.pre_tokens = ([], [], [])
self.known_dicts_in_mkv = []
self.prop_name = True
self.prop_assign = False
self.is_one_arg_enough = False
self.funcs_stack = []
self.last_line = [-1, -1, -1]
self.props_set = []
self.custom_header = set()
self.tokens = []
self.tokens_head = ['# %s\n' % self.name, 'class %s(' % self.name, '):\n', ' def __init__(self, *args, **kwargs): ']
for i in xrange(3):
tokenize.tokenize(self.readline(i), self.add(self.pre_tokens, i))
            # tokenize treats some keywords not in the right way, that's why we
# have to change some of them
for nk, k in enumerate(self.pre_tokens[i]):
for na, a in enumerate(k):
if a[0] == token.NAME and a[1] in self.logic:
self.pre_tokens[i][nk][
na] = (token.OP, a[1], a[2], a[3], a[4])
for i in self.pre_tokens[1]:
self.line_cons[1].append(self.check_colons(i, 1))
self.check_adjacents(i, 1)
if self.check_for_2nd_arg(i) == -1 and not self.is_one_arg_enough:
                raise IndexCreatorValueException("No 2nd value to return (did you forget about ',None'?)", self.cnt_line_nr(i[0][4], 1))
self.is_one_arg_enough = False
for i in self.pre_tokens[2]:
self.line_cons[2].append(self.check_colons(i, 2))
self.check_adjacents(i, 2)
for i in self.pre_tokens[0]:
self.handle_prop_line(i)
self.cur_brackets = 0
self.tokens += ['\n super(%s, self).__init__(*args, **kwargs)\n def make_key_value(self, data): ' % self.name]
for i in self.pre_tokens[1]:
for k in i:
self.handle_make_value(*k)
self.curLine = -1
self.con = -1
self.cur_brackets = 0
self.tokens += ['\n def make_key(self, key):']
for i in self.pre_tokens[2]:
for k in i:
self.handle_make_key(*k)
if self.index_type == "":
raise IndexCreatorValueException("Missing index type definition\n")
if self.index_name == "":
raise IndexCreatorValueException("Missing index name\n")
self.tokens_head[0] = "# " + self.index_name + "\n" + \
self.tokens_head[0]
for i in self.funcs_with_body:
if self.funcs_with_body[i][1]:
self.tokens_head.insert(4, self.funcs_with_body[i][0])
if None in self.custom_header:
self.custom_header.remove(None)
if self.custom_header:
s = ' custom_header = """'
for i in self.custom_header:
s += i
s += '"""\n'
self.tokens_head.insert(4, s)
if self.index_type in self.allowed_props:
for i in self.props_set:
if i not in self.allowed_props[self.index_type]:
                    raise IndexCreatorValueException("Property %s is not allowed for index type: %s" % (i, self.index_type))
# print "".join(self.tokens_head)
# print "----------"
# print (" ".join(self.tokens))
return "".join(self.custom_header), "".join(self.tokens_head) + (" ".join(self.tokens))
# has to be run BEFORE tokenize
def check_enclosures(self, d, st):
encs = []
contr = {'(': ')', '{': '}', '[': ']', "'": "'", '"': '"'}
ends = [')', '}', ']', "'", '"']
for i in d:
if len(encs) > 0 and encs[-1] in ['"', "'"]:
if encs[-1] == i:
del encs[-1]
elif i in contr:
encs += [i]
elif i in ends:
if len(encs) < 1 or contr[encs[-1]] != i:
raise IndexCreatorValueException("Missing opening enclosure for \'%s\'" % i, self.cnt_line_nr(d, st))
del encs[-1]
if len(encs) > 0:
raise IndexCreatorValueException("Missing closing enclosure for \'%s\'" % encs[0], self.cnt_line_nr(d, st))
def check_adjacents(self, d, st):
def std_check(d, n):
if n == 0:
prev = -1
else:
prev = d[n - 1][1] if d[n - 1][0] == token.OP else d[n - 1][0]
cur = d[n][1] if d[n][0] == token.OP else d[n][0]
# there always is an endmarker at the end, but this is a precaution
if n + 2 > len(d):
nex = -1
else:
nex = d[n + 1][1] if d[n + 1][0] == token.OP else d[n + 1][0]
if prev not in self.allowed_adjacent[cur]:
raise IndexCreatorValueException("Wrong left value of the %s" % cur, self.cnt_line_nr(line, st))
            # there is an assumption that the whole data always ends with a 0 marker; the idea probably needs rewriting to allow more whitespace
            # between tokens, so it will be handled anyway
elif nex not in self.allowed_adjacent[cur][prev]:
raise IndexCreatorValueException("Wrong right value of the %s" % cur, self.cnt_line_nr(line, st))
for n, (t, i, _, _, line) in enumerate(d):
if t == token.NAME or t == token.STRING:
if n + 1 < len(d) and d[n + 1][0] in [token.NAME, token.STRING]:
raise IndexCreatorValueException("Did you forget about an operator in between?", self.cnt_line_nr(line, st))
elif i in self.allowed_adjacent:
std_check(d, n)
def check_colons(self, d, st):
cnt = 0
br = 0
def check_ret_args_nr(a, s):
c_b_cnt = 0
s_b_cnt = 0
n_b_cnt = 0
comas_cnt = 0
for _, i, _, _, line in a:
if c_b_cnt == n_b_cnt == s_b_cnt == 0:
if i == ',':
comas_cnt += 1
if (s == 1 and comas_cnt > 1) or (s == 2 and comas_cnt > 0):
                            raise IndexCreatorFunctionException("Too many arguments to return", self.cnt_line_nr(line, st))
if s == 0 and comas_cnt > 0:
                            raise IndexCreatorValueException("A comma here doesn't make any sense", self.cnt_line_nr(line, st))
elif i == ':':
if s == 0:
raise IndexCreatorValueException("A colon here doesn't make any sense", self.cnt_line_nr(line, st))
raise IndexCreatorFunctionException("Two colons don't make any sense", self.cnt_line_nr(line, st))
if i == '{':
c_b_cnt += 1
elif i == '}':
c_b_cnt -= 1
elif i == '(':
n_b_cnt += 1
elif i == ')':
n_b_cnt -= 1
elif i == '[':
s_b_cnt += 1
elif i == ']':
s_b_cnt -= 1
def check_if_empty(a):
for i in a:
if i not in [token.NEWLINE, token.INDENT, token.ENDMARKER]:
return False
return True
if st == 0:
check_ret_args_nr(d, st)
return
for n, i in enumerate(d):
if i[1] == ':':
if br == 0:
if len(d) < n or check_if_empty(d[n + 1:]):
raise IndexCreatorValueException(
"Empty return value", self.cnt_line_nr(i[4], st))
elif len(d) >= n:
check_ret_args_nr(d[n + 1:], st)
return cnt
else:
cnt += 1
elif i[1] == '{':
br += 1
elif i[1] == '}':
br -= 1
check_ret_args_nr(d, st)
return -1
def check_for_2nd_arg(self, d):
c_b_cnt = 0 # curly brackets counter '{}'
s_b_cnt = 0 # square brackets counter '[]'
n_b_cnt = 0 # normal brackets counter '()'
def check_2nd_arg(d, ind):
d = d[ind[0]:]
for t, i, (n, r), _, line in d:
if i == '{' or i is None:
return 0
elif t == token.NAME:
self.known_dicts_in_mkv.append((i, (n, r)))
return 0
elif t == token.STRING or t == token.NUMBER:
raise IndexCreatorValueException("Second return value of make_key_value function has to be a dictionary!", self.cnt_line_nr(line, 1))
for ind in enumerate(d):
t, i, _, _, _ = ind[1]
if s_b_cnt == n_b_cnt == c_b_cnt == 0:
if i == ',':
return check_2nd_arg(d, ind)
elif (t == token.NAME and i not in self.funcs) or i == '{':
self.is_one_arg_enough = True
if i == '{':
c_b_cnt += 1
self.is_one_arg_enough = True
elif i == '}':
c_b_cnt -= 1
elif i == '(':
n_b_cnt += 1
elif i == ')':
n_b_cnt -= 1
elif i == '[':
s_b_cnt += 1
elif i == ']':
s_b_cnt -= 1
return -1
def cnt_line_nr(self, l, stage):
nr = -1
for n, i in enumerate(self.predata[stage]):
# print i,"|||",i.strip(),"|||",l
if l == i.strip():
nr = n
if nr == -1:
return -1
if stage == 0:
return nr + 1
elif stage == 1:
return nr + self.cnt_lines[0] + (self.cnt_lines[2] - 1 if self.funcs_rev else 0)
elif stage == 2:
return nr + self.cnt_lines[0] + (self.cnt_lines[1] - 1 if not self.funcs_rev else 0)
return -1
def handle_prop_line(self, d):
d_len = len(d)
if d[d_len - 1][0] == token.ENDMARKER:
d_len -= 1
if d_len < 3:
            raise IndexCreatorValueException("Can't handle property assignment ", self.cnt_line_nr(d[0][4], 0))
if not d[1][1] in self.props_assign:
raise IndexCreatorValueException(
"Did you forget : or =?", self.cnt_line_nr(d[0][4], 0))
if d[0][0] == token.NAME or d[0][0] == token.STRING:
if d[0][1] in self.props_set:
                raise IndexCreatorValueException("Property %s is set more than once" % d[0][1], self.cnt_line_nr(d[0][4], 0))
self.props_set += [d[0][1]]
if d[0][1] == "type" or d[0][1] == "name":
t, tk, _, _, line = d[2]
if d_len > 3:
raise IndexCreatorValueException(
"Wrong value to assign", self.cnt_line_nr(line, 0))
if t == token.STRING:
m = re.search('\s*(?P<a>[\'\"]+)(.*?)(?P=a)\s*', tk)
if m:
tk = m.groups()[1]
elif t != token.NAME:
raise IndexCreatorValueException(
"Wrong value to assign", self.cnt_line_nr(line, 0))
if d[0][1] == "type":
if d[2][1] == "TreeBasedIndex":
self.custom_header.add("from CodernityDB.tree_index import TreeBasedIndex\n")
elif d[2][1] == "MultiTreeBasedIndex":
self.custom_header.add("from CodernityDB.tree_index import MultiTreeBasedIndex\n")
elif d[2][1] == "MultiHashIndex":
self.custom_header.add("from CodernityDB.hash_index import MultiHashIndex\n")
self.tokens_head.insert(2, tk)
self.index_type = tk
else:
self.index_name = tk
return
else:
self.tokens += ['\n kwargs["' + d[0][1] + '"]']
else:
            raise IndexCreatorValueException("Can't handle property assignment ", self.cnt_line_nr(d[0][4], 0))
self.tokens += ['=']
self.check_adjacents(d[2:], 0)
self.check_colons(d[2:], 0)
for i in d[2:]:
self.tokens += [i[1]]
def generate_func(self, t, tk, pos_start, pos_end, line, hdata, stage):
if self.last_line[stage] != -1 and pos_start[0] > self.last_line[stage] and line != '':
raise IndexCreatorFunctionException("This line will never be executed!", self.cnt_line_nr(line, stage))
if t == 0:
return
if pos_start[1] == 0:
if self.line_cons[stage][pos_start[0] - 1] == -1:
self.tokens += ['\n return']
self.last_line[stage] = pos_start[0]
else:
self.tokens += ['\n if']
elif tk == ':' and self.line_cons[stage][pos_start[0] - 1] > -1:
if self.line_cons[stage][pos_start[0] - 1] == 0:
self.tokens += [':\n return']
return
self.line_cons[stage][pos_start[0] - 1] -= 1
if tk in self.logic2:
# print tk
if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] != tk:
self.tokens += [tk]
if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] == tk:
if tk == '&':
self.tokens += ['and']
else:
self.tokens += ['or']
return
if self.brackets != 0:
def search_through_known_dicts(a):
for i, (n, r) in self.known_dicts_in_mkv:
if i == tk and r > pos_start[1] and n == pos_start[0] and hdata == 'data':
return True
return False
if t == token.NAME and len(self.funcs_stack) > 0 and self.funcs_stack[-1][0] == 'md5' and search_through_known_dicts(tk):
                raise IndexCreatorValueException("Second value returned by make_key_value certainly isn't a dictionary ", self.cnt_line_nr(line, 1))
if tk == ')':
self.cur_brackets -= 1
if len(self.funcs_stack) > 0 and self.cur_brackets == self.funcs_stack[-1][1]:
self.tokens += [tk]
self.tokens += self.funcs[self.funcs_stack[-1][0]][1]
del self.funcs_stack[-1]
return
if tk == '(':
self.cur_brackets += 1
if tk in self.none:
self.tokens += ['None']
return
if t == token.NAME and tk not in self.logic and tk != hdata:
if tk not in self.funcs:
self.tokens += [hdata + '["' + tk + '"]']
else:
self.tokens += self.funcs[tk][0]
if tk in self.funcs_with_body:
self.funcs_with_body[tk] = (
self.funcs_with_body[tk][0], True)
self.custom_header.add(self.handle_int_imports.get(tk))
self.funcs_stack += [(tk, self.cur_brackets)]
else:
self.tokens += [tk]
def handle_make_value(self, t, tk, pos_start, pos_end, line):
self.generate_func(t, tk, pos_start, pos_end, line, 'data', 1)
def handle_make_key(self, t, tk, pos_start, pos_end, line):
self.generate_func(t, tk, pos_start, pos_end, line, 'key', 2)
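# Hypothetical usage sketch (not part of the original module). It shows the
# simplified index format this parser consumes: a property block first, then the
# 'make_key_value:' and 'make_key:' sections. 'key_format' is an assumed property
# of the target HashIndex; properties other than 'name' and 'type' are forwarded
# as index kwargs by the generated __init__ (see handle_prop_line above).
_EXAMPLE_SIMPLIFIED_INDEX = """
name = by_x
type = HashIndex
key_format = 16s
make_key_value:
md5(x), None
make_key:
md5(key)
"""
if __name__ == '__main__':
    custom_header, index_code = Parser().parse(_EXAMPLE_SIMPLIFIED_INDEX)
    print index_code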
| 25,444
|
Python
|
.py
| 556
| 31.663669
| 153
| 0.461067
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,467
|
database_thread_safe.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/database_thread_safe.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import RLock
from CodernityDB.env import cdb_environment
cdb_environment['mode'] = "threads"
cdb_environment['rlock_obj'] = RLock
from database_safe_shared import SafeDatabase
class ThreadSafeDatabase(SafeDatabase):
"""
    Thread safe version of CodernityDB that uses several lock objects,
    on different methods / different indexes etc. It's a completely different
    locking implementation than the SuperThreadSafe one.
"""
pass
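# Hypothetical usage sketch (not part of the original module); the path is a
# placeholder and insert/get follow the Database API from CodernityDB.database.
if __name__ == '__main__':
    db = ThreadSafeDatabase('/tmp/example_db')
    db.create()
    rec = db.insert(dict(x=5))
    print db.get('id', rec['_id'])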
| 1,115
|
Python
|
.py
| 28
| 37.714286
| 75
| 0.774074
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,468
|
database.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/database.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
from inspect import getsource
# for custom indexes
from CodernityDB.storage import Storage, IU_Storage
from CodernityDB.hash_index import (IU_UniqueHashIndex,
IU_HashIndex,
HashIndex,
UniqueHashIndex)
# normal imports
from CodernityDB.index import (ElemNotFound,
DocIdNotFound,
IndexException,
Index,
TryReindexException,
ReindexException,
IndexNotFoundException,
IndexConflict)
from CodernityDB.misc import NONE
from CodernityDB.env import cdb_environment
from random import randrange
import warnings
def header_for_indexes(index_name, index_class, db_custom="", ind_custom="", classes_code=""):
return """# %s
# %s
# inserted automatically
import os
import marshal
import struct
import shutil
from hashlib import md5
# custom db code start
# db_custom
%s
# custom index code start
# ind_custom
%s
# source of classes in index.classes_code
# classes_code
%s
# index code start
""" % (index_name, index_class, db_custom, ind_custom, classes_code)
class DatabaseException(Exception):
pass
class PreconditionsException(DatabaseException):
pass
class RecordDeleted(DatabaseException):
pass
class RecordNotFound(DatabaseException):
pass
class RevConflict(DatabaseException):
pass
class DatabaseConflict(DatabaseException):
pass
class DatabasePathException(DatabaseException):
pass
class DatabaseIsNotOpened(PreconditionsException):
pass
class Database(object):
"""
A default single thread database object.
"""
custom_header = "" # : use it for imports required by your database
def __init__(self, path):
self.path = path
self.storage = None
self.indexes = []
self.id_ind = None
self.indexes_names = {}
self.opened = False
def create_new_rev(self, old_rev=None):
"""
Creates new revision number based on previous one.
Increments it + random bytes. On overflow starts from 0 again.
"""
if old_rev:
try:
rev_num = int(old_rev[:4], 16)
except:
raise RevConflict()
rev_num += 1
if rev_num > 65025:
# starting the counter from 0 again
rev_num = 0
rnd = randrange(65536)
return "%04x%04x" % (rev_num, rnd)
else:
# new rev
rnd = randrange(256 ** 2)
return '0001%04x' % rnd
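    # Worked example (added for illustration): a new document gets a rev such as
    # '0001a3f2' (counter 0x0001 plus four random hex digits). On update the first
    # four digits are parsed and incremented, e.g. '0001a3f2' -> '00027b10', and
    # the counter wraps back to 0 once it exceeds 65025.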
def __not_opened(self):
if not self.opened:
raise DatabaseIsNotOpened("Database is not opened")
def set_indexes(self, indexes=[]):
"""
Set indexes using ``indexes`` param
:param indexes: indexes to set in db
:type indexes: iterable of :py:class:`CodernityDB.index.Index` objects.
"""
for ind in indexes:
self.add_index(ind, create=False)
def _add_single_index(self, p, i, index):
"""
Adds single index to a database.
It will use :py:meth:`inspect.getsource` to get class source.
Then it will build real index file, save it in ``_indexes`` directory.
"""
code = getsource(index.__class__)
if not code.startswith('c'): # fix for indented index codes
import textwrap
code = textwrap.dedent(code)
index._order = i
cls_code = getattr(index, 'classes_code', [])
classes_code = ""
for curr in cls_code:
classes_code += getsource(curr) + '\n\n'
with io.FileIO(os.path.join(p, "%.2d%s" % (i, index.name) + '.py'), 'w') as f:
f.write(header_for_indexes(index.name,
index.__class__.__name__,
getattr(self, 'custom_header', ''),
getattr(index, 'custom_header', ''),
classes_code))
f.write(code)
return True
def _read_index_single(self, p, ind, ind_kwargs={}):
"""
        It will read a single index from an index file (i.e. generated in :py:meth:`._add_single_index`).
        Then it will perform ``exec`` on that code
        If an error occurs, the index file will be saved with a ``_broken`` suffix
:param p: path
:param ind: index name (will be joined with *p*)
:returns: new index object
"""
with io.FileIO(os.path.join(p, ind), 'r') as f:
name = f.readline()[2:].strip()
_class = f.readline()[2:].strip()
code = f.read()
try:
obj = compile(code, '<Index: %s' % os.path.join(p, ind), 'exec')
exec obj in globals()
ind_obj = globals()[_class](self.path, name, **ind_kwargs)
ind_obj._order = int(ind[:2])
except:
ind_path = os.path.join(p, ind)
os.rename(ind_path, ind_path + '_broken') # rename it instead of removing
# os.unlink(os.path.join(p, ind))
warnings.warn("Fatal error in index, saved as %s" % (ind_path + '_broken', ))
raise
else:
return ind_obj
def __check_if_index_unique(self, name, num):
indexes = os.listdir(os.path.join(self.path, '_indexes'))
if any((x for x in indexes if x[2:-3] == name and x[:2] != str(num))):
raise IndexConflict("Already exists")
def __write_index(self, new_index, number=0, edit=False, ind_kwargs=None):
# print new_index
if ind_kwargs is None:
ind_kwargs = {}
p = os.path.join(self.path, '_indexes')
if isinstance(new_index, basestring) and not new_index.startswith("path:"):
if len(new_index.splitlines()) < 4 or new_index.splitlines()[3] != '# inserted automatically':
from indexcreator import Parser
par = Parser()
custom_imports, s = par.parse(new_index)
s = s.splitlines()
name = s[0][2:]
c = s[1][2:]
comented = ['\n\n#SIMPLIFIED CODE']
map(lambda x: comented.append("#" + x), new_index.splitlines())
comented.append('#SIMPLIFIED CODE END\n\n')
s = header_for_indexes(
name, c, ind_custom=custom_imports) + "\n".join(s[2:]) + "\n".join(comented)
new_index = s
else:
name = new_index.splitlines()[0][2:]
name = name.strip()
if name in self.indexes_names and not edit:
raise IndexConflict("Already exists")
if edit:
previous_index = filter(lambda x: x.endswith(
'.py') and x[2:-3] == name, os.listdir(p))
if not previous_index:
raise PreconditionsException(
"Can't edit index that's not yet in database")
number = int(previous_index[0][:2])
if number == 0 and not edit and not name == 'id':
raise PreconditionsException(
"Id index must be the first added")
ind_path = "%.2d%s" % (number, name)
if not edit:
self.__check_if_index_unique(name, number)
ind_path_f = os.path.join(p, ind_path + '.py')
if os.path.exists(ind_path_f):
os.rename(ind_path_f, ind_path_f + '_last') # save last working index code
with io.FileIO(ind_path_f, 'w') as f:
f.write(new_index)
ind_obj = self._read_index_single(p, ind_path + '.py')
elif isinstance(new_index, basestring) and new_index.startswith("path:"):
path = new_index[5:]
if not path.endswith('.py'):
path += '.py'
ind_obj = self._read_index_single(p, path, ind_kwargs)
name = ind_obj.name
if name in self.indexes_names and not edit:
raise IndexConflict("Already exists")
elif isinstance(new_index, Index):
# it will first save index as a string, and then compile it
# it will allow to control the index object on the DB side
ind = new_index
init_arguments = new_index.__class__.__init__.im_func.func_code.co_varnames[
3:] # ignore self, path and name
for curr in init_arguments:
if curr not in ('args', 'kwargs'):
v = getattr(ind, curr, NONE())
if not isinstance(v, NONE):
ind_kwargs[curr] = v
if edit:
# code duplication...
previous_index = filter(lambda x: x.endswith(
'.py') and x[2:-3] == ind.name, os.listdir(p))
if not previous_index:
raise PreconditionsException(
"Can't edit index that's not yet in database")
number = int(previous_index[0][:2])
if ind.name in self.indexes_names and not edit:
raise IndexConflict("Already exists")
if number == 0 and not edit and not ind.name == 'id':
raise PreconditionsException(
"Id index must be the first added")
if not edit:
self.__check_if_index_unique(ind.name, number)
self._add_single_index(p, number, ind)
ind_path = "%.2d%s" % (number, ind.name)
ind_obj = self._read_index_single(p, ind_path + '.py', ind_kwargs)
name = ind_obj.name
else:
raise PreconditionsException("Argument must be Index instance, path to index_file or valid string index format")
return ind_obj, name
def add_index(self, new_index, create=True, ind_kwargs=None):
"""
:param new_index: New index to add, can be Index object, index valid string or path to file with index code
:type new_index: string
:param create: Create the index after add or not
:type create: bool
:returns: new index name
"""
if ind_kwargs is None:
ind_kwargs = {}
p = os.path.join(self.path, '_indexes')
if not os.path.exists(p):
self.initialize()
current = sorted(filter(lambda x: x.endswith('.py'), os.listdir(p)))
if current:
last = int(current[-1][:2]) # may crash... ignore
_next = last + 1
else:
_next = 0
ind_obj, name = self.__write_index(new_index, _next, edit=False)
# add the new index to objects
self.indexes.append(ind_obj)
self.indexes_names[name] = ind_obj
if create:
            if self.exists():  # no need to create if the database doesn't exist
ind_obj.create_index()
if name == 'id':
self.__set_main_storage()
self.__compat_things()
for patch in getattr(ind_obj, 'patchers', ()): # index can patch db object
patch(self, ind_obj)
return name
def edit_index(self, index, reindex=False, ind_kwargs=None):
"""
Allows to edit existing index.
        Previous working version will be saved with ``_last`` suffix (see :py:meth:`.revert_index`)
        :param bool reindex: should the index be reindexed after the change
:returns: index name
"""
if ind_kwargs is None:
ind_kwargs = {}
ind_obj, name = self.__write_index(index, -1, edit=True)
old = next(x for x in self.indexes if x.name == name)
old.close_index()
index_of_index = self.indexes.index(old)
ind_obj.open_index()
self.indexes[index_of_index] = ind_obj
self.indexes_names[name] = ind_obj
if reindex:
self.reindex_index(name)
return name
def revert_index(self, index_name, reindex=False, ind_kwargs=None):
"""
Tries to revert index code from copy.
        It calls :py:meth:`.edit_index` with the previous working version.
:param string index_name: index name to restore
"""
ind_path = os.path.join(self.path, '_indexes')
if index_name in self.indexes_names: # then it's working index.
ind = self.indexes_names[index_name]
full_name = '%.2d%s.py' % (ind._order, index_name)
else:
indexes = os.listdir(ind_path)
full_name = next((x for x in indexes if x[2:-3] == index_name))
if not full_name:
raise DatabaseException("%s index not found" % index_name)
last_path = os.path.join(ind_path, full_name + "_last")
if not os.path.exists(last_path):
raise DatabaseException("No previous copy found for %s" % index_name)
correct_last_path = last_path[:-5] # remove _last from name
os.rename(last_path, correct_last_path)
# ind_data = open(last_path, 'r')
p = 'path:%s' % os.path.split(correct_last_path)[1]
return self.edit_index(p, reindex, ind_kwargs)
def get_index_code(self, index_name, code_switch='All'):
"""
It will return full index code from index file.
:param index_name: the name of index to look for code
"""
if not index_name in self.indexes_names:
self.__not_opened()
            raise IndexNotFoundException(
                "Index `%s` doesn't exist" % index_name)
ind = self.indexes_names[index_name]
name = "%.2d%s" % (ind._order, index_name)
name += '.py'
with io.FileIO(os.path.join(self.path, '_indexes', name), 'r') as f:
co = f.read()
if code_switch == 'All':
return co
if code_switch == 'S':
try:
ind = co.index('#SIMPLIFIED CODE')
except ValueError:
return " "
else:
s = co[ind:]
l = s.splitlines()[1:-2]
ll = map(lambda x: x[1:], l)
return '\n'.join(ll)
if code_switch == 'P':
try:
ind = co.index('#SIMPLIFIED CODE')
except ValueError:
return co
else:
return co[:ind]
return "" # shouldn't happen
def __set_main_storage(self):
"""
Sets database main storage (from the **id** index)
"""
try:
self.storage = self.indexes_names['id'].storage
self.id_ind = self.indexes_names['id']
except KeyError:
# when opening / initializing DB without `id` index
# happens mostly on server side
pass
def initialize(self, path=None, makedir=True):
"""
Initialize new database
:param path: Path to a database (allows delayed path configuration), if not provided self.path will be used
:param makedir: Make the ``_indexes`` directory or not
:returns: the database path
"""
if self.opened is True:
raise DatabaseConflict("Already opened")
if not path:
path = self.path
else:
self.path = path
if makedir:
if not self.path:
raise PreconditionsException("No path specified")
p = os.path.join(self.path, '_indexes')
if os.path.exists(p):
                raise DatabaseConflict("Can't create because it already exists")
os.makedirs(p)
return self.path
def __open_new(self, with_id_index=True, index_kwargs={}):
"""
Will open new database (works like create),
        if self.path is not provided it will call initialize()
"""
if self.path:
if not os.path.exists(self.path):
self.initialize(self.path)
if not 'id' in self.indexes_names and with_id_index:
import CodernityDB.hash_index
if not 'db_path' in index_kwargs:
index_kwargs['db_path'] = self.path
index_kwargs['name'] = 'id'
id_ind = CodernityDB.hash_index.UniqueHashIndex(**index_kwargs)
self.add_index(id_ind, create=False)
# del CodernityDB.index
for index in self.indexes:
try:
index.create_index()
except IndexException:
raise DatabaseConflict(
"Already exists (detected on index=%s)" % index.name)
return True
def _read_indexes(self):
"""
Read all known indexes from ``_indexes``
"""
p = os.path.join(self.path, '_indexes')
for ind in os.listdir(p):
if ind.endswith('.py'):
self.add_index('path:' + ind, create=False)
def __compat_things(self):
"""
Things for compatibility.
"""
# patch for rev size change
if not self.id_ind:
return
if self.id_ind.entry_line_format[4:6] == '4s':
# rev compatibility...
            warnings.warn("Your database is using the old rev mechanism \
for ID index. You should update that index \
(CodernityDB.migrate.migrate).")
from misc import random_hex_4
self.create_new_rev = random_hex_4
def create(self, path=None, **kwargs):
"""
Create database
:param path: path where to create the database
:returns: database path
"""
if path:
self.initialize(path)
if not self.path:
raise PreconditionsException("No path specified")
if self.opened is True:
raise DatabaseConflict("Already opened")
self.__open_new(**kwargs)
self.__set_main_storage()
self.__compat_things()
self.opened = True
return self.path
def exists(self, path=None):
"""
Checks if database in given path exists
:param path: path to look for database
"""
if not path:
path = self.path
if not path:
return False
if os.path.exists(path):
return os.path.exists(os.path.join(path, '_indexes'))
return False
def open(self, path=None):
"""
Will open already existing database
:param path: path with database to open
"""
if self.opened is True:
raise DatabaseConflict("Already opened")
# else:
if path:
self.path = path
if not self.path:
raise PreconditionsException("No path specified")
if not os.path.exists(self.path):
raise DatabasePathException("Can't open database")
self.indexes = []
self.id_ind = None
self.indexes_names = {}
self._read_indexes()
if not 'id' in self.indexes_names:
raise PreconditionsException("There must be `id` index!")
for index in self.indexes:
index.open_index()
self.indexes.sort(key=lambda ind: ind._order)
self.__set_main_storage()
self.__compat_things()
self.opened = True
return True
def close(self):
"""
Closes the database
"""
if not self.opened:
raise DatabaseConflict("Not opened")
self.id_ind = None
self.indexes_names = {}
self.storage = None
for index in self.indexes:
index.close_index()
self.indexes = []
self.opened = False
return True
def destroy(self):
"""
Allows to destroy database.
        **not reversible** operation!
"""
# destroy all but *id*
if not self.exists():
            raise DatabaseConflict("Doesn't exist")
for index in reversed(self.indexes[1:]):
try:
self.destroy_index(index)
except:
pass
if getattr(self, 'id_ind', None) is not None:
self.id_ind.destroy() # now destroy id index
# remove all files in db directory
for root, dirs, files in os.walk(self.path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(self.path)
self.close()
return True
def _single_update_index(self, index, data, db_data, doc_id):
"""
Performs update operation on single index
:param index: the index to perform operation
:param data: new data
:param db_data: database data
:param doc_id: the id of document
"""
try:
old_should_index = index.make_key_value(db_data)
except Exception as ex:
warnings.warn("""Problem during update for `%s`, ex = `%s`, \
you should check index code.""" % (index.name, ex), RuntimeWarning)
old_should_index = None
if old_should_index:
old_key, old_value = old_should_index
try:
new_should_index = index.make_key_value(data)
except Exception as ex:
warnings.warn("""Problem during update for `%s`, ex = `%r`, \
you should check index code.""" % (index.name, ex), RuntimeWarning)
new_should_index = None
if new_should_index:
new_key, new_value = new_should_index
if new_key != old_key:
index.delete(doc_id, old_key)
index.insert_with_storage(doc_id, new_key, new_value)
elif new_value != old_value:
try:
index.update_with_storage(doc_id, new_key, new_value)
except (ElemNotFound, DocIdNotFound):
# element should be in index but isn't
                    # (probably added new index without reindex)
warnings.warn("""Reindex might be required for index %s""" % index.name)
else:
index.delete(doc_id, old_key)
else: # not previously indexed
self._single_insert_index(index, data, doc_id)
def _update_id_index(self, _rev, data):
"""
Performs update on **id** index
"""
_id, value = self.id_ind.make_key_value(data)
db_data = self.get('id', _id)
if db_data['_rev'] != _rev:
raise RevConflict()
new_rev = self.create_new_rev(_rev)
# storage = self.storage
# start, size = storage.update(value)
# self.id_ind.update(_id, new_rev, start, size)
self.id_ind.update_with_storage(_id, new_rev, value)
return _id, new_rev, db_data
def _update_indexes(self, _rev, data):
"""
Performs update operation on all indexes in order
"""
_id, new_rev, db_data = self._update_id_index(_rev, data)
for index in self.indexes[1:]:
self._single_update_index(index, data, db_data, _id)
return _id, new_rev
def _single_insert_index(self, index, data, doc_id):
"""
Performs insert operation on single index
:param index: index to perform operation
:param data: new data
:param doc_id: document id
"""
try:
should_index = index.make_key_value(data)
except Exception as ex:
warnings.warn("""Problem during insert for `%s`, ex = `%r`, \
you should check index code.""" % (index.name, ex), RuntimeWarning)
should_index = None
if should_index:
key, value = should_index
index.insert_with_storage(doc_id, key, value)
# if value:
# storage = index.storage
# start, size = storage.insert(value)
# else:
# start = 1
# size = 0
# index.insert(doc_id, key, start, size)
def _insert_id_index(self, _rev, data):
"""
Performs insert on **id** index.
"""
_id, value = self.id_ind.make_key_value(data) # may be improved
# storage = self.storage
# start, size = storage.insert(value)
# self.id_ind.insert(_id, _rev, start, size)
self.id_ind.insert_with_storage(_id, _rev, value)
return _id
def _insert_indexes(self, _rev, data):
"""
Performs insert operation on all indexes in order
"""
_id = self._insert_id_index(_rev, data)
for index in self.indexes[1:]:
self._single_insert_index(index, data, _id)
def _single_delete_index(self, index, data, doc_id, old_data):
"""
Performs single delete operation on single index.
It's very similar to update functions (that's why data is in arguments)
:param index: index to perform operation
:param data: not important (because of update operations)
:param doc_id: document id
:param old_data: current data in database
"""
index_data = index.make_key_value(old_data)
if not index_data:
return
key, value = index_data
try:
index.delete(doc_id, key)
except TryReindexException:
return
def _delete_id_index(self, _id, _rev, data):
"""
Performs delete from **id** index
"""
# key, value = self.id_ind.make_key_value(data)
# key = data['_id']
key = self.id_ind.make_key(_id)
self.id_ind.delete(key)
def _delete_indexes(self, _id, _rev, data):
"""
Performs delete operation on all indexes in order
"""
old_data = self.get('id', _id)
if old_data['_rev'] != _rev:
raise RevConflict()
for index in self.indexes[1:]:
self._single_delete_index(index, data, _id, old_data)
self._delete_id_index(_id, _rev, data)
def destroy_index(self, index):
"""
Destroys index
:param index: the index to destroy
:type index: :py:class:`CodernityDB.index.Index`` instance, or string
"""
if isinstance(index, basestring):
if not index in self.indexes_names:
raise PreconditionsException("No index named %s" % index)
index = self.indexes_names[index]
elif not index in self.indexes:
self.__not_opened()
raise PreconditionsException("Argument must be Index instance or valid string index format")
if index.name == 'id':
self.__not_opened()
raise PreconditionsException("Id index cannot be destroyed")
full_file = "%.2d%s" % (index._order, index.name) + '.py'
p = os.path.join(self.path, '_indexes', full_file)
os.unlink(p)
index.destroy()
del self.indexes_names[index.name]
self.indexes.remove(index)
def compact_index(self, index):
"""
Compacts index
Used for better utilization of index metadata.
        Deleted documents will no longer be present in the structure.
        :param index: the index to compact
:type index: :py:class:`CodernityDB.index.Index`` instance, or string
"""
if isinstance(index, basestring):
if not index in self.indexes_names:
raise PreconditionsException("No index named %s" % index)
index = self.indexes_names[index]
elif not index in self.indexes:
self.__not_opened()
raise PreconditionsException("Argument must be Index instance or valid string index format")
if getattr(index, 'compacting', False):
raise ReindexException(
"The index=%s is still compacting" % index.name)
index.compacting = True
index.compact()
del index.compacting
def _compact_indexes(self):
"""
Runs compact on all indexes
"""
for index in self.indexes:
self.compact_index(index)
def _single_reindex_index(self, index, data):
doc_id, rev, start, size, status = self.id_ind.get(
data['_id']) # it's cached so it's ok
if status != 'd' and status != 'u':
self._single_insert_index(index, data, doc_id)
def reindex_index(self, index):
"""
        Performs reindex on index. Optimizes metadata and storage information for the given index.
You can't reindex **id** index.
:param index: the index to reindex
:type index: :py:class:`CodernityDB.index.Index`` instance, or string
"""
if isinstance(index, basestring):
if not index in self.indexes_names:
raise PreconditionsException("No index named %s" % index)
index = self.indexes_names[index]
elif not index in self.indexes:
self.__not_opened()
raise PreconditionsException("Argument must be Index instance or valid string index format")
if index.name == 'id':
self.__not_opened()
raise PreconditionsException("Id index cannot be reindexed")
if getattr(index, 'reindexing', False):
raise ReindexException(
"The index=%s is still reindexing" % index.name)
all_iter = self.all('id')
index.reindexing = True
index.destroy()
index.create_index()
while True:
try:
curr = all_iter.next()
except StopIteration:
break
else:
self._single_reindex_index(index, curr)
del index.reindexing
def _reindex_indexes(self):
for index in self.indexes[1:]:
self.reindex_index(index)
def insert(self, data):
"""
        It uses a **reference** to the given data dict object;
        to avoid that, copy it before inserting!
If data **will not** have ``_id`` field,
it will be generated (random 32 chars string)
:param data: data to insert
"""
if '_rev' in data:
self.__not_opened()
raise PreconditionsException(
"Can't add record with forbidden fields")
_rev = self.create_new_rev()
if not '_id' in data:
try:
_id = self.id_ind.create_key()
except:
self.__not_opened()
raise DatabaseException("No id?")
else:
_id = data['_id']
assert _id is not None
data['_rev'] = _rev # for make_key_value compat with update / delete
data['_id'] = _id
self._insert_indexes(_rev, data)
ret = {'_id': _id, '_rev': _rev}
data.update(ret)
return ret
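    # Example (illustrative): db.insert({'x': 5}) returns something like
    # {'_id': '<32 hex chars>', '_rev': '0001a3f2'}; because the dict is passed by
    # reference, the same '_id' and '_rev' keys are also added to the caller's dict.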
def update(self, data):
"""
        It uses a **reference** to the given data dict object;
        to avoid that, copy it before updating!
``data`` **must** contain ``_id`` and ``_rev`` fields.
:param data: data to update
"""
if not '_rev' in data or not '_id' in data:
self.__not_opened()
raise PreconditionsException("Can't update without _rev or _id")
_rev = data['_rev']
try:
_rev = bytes(_rev)
except:
self.__not_opened()
            raise PreconditionsException(
                "`_rev` must be a valid bytes object")
_id, new_rev = self._update_indexes(_rev, data)
ret = {'_id': _id, '_rev': new_rev}
data.update(ret)
return ret
def get(self, index_name, key, with_doc=False, with_storage=True):
"""
Get single data from Database by ``key``.
:param index_name: index to get data from
:param key: key to get
:param with_doc: if ``True`` data from **id** index will be included in output
:param with_storage: if ``True`` data from index storage will be included, otherwise just metadata.
"""
# if not self.indexes_names.has_key(index_name):
# raise DatabaseException, "Invalid index name"
try:
ind = self.indexes_names[index_name]
except KeyError:
self.__not_opened()
            raise IndexNotFoundException(
                "Index `%s` doesn't exist" % index_name)
try:
l_key, _unk, start, size, status = ind.get(key)
except ElemNotFound as ex:
raise RecordNotFound(ex)
if not start and not size:
raise RecordNotFound("Not found")
elif status == 'd':
raise RecordDeleted("Deleted")
if with_storage and size:
storage = ind.storage
data = storage.get(start, size, status)
else:
data = {}
if with_doc and index_name != 'id':
storage = ind.storage
doc = self.get('id', l_key, False)
if data:
data['doc'] = doc
else:
data = {'doc': doc}
data['_id'] = l_key
if index_name == 'id':
data['_rev'] = _unk
else:
data['key'] = _unk
return data
def get_many(self, index_name, key=None, limit=-1, offset=0, with_doc=False, with_storage=True, start=None, end=None, **kwargs):
"""
Allows to get **multiple** data for given ``key`` for *Hash based indexes*.
Also allows get **range** queries for *Tree based indexes* with ``start`` and ``end`` arguments.
:param index_name: Index to perform the operation
:param key: key to look for (has to be ``None`` to use range queries)
:param limit: defines limit for query
:param offset: defines offset (how many records from start it will ignore)
:param with_doc: if ``True`` data from **id** index will be included in output
:param with_storage: if ``True`` data from index storage will be included, otherwise just metadata.
:param start: ``start`` parameter for range queries
:param end: ``end`` parameter for range queries
:returns: iterator over records
"""
if index_name == 'id':
self.__not_opened()
raise PreconditionsException("Can't get many from `id`")
try:
ind = self.indexes_names[index_name]
except KeyError:
self.__not_opened()
            raise IndexNotFoundException(
                "Index `%s` doesn't exist" % index_name)
storage = ind.storage
if start is None and end is None:
gen = ind.get_many(key, limit, offset)
else:
gen = ind.get_between(start, end, limit, offset, **kwargs)
while True:
try:
# l_key, start, size, status = gen.next()
ind_data = gen.next()
except StopIteration:
break
else:
if with_storage and ind_data[-2]:
data = storage.get(*ind_data[-3:])
else:
data = {}
doc_id = ind_data[0]
if with_doc:
doc = self.get('id', doc_id, False)
if data:
data['doc'] = doc
else:
data = {'doc': doc}
data['_id'] = doc_id
if key is None:
data['key'] = ind_data[1]
yield data
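    # Example (illustrative, assuming a Tree based index named 'by_x' exists):
    #   db.get_many('by_x', start=10, end=20, limit=-1, with_doc=True)
    # iterates records whose keys fall between start and end; for Hash based
    # indexes the 'key' argument is used instead and start/end stay None.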
def all(self, index_name, limit=-1, offset=0, with_doc=False, with_storage=True):
"""
        Allows to get all records for a given index
:param index_name: Index to perform the operation
:param limit: defines limit for query
:param offset: defines offset (how many records from start it will ignore)
:param with_doc: if ``True`` data from **id** index will be included in output
:param with_storage: if ``True`` data from index storage will be included, otherwise just metadata
"""
try:
ind = self.indexes_names[index_name]
except KeyError:
self.__not_opened()
            raise IndexNotFoundException(
                "Index `%s` doesn't exist" % index_name)
storage = ind.storage
gen = ind.all(limit, offset)
while True:
try:
doc_id, unk, start, size, status = gen.next()
except StopIteration:
break
else:
if index_name == 'id':
if with_storage and size:
data = storage.get(start, size, status)
else:
data = {}
data['_id'] = doc_id
data['_rev'] = unk
else:
data = {}
if with_storage and size:
data['value'] = storage.get(start, size, status)
data['key'] = unk
data['_id'] = doc_id
if with_doc:
doc = self.get('id', doc_id, False)
data['doc'] = doc
yield data
def run(self, index_name, target_funct, *args, **kwargs):
"""
Allows to execute given function on Database side
(important for server mode)
If ``target_funct==sum`` then given index must have ``run_sum`` method.
:param index_name: index name to perform action.
:param target_funct: target function name (without *run* prefix)
:param \*args: ``*args`` for function
:param \*\*kwargs: ``**kwargs`` for function
"""
try:
ind = self.indexes_names[index_name]
except KeyError:
self.__not_opened()
            raise IndexNotFoundException(
                "Index `%s` doesn't exist" % index_name)
try:
funct = getattr(ind, "run_" + target_funct)
except AttributeError:
raise IndexException("Invalid function to run")
return funct(self, *args, **kwargs)
def count(self, target_funct, *args, **kwargs):
"""
Counter. Allows to execute for example
.. code-block:: python
db.count(db.all, 'id')
        And it will then return how many records are in your ``id`` index.
.. warning::
It sets ``kwargs['with_storage'] = False`` and ``kwargs['with_doc'] = False``
"""
kwargs['with_storage'] = False
kwargs['with_doc'] = False
iter_ = target_funct(*args, **kwargs)
i = 0
while True:
try:
iter_.next()
i += 1
except StopIteration:
break
return i
def delete(self, data):
"""
Delete data from database.
``data`` has to contain ``_id`` and ``_rev`` fields.
:param data: data to delete
"""
if not '_rev' in data or not '_id' in data:
raise PreconditionsException("Can't delete without _rev or _id")
_id = data['_id']
_rev = data['_rev']
try:
_id = bytes(_id)
_rev = bytes(_rev)
except:
            raise PreconditionsException(
                "`_id` and `_rev` must be valid bytes objects")
data['_deleted'] = True
self._delete_indexes(_id, _rev, data)
return True
def compact(self):
"""
Compact all indexes. Runs :py:meth:`._compact_indexes` behind.
"""
self.__not_opened()
self._compact_indexes()
def reindex(self):
"""
Reindex all indexes. Runs :py:meth:`._reindex_indexes` behind.
"""
self.__not_opened()
self._reindex_indexes()
def flush_indexes(self):
"""
Flushes all indexes
"""
self.__not_opened()
for index in self.indexes:
index.flush()
def flush(self):
"""
Flushes all indexes. Runs :py:meth:`.flush_indexes` behind.
"""
return self.flush_indexes()
def fsync(self):
"""
It forces the kernel buffer to be written to disk. Use when you're sure that you need to.
"""
self.__not_opened()
for index in self.indexes:
index.flush()
index.fsync()
def __get_size(self):
"""
:returns: total size of database.
"""
if not self.path:
return 0
return sum(
os.path.getsize(os.path.join(dirpath, filename)) for dirpath, dirnames,
filenames in os.walk(self.path) for filename in filenames)
def get_index_details(self, name):
"""
Will return index properties.
:returns: index details
"""
self.__not_opened()
try:
db_index = self.indexes_names[name]
except KeyError:
self.__not_opened()
raise IndexNotFoundException("Index doesn't exist")
props = {}
for key, value in db_index.__dict__.iteritems():
if not callable(value): # not using inspect etc...
props[key] = value
return props
def get_db_details(self):
"""
        Gets database details: size, indexes, environment etc.
:returns: database details
"""
props = {}
props['path'] = self.path
props['size'] = self.__get_size()
props['indexes'] = self.indexes_names.keys()
props['cdb_environment'] = cdb_environment
return props
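# Hypothetical end-to-end sketch (not part of the original module); the path is a
# placeholder and 'x' is an assumed document field.
if __name__ == '__main__':
    db = Database('/tmp/example_db')
    db.create()
    rec = db.insert(dict(x=5))
    doc = db.get('id', rec['_id'])
    doc['x'] = 6
    db.update(doc)
    print db.count(db.all, 'id')  # -> 1
    db.destroy()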
| 42,585
|
Python
|
.py
| 1,066
| 28.879925
| 132
| 0.549878
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,469
|
misc.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/misc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import getrandbits, randrange
import uuid
class NONE:
"""
    It's intended to be None but different,
for internal use only!
"""
pass
def random_hex_32():
return uuid.UUID(int=getrandbits(128), version=4).hex
def random_hex_4(*args, **kwargs):
return '%04x' % randrange(256 ** 2)
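# Illustrative sketch (not part of the original module): random_hex_32() yields a
# 32-character hex string, random_hex_4() a 4-character one (the latter is used by
# database.py as a compatibility rev generator).
if __name__ == '__main__':
    print random_hex_32()  # e.g. '9f2c1c3c6f3e4f8e9a0b1c2d3e4f5a6b'
    print random_hex_4()   # e.g. '03fa'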
| 971
|
Python
|
.py
| 28
| 32.428571
| 74
| 0.738248
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,470
|
sharded_hash.py
|
CouchPotato_CouchPotatoServer/libs/CodernityDB/sharded_hash.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.hash_index import UniqueHashIndex, HashIndex
from CodernityDB.sharded_index import ShardedIndex
from CodernityDB.index import IndexPreconditionsException
from random import getrandbits
import uuid
class IU_ShardedUniqueHashIndex(ShardedIndex):
custom_header = """import uuid
from random import getrandbits
from CodernityDB.sharded_index import ShardedIndex
"""
def __init__(self, db_path, name, *args, **kwargs):
if kwargs.get('sh_nums', 0) > 255:
raise IndexPreconditionsException("Too many shards")
kwargs['ind_class'] = UniqueHashIndex
super(IU_ShardedUniqueHashIndex, self).__init__(db_path,
name, *args, **kwargs)
self.patchers.append(self.wrap_insert_id_index)
@staticmethod
def wrap_insert_id_index(db_obj, ind_obj, clean=False):
def _insert_id_index(_rev, data):
"""
Performs insert on **id** index.
"""
_id, value = db_obj.id_ind.make_key_value(data) # may be improved
trg_shard = _id[:2]
storage = db_obj.id_ind.shards_r[trg_shard].storage
start, size = storage.insert(value)
db_obj.id_ind.insert(_id, _rev, start, size)
return _id
if not clean:
if hasattr(db_obj, '_insert_id_index_orig'):
raise IndexPreconditionsException(
"Already patched, something went wrong")
setattr(db_obj, "_insert_id_index_orig", db_obj._insert_id_index)
setattr(db_obj, "_insert_id_index", _insert_id_index)
else:
setattr(db_obj, "_insert_id_index", db_obj._insert_id_index_orig)
delattr(db_obj, "_insert_id_index_orig")
def create_key(self):
h = uuid.UUID(int=getrandbits(128), version=4).hex
trg = self.last_used + 1
if trg >= self.sh_nums:
trg = 0
self.last_used = trg
h = '%02x%30s' % (trg, h[2:])
return h
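    # Worked example (added for illustration): with sh_nums == 4 the generated keys
    # cycle through the prefixes '00', '01', '02', '03'; the remaining 30 characters
    # come from the uuid4 hex, and delete/update/get below route on key[:2].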
def delete(self, key, *args, **kwargs):
trg_shard = key[:2]
op = self.shards_r[trg_shard]
return op.delete(key, *args, **kwargs)
def update(self, key, *args, **kwargs):
trg_shard = key[:2]
self.last_used = int(trg_shard, 16)
op = self.shards_r[trg_shard]
return op.update(key, *args, **kwargs)
def insert(self, key, *args, **kwargs):
trg_shard = key[:2] # in most cases it's in create_key BUT not always
self.last_used = int(key[:2], 16)
op = self.shards_r[trg_shard]
return op.insert(key, *args, **kwargs)
def get(self, key, *args, **kwargs):
trg_shard = key[:2]
self.last_used = int(trg_shard, 16)
op = self.shards_r[trg_shard]
return op.get(key, *args, **kwargs)
class ShardedUniqueHashIndex(IU_ShardedUniqueHashIndex):
# allow unique hash to be used directly
custom_header = 'from CodernityDB.sharded_hash import IU_ShardedUniqueHashIndex'
pass
class IU_ShardedHashIndex(ShardedIndex):
custom_header = """from CodernityDB.sharded_index import ShardedIndex"""
def __init__(self, db_path, name, *args, **kwargs):
kwargs['ind_class'] = HashIndex
        super(IU_ShardedHashIndex, self).__init__(db_path, name, *args, **kwargs)
def calculate_shard(self, key):
"""
        Must be implemented. It has to return the shard to be used for the given key
:param key: key
:returns: target shard
:rtype: int
"""
raise NotImplementedError()
def delete(self, doc_id, key, *args, **kwargs):
trg_shard = self.calculate_shard(key)
op = self.shards_r[trg_shard]
return op.delete(doc_id, key, *args, **kwargs)
def insert(self, doc_id, key, *args, **kwargs):
trg_shard = self.calculate_shard(key)
op = self.shards_r[trg_shard]
return op.insert(doc_id, key, *args, **kwargs)
def update(self, doc_id, key, *args, **kwargs):
trg_shard = self.calculate_shard(key)
op = self.shards_r[trg_shard]
        return op.update(doc_id, key, *args, **kwargs)
def get(self, key, *args, **kwargs):
trg_shard = self.calculate_shard(key)
op = self.shards_r[trg_shard]
return op.get(key, *args, **kwargs)
class ShardedHashIndex(IU_ShardedHashIndex):
pass
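# Hypothetical sketch (not part of the original module): the single method a
# concrete sharded hash index has to provide. sh_nums is assumed to be set by the
# parent ShardedIndex (IU_ShardedUniqueHashIndex above uses it the same way); a
# real index would also need the usual HashIndex configuration (key_format,
# make_key_value, ...), which is omitted here.
class FirstByteShardedHashIndex(ShardedHashIndex):

    custom_header = 'from CodernityDB.sharded_hash import ShardedHashIndex'

    def calculate_shard(self, key):
        # route on the first byte of the key, wrapped to the configured shard count
        return ord(key[0]) % self.sh_nums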
| 5,086
|
Python
|
.py
| 117
| 35.333333
| 84
| 0.625506
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,471
|
client.py
|
CouchPotato_CouchPotatoServer/libs/deluge_client/client.py
|
import logging
import socket
import ssl
import struct
import warnings
import zlib
from .rencode import dumps, loads
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
MESSAGE_HEADER_SIZE = 5
READ_SIZE = 10
logger = logging.getLogger(__name__)
class DelugeClientException(Exception):
"""Base exception for all deluge client exceptions"""
class ConnectionLostException(DelugeClientException):
pass
class CallTimeoutException(DelugeClientException):
pass
class InvalidHeaderException(DelugeClientException):
pass
class FailedToReconnectException(DelugeClientException):
pass
class RemoteException(DelugeClientException):
pass
class DelugeRPCClient(object):
timeout = 20
def __init__(self, host, port, username, password, decode_utf8=False, automatic_reconnect=True):
self.host = host
self.port = port
self.username = username
self.password = password
self.deluge_version = None
# This is only applicable if deluge_version is 2
self.deluge_protocol_version = None
self.decode_utf8 = decode_utf8
if not self.decode_utf8:
            warnings.warn('Using `decode_utf8=False` is deprecated, please set it to True. '
                          'The argument will be removed in a future release where it will always be True', DeprecationWarning)
self.automatic_reconnect = automatic_reconnect
self.request_id = 1
self.connected = False
self._create_socket()
def _create_socket(self, ssl_version=None):
if ssl_version is not None:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), ssl_version=ssl_version)
else:
self._socket = ssl.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self._socket.settimeout(self.timeout)
def connect(self):
"""
Connects to the Deluge instance
"""
self._connect()
logger.debug('Connected to Deluge, detecting daemon version')
self._detect_deluge_version()
logger.debug('Daemon version {} detected, logging in'.format(self.deluge_version))
if self.deluge_version == 2:
result = self.call('daemon.login', self.username, self.password, client_version='deluge-client')
else:
result = self.call('daemon.login', self.username, self.password)
logger.debug('Logged in with value %r' % result)
self.connected = True
def _connect(self):
logger.info('Connecting to %s:%s' % (self.host, self.port))
try:
self._socket.connect((self.host, self.port))
except ssl.SSLError as e:
# Note: have not verified that we actually get errno 258 for this error
if (hasattr(ssl, 'PROTOCOL_SSLv3') and
(getattr(e, 'reason', None) == 'UNSUPPORTED_PROTOCOL' or e.errno == 258)):
logger.warning('Was unable to ssl handshake, trying to force SSLv3 (insecure)')
self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
self._socket.connect((self.host, self.port))
else:
raise
def disconnect(self):
"""
Disconnect from deluge
"""
if self.connected:
self._socket.close()
self._socket = None
self.connected = False
def _detect_deluge_version(self):
if self.deluge_version is not None:
return
self._send_call(1, None, 'daemon.info')
self._send_call(2, None, 'daemon.info')
self._send_call(2, 1, 'daemon.info')
result = self._socket.recv(1)
if result[:1] == b'D':
# This is a protocol deluge 2.0 was using before release
self.deluge_version = 2
self.deluge_protocol_version = None
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, None, partial_data=result)
elif ord(result[:1]) == 1:
self.deluge_version = 2
self.deluge_protocol_version = 1
# If we need the specific version of deluge 2, this is it.
daemon_version = self._receive_response(2, 1, partial_data=result)
else:
self.deluge_version = 1
# Deluge 1 doesn't recover well from the bad request. Re-connect the socket.
self._socket.close()
self._create_socket()
self._connect()
def _send_call(self, deluge_version, protocol_version, method, *args, **kwargs):
self.request_id += 1
if method == 'daemon.login':
debug_args = list(args)
if len(debug_args) >= 2:
debug_args[1] = '<password hidden>'
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, debug_args, kwargs))
else:
logger.debug('Calling reqid %s method %r with args:%r kwargs:%r' % (self.request_id, method, args, kwargs))
req = ((self.request_id, method, args, kwargs), )
req = zlib.compress(dumps(req))
if deluge_version == 2:
if protocol_version is None:
# This was a protocol for deluge 2 before they introduced protocol version numbers
self._socket.send(b'D' + struct.pack("!i", len(req)))
elif protocol_version == 1:
self._socket.send(struct.pack('!BI', protocol_version, len(req)))
else:
raise Exception('Deluge protocol version {} is not (yet) supported.'.format(protocol_version))
self._socket.send(req)
def _receive_response(self, deluge_version, protocol_version, partial_data=b''):
expected_bytes = None
data = partial_data
while True:
try:
d = self._socket.recv(READ_SIZE)
except ssl.SSLError:
raise CallTimeoutException()
data += d
if deluge_version == 2:
if expected_bytes is None:
if len(data) < 5:
continue
header = data[:MESSAGE_HEADER_SIZE]
data = data[MESSAGE_HEADER_SIZE:]
if protocol_version is None:
if header[0] != b'D'[0]:
raise InvalidHeaderException('Expected D as first byte in reply')
elif ord(header[:1]) != protocol_version:
raise InvalidHeaderException(
'Expected protocol version ({}) as first byte in reply'.format(protocol_version)
)
if protocol_version is None:
expected_bytes = struct.unpack('!i', header[1:])[0]
else:
expected_bytes = struct.unpack('!I', header[1:])[0]
if len(data) >= expected_bytes:
data = zlib.decompress(data)
break
else:
try:
data = zlib.decompress(data)
except zlib.error:
if not d:
raise ConnectionLostException()
continue
break
data = list(loads(data, decode_utf8=self.decode_utf8))
msg_type = data.pop(0)
request_id = data.pop(0)
if msg_type == RPC_ERROR:
if self.deluge_version == 2:
exception_type, exception_msg, _, traceback = data
# On deluge 2, exception arguments are sent as tuple
if self.decode_utf8:
exception_msg = ', '.join(exception_msg)
else:
exception_msg = b', '.join(exception_msg)
else:
exception_type, exception_msg, traceback = data[0]
if self.decode_utf8:
exception = type(str(exception_type), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg,
traceback)
else:
exception = type(str(exception_type.decode('utf-8', 'ignore')), (RemoteException, ), {})
exception_msg = '%s\n%s' % (exception_msg.decode('utf-8', 'ignore'),
traceback.decode('utf-8', 'ignore'))
raise exception(exception_msg)
elif msg_type == RPC_RESPONSE:
retval = data[0]
return retval
def reconnect(self):
"""
Reconnect
"""
self.disconnect()
self._create_socket()
self.connect()
def call(self, method, *args, **kwargs):
"""
Calls an RPC function
"""
tried_reconnect = False
for _ in range(2):
try:
self._send_call(self.deluge_version, self.deluge_protocol_version, method, *args, **kwargs)
return self._receive_response(self.deluge_version, self.deluge_protocol_version)
except (socket.error, ConnectionLostException, CallTimeoutException):
if self.automatic_reconnect:
if tried_reconnect:
raise FailedToReconnectException()
else:
try:
self.reconnect()
except (socket.error, ConnectionLostException, CallTimeoutException):
raise FailedToReconnectException()
tried_reconnect = True
else:
raise
def __getattr__(self, item):
return RPCCaller(self.call, item)
class RPCCaller(object):
def __init__(self, caller, method=''):
self.caller = caller
self.method = method
def __getattr__(self, item):
return RPCCaller(self.caller, self.method+'.'+item)
def __call__(self, *args, **kwargs):
return self.caller(self.method, *args, **kwargs)
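# Illustrative usage sketch (an editorial addition, not part of the library):
# assuming a Deluge daemon is reachable with the placeholder host, port and
# credentials below, RPC methods can be invoked either through call() or via
# the RPCCaller attribute syntax exercised in tests.py.
if __name__ == '__main__':
    example_client = DelugeRPCClient('127.0.0.1', 58846, 'localclient', 'password',
                                     decode_utf8=True)
    example_client.connect()
    print(example_client.call('core.get_free_space'))   # explicit method name
    print(example_client.core.get_free_space('/'))      # same call via __getattr__
    example_client.disconnect()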
| 10,080 | Python | .py | 228 | 31.802632 | 126 | 0.56818 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,472 | rencode.py | CouchPotato_CouchPotatoServer/libs/deluge_client/rencode.py |
# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by Bram Cohen under the "MIT license":
#
# "Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
"""
rencode -- Web safe object pickling/unpickling.
Public domain, Connelly Barnes 2006-2007.
The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:
>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26
The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""
import struct
import sys
from threading import Lock
try:
from future_builtins import zip
except ImportError:
# Ignore on Py3.
pass
__version__ = ('Python', 1, 0, 4)
__all__ = ['dumps', 'loads']
py3 = sys.version_info[0] >= 3
if py3:
long = int # pylint: disable=redefined-builtin
unicode = str # pylint: disable=redefined-builtin
def int2byte(c):
return bytes([c])
else:
def int2byte(c):
return chr(c)
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = int2byte(59)
CHR_DICT = int2byte(60)
CHR_INT = int2byte(61)
CHR_INT1 = int2byte(62)
CHR_INT2 = int2byte(63)
CHR_INT4 = int2byte(64)
CHR_INT8 = int2byte(65)
CHR_FLOAT32 = int2byte(66)
CHR_FLOAT64 = int2byte(44)
CHR_TRUE = int2byte(67)
CHR_FALSE = int2byte(68)
CHR_NONE = int2byte(69)
CHR_TERM = int2byte(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START + STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
# Whether strings should be decoded when loading
_decode_utf8 = False
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f:f + 1] == '-':
if x[f + 1:f + 2] == '0':
raise ValueError
elif x[f:f + 1] == '0' and newf != f + 1:
raise ValueError
return (n, newf + 1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f + 1])[0], f + 1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f + 2])[0], f + 2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f + 4])[0], f + 4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f + 8])[0], f + 8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f + 4])[0]
return (n, f + 4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f + 8])[0]
return (n, f + 8)
def decode_string(x, f):
colon = x.index(b':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f + 1:
raise ValueError
colon += 1
s = x[colon:colon + n]
if _decode_utf8:
s = s.decode('utf8')
return (s, colon + n)
def decode_list(x, f):
r, f = [], f + 1
while x[f:f + 1] != CHR_TERM:
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f + 1
while x[f:f + 1] != CHR_TERM:
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f + 1)
def decode_false(x, f):
return (False, f + 1)
def decode_none(x, f):
return (None, f + 1)
decode_func = {}
decode_func[b'0'] = decode_string
decode_func[b'1'] = decode_string
decode_func[b'2'] = decode_string
decode_func[b'3'] = decode_string
decode_func[b'4'] = decode_string
decode_func[b'5'] = decode_string
decode_func[b'6'] = decode_string
decode_func[b'7'] = decode_string
decode_func[b'8'] = decode_string
decode_func[b'9'] = decode_string
decode_func[CHR_LIST] = decode_list
decode_func[CHR_DICT] = decode_dict
decode_func[CHR_INT] = decode_int
decode_func[CHR_INT1] = decode_intb
decode_func[CHR_INT2] = decode_inth
decode_func[CHR_INT4] = decode_intl
decode_func[CHR_INT8] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE] = decode_true
decode_func[CHR_FALSE] = decode_false
decode_func[CHR_NONE] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f + 1:f + 1 + slen]
if _decode_utf8:
s = s.decode('utf8')
return (s, f + 1 + slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[int2byte(STR_FIXED_START + i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f + 1
for _ in range(slen):
v, f = decode_func[x[f:f + 1]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[int2byte(LIST_FIXED_START + i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f + 1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[int2byte(INT_POS_FIXED_START + i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[int2byte(INT_NEG_FIXED_START + i)] = make_decoder(-1 - i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f + 1
for _ in range(slen):
k, f = decode_func[x[f:f + 1]](x, f)
r[k], f = decode_func[x[f:f + 1]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[int2byte(DICT_FIXED_START + i)] = make_decoder(i)
make_fixed_length_dict_decoders()
def loads(x, decode_utf8=False):
global _decode_utf8
_decode_utf8 = decode_utf8
try:
r, l = decode_func[x[0:1]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(int2byte(INT_POS_FIXED_START + x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(int2byte(INT_NEG_FIXED_START - 1 - x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if py3:
s = bytes(s, 'ascii')
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.append({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.append(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((int2byte(STR_FIXED_START + len(x)), x))
else:
s = str(len(x))
if py3:
s = bytes(s, 'ascii')
r.extend((s, b':', x))
def encode_unicode(x, r):
encode_string(x.encode('utf8'), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(int2byte(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x, r):
if len(x) < DICT_FIXED_COUNT:
r.append(int2byte(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[int] = encode_int
encode_func[long] = encode_int
encode_func[bytes] = encode_string
encode_func[list] = encode_list
encode_func[tuple] = encode_list
encode_func[dict] = encode_dict
encode_func[type(None)] = encode_none
encode_func[unicode] = encode_unicode
encode_func[bool] = encode_bool
lock = Lock()
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r)
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
ld = (({b'a': 15, b'bb': f1, b'ccc': f2, b'': (f3, (), False, True, b'')}, (b'a', 10**20),
tuple(range(-100000, 100000)), b'b' * 31, b'b' * 62, b'b' * 64, 2**30, 2**33, 2**62,
2**64, 2**30, 2**33, 2**62, 2**64, False, False, True, -1, 2, 0),)
assert loads(dumps(ld)) == ld
d = dict(zip(range(-100000, 100000), range(-100000, 100000)))
d.update({b'a': 20, 20: 40, 40: 41, f1: f2, f2: f3, f3: False, False: True, True: False})
ld = (d, {}, {5: 6}, {7: 7, True: 8}, {9: 10, 22: 39, 49: 50, 44: b''})
assert loads(dumps(ld)) == ld
ld = (b'', b'a' * 10, b'a' * 100, b'a' * 1000, b'a' * 10000, b'a' * 100000, b'a' * 1000000, b'a' * 10000000)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(n))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([dict(zip(range(n), range(-n, 0))) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([tuple(range(n)) for n in range(100)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (b'b',)
assert loads(dumps(ld)) == ld
ld = tuple([b'a' * n for n in range(1000)]) + (None, True, None)
assert loads(dumps(ld)) == ld
assert loads(dumps(None)) is None
assert loads(dumps({None: None})) == {None: None}
assert 1e-10 < abs(loads(dumps(1.1)) - 1.1) < 1e-6
assert 1e-10 < abs(loads(dumps(1.1, 32)) - 1.1) < 1e-6
assert abs(loads(dumps(1.1, 64)) - 1.1) < 1e-12
assert loads(dumps('Hello World!!'), decode_utf8=True)
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
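    # Illustrative round-trip sketch (an editorial addition, not part of the
    # original module): small ints and short strings fit the fixed-length
    # typecodes, so this structure encodes to only a few bytes, as the module
    # docstring notes. Lists come back as tuples, hence the tuple value here.
    sample = {b'a': 0, b'b': (1, 2), b'c': 99}
    encoded = dumps(sample)
    assert loads(encoded) == sample
    print('rencode round-trip OK (%d bytes)' % len(encoded))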
| 13,391 | Python | .py | 377 | 30.734748 | 112 | 0.621429 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,473 | tests.py | CouchPotato_CouchPotatoServer/libs/deluge_client/tests.py |
import os
import sys
import pytest
from .client import DelugeRPCClient, RemoteException
if sys.version_info > (3,):
long = int
@pytest.fixture
def client(request):
if sys.platform.startswith('win'):
auth_path = os.path.join(os.getenv('APPDATA'), 'deluge', 'auth')
else:
auth_path = os.path.expanduser("~/.config/deluge/auth")
with open(auth_path, 'rb') as f:
filedata = f.read().decode("utf-8").split('\n')[0].split(':')
username, password = filedata[:2]
ip = '127.0.0.1'
port = 58846
kwargs = {'decode_utf8': True}
if hasattr(request, 'param'):
kwargs.update(request.param)
client = DelugeRPCClient(ip, port, username, password, **kwargs)
client.connect()
yield client
try:
client.disconnect()
except:
pass
def test_connect(client):
assert client.connected
def test_call_method(client):
assert isinstance(client.call('core.get_free_space'), (int, long))
def test_call_method_arguments(client):
assert isinstance(client.call('core.get_free_space', '/'), (int, long))
@pytest.mark.parametrize('client',
[{'decode_utf8': True}, {'decode_utf8': False}],
ids=['decode_utf8_on', 'decode_utf8_off'],
indirect=True)
def test_call_method_exception(client):
with pytest.raises(RemoteException) as ex_info:
client.call('core.get_free_space', '1', '2')
assert ('takes at most 2 arguments' in str(ex_info.value) or
'takes from 1 to 2 positional arguments' in str(ex_info.value)) # deluge 2.0
def test_attr_caller(client):
assert isinstance(client.core.get_free_space(), (int, long))
assert isinstance(client.core.get_free_space('/'), (int, long))
| 1,783 | Python | .py | 45 | 33.133333 | 89 | 0.645518 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,474 | client.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/client.py |
import os
import platform
from collections import defaultdict
from itertools import imap
from synchronousdeluge.exceptions import DelugeRPCError
from synchronousdeluge.protocol import DelugeRPCRequest, DelugeRPCResponse
from synchronousdeluge.transfer import DelugeTransfer
__all__ = ["DelugeClient"]
RPC_RESPONSE = 1
RPC_ERROR = 2
RPC_EVENT = 3
class DelugeClient(object):
def __init__(self):
"""A deluge client session."""
self.transfer = DelugeTransfer()
self.modules = []
self._request_counter = 0
def _get_local_auth(self):
auth_file = ""
username = password = ""
if platform.system() in ('Windows', 'Microsoft'):
appDataPath = os.environ.get("APPDATA")
if not appDataPath:
import _winreg
hkey = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, "Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders")
appDataReg = _winreg.QueryValueEx(hkey, "AppData")
appDataPath = appDataReg[0]
_winreg.CloseKey(hkey)
auth_file = os.path.join(appDataPath, "deluge", "auth")
else:
from xdg.BaseDirectory import save_config_path
try:
auth_file = os.path.join(save_config_path("deluge"), "auth")
except OSError, e:
return username, password
if os.path.exists(auth_file):
for line in open(auth_file):
if line.startswith("#"):
# This is a comment line
continue
line = line.strip()
try:
lsplit = line.split(":")
except Exception, e:
continue
if len(lsplit) == 2:
username, password = lsplit
elif len(lsplit) == 3:
username, password, level = lsplit
else:
continue
if username == "localclient":
return (username, password)
return ("", "")
def _create_module_method(self, module, method):
fullname = "{0}.{1}".format(module, method)
def func(obj, *args, **kwargs):
return self.remote_call(fullname, *args, **kwargs)
func.__name__ = method
return func
def _introspect(self):
self.modules = []
methods = self.remote_call("daemon.get_method_list").get()
methodmap = defaultdict(dict)
splitter = lambda v: v.split(".")
for module, method in imap(splitter, methods):
methodmap[module][method] = self._create_module_method(module, method)
for module, methods in methodmap.items():
clsname = "DelugeModule{0}".format(module.capitalize())
cls = type(clsname, (), methods)
setattr(self, module, cls())
self.modules.append(module)
def remote_call(self, method, *args, **kwargs):
req = DelugeRPCRequest(self._request_counter, method, *args, **kwargs)
message = next(self.transfer.send_request(req))
response = DelugeRPCResponse()
if not isinstance(message, tuple):
return
if len(message) < 3:
return
message_type = message[0]
# if message_type == RPC_EVENT:
# event = message[1]
# values = message[2]
#
# if event in self._event_handlers:
# for handler in self._event_handlers[event]:
# gevent.spawn(handler, *values)
#
# elif message_type in (RPC_RESPONSE, RPC_ERROR):
if message_type in (RPC_RESPONSE, RPC_ERROR):
request_id = message[1]
value = message[2]
            if request_id == self._request_counter:
if message_type == RPC_RESPONSE:
response.set(value)
elif message_type == RPC_ERROR:
err = DelugeRPCError(*value)
response.set_exception(err)
self._request_counter += 1
return response
def connect(self, host="127.0.0.1", port=58846, username="", password=""):
"""Connects to a daemon process.
:param host: str, the hostname of the daemon
:param port: int, the port of the daemon
:param username: str, the username to login with
:param password: str, the password to login with
"""
# Connect transport
self.transfer.connect((host, port))
# Attempt to fetch local auth info if needed
if not username and host in ("127.0.0.1", "localhost"):
username, password = self._get_local_auth()
# Authenticate
self.remote_call("daemon.login", username, password).get()
# Introspect available methods
self._introspect()
@property
def connected(self):
return self.transfer.connected
def disconnect(self):
"""Disconnects from the daemon."""
self.transfer.disconnect()
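# Illustrative sketch (an editorial addition, not part of the module),
# mirroring the example in the package docstring: connect to a local daemon
# and read a config value synchronously through the response object's get().
# The daemon address and config key are placeholders.
if __name__ == '__main__':
    example_client = DelugeClient()
    example_client.connect()
    download_location = example_client.core.get_config_value("download_location").get()
    print(download_location)
    example_client.disconnect()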
| 5,078 | Python | .py | 123 | 30.829268 | 138 | 0.576282 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,475 | protocol.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/protocol.py |
__all__ = ["DelugeRPCRequest", "DelugeRPCResponse"]
class DelugeRPCRequest(object):
def __init__(self, request_id, method, *args, **kwargs):
self.request_id = request_id
self.method = method
self.args = args
self.kwargs = kwargs
def format(self):
return (self.request_id, self.method, self.args, self.kwargs)
class DelugeRPCResponse(object):
def __init__(self):
self.value = None
self._exception = None
def successful(self):
return self._exception is None
@property
def exception(self):
if self._exception is not None:
return self._exception
def set(self, value=None):
self.value = value
self._exception = None
def set_exception(self, exception):
self._exception = exception
def get(self):
if self._exception is None:
return self.value
else:
raise self._exception
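# Illustrative sketch (an editorial addition, not part of the module):
# DelugeRPCResponse behaves like a minimal future - get() returns the value
# stored with set(), or re-raises whatever was stored with set_exception().
if __name__ == '__main__':
    ok = DelugeRPCResponse()
    ok.set(42)
    assert ok.successful() and ok.get() == 42
    failed = DelugeRPCResponse()
    failed.set_exception(RuntimeError('example failure'))
    assert not failed.successful()
    try:
        failed.get()
    except RuntimeError as exc:
        print('re-raised: %s' % exc)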
| 957 | Python | .py | 29 | 25.344828 | 69 | 0.621328 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,476 | rencode.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/rencode.py |
"""
rencode -- Web safe object pickling/unpickling.
Public domain, Connelly Barnes 2006-2007.
The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:
>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26
The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""
__version__ = '1.0.1'
__all__ = ['dumps', 'loads']
# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by Bram Cohen under the "MIT license":
#
# "Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
import struct
import string
from threading import Lock
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = chr(59)
CHR_DICT = chr(60)
CHR_INT = chr(61)
CHR_INT1 = chr(62)
CHR_INT2 = chr(63)
CHR_INT4 = chr(64)
CHR_INT8 = chr(65)
CHR_FLOAT32 = chr(66)
CHR_FLOAT64 = chr(44)
CHR_TRUE = chr(67)
CHR_FALSE = chr(68)
CHR_NONE = chr(69)
CHR_TERM = chr(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f+1])[0], f+1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f+2])[0], f+2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f+4])[0], f+4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f+8])[0], f+8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f+4])[0]
return (n, f+4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f+8])[0]
return (n, f+8)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
s = x[colon:colon+n]
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != CHR_TERM:
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != CHR_TERM:
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f+1)
def decode_false(x, f):
return (False, f+1)
def decode_none(x, f):
return (None, f+1)
decode_func = {}
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
decode_func[CHR_LIST ] = decode_list
decode_func[CHR_DICT ] = decode_dict
decode_func[CHR_INT ] = decode_int
decode_func[CHR_INT1 ] = decode_intb
decode_func[CHR_INT2 ] = decode_inth
decode_func[CHR_INT4 ] = decode_intl
decode_func[CHR_INT8 ] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE ] = decode_true
decode_func[CHR_FALSE ] = decode_false
decode_func[CHR_NONE ] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f+1:f+1+slen]
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, f+1+slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f+1
for i in range(slen):
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f+1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f+1
for j in range(slen):
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
make_fixed_length_dict_decoders()
def encode_dict(x,r):
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
def loads(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType, UnicodeType
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(chr(INT_POS_FIXED_START+x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(chr(INT_NEG_FIXED_START-1-x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.extend(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((chr(STR_FIXED_START + len(x)), x))
else:
r.extend((str(len(x)), ':', x))
def encode_unicode(x, r):
encode_string(x.encode("utf8"), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(chr(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x,r):
if len(x) < DICT_FIXED_COUNT:
r.append(chr(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[NoneType] = encode_none
encode_func[UnicodeType] = encode_unicode
lock = Lock()
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
lock.acquire()
try:
if float_bits == 32:
encode_func[FloatType] = encode_float32
elif float_bits == 64:
encode_func[FloatType] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
finally:
lock.release()
return ''.join(r)
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),)
assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000)))
d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False})
L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''})
assert loads(dumps(L)) == L
L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([tuple(range(n)) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + (None,True,None)
assert loads(dumps(L)) == L
assert loads(dumps(None)) == None
assert loads(dumps({None:None})) == {None:None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12
assert loads(dumps(u"Hello World!!"))
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
| 12,982 | Python | .py | 374 | 29.938503 | 205 | 0.623237 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,477 | transfer.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/transfer.py |
import zlib
import struct
import socket
import ssl
from synchronousdeluge import rencode
__all__ = ["DelugeTransfer"]
class DelugeTransfer(object):
def __init__(self):
self.sock = None
self.conn = None
self.connected = False
def connect(self, hostport):
if self.connected:
self.disconnect()
self.sock = socket.create_connection(hostport)
self.conn = ssl.wrap_socket(self.sock, None, None, False, ssl.CERT_NONE, ssl.PROTOCOL_TLSv1_2)
self.connected = True
def disconnect(self):
if self.conn:
self.conn.close()
self.connected = False
def send_request(self, request):
data = (request.format(),)
payload = zlib.compress(rencode.dumps(data))
self.conn.sendall(payload)
buf = b""
while True:
data = self.conn.recv(1024)
if not data:
self.connected = False
break
buf += data
dobj = zlib.decompressobj()
try:
message = rencode.loads(dobj.decompress(buf))
except (ValueError, zlib.error, struct.error):
# Probably incomplete data, read more
continue
else:
buf = dobj.unused_data
yield message
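# Illustrative consumption sketch (an editorial addition, not part of the
# module): send_request() is a generator, so callers pull decoded messages
# from it one at a time - synchronousdeluge.client does exactly this with
# next(). The daemon address and RPC method are placeholders and assume a
# running, SSL-enabled Deluge daemon.
if __name__ == '__main__':
    from synchronousdeluge.protocol import DelugeRPCRequest
    transfer = DelugeTransfer()
    transfer.connect(('127.0.0.1', 58846))
    request = DelugeRPCRequest(0, 'daemon.info')
    first_message = next(transfer.send_request(request))
    print(first_message)
    transfer.disconnect()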
| 1,348 | Python | .py | 41 | 23 | 102 | 0.576297 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,478 | __init__.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/__init__.py |
"""A synchronous implementation of the Deluge RPC protocol
based on gevent-deluge by Christopher Rosell.
https://github.com/chrippa/gevent-deluge
Example usage:
    from synchronousdeluge import DelugeClient
client = DelugeClient()
client.connect()
# Wait for value
download_location = client.core.get_config_value("download_location").get()
"""
__title__ = "synchronous-deluge"
__version__ = "0.1"
__author__ = "Christian Dale"
from synchronousdeluge.client import DelugeClient
from synchronousdeluge.exceptions import DelugeRPCError
| 569 | Python | .py | 15 | 34.4 | 79 | 0.771218 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,479 | exceptions.py | CouchPotato_CouchPotatoServer/libs/synchronousdeluge/exceptions.py |
__all__ = ["DelugeRPCError"]
class DelugeRPCError(Exception):
def __init__(self, name, msg, traceback):
self.name = name
self.msg = msg
self.traceback = traceback
def __str__(self):
return "{0}: {1}: {2}".format(self.__class__.__name__, self.name, self.msg)
| 301 | Python | .py | 8 | 31.25 | 83 | 0.582759 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,480 | __init__.py | CouchPotato_CouchPotatoServer/libs/backports/__init__.py |
# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| 155 | Python | .py | 3 | 50.666667 | 79 | 0.710526 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,481 | __init__.py | CouchPotato_CouchPotatoServer/libs/backports/ssl_match_hostname/__init__.py |
"""The match_hostname() function from Python 3.2, essential when using SSL."""
import re
__version__ = '3.2a3'
class CertificateError(ValueError):
pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not san:
# The subject is only checked when subjectAltName is empty
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| 2,139 | Python | .py | 54 | 30.351852 | 78 | 0.578644 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,482 | __init__.py | CouchPotato_CouchPotatoServer/libs/ndg/__init__.py |
"""ndg_httpsclient - PyOpenSSL utility to make a httplib-like interface suitable
for use with urllib2
This is a setuptools namespace_package. DO NOT place any other
code in this file! There is no guarantee that it will be installed
with easy_install. See:
http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
... for details.
"""
__author__ = "P J Kershaw"
__date__ = "06/01/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
__import__('pkg_resources').declare_namespace(__name__)
| 653 | Python | .py | 15 | 42.333333 | 80 | 0.738583 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,483 | ssl_peer_verification.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/ssl_peer_verification.py |
"""ndg_httpsclient - module containing SSL peer verification class.
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import re
import logging
log = logging.getLogger(__name__)
try:
from ndg.httpsclient.subj_alt_name import SubjectAltName
from pyasn1.codec.der import decoder as der_decoder
SUBJ_ALT_NAME_SUPPORT = True
except ImportError as e:
SUBJ_ALT_NAME_SUPPORT = False
SUBJ_ALT_NAME_SUPPORT_MSG = (
'SubjectAltName support is disabled - check pyasn1 package '
'installation to enable'
)
import warnings
warnings.warn(SUBJ_ALT_NAME_SUPPORT_MSG)
class ServerSSLCertVerification(object):
"""Check server identity. If hostname doesn't match, allow match of
host's Distinguished Name against server DN setting"""
DN_LUT = {
'commonName': 'CN',
'organisationalUnitName': 'OU',
'organisation': 'O',
'countryName': 'C',
'emailAddress': 'EMAILADDRESS',
'localityName': 'L',
'stateOrProvinceName': 'ST',
'streetAddress': 'STREET',
'domainComponent': 'DC',
'userid': 'UID'
}
SUBJ_ALT_NAME_EXT_NAME = 'subjectAltName'
PARSER_RE_STR = '/(%s)=' % '|'.join(list(DN_LUT.keys()) + list(DN_LUT.values()))
PARSER_RE = re.compile(PARSER_RE_STR)
__slots__ = ('__hostname', '__certDN', '__subj_alt_name_match')
def __init__(self, certDN=None, hostname=None, subj_alt_name_match=True):
"""Override parent class __init__ to enable setting of certDN
setting
@type certDN: string
@param certDN: Set the expected Distinguished Name of the
server to avoid errors matching hostnames. This is useful
where the hostname is not fully qualified
@type hostname: string
@param hostname: hostname to match against peer certificate
subjectAltNames or subject common name
@type subj_alt_name_match: bool
@param subj_alt_name_match: flag to enable/disable matching of hostname
against peer certificate subjectAltNames. Nb. A setting of True will
be ignored if the pyasn1 package is not installed
"""
self.__certDN = None
self.__hostname = None
if certDN is not None:
self.certDN = certDN
if hostname is not None:
self.hostname = hostname
if subj_alt_name_match:
if not SUBJ_ALT_NAME_SUPPORT:
log.warning('Overriding "subj_alt_name_match" keyword setting: '
'peer verification with subjectAltNames is disabled')
self.__subj_alt_name_match = False
else:
self.__subj_alt_name_match = True
else:
log.debug('Disabling peer verification with subject '
'subjectAltNames!')
self.__subj_alt_name_match = False
def __call__(self, connection, peerCert, errorStatus, errorDepth,
preverifyOK):
"""Verify server certificate
@type connection: OpenSSL.SSL.Connection
@param connection: SSL connection object
@type peerCert: basestring
@param peerCert: server host certificate as OpenSSL.crypto.X509
instance
@type errorStatus: int
@param errorStatus: error status passed from caller. This is the value
returned by the OpenSSL C function X509_STORE_CTX_get_error(). Look-up
x509_vfy.h in the OpenSSL source to get the meanings of the different
codes. PyOpenSSL doesn't help you!
@type errorDepth: int
@param errorDepth: a non-negative integer representing where in the
    certificate chain the error occurred. If it is zero it occurred in the
end entity certificate, one if it is the certificate which signed the
end entity certificate and so on.
@type preverifyOK: int
@param preverifyOK: the error status - 0 = Error, 1 = OK of the current
SSL context irrespective of any verification checks done here. If this
function yields an OK status, it should enforce the preverifyOK value
so that any error set upstream overrides and is honoured.
@rtype: int
@return: status code - 0/False = Error, 1/True = OK
"""
if peerCert.has_expired():
# Any expired certificate in the chain should result in an error
log.error('Certificate %r in peer certificate chain has expired',
peerCert.get_subject())
return False
elif errorDepth == 0:
# Only interested in DN of last certificate in the chain - this must
# match the expected Server DN setting
peerCertSubj = peerCert.get_subject()
peerCertDN = peerCertSubj.get_components()
peerCertDN.sort()
if self.certDN is None:
# Check hostname against peer certificate CN field instead:
if self.hostname is None:
log.error('No "hostname" or "certDN" set to check peer '
'certificate against')
return False
# Check for subject alternative names
if self.__subj_alt_name_match:
dns_names = self._get_subj_alt_name(peerCert)
if self.hostname in dns_names:
return preverifyOK
# If no subjectAltNames, default to check of subject Common Name
if peerCertSubj.commonName == self.hostname:
return preverifyOK
else:
log.error('Peer certificate CN %r doesn\'t match the '
'expected CN %r', peerCertSubj.commonName,
self.hostname)
return False
else:
if peerCertDN == self.certDN:
return preverifyOK
else:
log.error('Peer certificate DN %r doesn\'t match the '
'expected DN %r', peerCertDN, self.certDN)
return False
else:
return preverifyOK
def get_verify_server_cert_func(self):
def verify_server_cert(connection, peerCert, errorStatus, errorDepth,
preverifyOK):
return self.__call__(connection, peerCert, errorStatus,
errorDepth, preverifyOK)
return verify_server_cert
@classmethod
def _get_subj_alt_name(cls, peer_cert):
'''Extract subjectAltName DNS name settings from certificate extensions
@param peer_cert: peer certificate in SSL connection. subjectAltName
settings if any will be extracted from this
@type peer_cert: OpenSSL.crypto.X509
'''
# Search through extensions
dns_name = []
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name == cls.SUBJ_ALT_NAME_EXT_NAME:
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if isinstance(name, SubjectAltName):
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
dns_name.append(str(component.getComponent()))
return dns_name
def _getCertDN(self):
return self.__certDN
def _setCertDN(self, val):
if isinstance(val, str):
# Allow for quoted DN
certDN = val.strip('"')
dnFields = self.__class__.PARSER_RE.split(certDN)
if len(dnFields) < 2:
raise TypeError('Error parsing DN string: "%s"' % certDN)
self.__certDN = list(zip(dnFields[1::2], dnFields[2::2]))
self.__certDN.sort()
        elif isinstance(val, list):
for i in val:
if not len(i) == 2:
raise TypeError('Expecting list of two element DN field, '
'DN field value pairs for "certDN" '
'attribute')
self.__certDN = val
else:
raise TypeError('Expecting list or string type for "certDN" '
'attribute')
certDN = property(fget=_getCertDN,
fset=_setCertDN,
doc="Distinguished Name for Server Certificate")
# Get/Set Property methods
def _getHostname(self):
return self.__hostname
def _setHostname(self, val):
if not isinstance(val, str):
raise TypeError("Expecting string type for hostname "
"attribute")
self.__hostname = val
hostname = property(fget=_getHostname,
fset=_setHostname,
doc="hostname of server")
| 9,661 | Python | .py | 204 | 34.539216 | 84 | 0.580617 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,484 | ssl_context_util.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/ssl_context_util.py |
"""ndg_httpsclient SSL Context utilities module containing convenience routines
for setting SSL context configuration.
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import sys
if sys.version_info[0] > 2:
import urllib.parse as urlparse_
else:
import urlparse as urlparse_
from OpenSSL import SSL
from ndg.httpsclient.ssl_peer_verification import ServerSSLCertVerification
class SSlContextConfig(object):
"""
Holds configuration options for creating a SSL context. This is used as a
template to create the contexts with specific verification callbacks.
"""
def __init__(self, key_file=None, cert_file=None, pem_file=None, ca_dir=None,
verify_peer=False):
self.key_file = key_file
self.cert_file = cert_file
self.pem_file = pem_file
self.ca_dir = ca_dir
self.verify_peer = verify_peer
def make_ssl_context_from_config(ssl_config=False, url=None):
return make_ssl_context(ssl_config.key_file, ssl_config.cert_file,
ssl_config.pem_file, ssl_config.ca_dir,
ssl_config.verify_peer, url)
def make_ssl_context(key_file=None, cert_file=None, pem_file=None, ca_dir=None,
verify_peer=False, url=None, method=SSL.TLSv1_METHOD,
key_file_passphrase=None):
"""
Creates SSL context containing certificate and key file locations.
"""
ssl_context = SSL.Context(method)
# Key file defaults to certificate file if present.
if cert_file:
ssl_context.use_certificate_file(cert_file)
if key_file_passphrase:
passwd_cb = lambda max_passphrase_len, set_prompt, userdata: \
key_file_passphrase
ssl_context.set_passwd_cb(passwd_cb)
if key_file:
ssl_context.use_privatekey_file(key_file)
elif cert_file:
ssl_context.use_privatekey_file(cert_file)
if pem_file or ca_dir:
ssl_context.load_verify_locations(pem_file, ca_dir)
def _callback(conn, x509, errnum, errdepth, preverify_ok):
"""Default certification verification callback.
Performs no checks and returns the status passed in.
"""
return preverify_ok
verify_callback = _callback
if verify_peer:
ssl_context.set_verify_depth(9)
if url:
set_peer_verification_for_url_hostname(ssl_context, url)
else:
ssl_context.set_verify(SSL.VERIFY_PEER, verify_callback)
else:
ssl_context.set_verify(SSL.VERIFY_NONE, verify_callback)
return ssl_context
def set_peer_verification_for_url_hostname(ssl_context, url,
if_verify_enabled=False):
'''Convenience routine to set peer verification callback based on
ServerSSLCertVerification class'''
if not if_verify_enabled or (ssl_context.get_verify_mode() & SSL.VERIFY_PEER):
urlObj = urlparse_.urlparse(url)
hostname = urlObj.hostname
server_ssl_cert_verif = ServerSSLCertVerification(hostname=hostname)
verify_callback_ = server_ssl_cert_verif.get_verify_server_cert_func()
ssl_context.set_verify(SSL.VERIFY_PEER, verify_callback_)
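# Illustrative sketch (an editorial addition, not part of the module): build a
# context that verifies the peer against a CA bundle and checks the hostname
# taken from the URL. The bundle path and URL are placeholders.
if __name__ == '__main__':
    context = make_ssl_context(pem_file='/path/to/ca-bundle.pem',
                               verify_peer=True,
                               url='https://example.com/')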
| 3,458 | Python | .py | 77 | 36.636364 | 82 | 0.671575 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,485 | urllib2_build_opener.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/urllib2_build_opener.py |
"""urllib2 style build opener integrates with HTTPSConnection class from this
package.
"""
__author__ = "P J Kershaw"
__date__ = "21/12/10"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
import sys
# Py 2 <=> 3 compatibility for class type checking
if sys.version_info[0] > 2:
class_type_ = type
from urllib.request import (ProxyHandler, UnknownHandler,
HTTPDefaultErrorHandler, FTPHandler,
FileHandler, HTTPErrorProcessor,
HTTPHandler, OpenerDirector,
HTTPRedirectHandler)
else:
import types
class_type_ = types.ClassType
from urllib2 import (ProxyHandler, UnknownHandler, HTTPDefaultErrorHandler,
FTPHandler, FileHandler, HTTPErrorProcessor,
HTTPHandler, OpenerDirector, HTTPRedirectHandler)
from ndg.httpsclient.https import HTTPSContextHandler
log = logging.getLogger(__name__)
# Copied from urllib2 with modifications for ssl
def build_opener(*handlers, **kw):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
def isclass(obj):
return isinstance(obj, class_type_) or hasattr(obj, "__bases__")
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
check_classes = list(default_classes)
check_classes.append(HTTPSContextHandler)
skip = []
for klass in check_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.append(klass)
elif isinstance(check, klass):
skip.append(klass)
for klass in default_classes:
if klass not in skip:
opener.add_handler(klass())
# Pick up SSL context from keyword settings
ssl_context = kw.get('ssl_context')
# Add the HTTPS handler with ssl_context
if HTTPSContextHandler not in skip:
opener.add_handler(HTTPSContextHandler(ssl_context))
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
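# Illustrative sketch (an editorial addition, not part of the module): the
# opener picks up the SSL context through the 'ssl_context' keyword and routes
# HTTPS requests through HTTPSContextHandler. The URL is a placeholder and
# make_ssl_context from ndg.httpsclient.ssl_context_util is assumed available.
if __name__ == '__main__':
    from ndg.httpsclient.ssl_context_util import make_ssl_context
    opener = build_opener(ssl_context=make_ssl_context(verify_peer=False))
    response = opener.open('https://example.com/')
    print(response.getcode())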
| 2,689 | Python | .py | 64 | 33.015625 | 80 | 0.654188 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,486 | subj_alt_name.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/subj_alt_name.py |
"""NDG HTTPS Client package
Use pyasn1 to provide support for parsing ASN.1 formatted subjectAltName
content for SSL peer verification. Code based on:
http://stackoverflow.com/questions/5519958/how-do-i-parse-subjectaltname-extension-data-using-pyasn1
"""
__author__ = "P J Kershaw"
__date__ = "01/02/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
try:
from pyasn1.type import univ, constraint, char, namedtype, tag
except ImportError as e:
import_error_msg = ('Error importing pyasn1, subjectAltName check for SSL '
'peer verification will be disabled. Import error '
'is: %s' % e)
import warnings
warnings.warn(import_error_msg)
class Pyasn1ImportError(ImportError):
"Raise for pyasn1 import error"
raise Pyasn1ImportError(import_error_msg)
MAX = 64
class DirectoryString(univ.Choice):
"""ASN.1 Directory string class"""
componentType = namedtype.NamedTypes(
namedtype.NamedType(
'teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType(
'ia5String', char.IA5String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
)
class AttributeValue(DirectoryString):
"""ASN.1 Attribute value"""
class AttributeType(univ.ObjectIdentifier):
"""ASN.1 Attribute type"""
class AttributeTypeAndValue(univ.Sequence):
"""ASN.1 Attribute type and value class"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue()),
)
class RelativeDistinguishedName(univ.SetOf):
    '''ASN.1 Relative distinguished name'''
componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
'''ASN.1 RDN sequence class'''
componentType = RelativeDistinguishedName()
class Name(univ.Choice):
'''ASN.1 name class'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('', RDNSequence()),
)
class Extension(univ.Sequence):
'''ASN.1 extension class'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
namedtype.NamedType('extnValue', univ.OctetString()),
)
class Extensions(univ.SequenceOf):
'''ASN.1 extensions class'''
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any().subtype(
explicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0)))
)
class GeneralName(univ.Choice):
'''ASN.1 configuration for X.509 certificate subjectAltNames fields'''
componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName', AnotherName().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 0))),
namedtype.NamedType('rfc822Name', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 2))),
# namedtype.NamedType('x400Address', ORAddress().subtype(
# implicitTag=tag.Tag(tag.tagClassContext,
# tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName', Name().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 4))),
# namedtype.NamedType('ediPartyName', EDIPartyName().subtype(
# implicitTag=tag.Tag(tag.tagClassContext,
# tag.tagFormatSimple, 5))),
namedtype.NamedType('uniformResourceIdentifier', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext,
tag.tagFormatSimple, 8))),
)
class GeneralNames(univ.SequenceOf):
'''Sequence of names for ASN.1 subjectAltNames settings'''
componentType = GeneralName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectAltName(GeneralNames):
'''ASN.1 implementation for subjectAltNames support'''
| 6,131 | Python | .py | 121 | 39.652893 | 100 | 0.62521 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
7,487 | https.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/https.py |
"""ndg_httpsclient HTTPS module containing PyOpenSSL implementation of
httplib.HTTPSConnection
PyOpenSSL utility to make a httplib-like interface suitable for use with
urllib2
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "09/12/11"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
import socket
import sys
if sys.version_info[0] > 2:
from http.client import HTTPS_PORT
from http.client import HTTPConnection
from urllib.request import AbstractHTTPHandler
else:
from httplib import HTTPS_PORT
from httplib import HTTPConnection
from urllib2 import AbstractHTTPHandler
from OpenSSL import SSL
from ndg.httpsclient.ssl_socket import SSLSocket
log = logging.getLogger(__name__)
class HTTPSConnection(HTTPConnection):
"""This class allows communication via SSL using PyOpenSSL.
It is based on httplib.HTTPSConnection, modified to use PyOpenSSL.
Note: This uses the constructor inherited from HTTPConnection to allow it to
be used with httplib and HTTPSContextHandler. To use the class directly with
an SSL context set ssl_context after construction.
@cvar default_port: default port for this class (443)
@type default_port: int
@cvar default_ssl_method: default SSL method used if no SSL context is
explicitly set - defaults to version 2/3.
@type default_ssl_method: int
"""
default_port = HTTPS_PORT
default_ssl_method = SSL.SSLv23_METHOD
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None):
HTTPConnection.__init__(self, host, port, strict, timeout)
if not hasattr(self, 'ssl_context'):
self.ssl_context = None
if ssl_context is not None:
if not isinstance(ssl_context, SSL.Context):
raise TypeError('Expecting OpenSSL.SSL.Context type for "'
'ssl_context" keyword; got %r instead' %
ssl_context)
self.ssl_context = ssl_context
def connect(self):
"""Create SSL socket and connect to peer
"""
if getattr(self, 'ssl_context', None):
if not isinstance(self.ssl_context, SSL.Context):
raise TypeError('Expecting OpenSSL.SSL.Context type for "'
'ssl_context" attribute; got %r instead' %
self.ssl_context)
ssl_context = self.ssl_context
else:
ssl_context = SSL.Context(self.__class__.default_ssl_method)
sock = socket.create_connection((self.host, self.port), self.timeout)
# Tunnel if using a proxy - ONLY available for Python 2.6.2 and above
if getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
self.sock = SSLSocket(ssl_context, sock)
# Go to client mode.
self.sock.set_connect_state()
def close(self):
"""Close socket and shut down SSL connection"""
if hasattr(self.sock, "close"):
self.sock.close()
class HTTPSContextHandler(AbstractHTTPHandler):
'''HTTPS handler that allows a SSL context to be set for the SSL
connections.
'''
https_request = AbstractHTTPHandler.do_request_
def __init__(self, ssl_context, debuglevel=0):
"""
@param ssl_context:SSL context
@type ssl_context: OpenSSL.SSL.Context
@param debuglevel: debug level for HTTPSHandler
@type debuglevel: int
"""
AbstractHTTPHandler.__init__(self, debuglevel)
if ssl_context is not None:
if not isinstance(ssl_context, SSL.Context):
raise TypeError('Expecting OpenSSL.SSL.Context type for "'
'ssl_context" keyword; got %r instead' %
ssl_context)
self.ssl_context = ssl_context
else:
self.ssl_context = SSL.Context(SSL.TLSv1_METHOD)
def https_open(self, req):
"""Opens HTTPS request
@param req: HTTP request
@return: HTTP Response object
"""
# Make a custom class extending HTTPSConnection, with the SSL context
# set as a class variable so that it is available to the connect method.
customHTTPSContextConnection = type('CustomHTTPSContextConnection',
(HTTPSConnection, object),
{'ssl_context': self.ssl_context})
return self.do_open(customHTTPSContextConnection, req)
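
# Illustrative sketch (not part of the original module): wiring
# HTTPSContextHandler into a standard opener.  The TLS method and the target
# URL are assumptions made only for this example; any OpenSSL.SSL.Context
# instance works.
def _https_context_handler_example():
    if sys.version_info[0] > 2:
        from urllib.request import build_opener
    else:
        from urllib2 import build_opener
    ssl_context = SSL.Context(SSL.TLSv1_METHOD)
    opener = build_opener(HTTPSContextHandler(ssl_context))
    return opener.open('https://www.example.org/')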
| 4,834 | Python | .py | 105 | 35.8 | 83 | 0.641358 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,488 | utils.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/utils.py |
"""Utilities using NDG HTTPS Client, including a main module that can be used to
fetch from a URL.
"""
__author__ = "R B Wilkinson"
__date__ = "09/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import logging
from optparse import OptionParser
import os
import sys
if sys.version_info[0] > 2:
import http.cookiejar as cookiejar_
import http.client as http_client_
from urllib.request import Request as Request_
from urllib.request import HTTPHandler as HTTPHandler_
from urllib.request import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib.request import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib.request import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib.request import ProxyHandler as ProxyHandler_
from urllib.error import HTTPError as HTTPError_
import urllib.parse as urlparse_
else:
import cookielib as cookiejar_
import httplib as http_client_
from urllib2 import Request as Request_
from urllib2 import HTTPHandler as HTTPHandler_
from urllib2 import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib2 import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib2 import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib2 import ProxyHandler as ProxyHandler_
from urllib2 import HTTPError as HTTPError_
import urlparse as urlparse_
from ndg.httpsclient.urllib2_build_opener import build_opener
from ndg.httpsclient.https import HTTPSContextHandler
from ndg.httpsclient import ssl_context_util
log = logging.getLogger(__name__)
class AccumulatingHTTPCookieProcessor(HTTPCookieProcessor_):
"""Cookie processor that adds new cookies (instead of replacing the existing
ones as HTTPCookieProcessor does)
"""
def http_request(self, request):
"""Processes cookies for a HTTP request.
@param request: request to process
@type request: urllib2.Request
@return: request
@rtype: urllib2.Request
"""
COOKIE_HEADER_NAME = "Cookie"
tmp_request = Request_(request.get_full_url(), request.data, {},
request.origin_req_host,
request.unverifiable)
self.cookiejar.add_cookie_header(tmp_request)
# Combine existing and new cookies.
new_cookies = tmp_request.get_header(COOKIE_HEADER_NAME)
if new_cookies:
if request.has_header(COOKIE_HEADER_NAME):
# Merge new cookies with existing ones.
old_cookies = request.get_header(COOKIE_HEADER_NAME)
merged_cookies = '; '.join([old_cookies, new_cookies])
request.add_unredirected_header(COOKIE_HEADER_NAME,
merged_cookies)
else:
# No existing cookies so just set new ones.
request.add_unredirected_header(COOKIE_HEADER_NAME, new_cookies)
return request
# Process cookies for HTTPS in the same way.
https_request = http_request
class URLFetchError(Exception):
"""Error fetching content from URL"""
def fetch_from_url(url, config, data=None, handlers=None):
"""Returns data retrieved from a URL.
@param url: URL to attempt to open
@type url: basestring
@param config: SSL context configuration
@type config: Configuration
@return data retrieved from URL or None
"""
return_code, return_message, response = open_url(url, config, data=data,
handlers=handlers)
if return_code and return_code == http_client_.OK:
return_data = response.read()
response.close()
return return_data
else:
raise URLFetchError(return_message)
def fetch_from_url_to_file(url, config, output_file, data=None, handlers=None):
"""Writes data retrieved from a URL to a file.
@param url: URL to attempt to open
@type url: basestring
@param config: SSL context configuration
@type config: Configuration
@param output_file: output file
@type output_file: basestring
@return: tuple (
returned HTTP status code or 0 if an error occurred
returned message
boolean indicating whether access was successful)
"""
return_code, return_message, response = open_url(url, config, data=data,
handlers=handlers)
if return_code == http_client_.OK:
return_data = response.read()
response.close()
outfile = open(output_file, "w")
outfile.write(return_data)
outfile.close()
return return_code, return_message, return_code == http_client_.OK
def fetch_stream_from_url(url, config, data=None, handlers=None):
"""Returns data retrieved from a URL.
@param url: URL to attempt to open
@type url: basestring
@param config: SSL context configuration
@type config: Configuration
@param data: HTTP POST data
@type data: str
@param handlers: list of custom urllib2 handlers to add to the request
@type handlers: iterable
@return: data retrieved from URL or None
@rtype: file derived type
"""
return_code, return_message, response = open_url(url, config, data=data,
handlers=handlers)
if return_code and return_code == http_client_.OK:
return response
else:
raise URLFetchError(return_message)
def open_url(url, config, data=None, handlers=None):
"""Attempts to open a connection to a specified URL.
@param url: URL to attempt to open
@param config: SSL context configuration
@type config: Configuration
@param data: HTTP POST data
@type data: str
@param handlers: list of custom urllib2 handlers to add to the request
@type handlers: iterable
@return: tuple (
returned HTTP status code or 0 if an error occurred
returned message or error description
response object)
"""
debuglevel = 1 if config.debug else 0
# Set up handlers for URL opener.
if config.cookie:
cj = config.cookie
else:
cj = cookiejar_.CookieJar()
# Use a cookie processor that accumulates cookies when redirects occur so
# that an application can redirect for authentication and retain both any
# cookies for the application and the security system (c.f.,
# urllib2.HTTPCookieProcessor which replaces cookies).
cookie_handler = AccumulatingHTTPCookieProcessor(cj)
if not handlers:
handlers = []
handlers.append(cookie_handler)
if config.debug:
http_handler = HTTPHandler_(debuglevel=debuglevel)
https_handler = HTTPSContextHandler(config.ssl_context,
debuglevel=debuglevel)
handlers.extend([http_handler, https_handler])
if config.http_basicauth:
# currently only supports http basic auth
auth_handler = HTTPBasicAuthHandler_(HTTPPasswordMgrWithDefaultRealm_())
        # Configuration stores the credentials as 'http_basicauth', not 'httpauth'
        auth_handler.add_password(realm=None, uri=url,
                                  user=config.http_basicauth[0],
                                  passwd=config.http_basicauth[1])
handlers.append(auth_handler)
# Explicitly remove proxy handling if the host is one listed in the value of
# the no_proxy environment variable because urllib2 does use proxy settings
# set via http_proxy and https_proxy, but does not take the no_proxy value
# into account.
if not _should_use_proxy(url, config.no_proxy):
handlers.append(ProxyHandler_({}))
log.debug("Not using proxy")
elif config.proxies:
handlers.append(ProxyHandler_(config.proxies))
log.debug("Configuring proxies: %s" % config.proxies)
opener = build_opener(*handlers, ssl_context=config.ssl_context)
headers = config.headers
if headers is None:
headers = {}
request = Request_(url, data, headers)
# Open the URL and check the response.
return_code = 0
return_message = ''
response = None
try:
response = opener.open(request)
return_message = response.msg
return_code = response.code
if log.isEnabledFor(logging.DEBUG):
for index, cookie in enumerate(cj):
log.debug("%s : %s", index, cookie)
except HTTPError_ as exc:
return_code = exc.code
return_message = "Error: %s" % exc.msg
if log.isEnabledFor(logging.DEBUG):
log.debug("%s %s", exc.code, exc.msg)
except Exception as exc:
return_message = "Error: %s" % exc.__str__()
if log.isEnabledFor(logging.DEBUG):
import traceback
log.debug(traceback.format_exc())
return (return_code, return_message, response)
def _should_use_proxy(url, no_proxy=None):
"""Determines whether a proxy should be used to open a connection to the
specified URL, based on the value of the no_proxy environment variable.
@param url: URL
@type url: basestring or urllib2.Request
"""
if no_proxy is None:
no_proxy_effective = os.environ.get('no_proxy', '')
else:
no_proxy_effective = no_proxy
urlObj = urlparse_.urlparse(_url_as_string(url))
for np in [h.strip() for h in no_proxy_effective.split(',')]:
if urlObj.hostname == np:
return False
return True
def _url_as_string(url):
"""Returns the URL string from a URL value that is either a string or
    urllib2.Request.
@param url: URL
@type url: basestring or urllib2.Request
@return: URL string
@rtype: basestring
"""
if isinstance(url, Request_):
return url.get_full_url()
elif isinstance(url, str):
return url
else:
raise TypeError("Expected type %r or %r" %
(str, Request_))
class Configuration(object):
"""Connection configuration.
"""
def __init__(self, ssl_context, debug=False, proxies=None, no_proxy=None,
cookie=None, http_basicauth=None, headers=None):
"""
@param ssl_context: SSL context to use with this configuration
@type ssl_context: OpenSSL.SSL.Context
@param debug: if True, output debugging information
@type debug: bool
        @param proxies: proxies to use for the connection
@type proxies: dict with basestring keys and values
@param no_proxy: hosts for which a proxy should not be used
@type no_proxy: basestring
@param cookie: cookies to set for request
@type cookie: cookielib.CookieJar (python 3 - http.cookiejar)
@param http_basicauth: http authentication, or None
@type http_basicauth: tuple of (username,password)
@param headers: http headers
@type headers: dict
"""
self.ssl_context = ssl_context
self.debug = debug
self.proxies = proxies
self.no_proxy = no_proxy
self.cookie = cookie
self.http_basicauth = http_basicauth
self.headers = headers
def main():
'''Utility to fetch data using HTTP or HTTPS GET from a specified URL.
'''
parser = OptionParser(usage="%prog [options] url")
parser.add_option("-c", "--certificate", dest="cert_file", metavar="FILE",
default=os.path.expanduser("~/credentials.pem"),
help="Certificate file - defaults to $HOME/credentials.pem")
parser.add_option("-k", "--private-key", dest="key_file", metavar="FILE",
default=None,
help="Private key file - defaults to the certificate file")
parser.add_option("-t", "--ca-certificate-dir", dest="ca_dir",
metavar="PATH",
default=None,
help="Trusted CA certificate file directory")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
default=False,
help="Print debug information.")
parser.add_option("-p", "--post-data-file", dest="data_file",
metavar="FILE", default=None,
help="POST data file")
parser.add_option("-f", "--fetch", dest="output_file", metavar="FILE",
default=None, help="Output file")
parser.add_option("-n", "--no-verify-peer", action="store_true",
dest="no_verify_peer", default=False,
help="Skip verification of peer certificate.")
parser.add_option("-a", "--basicauth", dest="basicauth",
metavar="USER:PASSWD",
default=None,
help="HTTP authentication credentials")
parser.add_option("--header", action="append", dest="headers",
metavar="HEADER: VALUE",
help="Add HTTP header to request")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Incorrect number of arguments")
url = args[0]
if options.debug:
logging.getLogger().setLevel(logging.DEBUG)
if options.key_file and os.path.exists(options.key_file):
key_file = options.key_file
else:
key_file = None
if options.cert_file and os.path.exists(options.cert_file):
cert_file = options.cert_file
else:
cert_file = None
if options.ca_dir and os.path.exists(options.ca_dir):
ca_dir = options.ca_dir
else:
ca_dir = None
verify_peer = not options.no_verify_peer
if options.data_file and os.path.exists(options.data_file):
data_file = open(options.data_file)
data = data_file.read()
data_file.close()
else:
data = None
if options.basicauth:
http_basicauth = options.basicauth.split(':', 1)
else:
http_basicauth = None
headers = {}
if options.headers:
for h in options.headers:
key, val = h.split(':', 1)
headers[key.strip()] = val.lstrip()
# If a private key file is not specified, the key is assumed to be stored in
# the certificate file.
ssl_context = ssl_context_util.make_ssl_context(key_file,
cert_file,
None,
ca_dir,
verify_peer,
url)
config = Configuration(ssl_context,
options.debug,
http_basicauth=http_basicauth,
headers=headers)
if options.output_file:
return_code, return_message = fetch_from_url_to_file(
url,
config,
options.output_file,
data)[:2]
raise SystemExit(return_code, return_message)
else:
data = fetch_from_url(url, config)
print(data)
if __name__=='__main__':
logging.basicConfig()
main()
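
# Illustrative sketch (not part of the original module): programmatic use of
# fetch_from_url() outside of main().  The make_ssl_context() arguments mirror
# the call in main() above; the URL is a placeholder and peer verification is
# disabled purely for brevity.
def _fetch_example(url='https://www.example.org/'):
    ssl_context = ssl_context_util.make_ssl_context(None, None, None, None,
                                                    False, url)
    config = Configuration(ssl_context, debug=False)
    return fetch_from_url(url, config)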
| 15,736 | Python | .py | 359 | 33.367688 | 82 | 0.620608 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,489 | __init__.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/__init__.py |
"""ndg_httpsclient - PyOpenSSL utility to make a httplib-like interface suitable
for use with urllib2
"""
__author__ = "P J Kershaw (STFC) and Richard Wilkinson (Tessella)"
__date__ = "09/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
| 390 | Python | .py | 9 | 42.333333 | 80 | 0.708661 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,490 | ssl_socket.py | CouchPotato_CouchPotatoServer/libs/ndg/httpsclient/ssl_socket.py |
"""PyOpenSSL utilities including HTTPSSocket class which wraps PyOpenSSL
SSL connection into a httplib-like interface suitable for use with urllib2
"""
__author__ = "P J Kershaw"
__date__ = "21/12/10"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
from datetime import datetime
import logging
import socket
from io import BytesIO
from OpenSSL import SSL
log = logging.getLogger(__name__)
class SSLSocket(object):
"""SSL Socket class wraps pyOpenSSL's SSL.Connection class implementing
the makefile method so that it is compatible with the standard socket
interface and usable with httplib.
@cvar default_buf_size: default buffer size for recv operations in the
makefile method
@type default_buf_size: int
"""
default_buf_size = 8192
def __init__(self, ctx, sock=None):
"""Create SSL socket object
@param ctx: SSL context
@type ctx: OpenSSL.SSL.Context
@param sock: underlying socket object
@type sock: socket.socket
"""
if sock is not None:
self.socket = sock
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.__ssl_conn = SSL.Connection(ctx, self.socket)
self.buf_size = self.__class__.default_buf_size
self._makefile_refs = 0
def __del__(self):
"""Close underlying socket when this object goes out of scope
"""
self.close()
@property
def buf_size(self):
"""Buffer size for makefile method recv() operations"""
return self.__buf_size
@buf_size.setter
def buf_size(self, value):
"""Buffer size for makefile method recv() operations"""
if not isinstance(value, int):
raise TypeError('Expecting int type for "buf_size"; '
'got %r instead' % type(value))
self.__buf_size = value
def close(self):
"""Shutdown the SSL connection and call the close method of the
underlying socket"""
if self._makefile_refs < 1:
try:
self.__ssl_conn.shutdown()
except (SSL.Error, SSL.SysCallError):
# Make errors on shutdown non-fatal
pass
else:
self._makefile_refs -= 1
def set_shutdown(self, mode):
"""Set the shutdown state of the Connection.
@param mode: bit vector of either or both of SENT_SHUTDOWN and
RECEIVED_SHUTDOWN
"""
self.__ssl_conn.set_shutdown(mode)
def get_shutdown(self):
"""Get the shutdown state of the Connection.
@return: bit vector of either or both of SENT_SHUTDOWN and
RECEIVED_SHUTDOWN
"""
return self.__ssl_conn.get_shutdown()
def bind(self, addr):
"""bind to the given address - calls method of the underlying socket
@param addr: address/port number tuple
@type addr: tuple"""
self.__ssl_conn.bind(addr)
def listen(self, backlog):
"""Listen for connections made to the socket.
@param backlog: specifies the maximum number of queued connections and
should be at least 1; the maximum value is system-dependent (usually 5).
@param backlog: int
"""
self.__ssl_conn.listen(backlog)
def set_accept_state(self):
"""Set the connection to work in server mode. The handshake will be
handled automatically by read/write"""
self.__ssl_conn.set_accept_state()
def accept(self):
"""Accept an SSL connection.
@return: pair (ssl, addr) where ssl is a new SSL connection object and
addr is the address bound to the other end of the SSL connection.
@rtype: tuple
"""
return self.__ssl_conn.accept()
def set_connect_state(self):
"""Set the connection to work in client mode. The handshake will be
handled automatically by read/write"""
self.__ssl_conn.set_connect_state()
def connect(self, addr):
"""Call the connect method of the underlying socket and set up SSL on
the socket, using the Context object supplied to this Connection object
at creation.
@param addr: address/port number pair
@type addr: tuple
"""
self.__ssl_conn.connect(addr)
def shutdown(self, how):
"""Send the shutdown message to the Connection.
@param how: for socket.socket this flag determines whether read, write
or both type operations are supported. OpenSSL.SSL.Connection doesn't
support this so this parameter is IGNORED
@return: true if the shutdown message exchange is completed and false
otherwise (in which case you call recv() or send() when the connection
        becomes readable/writeable).
@rtype: bool
"""
return self.__ssl_conn.shutdown()
def renegotiate(self):
"""Renegotiate this connection's SSL parameters."""
return self.__ssl_conn.renegotiate()
def pending(self):
"""@return: numbers of bytes that can be safely read from the SSL
buffer.
@rtype: int
"""
return self.__ssl_conn.pending()
def send(self, data, *flags_arg):
"""Send data to the socket. Nb. The optional flags argument is ignored.
- retained for compatibility with socket.socket interface
@param data: data to send down the socket
@type data: string
"""
return self.__ssl_conn.send(data)
def sendall(self, data):
self.__ssl_conn.sendall(data)
def recv(self, size=default_buf_size):
"""Receive data from the Connection.
@param size: The maximum amount of data to be received at once
@type size: int
@return: data received.
@rtype: string
"""
return self.__ssl_conn.recv(size)
def setblocking(self, mode):
"""Set this connection's underlying socket blocking _mode_.
@param mode: blocking mode
@type mode: int
"""
self.__ssl_conn.setblocking(mode)
def fileno(self):
"""
@return: file descriptor number for the underlying socket
@rtype: int
"""
return self.__ssl_conn.fileno()
def getsockopt(self, *args):
"""See socket.socket.getsockopt
"""
return self.__ssl_conn.getsockopt(*args)
def setsockopt(self, *args):
"""See socket.socket.setsockopt
@return: value of the given socket option
@rtype: int/string
"""
return self.__ssl_conn.setsockopt(*args)
def state_string(self):
"""Return the SSL state of this connection."""
return self.__ssl_conn.state_string()
def makefile(self, *args):
"""Specific to Python socket API and required by httplib: convert
response into a file-like object. This implementation reads using recv
        and copies the output into a BytesIO buffer to simulate a file object
        for consumption by httplib
        Nb. Ignoring the optional file open mode (BytesIO is generic and will
        open for read and write unless a string is passed to the constructor)
        and buffer size - httplib sets a zero buffer size which results in recv
        reading nothing
        @return: file object for data returned from socket
        @rtype: io.BytesIO
"""
self._makefile_refs += 1
# Optimisation
_buf_size = self.buf_size
i=0
stream = BytesIO()
startTime = datetime.utcnow()
try:
dat = self.__ssl_conn.recv(_buf_size)
while dat:
i+=1
stream.write(dat)
dat = self.__ssl_conn.recv(_buf_size)
except (SSL.ZeroReturnError, SSL.SysCallError):
# Connection is closed - assuming here that all is well and full
# response has been received. httplib will catch an error in
# incomplete content since it checks the content-length header
# against the actual length of data received
pass
if log.getEffectiveLevel() <= logging.DEBUG:
log.debug("Socket.makefile %d recv calls completed in %s", i,
datetime.utcnow() - startTime)
# Make sure to rewind the buffer otherwise consumers of the content will
# read from the end of the buffer
stream.seek(0)
return stream
def getsockname(self):
"""
@return: the socket's own address
        @rtype: tuple
"""
return self.__ssl_conn.getsockname()
def getpeername(self):
"""
@return: remote address to which the socket is connected
"""
return self.__ssl_conn.getpeername()
def get_context(self):
'''Retrieve the Context object associated with this Connection. '''
return self.__ssl_conn.get_context()
def get_peer_certificate(self):
'''Retrieve the other side's certificate (if any) '''
return self.__ssl_conn.get_peer_certificate()
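
# Illustrative sketch (not part of the original module): opening a TLS
# connection with SSLSocket and reading the response through makefile().
# Host, port and the request bytes are example values only; pyOpenSSL's
# Connection.connect() is assumed to put the connection into client mode.
def _ssl_socket_example(host='www.example.org', port=443):
    ctx = SSL.Context(SSL.SSLv23_METHOD)
    ssl_sock = SSLSocket(ctx)
    ssl_sock.connect((host, port))
    ssl_sock.send(b'GET / HTTP/1.0\r\nHost: ' + host.encode('ascii') +
                  b'\r\n\r\n')
    response = ssl_sock.makefile().read()
    ssl_sock.close()
    return response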
| 9,344 | Python | .py | 228 | 32.280702 | 80 | 0.629945 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,491 | guess.py | CouchPotato_CouchPotatoServer/libs/guessit/guess.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, s, u, base_text_type
from guessit.language import Language
from guessit.country import Country
import json
import datetime
import logging
log = logging.getLogger(__name__)
class Guess(UnicodeMixin, dict):
"""A Guess is a dictionary which has an associated confidence for each of
its values.
As it is a subclass of dict, you can use it everywhere you expect a
simple dict."""
def __init__(self, *args, **kwargs):
try:
confidence = kwargs.pop('confidence')
except KeyError:
confidence = 0
try:
raw = kwargs.pop('raw')
except KeyError:
raw = None
dict.__init__(self, *args, **kwargs)
self._confidence = {}
self._raw = {}
for prop in self:
self._confidence[prop] = confidence
self._raw[prop] = raw
def to_dict(self, advanced=False):
data = dict(self)
for prop, value in data.items():
if isinstance(value, datetime.date):
data[prop] = value.isoformat()
elif isinstance(value, (Language, Country, base_text_type)):
data[prop] = u(value)
elif isinstance(value, list):
data[prop] = [u(x) for x in value]
if advanced:
data[prop] = {"value": data[prop], "raw": self.raw(prop), "confidence": self.confidence(prop)}
return data
def nice_string(self, advanced=False):
if advanced:
data = self.to_dict(advanced)
return json.dumps(data, indent=4)
else:
data = self.to_dict()
parts = json.dumps(data, indent=4).split('\n')
for i, p in enumerate(parts):
if p[:5] != ' "':
continue
prop = p.split('"')[1]
parts[i] = (' [%.2f] "' % self.confidence(prop)) + p[5:]
return '\n'.join(parts)
def __unicode__(self):
return u(self.to_dict())
def confidence(self, prop):
return self._confidence.get(prop, -1)
def raw(self, prop):
return self._raw.get(prop, None)
def set(self, prop, value, confidence=None, raw=None):
self[prop] = value
if confidence is not None:
self._confidence[prop] = confidence
if raw is not None:
self._raw[prop] = raw
def set_confidence(self, prop, value):
self._confidence[prop] = value
def set_raw(self, prop, value):
self._raw[prop] = value
def update(self, other, confidence=None, raw=None):
dict.update(self, other)
if isinstance(other, Guess):
for prop in other:
self._confidence[prop] = other.confidence(prop)
self._raw[prop] = other.raw(prop)
if confidence is not None:
for prop in other:
self._confidence[prop] = confidence
if raw is not None:
for prop in other:
self._raw[prop] = raw
def update_highest_confidence(self, other):
"""Update this guess with the values from the given one. In case
there is property present in both, only the one with the highest one
is kept."""
if not isinstance(other, Guess):
raise ValueError('Can only call this function on Guess instances')
for prop in other:
if prop in self and self.confidence(prop) >= other.confidence(prop):
continue
self[prop] = other[prop]
self._confidence[prop] = other.confidence(prop)
self._raw[prop] = other.raw(prop)
def choose_int(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are integers."""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if (v1 == v2):
return (v1, 1 - (1 - c1) * (1 - c2))
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1)
def choose_string(g1, g2):
"""Function used by merge_similar_guesses to choose between 2 possible
properties when they are strings.
If the 2 strings are similar, or one is contained in the other, the latter is returned
with an increased confidence.
If the 2 strings are dissimilar, the one with the higher confidence is returned, with
a weaker confidence.
Note that here, 'similar' means that 2 strings are either equal, or that they
differ very little, such as one string being the other one with the 'the' word
prepended to it.
>>> s(choose_string(('Hello', 0.75), ('World', 0.5)))
('Hello', 0.25)
>>> s(choose_string(('Hello', 0.5), ('hello', 0.5)))
('Hello', 0.75)
>>> s(choose_string(('Hello', 0.4), ('Hello World', 0.4)))
('Hello', 0.64)
>>> s(choose_string(('simpsons', 0.5), ('The Simpsons', 0.5)))
('The Simpsons', 0.75)
"""
v1, c1 = g1 # value, confidence
v2, c2 = g2
if not v1:
return g2
elif not v2:
return g1
v1, v2 = v1.strip(), v2.strip()
v1l, v2l = v1.lower(), v2.lower()
combined_prob = 1 - (1 - c1) * (1 - c2)
if v1l == v2l:
return (v1, combined_prob)
# check for common patterns
elif v1l == 'the ' + v2l:
return (v1, combined_prob)
elif v2l == 'the ' + v1l:
return (v2, combined_prob)
# if one string is contained in the other, return the shortest one
elif v2l in v1l:
return (v2, combined_prob)
elif v1l in v2l:
return (v1, combined_prob)
# in case of conflict, return the one with highest confidence
else:
if c1 > c2:
return (v1, c1 - c2)
else:
return (v2, c2 - c1)
def _merge_similar_guesses_nocheck(guesses, prop, choose):
"""Take a list of guesses and merge those which have the same properties,
increasing or decreasing the confidence depending on whether their values
are similar.
This function assumes there are at least 2 valid guesses."""
similar = [guess for guess in guesses if prop in guess]
g1, g2 = similar[0], similar[1]
other_props = set(g1) & set(g2) - set([prop])
if other_props:
log.debug('guess 1: %s' % g1)
log.debug('guess 2: %s' % g2)
        # use a different loop variable here: rebinding 'prop' would make the
        # merge below operate on the wrong property
        for other_prop in other_props:
            if g1[other_prop] != g2[other_prop]:
                log.warning('both guesses to be merged have more than one '
                            'different property in common, bailing out...')
return
# merge all props of s2 into s1, updating the confidence for the
# considered property
v1, v2 = g1[prop], g2[prop]
c1, c2 = g1.confidence(prop), g2.confidence(prop)
new_value, new_confidence = choose((v1, c1), (v2, c2))
if new_confidence >= c1:
msg = "Updating matching property '%s' with confidence %.2f"
else:
msg = "Updating non-matching property '%s' with confidence %.2f"
log.debug(msg % (prop, new_confidence))
g2[prop] = new_value
g2.set_confidence(prop, new_confidence)
g1.update(g2)
guesses.remove(g2)
def merge_similar_guesses(guesses, prop, choose):
"""Take a list of guesses and merge those which have the same properties,
increasing or decreasing the confidence depending on whether their values
are similar."""
similar = [guess for guess in guesses if prop in guess]
if len(similar) < 2:
# nothing to merge
return
if len(similar) == 2:
_merge_similar_guesses_nocheck(guesses, prop, choose)
if len(similar) > 2:
log.debug('complex merge, trying our best...')
before = len(guesses)
_merge_similar_guesses_nocheck(guesses, prop, choose)
after = len(guesses)
if after < before:
# recurse only when the previous call actually did something,
# otherwise we end up in an infinite loop
merge_similar_guesses(guesses, prop, choose)
def merge_all(guesses, append=None):
"""Merge all the guesses in a single result, remove very unlikely values,
and return it.
You can specify a list of properties that should be appended into a list
instead of being merged.
>>> s(merge_all([ Guess({'season': 2}, confidence=0.6),
... Guess({'episodeNumber': 13}, confidence=0.8) ]))
{'season': 2, 'episodeNumber': 13}
>>> s(merge_all([ Guess({'episodeNumber': 27}, confidence=0.02),
... Guess({'season': 1}, confidence=0.2) ]))
{'season': 1}
>>> s(merge_all([ Guess({'other': 'PROPER'}, confidence=0.8),
... Guess({'releaseGroup': '2HD'}, confidence=0.8) ],
... append=['other']))
{'releaseGroup': '2HD', 'other': ['PROPER']}
"""
if not guesses:
return Guess()
result = guesses[0]
if append is None:
append = []
for g in guesses[1:]:
# first append our appendable properties
for prop in append:
if prop in g:
result.set(prop, result.get(prop, []) + [g[prop]],
# TODO: what to do with confidence here? maybe an
# arithmetic mean...
confidence=g.confidence(prop),
raw=g.raw(prop))
del g[prop]
# then merge the remaining ones
dups = set(result) & set(g)
if dups:
log.warning('duplicate properties %s in merged result...' % [ (result[p], g[p]) for p in dups] )
result.update_highest_confidence(g)
# delete very unlikely values
for p in list(result.keys()):
if result.confidence(p) < 0.05:
del result[p]
# make sure our appendable properties contain unique values
for prop in append:
try:
value = result[prop]
if isinstance(value, list):
result[prop] = list(set(value))
else:
result[prop] = [ value ]
except KeyError:
pass
return result
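
# Illustrative sketch (not part of the original module): building two Guess
# objects by hand and combining them with merge_all().  Property names and
# confidences are made up for the example.
def _merge_example():
    g1 = Guess({'title': 'Dark City', 'year': 1998}, confidence=0.8)
    g2 = Guess({'year': 1998, 'format': 'BluRay'}, confidence=0.5)
    merged = merge_all([g1, g2])
    # 'year' appears in both guesses, so the value with the highest
    # confidence (0.8 here) is the one that survives the merge
    return merged, merged.confidence('year')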
| 11,011 | Python | .py | 270 | 32.288889 | 110 | 0.598983 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,492 | slogging.py | CouchPotato_CouchPotatoServer/libs/guessit/slogging.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import logging
import sys
import os, os.path
GREEN_FONT = "\x1B[0;32m"
YELLOW_FONT = "\x1B[0;33m"
BLUE_FONT = "\x1B[0;34m"
RED_FONT = "\x1B[0;31m"
RESET_FONT = "\x1B[0m"
def setupLogging(colored=True, with_time=False, with_thread=False, filename=None, with_lineno=False):
"""Set up a nice colored logger as the main application logger."""
class SimpleFormatter(logging.Formatter):
def __init__(self, with_time, with_thread):
self.fmt = (('%(asctime)s ' if with_time else '') +
'%(levelname)-8s ' +
'[%(name)s:%(funcName)s' +
(':%(lineno)s' if with_lineno else '') + ']' +
('[%(threadName)s]' if with_thread else '') +
' -- %(message)s')
logging.Formatter.__init__(self, self.fmt)
class ColoredFormatter(logging.Formatter):
def __init__(self, with_time, with_thread):
self.fmt = (('%(asctime)s ' if with_time else '') +
'-CC-%(levelname)-8s ' +
BLUE_FONT + '[%(name)s:%(funcName)s' +
(':%(lineno)s' if with_lineno else '') + ']' +
RESET_FONT + ('[%(threadName)s]' if with_thread else '') +
' -- %(message)s')
logging.Formatter.__init__(self, self.fmt)
def format(self, record):
modpath = record.name.split('.')
record.mname = modpath[0]
record.mmodule = '.'.join(modpath[1:])
result = logging.Formatter.format(self, record)
if record.levelno == logging.DEBUG:
color = BLUE_FONT
elif record.levelno == logging.INFO:
color = GREEN_FONT
elif record.levelno == logging.WARNING:
color = YELLOW_FONT
else:
color = RED_FONT
result = result.replace('-CC-', color)
return result
if filename is not None:
# make sure we can write to our log file
logdir = os.path.dirname(filename)
        if logdir and not os.path.exists(logdir):
os.makedirs(logdir)
ch = logging.FileHandler(filename, mode='w')
ch.setFormatter(SimpleFormatter(with_time, with_thread))
else:
ch = logging.StreamHandler()
if colored and sys.platform != 'win32':
ch.setFormatter(ColoredFormatter(with_time, with_thread))
else:
ch.setFormatter(SimpleFormatter(with_time, with_thread))
logging.getLogger().addHandler(ch)
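
# Illustrative sketch (not part of the original module): a typical one-off
# setup using the colored console formatter, then logging through a named
# logger.  The logger name is an arbitrary example.
def _logging_example():
    setupLogging(colored=True, with_time=True)
    logging.getLogger().setLevel(logging.DEBUG)
    log = logging.getLogger('guessit.example')
    log.info('logger configured')
    log.debug('debug output goes through the same handler')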
| 3,388 | Python | .py | 77 | 34.792208 | 101 | 0.592907 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,493 | __main__.py | CouchPotato_CouchPotatoServer/libs/guessit/__main__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from __future__ import print_function
from guessit import u
from guessit import slogging, guess_file_info
from optparse import OptionParser
import logging
import sys
import os
import locale
def detect_filename(filename, filetype, info=['filename'], advanced = False):
filename = u(filename)
print('For:', filename)
print('GuessIt found:', guess_file_info(filename, filetype, info).nice_string(advanced))
def run_demo(episodes=True, movies=True, advanced=False):
# NOTE: tests should not be added here but rather in the tests/ folder
# this is just intended as a quick example
if episodes:
testeps = [ 'Series/Californication/Season 2/Californication.2x05.Vaginatown.HDTV.XviD-0TV.[tvu.org.ru].avi',
'Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi',
'Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.[tvu.org.ru].avi',
'Series/Duckman/Duckman - 101 (01) - 20021107 - I, Duckman.avi',
'Series/Duckman/Duckman - S1E13 Joking The Chicken (unedited).avi',
'Series/Simpsons/The_simpsons_s13e18_-_i_am_furious_yellow.mpg',
'Series/Simpsons/Saison 12 Français/Simpsons,.The.12x08.A.Bas.Le.Sergent.Skinner.FR.[tvu.org.ru].avi',
'Series/Dr._Slump_-_002_DVB-Rip_Catalan_by_kelf.avi',
'Series/Kaamelott/Kaamelott - Livre V - Second Volet - HD 704x396 Xvid 2 pass - Son 5.1 - TntRip by Slurm.avi'
]
for f in testeps:
print('-'*80)
detect_filename(f, filetype='episode', advanced=advanced)
if movies:
testmovies = [ 'Movies/Fear and Loathing in Las Vegas (1998)/Fear.and.Loathing.in.Las.Vegas.720p.HDDVD.DTS.x264-ESiR.mkv',
'Movies/El Dia de la Bestia (1995)/El.dia.de.la.bestia.DVDrip.Spanish.DivX.by.Artik[SEDG].avi',
'Movies/Blade Runner (1982)/Blade.Runner.(1982).(Director\'s.Cut).CD1.DVDRip.XviD.AC3-WAF.avi',
'Movies/Dark City (1998)/Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv',
'Movies/Sin City (BluRay) (2005)/Sin.City.2005.BDRip.720p.x264.AC3-SEPTiC.mkv',
'Movies/Borat (2006)/Borat.(2006).R5.PROPER.REPACK.DVDRip.XviD-PUKKA.avi', # FIXME: PROPER and R5 get overwritten
'[XCT].Le.Prestige.(The.Prestige).DVDRip.[x264.HP.He-Aac.{Fr-Eng}.St{Fr-Eng}.Chaps].mkv', # FIXME: title gets overwritten
'Battle Royale (2000)/Battle.Royale.(Batoru.Rowaiaru).(2000).(Special.Edition).CD1of2.DVDRiP.XviD-[ZeaL].avi',
'Movies/Brazil (1985)/Brazil_Criterion_Edition_(1985).CD2.English.srt',
'Movies/Persepolis (2007)/[XCT] Persepolis [H264+Aac-128(Fr-Eng)+ST(Fr-Eng)+Ind].mkv',
'Movies/Toy Story (1995)/Toy Story [HDTV 720p English-Spanish].mkv',
'Movies/Pirates of the Caribbean: The Curse of the Black Pearl (2003)/Pirates.Of.The.Carribean.DC.2003.iNT.DVDRip.XviD.AC3-NDRT.CD1.avi',
'Movies/Office Space (1999)/Office.Space.[Dual-DVDRip].[Spanish-English].[XviD-AC3-AC3].[by.Oswald].avi',
'Movies/The NeverEnding Story (1984)/The.NeverEnding.Story.1.1984.DVDRip.AC3.Xvid-Monteque.avi',
'Movies/Juno (2007)/Juno KLAXXON.avi',
'Movies/Chat noir, chat blanc (1998)/Chat noir, Chat blanc - Emir Kusturica (VO - VF - sub FR - Chapters).mkv',
'Movies/Wild Zero (2000)/Wild.Zero.DVDivX-EPiC.srt',
'Movies/El Bosque Animado (1987)/El.Bosque.Animado.[Jose.Luis.Cuerda.1987].[Xvid-Dvdrip-720x432].avi',
'testsmewt_bugs/movies/Baraka_Edition_Collector.avi'
]
for f in testmovies:
print('-'*80)
detect_filename(f, filetype = 'movie', advanced = advanced)
def main():
slogging.setupLogging()
# see http://bugs.python.org/issue2128
if sys.version_info.major < 3 and os.name == 'nt':
for i, a in enumerate(sys.argv):
sys.argv[i] = a.decode(locale.getpreferredencoding())
parser = OptionParser(usage = 'usage: %prog [options] file1 [file2...]')
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help = 'display debug output')
parser.add_option('-i', '--info', dest = 'info', default = 'filename',
help = 'the desired information type: filename, hash_mpc or a hash from python\'s '
'hashlib module, such as hash_md5, hash_sha1, ...; or a list of any of '
'them, comma-separated')
parser.add_option('-t', '--type', dest = 'filetype', default = 'autodetect',
help = 'the suggested file type: movie, episode or autodetect')
parser.add_option('-a', '--advanced', dest = 'advanced', action='store_true', default = False,
help = 'display advanced information for filename guesses, as json output')
parser.add_option('-d', '--demo', action='store_true', dest='demo', default=False,
help = 'run a few builtin tests instead of analyzing a file')
options, args = parser.parse_args()
if options.verbose:
logging.getLogger('guessit').setLevel(logging.DEBUG)
if options.demo:
run_demo(episodes=True, movies=True, advanced=options.advanced)
else:
if args:
for filename in args:
detect_filename(filename,
filetype = options.filetype,
info = options.info.split(','),
advanced = options.advanced)
else:
parser.print_help()
if __name__ == '__main__':
main()
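
# Illustrative usage notes (not part of the original module): typical
# command-line invocations of this entry point; the file names are taken from
# the demo lists above and serve only as placeholders.
#
#   python -m guessit "Series/Treme/Treme.1x03.Right.Place,.Wrong.Time.HDTV.XviD-NoTV.avi"
#   python -m guessit -t movie -a "Dark.City.(1998).DC.BDRip.720p.DTS.X264-CHD.mkv"
#   python -m guessit --demo --verbose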
| 6,835 | Python | .py | 108 | 51.481481 | 160 | 0.622743 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,494 | patterns.py | CouchPotato_CouchPotatoServer/libs/guessit/patterns.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
# Copyright (c) 2011 Ricard Marxer <ricardmp@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import re
subtitle_exts = [ 'srt', 'idx', 'sub', 'ssa' ]
info_exts = [ 'nfo' ]
video_exts = ['3g2', '3gp', '3gp2', 'asf', 'avi', 'divx', 'flv', 'm4v', 'mk2',
'mka', 'mkv', 'mov', 'mp4', 'mp4a', 'mpeg', 'mpg', 'ogg', 'ogm',
'ogv', 'qt', 'ra', 'ram', 'rm', 'ts', 'wav', 'webm', 'wma', 'wmv']
group_delimiters = [ '()', '[]', '{}' ]
# separator character regexp
sep = r'[][,)(}{+ /\._-]' # regexp art, hehe :D
# character used to represent a deleted char (when matching groups)
deleted = '_'
# format: [ (regexp, confidence, span_adjust) ]
episode_rexps = [ # ... Season 2 ...
(r'season (?P<season>[0-9]+)', 1.0, (0, 0)),
(r'saison (?P<season>[0-9]+)', 1.0, (0, 0)),
# ... s02e13 ...
(r'[Ss](?P<season>[0-9]{1,3})[^0-9]?(?P<episodeNumber>(?:-?[eE-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)),
# ... s03-x02 ... # FIXME: redundant? remove it?
#(r'[Ss](?P<season>[0-9]{1,3})[^0-9]?(?P<bonusNumber>(?:-?[xX-][0-9]{1,3})+)[^0-9]', 1.0, (0, -1)),
# ... 2x13 ...
(r'[^0-9](?P<season>[0-9]{1,2})[^0-9 .-]?(?P<episodeNumber>(?:-?[xX][0-9]{1,3})+)[^0-9]', 1.0, (1, -1)),
# ... s02 ...
#(sep + r's(?P<season>[0-9]{1,2})' + sep, 0.6, (1, -1)),
(r's(?P<season>[0-9]{1,2})[^0-9]', 0.6, (0, -1)),
# v2 or v3 for some mangas which have multiples rips
(r'(?P<episodeNumber>[0-9]{1,3})v[23]' + sep, 0.6, (0, 0)),
# ... ep 23 ...
('ep' + sep + r'(?P<episodeNumber>[0-9]{1,2})[^0-9]', 0.7, (0, -1)),
# ... e13 ... for a mini-series without a season number
(sep + r'e(?P<episodeNumber>[0-9]{1,2})' + sep, 0.6, (1, -1))
]
weak_episode_rexps = [ # ... 213 or 0106 ...
(sep + r'(?P<episodeNumber>[0-9]{2,4})' + sep, (1, -1))
]
non_episode_title = [ 'extras', 'rip' ]
video_rexps = [ # cd number
(r'cd ?(?P<cdNumber>[0-9])( ?of ?(?P<cdNumberTotal>[0-9]))?', 1.0, (0, 0)),
(r'(?P<cdNumberTotal>[1-9]) cds?', 0.9, (0, 0)),
# special editions
(r'edition' + sep + r'(?P<edition>collector)', 1.0, (0, 0)),
(r'(?P<edition>collector)' + sep + 'edition', 1.0, (0, 0)),
(r'(?P<edition>special)' + sep + 'edition', 1.0, (0, 0)),
(r'(?P<edition>criterion)' + sep + 'edition', 1.0, (0, 0)),
# director's cut
(r"(?P<edition>director'?s?" + sep + "cut)", 1.0, (0, 0)),
# video size
(r'(?P<width>[0-9]{3,4})x(?P<height>[0-9]{3,4})', 0.9, (0, 0)),
# website
(r'(?P<website>www(\.[a-zA-Z0-9]+){2,3})', 0.8, (0, 0)),
# bonusNumber: ... x01 ...
(r'x(?P<bonusNumber>[0-9]{1,2})', 1.0, (0, 0)),
# filmNumber: ... f01 ...
(r'f(?P<filmNumber>[0-9]{1,2})', 1.0, (0, 0))
]
websites = [ 'tvu.org.ru', 'emule-island.com', 'UsaBit.com', 'www.divx-overnet.com',
'sharethefiles.com' ]
unlikely_series = [ 'series' ]
# prop_multi is a dict of { property_name: { canonical_form: [ pattern ] } }
# pattern is a string considered as a regexp, with the addition that dashes are
# replaced with '([ \.-_])?' which matches more types of separators (or none)
# note: simpler patterns need to be at the end of the list to not shadow more
# complete ones, eg: 'AAC' needs to come after 'He-AAC'
# ie: from most specific to less specific
prop_multi = { 'format': { 'DVD': [ 'DVD', 'DVD-Rip', 'VIDEO-TS', 'DVDivX' ],
'HD-DVD': [ 'HD-(?:DVD)?-Rip', 'HD-DVD' ],
'BluRay': [ 'Blu-ray', 'B[DR]Rip' ],
'HDTV': [ 'HD-TV' ],
'DVB': [ 'DVB-Rip', 'DVB', 'PD-TV' ],
'WEBRip': [ 'WEB-Rip' ],
'Screener': [ 'DVD-SCR', 'Screener' ],
'VHS': [ 'VHS' ],
'WEB-DL': [ 'WEB-DL' ] },
'is3D': { True: [ '3D' ] },
'screenSize': { '480p': [ '480[pi]?' ],
'720p': [ '720[pi]?' ],
'1080i': [ '1080i' ],
'1080p': [ '1080p', '1080[^i]' ] },
'videoCodec': { 'XviD': [ 'Xvid' ],
'DivX': [ 'DVDivX', 'DivX' ],
'h264': [ '[hx]-264' ],
'Rv10': [ 'Rv10' ],
'Mpeg2': [ 'Mpeg2' ] },
# has nothing to do here (or on filenames for that matter), but some
# releases use it and it helps to identify release groups, so we adapt
'videoApi': { 'DXVA': [ 'DXVA' ] },
'audioCodec': { 'AC3': [ 'AC3' ],
'DTS': [ 'DTS' ],
'AAC': [ 'He-AAC', 'AAC-He', 'AAC' ] },
'audioChannels': { '5.1': [ r'5\.1', 'DD5[._ ]1', '5ch' ] },
'episodeFormat': { 'Minisode': [ 'Minisodes?' ] }
}
# prop_single dict of { property_name: [ canonical_form ] }
prop_single = { 'releaseGroup': [ 'ESiR', 'WAF', 'SEPTiC', r'\[XCT\]', 'iNT', 'PUKKA',
'CHD', 'ViTE', 'TLF', 'FLAiTE',
'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS',
'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL',
'CtrlHD', 'POD', 'WiKi','IMMERSE', 'FQM',
'2HD', 'CTU', 'HALCYON', 'EbP', 'SiTV',
'HDBRiSe', 'AlFleNi-TeaM', 'EVOLVE', '0TV',
'TLA', 'NTB', 'ASAP', 'MOMENTUM', 'FoV', 'D-Z0N3',
'TrollHD', 'ECI'
],
# potentially confusing release group names (they are words)
'weakReleaseGroup': [ 'DEiTY', 'FiNaLe', 'UnSeeN', 'KiNGS', 'CLUE', 'DIMENSION',
'SAiNTS', 'ARROW', 'EuReKA', 'SiNNERS', 'DiRTY', 'REWARD',
'REPTiLE',
],
'other': [ 'PROPER', 'REPACK', 'LIMITED', 'DualAudio', 'Audiofixed', 'R5',
'complete', 'classic', # not so sure about these ones, could appear in a title
'ws' ] # widescreen
}
_dash = '-'
_psep = '[-. _]?'
def _to_rexp(prop):
return re.compile(prop.replace(_dash, _psep), re.IGNORECASE)
# properties_rexps dict of { property_name: { canonical_form: [ rexp ] } }
# containing the rexps compiled from both prop_multi and prop_single
properties_rexps = dict((type, dict((canonical_form,
[ _to_rexp(pattern) for pattern in patterns ])
for canonical_form, patterns in props.items()))
for type, props in prop_multi.items())
properties_rexps.update(dict((type, dict((canonical_form, [ _to_rexp(canonical_form) ])
for canonical_form in props))
for type, props in prop_single.items()))
def find_properties(string):
result = []
for property_name, props in properties_rexps.items():
# FIXME: this should be done in a more flexible way...
if property_name in ['weakReleaseGroup']:
continue
for canonical_form, rexps in props.items():
for value_rexp in rexps:
match = value_rexp.search(string)
if match:
start, end = match.span()
# make sure our word is always surrounded by separators
# note: sep is a regexp, but in this case using it as
# a char sequence achieves the same goal
if ((start > 0 and string[start-1] not in sep) or
(end < len(string) and string[end] not in sep)):
continue
result.append((property_name, canonical_form, start, end))
return result
property_synonyms = { 'Special Edition': [ 'Special' ],
'Collector Edition': [ 'Collector' ],
'Criterion Edition': [ 'Criterion' ]
}
def revert_synonyms():
reverse = {}
for canonical, synonyms in property_synonyms.items():
for synonym in synonyms:
reverse[synonym.lower()] = canonical
return reverse
reverse_synonyms = revert_synonyms()
def canonical_form(string):
return reverse_synonyms.get(string.lower(), string)
def compute_canonical_form(property_name, value):
"""Return the canonical form of a property given its type if it is a valid
one, None otherwise."""
if isinstance(value, basestring):
for canonical_form, rexps in properties_rexps[property_name].items():
for rexp in rexps:
if rexp.match(value):
return canonical_form
return None
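
# Illustrative sketch (not part of the original module): running the compiled
# property patterns over a release-name fragment.  The input string is an
# arbitrary example.
def _find_properties_example():
    matches = find_properties('Dark.City.1998.720p.BluRay.x264-CHD')
    # each entry is (property_name, canonical_form, start, end), e.g. the
    # screenSize '720p' and format 'BluRay' matches for the string above
    codec = compute_canonical_form('videoCodec', 'x264')  # -> 'h264'
    return matches, codec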
| 10,301 | Python | .py | 186 | 40.666667 | 122 | 0.472391 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,495 | language.py | CouchPotato_CouchPotatoServer/libs/guessit/language.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, u, s
from guessit.fileutils import load_file_in_same_dir
from guessit.textutils import find_words
from guessit.country import Country
import re
import logging
__all__ = [ 'is_iso_language', 'is_language', 'lang_set', 'Language',
'ALL_LANGUAGES', 'ALL_LANGUAGES_NAMES', 'UNDETERMINED',
'search_language', 'guess_language' ]
log = logging.getLogger(__name__)
# downloaded from http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
#
# Description of the fields:
# "An alpha-3 (bibliographic) code, an alpha-3 (terminologic) code (when given),
# an alpha-2 code (when given), an English name, and a French name of a language
# are all separated by pipe (|) characters."
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb': ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'br'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
return language.lower() in lng_all_names
def is_language(language):
return is_iso_language(language) or language in lng_exceptions
def lang_set(languages, strict=False):
"""Return a set of guessit.Language created from their given string
representation.
if strict is True, then this will raise an exception if any language
could not be identified.
"""
return set(Language(l, strict=strict) for l in languages)
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
def __init__(self, language, country=None, strict=False, scheme=None):
language = u(language.strip().lower())
with_country = (Language._with_country_regexp.match(language) or
Language._with_country_regexp2.match(language))
if with_country:
self.lang = Language(with_country.group(1)).lang
self.country = Country(with_country.group(2))
return
self.lang = None
self.country = Country(country) if country else None
# first look for scheme specific languages
if scheme == 'opensubtitles':
if language == 'br':
self.lang = 'bre'
return
elif language == 'se':
self.lang = 'sme'
return
elif scheme is not None:
log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
# look for ISO language codes
if len(language) == 2:
self.lang = lng2_to_lng3.get(language)
elif len(language) == 3:
self.lang = (language
if language in lng3
else lng3term_to_lng3.get(language))
else:
self.lang = (lng_en_name_to_lng3.get(language) or
lng_fr_name_to_lng3.get(language))
# general language exceptions
if self.lang is None and language in lng_exceptions:
lang, country = lng_exceptions[language]
self.lang = Language(lang).alpha3
self.country = Country(country) if country else None
msg = 'The given string "%s" could not be identified as a language' % language
if self.lang is None and strict:
raise ValueError(msg)
if self.lang is None:
log.debug(msg)
self.lang = 'und'
@property
def alpha2(self):
return lng3_to_lng2[self.lang]
@property
def alpha3(self):
return self.lang
@property
def alpha3term(self):
return lng3_to_lng3term[self.lang]
@property
def english_name(self):
return lng3_to_lng_en_name[self.lang]
@property
def french_name(self):
return lng3_to_lng_fr_name[self.lang]
@property
def opensubtitles(self):
if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
return 'pob'
elif self.lang in ['gre', 'srp']:
return self.alpha3term
return self.alpha3
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
return self.alpha2
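    # Added commentary (not part of the original class): the scheme-specific
    # properties above expose the same Language under different external codes,
    # e.g. (per the doctests and the code above):
    #     Language('pt(br)').opensubtitles  ->  'pob'
    #     Language('pt(br)').tmdb           ->  'pt-BR'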
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
return self.lang == other.lang
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
return self.lang != 'und'
def __unicode__(self):
if self.country:
return '%s(%s)' % (self.english_name, self.country.alpha2)
else:
return self.english_name
def __repr__(self):
if self.country:
return 'Language(%s, country=%s)' % (self.english_name, self.country)
else:
return 'Language(%s)' % self.english_name
UNDETERMINED = Language('und')
ALL_LANGUAGES = frozenset(Language(lng) for lng in lng_all_names) - frozenset([UNDETERMINED])
ALL_LANGUAGES_NAMES = lng_all_names
def search_language(string, lang_filter=None, skip=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
you can specify a list of allowed languages using the lang_filter argument,
as in lang_filter = [ 'fr', 'eng', 'spanish' ]
>>> search_language('movie [en].avi')
(Language(English), (7, 9), 0.8)
>>> search_language('the zen fat cat and the gay mad men got a new fan', lang_filter = ['en', 'fr', 'es'])
(None, None, None)
"""
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
    # middle of a string (where they most likely carry their common meaning)
lng_common_words = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi',
# french words
'bas', 'de', 'le', 'son', 'vo', 'vf', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se',
# spanish words
'la', 'el', 'del', 'por', 'mar',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben', 'da', 'lt'
])
sep = r'[](){} \._-+'
if lang_filter:
lang_filter = lang_set(lang_filter)
slow = ' %s ' % string.lower()
confidence = 1.0 # for all of them
for lang in set(find_words(slow)) & lng_all_names:
if lang in lng_common_words:
continue
pos = slow.find(lang)
if pos != -1:
end = pos + len(lang)
            # skip if the span is already in the skip list
while skip and (pos - 1, end - 1) in skip:
pos = slow.find(lang, end)
if pos == -1:
continue
end = pos + len(lang)
if pos == -1:
continue
# make sure our word is always surrounded by separators
if slow[pos - 1] not in sep or slow[end] not in sep:
continue
language = Language(slow[pos:end])
if lang_filter and language not in lang_filter:
continue
            # only allow those languages that have a 2-letter code; those that
            # don't are too esoteric and probably false matches
if language.lang not in lng3_to_lng2:
continue
# confidence depends on lng2, lng3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words and lower their confidence accordingly
confidence = 0.3 # going with the low-confidence route here
return language, (pos - 1, end - 1), confidence
return None, None, None
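# Illustrative usage (added commentary, not part of the original module): scanning
# a release name for an embedded language tag. The exact objects returned depend
# on the ISO tables above; roughly:
#     lang, span, conf = search_language('Movie.2010.French.720p.mkv')
#     # lang should be Language(French), with the lowest confidence (0.3)
#     # because a full English language name is matched rather than an ISO code.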
def guess_language(text):
"""Guess the language in which a body of text is written.
This uses the external guess-language python module, and will fail and return
Language(Undetermined) if it is not installed.
"""
try:
from guess_language import guessLanguage
return Language(guessLanguage(text))
except ImportError:
log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
log.error('Please install it from PyPI, by doing eg: pip install guess-language')
return UNDETERMINED
| 14,207 | Python | .py | 317 | 35.624606 | 110 | 0.588693 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,496 | hash_ed2k.py | CouchPotato_CouchPotatoServer/libs/guessit/hash_ed2k.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import s, to_hex
import hashlib
import os.path
def hash_file(filename):
"""Returns the ed2k hash of a given file.
>>> s(hash_file('tests/dummy.srt'))
'ed2k://|file|dummy.srt|44|1CA0B9DED3473B926AA93A0A546138BB|/'
"""
return 'ed2k://|file|%s|%d|%s|/' % (os.path.basename(filename),
os.path.getsize(filename),
hash_filehash(filename).upper())
def hash_filehash(filename):
"""Returns the ed2k hash of a given file.
This function is taken from:
http://www.radicand.org/blog/orz/2010/2/21/edonkey2000-hash-in-python/
"""
md4 = hashlib.new('md4').copy
def gen(f):
while True:
x = f.read(9728000)
if x:
yield x
else:
return
def md4_hash(data):
m = md4()
m.update(data)
return m
with open(filename, 'rb') as f:
a = gen(f)
hashes = [md4_hash(data).digest() for data in a]
if len(hashes) == 1:
return to_hex(hashes[0])
else:
            return md4_hash(reduce(lambda a, d: a + d, hashes, "")).hexdigest()
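# Added commentary (not part of the original module): ed2k splits the file into
# 9,728,000-byte chunks and MD4-digests each one; a file that fits in a single
# chunk is identified by that chunk's digest, otherwise by the MD4 of the
# concatenated chunk digests, which is what the two branches above compute.
# Hedged usage sketch (assumes a readable path to an existing file):
#     link = hash_file('/path/to/movie.avi')
#     # -> 'ed2k://|file|movie.avi|<size>|<MD4 hex digest>|/'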
| 2,039 | Python | .py | 55 | 30.654545 | 74 | 0.639311 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,497 | textutils.py | CouchPotato_CouchPotatoServer/libs/guessit/textutils.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Smewt - A smart collection manager
# Copyright (c) 2008-2012 Nicolas Wack <wackou@gmail.com>
#
# Smewt is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Smewt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import s
from guessit.patterns import sep
import functools
import unicodedata
import re
# string-related functions
def normalize_unicode(s):
return unicodedata.normalize('NFC', s)
def strip_brackets(s):
if not s:
return s
if ((s[0] == '[' and s[-1] == ']') or
(s[0] == '(' and s[-1] == ')') or
(s[0] == '{' and s[-1] == '}')):
return s[1:-1]
return s
def clean_string(st):
for c in sep:
# do not remove certain chars
if c in ['-', ',']:
continue
st = st.replace(c, ' ')
parts = st.split()
result = ' '.join(p for p in parts if p != '')
    # now also strip any separator chars left at the edges of the string
while result and result[0] in sep:
result = result[1:]
while result and result[-1] in sep:
result = result[:-1]
return result
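# Illustrative behaviour (added commentary, not part of the original module),
# assuming '.', '[' and ']' are listed in guessit.patterns.sep:
#     clean_string('[ some.title.2010 ]')   # -> 'some title 2010'
# i.e. separator characters become spaces, runs of whitespace collapse, and any
# separators left at either end of the string are stripped.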
_words_rexp = re.compile(r'\w+', re.UNICODE)
def find_words(s):
return _words_rexp.findall(s.replace('_', ' '))
def reorder_title(title):
ltitle = title.lower()
if ltitle[-4:] == ',the':
return title[-3:] + ' ' + title[:-4]
if ltitle[-5:] == ', the':
return title[-3:] + ' ' + title[:-5]
return title
def str_replace(string, pos, c):
return string[:pos] + c + string[pos+1:]
def str_fill(string, region, c):
start, end = region
return string[:start] + c * (end - start) + string[end:]
def levenshtein(a, b):
if not a:
return len(b)
if not b:
return len(a)
m = len(a)
n = len(b)
d = []
for i in range(m+1):
d.append([0] * (n+1))
for i in range(m+1):
d[i][0] = i
for j in range(n+1):
d[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
if a[i-1] == b[j-1]:
cost = 0
else:
cost = 1
d[i][j] = min(d[i-1][j] + 1, # deletion
d[i][j-1] + 1, # insertion
d[i-1][j-1] + cost # substitution
)
return d[m][n]
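# Added commentary (not part of the original module): levenshtein() above is the
# classic dynamic-programming edit distance with unit costs for insertion,
# deletion and substitution, e.g. levenshtein('kitten', 'sitting') == 3. It is
# handy for fuzzy comparison of guessed titles.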
# group-related functions
def find_first_level_groups_span(string, enclosing):
"""Return a list of pairs (start, end) for the groups delimited by the given
enclosing characters.
This does not return nested groups, ie: '(ab(c)(d))' will return a single group
containing the whole string.
>>> find_first_level_groups_span('abcd', '()')
[]
>>> find_first_level_groups_span('abc(de)fgh', '()')
[(3, 7)]
>>> find_first_level_groups_span('(ab(c)(d))', '()')
[(0, 10)]
>>> find_first_level_groups_span('ab[c]de[f]gh(i)', '[]')
[(2, 5), (7, 10)]
"""
opening, closing = enclosing
depth = [] # depth is a stack of indices where we opened a group
result = []
for i, c, in enumerate(string):
if c == opening:
depth.append(i)
elif c == closing:
try:
start = depth.pop()
end = i
if not depth:
# we emptied our stack, so we have a 1st level group
result.append((start, end+1))
except IndexError:
# we closed a group which was not opened before
pass
return result
def split_on_groups(string, groups):
"""Split the given string using the different known groups for boundaries.
>>> s(split_on_groups('0123456789', [ (2, 4) ]))
['01', '23', '456789']
>>> s(split_on_groups('0123456789', [ (2, 4), (4, 6) ]))
['01', '23', '45', '6789']
>>> s(split_on_groups('0123456789', [ (5, 7), (2, 4) ]))
['01', '23', '4', '56', '789']
"""
if not groups:
return [ string ]
boundaries = sorted(set(functools.reduce(lambda l, x: l + list(x), groups, [])))
if boundaries[0] != 0:
boundaries.insert(0, 0)
if boundaries[-1] != len(string):
boundaries.append(len(string))
groups = [ string[start:end] for start, end in zip(boundaries[:-1],
boundaries[1:]) ]
return [ g for g in groups if g ] # return only non-empty groups
def find_first_level_groups(string, enclosing, blank_sep=None):
"""Return a list of groups that could be split because of explicit grouping.
The groups are delimited by the given enclosing characters.
You can also specify if you want to blank the separator chars in the returned
list of groups by specifying a character for it. None means it won't be replaced.
This does not return nested groups, ie: '(ab(c)(d))' will return a single group
containing the whole string.
>>> s(find_first_level_groups('', '()'))
['']
>>> s(find_first_level_groups('abcd', '()'))
['abcd']
>>> s(find_first_level_groups('abc(de)fgh', '()'))
['abc', '(de)', 'fgh']
>>> s(find_first_level_groups('(ab(c)(d))', '()', blank_sep = '_'))
['_ab(c)(d)_']
>>> s(find_first_level_groups('ab[c]de[f]gh(i)', '[]'))
['ab', '[c]', 'de', '[f]', 'gh(i)']
>>> s(find_first_level_groups('()[]()', '()', blank_sep = '-'))
['--', '[]', '--']
"""
groups = find_first_level_groups_span(string, enclosing)
if blank_sep:
for start, end in groups:
string = str_replace(string, start, blank_sep)
string = str_replace(string, end-1, blank_sep)
return split_on_groups(string, groups)
| 6,238 | Python | .py | 167 | 30.568862 | 85 | 0.571832 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,498 | matchtree.py | CouchPotato_CouchPotatoServer/libs/guessit/matchtree.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, Guess
from guessit.textutils import clean_string, str_fill
from guessit.patterns import group_delimiters
from guessit.guess import (merge_similar_guesses, merge_all,
choose_int, choose_string)
import copy
import logging
log = logging.getLogger(__name__)
class BaseMatchTree(UnicodeMixin):
"""A MatchTree represents the hierarchical split of a string into its
constituent semantic groups."""
def __init__(self, string='', span=None, parent=None):
self.string = string
self.span = span or (0, len(string))
self.parent = parent
self.children = []
self.guess = Guess()
@property
def value(self):
return self.string[self.span[0]:self.span[1]]
@property
def clean_value(self):
return clean_string(self.value)
@property
def offset(self):
return self.span[0]
@property
def info(self):
result = dict(self.guess)
for c in self.children:
result.update(c.info)
return result
@property
def root(self):
if not self.parent:
return self
return self.parent.root
@property
def depth(self):
if self.is_leaf():
return 0
return 1 + max(c.depth for c in self.children)
def is_leaf(self):
return self.children == []
def add_child(self, span):
child = MatchTree(self.string, span=span, parent=self)
self.children.append(child)
def partition(self, indices):
indices = sorted(indices)
if indices[0] != 0:
indices.insert(0, 0)
if indices[-1] != len(self.value):
indices.append(len(self.value))
for start, end in zip(indices[:-1], indices[1:]):
self.add_child(span=(self.offset + start,
self.offset + end))
def split_on_components(self, components):
offset = 0
for c in components:
start = self.value.find(c, offset)
end = start + len(c)
self.add_child(span=(self.offset + start,
self.offset + end))
offset = end
def nodes_at_depth(self, depth):
if depth == 0:
yield self
for child in self.children:
for node in child.nodes_at_depth(depth - 1):
yield node
@property
def node_idx(self):
if self.parent is None:
return ()
return self.parent.node_idx + (self.parent.children.index(self),)
def node_at(self, idx):
if not idx:
return self
try:
return self.children[idx[0]].node_at(idx[1:])
except:
raise ValueError('Non-existent node index: %s' % (idx,))
def nodes(self):
yield self
for child in self.children:
for node in child.nodes():
yield node
def _leaves(self):
if self.is_leaf():
yield self
else:
for child in self.children:
# pylint: disable=W0212
for leaf in child._leaves():
yield leaf
def leaves(self):
return list(self._leaves())
def to_string(self):
empty_line = ' ' * len(self.string)
def to_hex(x):
if isinstance(x, int):
return str(x) if x < 10 else chr(55 + x)
return x
def meaning(result):
mmap = { 'episodeNumber': 'E',
'season': 'S',
'extension': 'e',
'format': 'f',
'language': 'l',
'country': 'C',
'videoCodec': 'v',
'audioCodec': 'a',
'website': 'w',
'container': 'c',
'series': 'T',
'title': 't',
'date': 'd',
'year': 'y',
'releaseGroup': 'r',
'screenSize': 's'
}
if result is None:
return ' '
for prop, l in mmap.items():
if prop in result:
return l
return 'x'
lines = [ empty_line ] * (self.depth + 2) # +2: remaining, meaning
lines[-2] = self.string
for node in self.nodes():
if node == self:
continue
idx = node.node_idx
depth = len(idx) - 1
if idx:
lines[depth] = str_fill(lines[depth], node.span,
to_hex(idx[-1]))
if node.guess:
lines[-2] = str_fill(lines[-2], node.span, '_')
lines[-1] = str_fill(lines[-1], node.span, meaning(node.guess))
lines.append(self.string)
return '\n'.join(lines)
def __unicode__(self):
return self.to_string()
class MatchTree(BaseMatchTree):
"""The MatchTree contains a few "utility" methods which are not necessary
for the BaseMatchTree, but add a lot of convenience for writing
higher-level rules."""
def _unidentified_leaves(self,
valid=lambda leaf: len(leaf.clean_value) >= 2):
for leaf in self._leaves():
if not leaf.guess and valid(leaf):
yield leaf
def unidentified_leaves(self,
valid=lambda leaf: len(leaf.clean_value) >= 2):
return list(self._unidentified_leaves(valid))
def _leaves_containing(self, property_name):
if isinstance(property_name, base_text_type):
property_name = [ property_name ]
for leaf in self._leaves():
for prop in property_name:
if prop in leaf.guess:
yield leaf
break
def leaves_containing(self, property_name):
return list(self._leaves_containing(property_name))
def first_leaf_containing(self, property_name):
try:
return next(self._leaves_containing(property_name))
except StopIteration:
return None
def _previous_unidentified_leaves(self, node):
node_idx = node.node_idx
for leaf in self._unidentified_leaves():
if leaf.node_idx < node_idx:
yield leaf
def previous_unidentified_leaves(self, node):
return list(self._previous_unidentified_leaves(node))
def _previous_leaves_containing(self, node, property_name):
node_idx = node.node_idx
for leaf in self._leaves_containing(property_name):
if leaf.node_idx < node_idx:
yield leaf
def previous_leaves_containing(self, node, property_name):
return list(self._previous_leaves_containing(node, property_name))
def is_explicit(self):
"""Return whether the group was explicitly enclosed by
parentheses/square brackets/etc."""
return (self.value[0] + self.value[-1]) in group_delimiters
def matched(self):
# we need to make a copy here, as the merge functions work in place and
# calling them on the match tree would modify it
parts = [node.guess for node in self.nodes() if node.guess]
parts = copy.deepcopy(parts)
# 1- try to merge similar information together and give it a higher
# confidence
for int_part in ('year', 'season', 'episodeNumber'):
merge_similar_guesses(parts, int_part, choose_int)
for string_part in ('title', 'series', 'container', 'format',
'releaseGroup', 'website', 'audioCodec',
'videoCodec', 'screenSize', 'episodeFormat',
'audioChannels', 'idNumber'):
merge_similar_guesses(parts, string_part, choose_string)
# 2- merge the rest, potentially discarding information not properly
# merged before
result = merge_all(parts,
append=['language', 'subtitleLanguage', 'other'])
log.debug('Final result: ' + result.nice_string())
return result
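# Added commentary (not part of the original module): MatchTree.matched() is the
# reduction step over the whole tree. Conceptually, if two nodes both guessed
# season=1 (confidences 0.6 and 0.5) and a third guessed the title, step 1 merges
# the two season guesses via choose_int into a single higher-confidence guess,
# and step 2 folds everything into one Guess, appending list-valued properties
# such as 'language' rather than overwriting them.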
| 9,116 | Python | .py | 229 | 29.0131 | 79 | 0.569147 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,499 | date.py | CouchPotato_CouchPotatoServer/libs/guessit/date.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import datetime
import re
def valid_year(year):
return 1920 < year < datetime.date.today().year + 5
def search_year(string):
"""Looks for year patterns, and if found return the year and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
Note this only looks for valid production years, that is between 1920
and now + 5 years, so for instance 2000 would be returned as a valid
year but 1492 would not.
>>> search_year('in the year 2000...')
(2000, (12, 16))
>>> search_year('they arrived in 1492.')
(None, None)
"""
match = re.search(r'[^0-9]([0-9]{4})[^0-9]', string)
if match:
year = int(match.group(1))
if valid_year(year):
return (year, match.span(1))
return (None, None)
def search_date(string):
"""Looks for date patterns, and if found return the date and group span.
Assumes there are sentinels at the beginning and end of the string that
always allow matching a non-digit delimiting the date.
>>> search_date('This happened on 2002-04-22.')
(datetime.date(2002, 4, 22), (17, 27))
>>> search_date('And this on 17-06-1998.')
(datetime.date(1998, 6, 17), (12, 22))
>>> search_date('no date in here')
(None, None)
"""
dsep = r'[-/ \.]'
date_rexps = [
# 20010823
r'[^0-9]' +
r'(?P<year>[0-9]{4})' +
r'(?P<month>[0-9]{2})' +
r'(?P<day>[0-9]{2})' +
r'[^0-9]',
# 2001-08-23
r'[^0-9]' +
r'(?P<year>[0-9]{4})' + dsep +
r'(?P<month>[0-9]{2})' + dsep +
r'(?P<day>[0-9]{2})' +
r'[^0-9]',
# 23-08-2001
r'[^0-9]' +
r'(?P<day>[0-9]{2})' + dsep +
r'(?P<month>[0-9]{2})' + dsep +
r'(?P<year>[0-9]{4})' +
r'[^0-9]',
# 23-08-01
r'[^0-9]' +
r'(?P<day>[0-9]{2})' + dsep +
r'(?P<month>[0-9]{2})' + dsep +
r'(?P<year>[0-9]{2})' +
r'[^0-9]',
]
for drexp in date_rexps:
match = re.search(drexp, string)
if match:
d = match.groupdict()
year, month, day = int(d['year']), int(d['month']), int(d['day'])
# years specified as 2 digits should be adjusted here
if year < 100:
if year > (datetime.date.today().year % 100) + 5:
year = 1900 + year
else:
year = 2000 + year
date = None
try:
date = datetime.date(year, month, day)
except ValueError:
try:
date = datetime.date(year, day, month)
except ValueError:
pass
if date is None:
continue
# check date plausibility
if not 1900 < date.year < datetime.date.today().year + 5:
continue
# looks like we have a valid date
# note: span is [+1,-1] because we don't want to include the
# non-digit char
start, end = match.span()
return (date, (start + 1, end - 1))
return None, None
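# Illustrative example (added commentary, not part of the original module) for the
# two-digit-year branch above: with non-digit sentinels around the date,
#     search_date('.23-08-01.')
# should yield datetime.date(2001, 8, 23): since 01 is not more than five years
# ahead of the current two-digit year, it is expanded to 2001 rather than 1901.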
| 4,095 | Python | .py | 110 | 29.363636 | 77 | 0.564361 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |