bugged (string, lengths 4–228k) | fixed (string, lengths 0–96.3M) | __index_level_0__ (int64, 0–481k) |
|---|---|---|
def abortVersion(self, src, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: return self._call('abortVersion', src, self._serial) finally: self._lock_release() | def abortVersion(self, src, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: oids=self._call('abortVersion', src, self._serial) invalidate=self._cache.invalidate for oid in oids: invalidate(oid, src) return oids finally: self.... | 27,100 |
def commitVersion(self, src, dest, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: return self._call('commitVersion', src, dest, self._serial) finally: self._lock_release() | def commitVersion(self, src, dest, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: oids=self._call('commitVersion', src, dest, self._serial) invalidate=self._cache.invalidate if dest: for oid in oids: invalidate(oid, src) el... | 27,101 |
def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p is not None: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: return p, s return pv, sv finally: self._lock_release() | def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: return p, s return pv, sv finally: self._lock_release() | 27,102 |
def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p is not None: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: return p, s return pv, sv finally: self._lock_release() | def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p is not None: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: if s: return p, s raise KeyError, oid return pv, sv finally: se... | 27,103 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | conn_no = [0] def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no): conn_no[0] = conn_no[0] + 1 cn = conn_no[0] for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,104 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id='' if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,105 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id=d['id'] elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,106 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,107 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,108 |
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__']) | 27,109 |
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | def copy_other_files(cmd, outputbase): extensions = ["*.conf", "*.xml", "*.txt", "*.sh"] for dir in [ "ZConfig/components/basic", "ZConfig/components/logger", "ZConfig/tests/input", "ZConfig/tests/library", "ZConfig/tests/library/thing", "ZConfig/tests/library/thing/extras", "ZConfig/tests/library/widget", "ZEO", "ZO... | 27,110 |
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | 27,111 |
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | 27,112 |
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/z... | 27,113 |
def _create_wrappers(self): # Create socket wrappers wrappers = {} # keys are active wrappers for domain, addr in self.addrlist: wrap = ConnectWrapper(domain, addr, self.mgr, self.client) wrap.connect_procedure() if wrap.state == "notified": for wrap in wrappers.keys(): wrap.close() wrappers[wrap] = wrap return wrappe... | def _create_wrappers(self): # Create socket wrappers wrappers = {} # keys are active wrappers for domain, addr in self.addrlist: wrap = ConnectWrapper(domain, addr, self.mgr, self.client) wrap.connect_procedure() if wrap.state == "notified": for w in wrappers.keys(): w.close() return {wrap: wrap} if wrap.state != "clo... | 27,114 |
def fsIndex(): return {} | def fsIndex(): return {} | 27,115 |
def warn(message, *data): LOG('ZODB FS',WARNING, "%s warn: %s\n" % (packed_version, (message % data))) | def warn(message, *data): LOG('ZODB FS', WARNING, "%s warn: %s\n" % (packed_version, (message % data))) | 27,116 |
def error(message, *data): LOG('ZODB FS',ERROR,"%s ERROR: %s\n" % (packed_version, (message % data))) | def error(message, *data): LOG('ZODB FS', ERROR, "%s ERROR: %s\n" % (packed_version, (message % data))) | 27,117 |
def nearPanic(message, *data): LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, (message % data))) | def nearPanic(message, *data): LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, (message % data))) | 27,118 |
def panic(message, *data): message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message | def panic(message, *data): message = message % data LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message | 27,119 |
def panic(message, *data): message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message | def panic(message, *data): message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message | 27,120 |
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | 27,121 |
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | 27,122 |
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | 27,123 |
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None): | 27,124 |
def pack(self, t, referencesf): """Copy data from the current database file to a packed file Non-current records from transactions with time-stamp strings less than packtss are ommitted. As are all undone records. Also, data back pointers that point before packtss are resolved and the associated data are copied, sinc... | def pack(self, t, referencesf): """Copy data from the current database file to a packed file Non-current records from transactions with time-stamp strings less than packtss are ommitted. As are all undone records. Also, data back pointers that point before packtss are resolved and the associated data are copied, sinc... | 27,125 |
def commit(self, subtransaction=None): 'Finalize the transaction' | def commit(self, subtransaction=None): 'Finalize the transaction' | 27,126 |
def commit(self, subtransaction=None): 'Finalize the transaction' | def commit(self, subtransaction=None): 'Finalize the transaction' | 27,127 |
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._pos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid), ... | def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._pos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid), ... | 27,128 |
def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end... | def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end... | 27,129 |
def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end... | def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end... | 27,130 |
def __iter__(self): errors = {} skipped = 0 for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError... | def __iter__(self): global skipped for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError, v: v = ... | 27,131 |
def __iter__(self): errors = {} skipped = 0 for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError... | def __iter__(self): errors = {} skipped = 0 for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError... | 27,132 |
def modifiedInVersion(self, oid): self._lock_acquire() try: pos=self._index[oid] file=self._file seek=file.seek seek(pos) doid,serial,prev,tloc,vlen = unpack(">8s8s8s8sH", file.read(34)) if doid != oid: raise CorruptedDataError, h if vlen: seek(24,1) # skip plen, pnv, and pv return file.read(vlen) return '' finally: se... | def modifiedInVersion(self, oid): self._lock_acquire() try: pos=self._index[oid] file=self._file seek=file.seek seek(pos) doid,serial,prev,tloc,vlen = unpack(">8s8s8s8sH", file.read(34)) if doid != oid: raise CorruptedDataError, pos if vlen: seek(24,1) # skip plen, pnv, and pv return file.read(vlen) return '' finally: ... | 27,133 |
def checkBuggyResolve(self): obj = PCounter3() obj.inc() | def checkBuggyResolve1(self): obj = PCounter3() obj.inc() | 27,134 |
def checkBuggyResolve(self): obj = PCounter3() obj.inc() | defdef checkBuggyResolve2(self): obj = PCounter4() obj.inc() oid = self._storage.new_oid() revid1 = self._dostoreNP(oid, data=zodb_pickle(obj)) obj.inc() obj.inc() revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) self.assertRaises(TypeError, self._dostoreNP, oid, revid=revid1, data=zodb_pickle(o... | 27,135 |
def test_basic_pickling(): """ >>> x = Simple('x', aaa=1, bbb='foo') >>> x.__getnewargs__() () >>> print_dict(x.__getstate__()) {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'} >>> f, (c,), state = x.__reduce__() >>> f.__name__ '__newobj__' >>> f.__module__ 'copy_reg' >>> c.__name__ 'Simple' >>> print_dict(state) {'__name... | def test_basic_pickling(): """ >>> x = Simple('x', aaa=1, bbb='foo') >>> print_dict(x.__getstate__()) {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'} >>> f, (c,), state = x.__reduce__() >>> f.__name__ '__newobj__' >>> f.__module__ 'copy_reg' >>> c.__name__ 'Simple' >>> print_dict(state) {'__name__': 'x', 'aaa': 1, 'bbb': ... | 27,136 |
def test_pickling_w_slots_only(): """ >>> x = SubSlotted('x', 'y', 'z') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> d >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> ... | def test_pickling_w_slots_only(): """ >>> x = SubSlotted('x', 'y', 'z') >>> d, s = x.__getstate__() >>> d >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x... | 27,137 |
def test_pickling_w_slots(): """ >>> x = SubSubSlotted('x', 'y', 'z', aaa=1, bbb='foo') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> print_dict(d) {'aaa': 1, 'bbb': 'foo'} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) ==... | def test_pickling_w_slots(): """ >>> x = SubSubSlotted('x', 'y', 'z', aaa=1, bbb='foo') >>> d, s = x.__getstate__() >>> print_dict(d) {'aaa': 1, 'bbb': 'foo'} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickl... | 27,138 |
def test_pickling_w_slots_w_empty_dict(): """ >>> x = SubSubSlotted('x', 'y', 'z') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> print_dict(d) {} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickl... | def test_pickling_w_slots_w_empty_dict(): """ >>> x = SubSubSlotted('x', 'y', 'z') >>> d, s = x.__getstate__() >>> print_dict(d) {} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> p... | 27,139 |
def log2(type, msg, subsys="ClientStorage:%d" % os.getpid()): LOG(subsys, type, msg) | def log2(type, msg, subsys="ZCS:%d" % os.getpid()): LOG(subsys, type, msg) | 27,140 |
def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:L') z=s=None data=sys.argv[0] nrep=5 minimize=0 for o, v in opts: if o=='-n': nrep=string.atoi(v) elif o=='-d': data=v elif o=='-s': s=v elif o=='-z': global zlib import zlib z=compress elif o=='-M': minimize=1 elif o=='-D': global debug os.environ['STUPID_LOG_... | def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:L') z=s=None data=sys.argv[0] nrep=5 minimize=0 for o, v in opts: if o=='-n': nrep=string.atoi(v) elif o=='-d': data=v elif o=='-s': s=v elif o=='-z': global zlib import zlib z=compress elif o=='-M': minimize=1 elif o=='-D': global debug os.environ['STUPID_LOG_... | 27,141 |
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | 27,142 |
def undoLog(self, first, last, filter=None): # I think this is wrong given the handling of first and last # in FileStorage. self._lock_acquire() try: # XXX Shouldn't this be sorted? transactions = self._data.items() pos = len(transactions) r = [] i = 0 while i < last and pos: pos = pos - 1 if i < first: i = i + 1 conti... | def undoLog(self, first, last, filter=None): # I think this is wrong given the handling of first and last # in FileStorage. self._lock_acquire() try: # XXX Shouldn't this be sorted? transactions = self._data.items() pos = len(transactions) r = [] i = 0 while i < last and pos: pos = pos - 1 if i < first: i = i + 1 conti... | 27,143 |
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | 27,144 |
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | 27,145 |
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | 27,146 |
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex. | 27,147 |
def connectionDebugInfo(self): r=[] pools,pooll=self._pools t=time() for version, (pool, allocated, lock) in pools.items(): for c in allocated: o=c._opened r.append({ 'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)), 'info': c._debug_info, 'version': version, }) return r | def connectionDebugInfo(self): r=[] pools,pooll=self._pools t=time() for version, (pool, allocated, lock) in pools.items(): for c in allocated: o=c._opened r.append({ 'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)), 'info': d, 'version': version, }) return r | 27,148 |
def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0): | def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, debug=0, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0): | 27,149 |
def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0): | def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0): | 27,150 |
def log(message, level=zLOG.INFO, label=None, error=None): """Internal helper to log a message using zLOG.""" zLOG.LOG(label or _label, level, message, error=error) | def log(message, level=zLOG.INFO, label=None, error=None): """Internal helper to log a message using zLOG.""" zLOG.LOG(label or _label, level, message, error=error) | 27,151 |
def close_conn(self, conn): """Internal: remove the given connection from self.connections. | def close_conn(self, conn): """Internal: remove the given connection from self.connections. | 27,152 |
def run(self): try: result = self._method(*self._args) except (SystemExit, KeyboardInterrupt): raise except Exception: self.delay.error(sys.exc_info()) else: self.delay.reply(result) | def run(self): try: result = self._method(*self._args) except (SystemExit, KeyboardInterrupt): raise except Exception: self.delay.error(sys.exc_info()) else: self.delay.reply(result) | 27,153 |
def getpids(self): if not os.path.exists(self.env.zeo_pid): # If there's no pid file, assume the server isn't running return None, None return map(int, open(self.env.zeo_pid).read().split()) | def getpids(self): if not os.path.exists(self.env.zeo_pid): # If there's no pid file, assume the server isn't running return [] return map(int, open(self.env.zeo_pid).read().split()) | 27,154 |
def stop_server(self): ppid, pid = self.getpids() if ppid is None: return self.kill(pids=[pid]) | def stop_server(self): ppid, pid = self.getpids() if ppid is None: return self.kill(pids=[pid]) | 27,155 |
def testNoPort(self): outp = self.system("-s") self.assert_(outp.find("No port specified") != -1) | def testErrNoPort(self): outp = self.system("-s") self.assert_(outp.find("No port specified") != -1) | 27,156 |
def testLogRestart(self): port = 9090 logfile1 = tempfile.mktemp(suffix="log") logfile2 = tempfile.mktemp(suffix="log") os.environ["STUPID_LOG_FILE"] = logfile1 os.environ["EVENT_LOG_FILE"] = logfile1 | def testLogRestart(self): port = 9090 logfile1 = tempfile.mktemp(suffix="log") logfile2 = tempfile.mktemp(suffix="log") os.environ["STUPID_LOG_FILE"] = logfile1 os.environ["EVENT_LOG_FILE"] = logfile1 | 27,157 |
def tpc_begin(self, transaction, sub=None): if self._invalid(None): # Some nitwit invalidated everything! raise ConflictError, oid self._invalidating=[] | def tpc_begin(self, transaction, sub=None): if self._invalid(None): # Some nitwit invalidated everything! raise ConflictError, "transaction already invalidated" self._invalidating=[] | 27,158 |
def __setattr__(self,key,value): ' ' if key[:3]=='_p_': self.__dict__[key]=value return | def __setattr__(self,key,value): ' ' k=key[:3] if k=='_p_' or k=='_v_': self.__dict__[key]=value return | 27,159 |
def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' a... | def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' a... | 27,160 |
def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' a... | def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' a... | 27,161 |
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file... | def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "ls:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open fil... | 27,162 |
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file... | def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file... | 27,163 |
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file... | def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file... | 27,164 |
def __init__(self): # Initialize global statistics self.epoch = None self.total_loads = 0 self.total_hits = 0 # Subclass must increment self.total_invals = 0 self.total_writes = 0 # Reset per-run statistics and set up simulation data self.restart() | def __init__(self, cachelimit): self.cachelimit = cachelimit # Initialize global statistics self.epoch = None self.total_loads = 0 self.total_hits = 0 # Subclass must increment self.total_invals = 0 self.total_writes = 0 # Reset per-run statistics and set up simulation data self.restart() | 27,165 |
def event(self, ts, dlen, _version, code, _current, oid, _serial): # Record first and last timestamp seen if self.ts0 is None: self.ts0 = ts if self.epoch is None: self.epoch = ts self.ts1 = ts | def event(self, ts, dlen, _version, code, _current, oid, _serial): # Record first and last timestamp seen if self.ts0 is None: self.ts0 = ts if self.epoch is None: self.epoch = ts self.ts1 = ts | 27,166 |
def __init__(self, cachelimit): # Initialize base class Simulation.__init__(self) # Store simulation parameters self.filelimit = cachelimit / 2 # Initialize additional global statistics self.total_flips = 0 | def __init__(self, cachelimit): # Initialize base class Simulation.__init__(self) # Store simulation parameters self.filelimit = cachelimit / 2 # Initialize additional global statistics self.total_flips = 0 | 27,167 |
def write(self, oid, size): # Fudge because size is rounded up to multiples of 256. (31 # is header overhead per cache record; 127 is to compensate # for rounding up to multiples of 256.) size = size + 31 - 127 if self.filesize[self.current] + size > self.filelimit: # Cache flip self.flips += 1 self.total_flips += 1 s... | def write(self, oid, size): # Fudge because size is rounded up to multiples of 256. (31 # is header overhead per cache record; 127 is to compensate # for rounding up to multiples of 256.) size = size + 31 - 127 if self.filesize[self.current] + size > self.cachelimit / 2: # Cache flip self.flips += 1 self.total_flips +... | 27,168 |
def inval(self, oid): if self.fileoids[self.current].get(oid): del self.fileoids[self.current][oid] elif self.fileoids[1 - self.current].get(oid): del self.fileoids[1 - self.current][oid] | def inval(self, oid): if self.fileoids[self.current].get(oid): del self.fileoids[self.current][oid] elif self.fileoids[1 - self.current].get(oid): del self.fileoids[1 - self.current][oid] | 27,169 |
def finish(self): if self.loads: self.report() if self.total_loads: print (self.format + " OVERALL") % ( time.ctime(self.epoch)[4:-8], duration(self.ts1 - self.epoch), self.total_loads, self.total_hits, self.total_invals, self.total_writes, self.total_flips, hitrate(self.total_loads, self.total_hits)) | def finish(self): self.report() if self.total_loads: print (self.format + " OVERALL") % ( time.ctime(self.epoch)[4:-8], duration(self.ts1 - self.epoch), self.total_loads, self.total_hits, self.total_invals, self.total_writes, self.total_flips, hitrate(self.total_loads, self.total_hits)) | 27,170 |
def pack(self, t, wait=0): t = threading.Thread(target=self._pack, args=(t, wait)) t.start() | def pack(self, t, wait=None): if wait is not None: wait = MTDelay() t = threading.Thread(target=self._pack, args=(t, wait)) t.start() | 27,171 |
def pack(self, t, wait=0): t = threading.Thread(target=self._pack, args=(t, wait)) t.start() | def pack(self, t, wait=0): t = threading.Thread(target=self._pack, args=(t, wait)) t.start() | 27,172 |
def _pack(self, t, wait=0): try: self.__storage.pack(t, referencesf) except: self._log('Pack failed for %s' % self.__storage_id, zLOG.ERROR, error=sys.exc_info()) if wait: raise else: # XXX Why doesn't we broadcast on wait? if not wait: # Broadcast new size statistics self.server.invalidate(0, self.__storage_id, (), se... | def _pack(self, t, wait=0): try: self.__storage.pack(t, referencesf) except: self._log('Pack failed for %s' % self.__storage_id, zLOG.ERROR, error=sys.exc_info()) if delay is not None: raise else: # XXX Why doesn't we broadcast on wait? if not wait: # Broadcast new size statistics self.server.invalidate(0, self.__stora... | 27,173 |
def _pack(self, t, wait=0): try: self.__storage.pack(t, referencesf) except: self._log('Pack failed for %s' % self.__storage_id, zLOG.ERROR, error=sys.exc_info()) if wait: raise else: # XXX Why doesn't we broadcast on wait? if not wait: # Broadcast new size statistics self.server.invalidate(0, self.__storage_id, (), se... | def _pack(self, t, wait=0): try: self.__storage.pack(t, referencesf) except: self._log('Pack failed for %s' % self.__storage_id, zLOG.ERROR, error=sys.exc_info()) if wait: raise else: # XXX Why doesn't we broadcast on wait?if delay is None: # Broadcast new size statistics self.server.invalidate(0, self.__storage_id, ()... | 27,174 |
def tpc_finish(self, id): if not self._check_tid(id): return invalidated = self.strategy.tpc_finish() if invalidated: self.server.invalidate(self, self.__storage_id, invalidated, self.get_size_info()) if not self._handle_waiting(): self._transaction = None self.strategy = None | def tpc_finish(self, id): if not self._check_tid(id): return invalidated = self.strategy.tpc_finish() if invalidated: self.server.invalidate(self, self.__storage_id, invalidated, self.get_size_info()) if not self._handle_waiting(): self._transaction = None self.strategy = None | 27,175 |
def tpc_abort(self, id): if not self._check_tid(id): return self.strategy.tpc_abort() if not self._handle_waiting(): self._transaction = None self.strategy = None | def tpc_abort(self, id): if not self._check_tid(id): return self.strategy.tpc_abort() if not self._handle_waiting(): self._transaction = None self.strategy = None | 27,176 |
def wait(self): if self.__storage._transaction: d = Delay() self.__storage._waiting.append((d, self)) self._log("Transaction blocked waiting for storage. " "%d clients waiting." % len(self.__storage._waiting)) return d else: self.restart() | def wait(self): if self.__storage._transaction: d = Delay() self.__storage._waiting.append((d, self)) self._log("Transaction blocked waiting for storage. " "Clients waiting: %d." % len(self.__storage._waiting)) return d else: self.restart() | 27,177 |
def _handle_waiting(self): while self.__storage._waiting: delay, zeo_storage = self.__storage._waiting.pop(0) if self._restart(zeo_storage, delay): break if self.__storage._waiting: n = len(self.__storage._waiting) self._log("Blocked transaction restarted. " "%d clients waiting." % n) else: self._log("Blocked transact... | def _handle_waiting(self): while self.__storage._waiting: delay, zeo_storage = self.__storage._waiting.pop(0) if self._restart(zeo_storage, delay): break if self.__storage._waiting: n = len(self.__storage._waiting) self._log("Blocked transaction restarted. " "%d clients waiting." % n) else: self._log("Blocked transact... | 27,178 |
def rotate_logs(): import zLOG if hasattr(zLOG.log_write, 'reinitialize'): zLOG.log_write.reinitialize() else: # Hm, lets at least try to take care of the stupid logger: zLOG._stupid_dest=None | def rotate_logs(): import zLOG if hasattr(zLOG.log_write, 'reinitialize'): zLOG.log_write.reinitialize() else: # Hm, lets at least try to take care of the stupid logger: zLOG._stupid_dest=None | 27,179 |
def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return | def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return | 27,180 |
def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return | def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return | 27,181 |
def tpc_abort(self, transaction): if self.__onCommitAction is not None: del self.__onCommitActions self._storage.tpc_abort(transaction) cache=self._cache cache.invalidate(self._invalidated) cache.invalidate(self._invalidating) self._invalidate_creating() | def tpc_abort(self, transaction): if self.__onCommitActions is not None: del self.__onCommitActions self._storage.tpc_abort(transaction) cache=self._cache cache.invalidate(self._invalidated) cache.invalidate(self._invalidating) self._invalidate_creating() | 27,182 |
def __bobo_traverse__(self, REQUEST=None, name=None): db, aname, version_support = self._stuff if version_support is not None and REQUEST is not None: version=REQUEST.get(version_support,'') else: version='' conn=db.open(version) | def __bobo_traverse__(self, REQUEST=None, name=None): db, aname, version_support = self._stuff if version_support is not None and REQUEST is not None: version=REQUEST.get(version_support,'') else: version='' conn=db.open(version) | 27,183 |
def log_info(self, message, type='info'): if type=='error': type=ERROR else: type=INFO LOG('ZEO Server', type, message) | def log_info(self, message, type='info'): if type=='error': type=ERROR else: type=INFO LOG('ZEO Server', type, message) | 27,184 |
def message_input(self, message, dump=dump, Unpickler=Unpickler, StringIO=StringIO, None=None): if __debug__: m=`message` if len(m) > 60: m=m[:60]+' ...' blather('message_input', m) | def message_input(self, message, dump=dump, Unpickler=Unpickler, StringIO=StringIO, None=None): if __debug__: m=`message` if len(m) > 90: m=m[:90]+' ...' blather('message_input', m) | 27,185 |
def message_input(self, message, dump=dump, Unpickler=Unpickler, StringIO=StringIO, None=None): if __debug__: m=`message` if len(m) > 60: m=m[:60]+' ...' blather('message_input', m) | def message_input(self, message, dump=dump, Unpickler=Unpickler, StringIO=StringIO, None=None): if __debug__: m=`message` if len(m) > 60: m=m[:60]+' ...' blather('message_input', m) | 27,186 |
def zeoLoad(self, oid): storage=self.__storage v=storage.modifiedInVersion(oid) if v: pv, sv = storage.load(oid, v) else: pv=sv=None p, s = storage.load(oid,'') return p, s, v, pv, sv | def zeoLoad(self, oid): storage=self.__storage v=storage.modifiedInVersion(oid) if v: pv, sv = storage.load(oid, v) else: pv=sv=None try: p, s = storage.load(oid,'') except KeyError: if sv: p=s=None else: raise return p, s, v, pv, sv | 27,187 |
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | 27,188 |
def abortVersion(self, src, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) if not src: raise POSException.VersionCommitError("Invalid version") | def abortVersion(self, src, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) if not src: raise POSException.VersionCommitError("Invalid version") | 27,189 |
def get_connection(self, database_name): """Return a Connection for the named database.""" connection = self.connections.get(database_name) if connection is None: new_con = self._db.databases[database_name].open() self.connections.update(new_con.connections) new_con.connections = self.connections connection = new_con r... | def get_connection(self, database_name): """Return a Connection for the named database.""" connection = self.connections.get(database_name) if connection is None: new_con = self._db.databases[database_name].open( transaction_manager=self.transaction_manager, mvcc=self._mvcc, version=self._version, synch=self._synch, ) ... | 27,190 |
def _redundant_pack(self, file, pos): file.seek(pos-8) p=u64(file.read(8)) file.seek(p+16) return file.read(1) not in ' u' | def _redundant_pack(self, file, pos): file.seek(pos-8) p=u64(file.read(8)) file.seek(pos-p+8) return file.read(1) not in ' u' | 27,191 |
def pack(self, t, referencesf): """Copy data from the current database file to a packed file Non-current records from transactions with time-stamp strings less than packtss are ommitted. As are all undone records. Also, data back pointers that point before packtss are resolved and the associated data are copied, sinc... | def pack(self, t, referencesf): """Copy data from the current database file to a packed file Non-current records from transactions with time-stamp strings less than packtss are ommitted. As are all undone records. Also, data back pointers that point before packtss are resolved and the associated data are copied, sinc... | 27,192 |
def open(self, version='', transaction=None, temporary=0, force=None, waitflag=1): """Return a object space (AKA connection) to work in | def open(self, version='', transaction=None, temporary=0, force=None, waitflag=1): """Return a object space (AKA connection) to work in | 27,193 |
def _build_degenerate_tree(self): # Build the buckets and chain them together. bucket11 = IISet([11]) | def _build_degenerate_tree(self): # Build the buckets and chain them together. bucket11 = IISet([11]) | 27,194 |
def testDegenerateBasicOps(self): t = self._build_degenerate_tree() self.assertEqual(len(t), 5) self.assertEqual(list(t.keys()), [1, 3, 5, 7, 11]) # has_key actually returns the depth of a bucket. self.assertEqual(t.has_key(1), 4) self.assertEqual(t.has_key(3), 4) self.assertEqual(t.has_key(5), 6) self.assertEqual(t.ha... | def testDegenerateBasicOps(self): t, keys = self._build_degenerate_tree() self.assertEqual(len(t), len(keys)) self.assertEqual(list(t.keys()), keys) # has_key actually returns the depth of a bucket. self.assertEqual(t.has_key(1), 4) self.assertEqual(t.has_key(3), 4) self.assertEqual(t.has_key(5), 6) self.assertEqual(t.... | 27,195 |
def _checkRanges(self, tree, keys): self.assertEqual(len(tree), len(keys)) self.assertEqual(list(tree.keys()), keys) for k in keys: self.assert_(tree.has_key(k)) if keys: lokey = min(keys) hikey = max(keys) self.assertEqual(lokey, tree.minKey()) self.assertEqual(hikey, tree.maxKey()) else: lokey = hikey = 42 | def _checkRanges(self, tree, keys): self.assertEqual(len(tree), len(keys)) sorted_keys = keys[:] sorted_keys.sort() self.assertEqual(list(tree.keys()), sorted_keys) for k in keys: self.assert_(tree.has_key(k)) if keys: lokey = min(keys) hikey = max(keys) self.assertEqual(lokey, tree.minKey()) self.assertEqual(hikey, tr... | 27,196 |
def _checkRanges(self, tree, keys): self.assertEqual(len(tree), len(keys)) self.assertEqual(list(tree.keys()), keys) for k in keys: self.assert_(tree.has_key(k)) if keys: lokey = min(keys) hikey = max(keys) self.assertEqual(lokey, tree.minKey()) self.assertEqual(hikey, tree.maxKey()) else: lokey = hikey = 42 | def _checkRanges(self, tree, keys): self.assertEqual(len(tree), len(keys)) self.assertEqual(list(tree.keys()), keys) for k in keys: self.assert_(tree.has_key(k)) if keys: lokey = sorted_keys[0] hikey = sorted_keys[-1] self.assertEqual(lokey, tree.minKey()) self.assertEqual(hikey, tree.maxKey()) else: lokey = hikey = 42 | 27,197 |
def testRanges(self): t = self._build_degenerate_tree() self._checkRanges(t, [1, 3, 5, 7, 11]) | def testRanges(self): t = self._build_degenerate_tree() self._checkRanges(t, [1, 3, 5, 7, 11]) | 27,198 |
def main(): TextTestRunner().run(test_suite()) | defmain():TextTestRunner().run(test_suite()) | 27,199 |