bugged stringlengths 4 228k | fixed stringlengths 0 96.3M | __index_level_0__ int64 0 481k |
|---|---|---|
def main(): global pid pid = os.getpid() label = 'zeoserver:%d' % pid log(label, 'starting') # We don't do much sanity checking of the arguments, since if we get it # wrong, it's a bug in the test suite. keep = 0 configfile = None # Parse the arguments and let getopt.error percolate opts, args = getopt.getopt(sys.argv... | def main(): global pid pid = os.getpid() label = 'zeoserver:%d' % pid log(label, 'starting') # We don't do much sanity checking of the arguments, since if we get it # wrong, it's a bug in the test suite. keep = 0 configfile = None # Parse the arguments and let getopt.error percolate opts, args = getopt.getopt(sys.argv... | 26,900 |
def checkReconnection(self): """Check that the client reconnects when a server restarts.""" | def checkReconnection(self): """Check that the client reconnects when a server restarts.""" | 26,901 |
def commit(self, subtransaction=None): 'Finalize the transaction' | def commit(self, subtransaction=None): 'Finalize the transaction' | 26,902 |
def commit(self, subtransaction=None): 'Finalize the transaction' | def commit(self, subtransaction=None): 'Finalize the transaction' | 26,903 |
def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip... | def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip... | 26,904 |
def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip... | def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip... | 26,905 |
def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:LMt:U') s = None compress = None data=sys.argv[0] nrep=5 minimize=0 detailed=1 cache = None domain = 'AF_INET' threads = 1 for o, v in opts: if o=='-n': nrep = int(v) elif o=='-d': data = v elif o=='-s': s = v elif o=='-z': import zlib compress = zlib.compress e... | def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:LMt:U') s = None compress = None data=sys.argv[0] nrep=5 minimize=0 detailed=1 cache = None domain = 'AF_INET' threads = 1 for o, v in opts: if o=='-n': nrep = int(v) elif o=='-d': data = v elif o=='-s': s = v elif o=='-z': import zlib compress = zlib.compress e... | 26,906 |
def tpc_begin(self, transaction): self._lock_acquire() try: if self._transaction is transaction: return | def tpc_begin(self, transaction): self._lock_acquire() try: if self._transaction is transaction: return | 26,907 |
def testConnection(self, conn): """Return a pair (stub, preferred). | def testConnection(self, conn): """Return a pair (stub, preferred). | 26,908 |
def setVersionCacheSize(self, v): self._version_cache_size=v for ver in self._pools[0].keys(): if v: for c in self._pools[0][ver][1]: c._cache.cache_size=v | def setVersionCacheSize(self, v): self._version_cache_size=v for ver in self._pools[0].keys(): if ver: for c in self._pools[0][ver][1]: c._cache.cache_size=v | 26,909 |
# def persistent_id(object, | # def persistent_id(object, | 26,910 |
def tpc_finish(self, transaction): | def tpc_finish(self, transaction): | 26,911 |
def tpc_finish_(self): invalidate=self._db.invalidate for oid in self._invalidating: invalidate(oid, self) | def _invalidate_invalidating(self): invalidate=self._db.invalidate for oid in self._invalidating: invalidate(oid, self) | 26,912 |
def __init__(self, storage, pool_size=7, cache_size=400, cache_deactivate_after=60, version_pool_size=3, version_cache_size=100, version_cache_deactivate_after=10, ): """Create an object database. | def __init__(self, storage, pool_size=7, cache_size=400, cache_deactivate_after=60, version_pool_size=3, version_cache_size=100, version_cache_deactivate_after=10, ): """Create an object database. | 26,913 |
def _sane(self, index, pos): """Sanity check saved index data by reading the last undone trans | def _sane(self, index, pos): """Sanity check saved index data by reading the last undone trans | 26,914 |
def read_index(file, name, index, vindex, tindex, stop='\377'*8, ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0): read=file.read seek=file.seek seek(0,2) file_size=file.tell() if file_size: if file_size < start: raise FileStorageFormatError, file.name seek(0) if read(4) != packed_version: raise FileStorageFor... | def read_index(file, name, index, vindex, tindex, stop='\377'*8, ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0): read=file.read seek=file.seek seek(0,2) file_size=file.tell() if file_size: if file_size < start: raise FileStorageFormatError, file.name seek(0) if read(4) != packed_version: raise FileStorageFor... | 26,915 |
def next(self, index=0): file=self._file seek=file.seek read=file.read pos=self._pos | def next(self, index=0): file=self._file seek=file.seek read=file.read pos=self._pos | 26,916 |
def get_info(self): storage=self.__storage return { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), 'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(), } | def get_info(self): storage=self.__storage info = { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), 'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(), } | 26,917 |
def get_info(self): storage=self.__storage return { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), 'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(), } | def get_info(self): storage=self.__storage return { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), } | 26,918 |
def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it." | def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it." | 26,919 |
def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it." | def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it." | 26,920 |
def checkRecoverUndoInVersion(self): oid = self._storage.new_oid() version = "aVersion" revid_a = self._dostore(oid, data=MinPO(91)) revid_b = self._dostore(oid, revid=revid_a, version=version, data=MinPO(92)) revid_c = self._dostore(oid, revid=revid_b, version=version, data=MinPO(93)) self._undo(self._storage.undoInfo... | def checkRecoverUndoInVersion(self): oid = self._storage.new_oid() version = "aVersion" revid_a = self._dostore(oid, data=MinPO(91)) revid_b = self._dostore(oid, revid=revid_a, version=version, data=MinPO(92)) revid_c = self._dostore(oid, revid=revid_b, version=version, data=MinPO(93)) self._undo(self._storage.undoInfo... | 26,921 |
def _transactionalUndoRecord(self, oid, pos, serial, pre, version): """Get the indo information for a data record | def _transactionalUndoRecord(self, oid, pos, serial, pre, version): """Get the indo information for a data record | 26,922 |
def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return buf = ["------"] line = ("%s %s %s %s" % (log_time(), severity_string(severity), subsystem, summary)) if not textwrap or len(line) < 80: buf.append(line) else: buf.extend(textwrap.wrap(line, width=79, subse... | def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return buf = ["------"] line = ("%s %s %s %s" % (log_time(), severity_string(severity), subsystem, summary)) if not textwrap or len(line) < 80: buf.append(line) else: buf.extend(textwrap.wrap(line, width=79, subse... | 26,923 |
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | 26,924 |
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | 26,925 |
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | 26,926 |
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | 26,927 |
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation... | 26,928 |
def _parse_addrs(self, addrs): # Return a list of (addr_type, addr) pairs. | def _parse_addrs(self, addrs): # Return a list of (addr_type, addr) pairs. | 26,929 |
def _commitVersion(self, src, dest, transaction, abort=None): # call after checking arguments and acquiring lock srcpos = self._vindex_get(src, 0) spos = p64(srcpos) # middle holds bytes 16:34 of a data record: # pos of transaction, len of version name, data length # commit version never writes data, so data leng... | def _commitVersion(self, src, dest, transaction, abort=None): # call after checking arguments and acquiring lock srcpos = self._vindex_get(src, 0) spos = p64(srcpos) # middle holds bytes 16:34 of a data record: # pos of transaction, len of version name, data length # commit version never writes data, so data leng... | 26,930 |
def pack(self, t, referencesf): """Copy data from the current database file to a packed file | def pack(self, t, referencesf): """Copy data from the current database file to a packed file | 26,931 |
def persistent_id(object,self=self,stackup=stackup): if (not hasattr(object, '_p_oid') or type(object) is ClassType): return None | def persistent_id(object,self=self,stackup=stackup): if (not hasattr(object, '_p_oid') or type(object) is ClassType): return None | 26,932 |
def __str__(self): return _fmt_oid(self.args[0]) | def __str__(self): return _fmt_oid(self.args[0]) | 26,933 |
def _log(msg, severity=zLOG.INFO, error=None): """Internal: generic logging function.""" zLOG.LOG("RUNSVR", severity, msg, "", error) | def _log(msg, severity=zLOG.INFO, error=None): """Internal: generic logging function.""" zLOG.LOG(_label, severity, msg, "", error) | 26,934 |
def notifyDisconnected(self, ignored): LOG("ClientStorage", PROBLEM, "Disconnected from storage") self._connected=0 thread.start_new_thread(self._call.connect,(0,)) try: self._commit_lock_release() except: pass | defnotifyDisconnected(self,ignored):LOG("ClientStorage",PROBLEM,"Disconnectedfromstorage")self._connected=0thread.start_new_thread(self._call.connect,(0,))try:self._commit_lock_release()except:pass | 26,935 |
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | def report(self): """Print a report about the transaction""" t = time.ctime(self.begin) if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | 26,936 |
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): d_vote = self.vote - self.begin else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | 26,937 |
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: d_vote = "*" if hasattr(self, "finish"): print self.finish - self.begin, else: d_vote = "*" print self.user, self.url | 26,938 |
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): d_finish = self.finish - self.begin else: print "*", print self.user, self.url | 26,939 |
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url | 26,940 |
def add_zeo_options(self): self.add(None, None, "a:", "address=", self.handle_address) self.add(None, None, "f:", "filename=", self.handle_filename) self.add("family", "zeo.address.family") self.add("address", "zeo.address.address", required="no server address specified; use -a or -C") self.add("read_only", "zeo.read_o... | def add_zeo_options(self): self.add(None, None, "a:", "address=", self.handle_address) self.add(None, None, "f:", "filename=", self.handle_filename) self.add("family", "zeo.address.family") self.add("address", "zeo.address.address", required="no server address specified; use -a or -C") self.add("read_only", "zeo.read_o... | 26,941 |
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | 26,942 |
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | 26,943 |
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | 26,944 |
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25) | 26,945 |
def checkTwoObjectUndo(self): # Convenience p31, p32, p51, p52 = map(pickle.dumps, (31, 32, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p31,... | def checkTwoObjectUndo(self): # Convenience p31, p32, p51, p52 = map(pickle.dumps, (31, 32, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p31,... | 26,946 |
def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.... | def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.... | 26,947 |
def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.... | def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.... | 26,948 |
def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, dat... | def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, dat... | 26,949 |
def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, dat... | def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, dat... | 26,950 |
def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info =self._storage.undoInfo() tid = ... | def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info = self._storage.undoInfo() tid =... | 26,951 |
def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info =self._storage.undoInfo() tid = ... | def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info = self._storage.undoInfo() tid =... | 26,952 |
def XXXtestEmptyFirstBucketReportedByGuido(self): b = self.t for i in xrange(29972): # reduce to 29971 and it works b[i] = i for i in xrange(30): # reduce to 29 and it works del b[i] b[i+40000] = i | def testEmptyFirstBucketReportedByGuido(self): b = self.t for i in xrange(29972): # reduce to 29971 and it works b[i] = i for i in xrange(30): # reduce to 29 and it works del b[i] b[i+40000] = i | 26,953 |
def XXXtestDeletes(self): # Delete keys in all possible orders, checking each tree along # the way. | def testDeletes(self): # Delete keys in all possible orders, checking each tree along # the way. | 26,954 |
def XXXtestDeletes(self): # Delete keys in all possible orders, checking each tree along # the way. | def XXXtestDeletes(self): # Delete keys in all possible orders, checking each tree along # the way. | 26,955 |
def search(self): """Search for another record.""" dict = self._readnext() if self.filter is None or self.filter(d): if self.i >= self.first: self.results.append(dict) self.i += 1 | def search(self): """Search for another record.""" dict = self._readnext() if self.filter is None or self.filter(dict): if self.i >= self.first: self.results.append(dict) self.i += 1 | 26,956 |
def checkTxn(self, th, pos): if th.tid <= self.ltid: self.fail(pos, "time-stamp reduction: %s <= %s", _fmt_oid(th.tid), _fmt_oid(self.ltid)) self.ltid = th.tid if th.status == "c": self.fail(pos, "transaction with checkpoint flag set") if not (th.status == " " or th.status == "p"): self.fail(pos, "invalid transaction s... | def checkTxn(self, th, pos): if th.tid <= self.ltid: self.fail(pos, "time-stamp reduction: %s <= %s", _fmt_oid(th.tid), _fmt_oid(self.ltid)) self.ltid = th.tid if th.status == "c": self.fail(pos, "transaction with checkpoint flag set") if not th.status in " pu": self.fail(pos, "invalid transaction status: %r", th.statu... | 26,957 |
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | def info(RESPONSE): RESPONSE['Content-type']= 'text/plain' | 26,958 |
def undoLog(self, first, last, filter=None): self._lock_acquire() try: transactions=self._data.items() pos=len(transactions) encode=base64.encodestring r=[] append=r.append i=0 while i < last and pos: pos=pos-1 if i < first: continue tid, (p, u, d, e, t) = transactions[pos] if p: continue d={'id': encode(tid)[:-1], 'ti... | def undoLog(self, first, last, filter=None): self._lock_acquire() try: transactions=self._data.items() pos=len(transactions) encode=base64.encodestring r=[] append=r.append i=0 while i < last and pos: pos=pos-1 if i < first: i = i+1 continue tid, (p, u, d, e, t) = transactions[pos] if p: continue d={'id': encode(tid)[:... | 26,959 |
def commitVersion(self, src, dest, transaction, abort=None): # We are going to commit by simply storing back pointers. | def commitVersion(self, src, dest, transaction, abort=None): # We are going to commit by simply storing back pointers. | 26,960 |
def testInterface(self): self.assert_(IPersistent.isImplementedByInstancesOf(Persistent), "%s does not implement IPersistent" % Persistent) p = Persistent() self.assert_(IPersistent.isImplementedBy(p), "%s does not implement IPersistent" % p) | def testInterface(self): self.assert_(IPersistent.isImplementedByInstancesOf(Persistent), "%s does not implement IPersistent" % Persistent) p = Persistent() self.assert_(IPersistent.isImplementedBy(p), "%s does not implement IPersistent" % p) | 26,961 |
def commit(self, object, transaction): oid=object._p_oid if oid is None or object._p_jar is not self: oid = self.new_oid() object._p_jar=self object._p_oid=oid | def commit(self, object, transaction): oid=object._p_oid if oid is None or object._p_jar is not self: oid = self.new_oid() object._p_jar=self object._p_oid=oid | 26,962 |
def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction! | def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction! | 26,963 |
def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction! | def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction! | 26,964 |
def tearDown(self): self.t = None del self.t | def tearDown(self): del self.t | 26,965 |
def tearDown(self): self.t = None del self.t | def tearDown(self): self.t = None del self.t | 26,966 |
def _getRoot(self): from ZODB.FileStorage import FileStorage from ZODB.DB import DB n = 'fs_tmp__%s' % os.getpid() s = FileStorage(n) db = DB(s) root = db.open().root() return root | def _getRoot(self): from ZODB.FileStorage import FileStorage from ZODB.DB import DB n = 'fs_tmp__%s' % os.getpid() s = FileStorage(n) db = DB(s) root = db.open().root() return root | 26,967 |
def testFailMergeInsert(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1[-99999]=-99999 b1[e1[0][0]]=e1[0][1] b2[99999]=99999 b2[e1[0][0]]=e1[0][1] test_merge(base, b1, b2, bm, 'merge conflicting inserts', should_fail=1) | def testFailMergeInsert(self): base, b1, b2, bm, e1, e2, items = self._setupConflict() b1[-99999]=-99999 b1[e1[0][0]]=e1[0][1] b2[99999]=99999 b2[e1[0][0]]=e1[0][1] test_merge(base, b1, b2, bm, 'merge conflicting inserts', should_fail=1) | 26,968 |
def _setupConflict(self): l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067] | def _setupConflict(self): l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067] | 26,969 |
def _setupConflict(self): l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067] | def _setupConflict(self): l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686, -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067] | 26,970 |
def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0): s1=o1.__getstate__() s2=o2.__getstate__() s3=o3.__getstate__() expected=expect.__getstate__() if expected is None: expected=((((),),),) if should_fail: try: merged=o1._p_resolveConflict(s1, s2, s3) except (ConflictError, ValueError), err: pa... | def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0): s1=o1.__getstate__() s2=o2.__getstate__() s3=o3.__getstate__() expected=expect.__getstate__() if expected is None: expected = ((((),),),) if should_fail: try: merged=o1._p_resolveConflict(s1, s2, s3) except (ConflictError, ValueError), err: ... | 26,971 |
def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0): s1=o1.__getstate__() s2=o2.__getstate__() s3=o3.__getstate__() expected=expect.__getstate__() if expected is None: expected=((((),),),) if should_fail: try: merged=o1._p_resolveConflict(s1, s2, s3) except (ConflictError, ValueError), err: pa... | def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0): s1=o1.__getstate__() s2=o2.__getstate__() s3=o3.__getstate__() expected=expect.__getstate__() if expected is None: expected=((((),),),) if should_fail: try: merged=o1._p_resolveConflict(s1, s2, s3) except ConflictError, err: pass # ConflictE... | 26,972 |
def setUp(self): self.t = IIBTree() | def setUp(self): self.t = IIBTree() | 26,973 |
def setUp(self): self.t = IOSet() | def setUp(self): self.t = IOSet() | 26,974 |
def setUp(self): self.t = OOSet() | def setUp(self): self.t = OOSet() | 26,975 |
def setUp(self): self.t = IISet() | def setUp(self): self.t = IISet() | 26,976 |
def setUp(self): self.t = OISet() | def setUp(self): self.t = OISet() | 26,977 |
def setUp(self): self.t = IOTreeSet() | def setUp(self): self.t = IOTreeSet() | 26,978 |
def setUp(self): self.t = OOTreeSet() | def setUp(self): self.t = OOTreeSet() | 26,979 |
def setUp(self): self.t = IITreeSet() | def setUp(self): self.t = IITreeSet() | 26,980 |
def setUp(self): self.t = OIBucket() | class NastyConfict(Base, TestCase): def setUp(self): self.t = OOBTree() def testResolutionBlowsUp(self): b = self.t for i in range(0, 200, 4): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][... | 26,981
def test_suite(): TIOBTree = makeSuite(TestIOBTrees, 'test') TOOBTree = makeSuite(TestOOBTrees, 'test') TOIBTree = makeSuite(TestOIBTrees, 'test') TIIBTree = makeSuite(TestIIBTrees, 'test') TIOSet = makeSuite(TestIOSets, 'test') TOOSet = makeSuite(TestOOSets, 'test') TOISet = makeSuite(TestIOSets, 'test') TIISet = mak... | def test_suite(): TIOBTree = makeSuite(TestIOBTrees, 'test') TOOBTree = makeSuite(TestOOBTrees, 'test') TOIBTree = makeSuite(TestOIBTrees, 'test') TIIBTree = makeSuite(TestIIBTrees, 'test') TIOSet = makeSuite(TestIOSets, 'test') TOOSet = makeSuite(TestOOSets, 'test') TOISet = makeSuite(TestIOSets, 'test') TIISet = mak... | 26,982 |
def __changed__(self,v=-1): old=self._p_changed if v != -1: if v and not old and self._p_jar is not None: try: get_transaction().register(self) self._p_changed=1 except: pass | def __changed__(self,v=-1): old=self._p_changed if v != -1: if v and not old and self._p_jar is not None: try: get_transaction().register(self) except: pass | 26,983 |
def open(self, read_only=0): # XXX Needed to support ReadOnlyStorage tests. Ought to be a # cleaner way. addr = self._storage._addr self._storage.close() self._storage = ClientStorage(addr, read_only=read_only, wait=1) | def open(self, read_only=0): # XXX Needed to support ReadOnlyStorage tests. Ought to be a # cleaner way. addr = self._storage._addr self._storage.close() self._storage = ClientStorage(addr, read_only=read_only, wait=1) | 26,984 |
def getStorage(self): self._envdir = tempfile.mktemp() return """\ <Storage> type BDBFullStorage name %s </Storage> """ % self._envdir | def getConfig(self): self._envdir = tempfile.mktemp() return """\ <Storage> type BDBFullStorage name %s </Storage> """ % self._envdir | 26,985 |
def getStorage(self): self._envdir = tempfile.mktemp() return """\ <Storage> type MappingStorage name %s </Storage> """ % self._envdir | def getConfig(self): self._envdir = tempfile.mktemp() return """\ <Storage> type MappingStorage name %s </Storage> """ % self._envdir | 26,986 |
def f(c): o = c._opened d = c._debug_info if d: if len(d) == 1: d = d[0] else: d = '' d = "%s (%s)" % (d, len(c._cache)) | def f(c): o = c._opened d = c._debug_info if d: if len(d) == 1: d = d[0] else: d = '' d = "%s (%s)" % (d, len(c._cache)) | 26,987 |
def close(self): pass | def close(self): pass | 26,988 |
def undo(self, transaction_id): raise UndoError, 'non-undoable transaction' | def undo(self, transaction_id): raise UndoError, 'non-undoable transaction' | 26,989 |
def handle_request(self, msgid, flags, name, args): if not self.check_method(name): msg = "Invalid method name: %s on %s" % (name, repr(self.obj)) raise ZRPCError(msg) if __debug__: self.log("calling %s%s" % (name, short_repr(args)), level=zLOG.BLATHER) | def handle_request(self, msgid, flags, name, args): if not self.check_method(name): msg = "Invalid method name: %s on %s" % (name, repr(self.obj)) raise ZRPCError(msg) if __debug__: self.log("calling %s%s" % (name, short_repr(args)), level=zLOG.BLATHER) | 26,990 |
def wait(self, msgid): """Invoke asyncore mainloop and wait for reply.""" if __debug__: self.log("wait(%d), async=%d" % (msgid, self.is_async()), level=zLOG.TRACE) if self.is_async(): self._pull_trigger() | def wait(self, msgid): """Invoke asyncore mainloop and wait for reply.""" if __debug__: self.log("wait(%d), async=%d" % (msgid, self.is_async()), level=zLOG.TRACE) if self.is_async(): self._pull_trigger() | 26,991 |
def main(port, storage_name, rawargs): klass = load_storage_class(storage_name) args = [] for arg in rawargs: if arg.startswith('='): arg = eval(arg[1:], {'__builtins__': {}}) args.append(arg) storage = klass(*args) zeo_port = int(port) test_port = zeo_port + 1 t = ZEOTestServer(('', test_port), storage) serv = ZEO.Sto... | def main(port, storage_name, rawargs): klass = load_storage_class(storage_name) args = [] for arg in rawargs: if arg.startswith('='): arg = eval(arg[1:], {'__builtins__': {}}) args.append(arg) storage = klass(*args) zeo_port = int(port) test_port = zeo_port + 1 t = ZEOTestServer(('', test_port), storage) serv = ZEO.Sto... | 26,992 |
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid),... | def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid),... | 26,993 |
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid),... | def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid),... | 26,994 |
def setup_signals(storages): try: import signal except ImportError: return try: xfsz = signal.SIFXFSZ except AttributeError: pass else: signal.signal(xfsz, signal.SIG_IGN) signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages)) signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0)) try: si... | def setup_signals(storages): try: import signal except ImportError: return if hasattr(signal, 'SIGXFSZ'): signal.signal(signal.SIGXFSZ, signal.SIG_IGN) if hasattr(signal, 'SIGTERM'): signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages)) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, lambda s... | 26,995 |
def setup_signals(storages): try: import signal except ImportError: return try: xfsz = signal.SIFXFSZ except AttributeError: pass else: signal.signal(xfsz, signal.SIG_IGN) signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages)) signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0)) try: si... | def setup_signals(storages): try: import signal except ImportError: return try: xfsz = signal.SIFXFSZ except AttributeError: pass else: signal.signal(xfsz, signal.SIG_IGN) signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages)) signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0)) try: si... | 26,996 |
def rotate_logs_handler(signum, frame): rotate_logs() import signal signal.signal(signal.SIGHUP, rotate_logs_handler) | def rotate_logs_handler(signum, frame): rotate_logs() import signal signal.signal(signal.SIGHUP, rotate_logs_handler) | 26,997 |
def getExtensionMethods(self): """getExtensionMethods | def getExtensionMethods(self): """getExtensionMethods | 26,998 |
def u64(v, unpack=struct.unpack): h, v = unpack(">ii", v) if v < 0: v=t32-v if h: if h < 0: h=t32-h v=h*t32+v return v | def u64(v, unpack=struct.unpack): h, v = unpack(">ii", v) if v < 0: v=t32+v if h: if h < 0: h=t32-h v=h*t32+v return v | 26,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.