def perform(self, store, count):
    """
    Upgrade C{store} performing C{count} upgrades per transaction.

    Also, catch any exceptions and print out something useful.
    """
    self.count = count
    try:
        self.upgradeStore(store)
        print 'Upgrade complete'
    except errors.ItemUpgradeError as e:
        print 'Upgrader error:'
        e.originalFailure.printTraceback(file=sys.stdout)
        print self.errorMessageFormat % (
            e.oldType.typeName, e.storeID, e.oldType.schemaVersion,
            e.newType.schemaVersion)

def runcode(self, code):
    """
    Override L{code.InteractiveConsole.runcode} to run the code in a
    transaction unless the local C{autocommit} is currently set to a true
    value.
    """
    # Note: the 'code' parameter (a code object) shadows the stdlib 'code'
    # module here, so the base class must be bound at module level
    # (a 'from code import InteractiveConsole' is assumed) rather than
    # referenced as 'code.InteractiveConsole' inside this method.
    if not self.locals.get('autocommit', None):
        return self.locals['db'].transact(
            InteractiveConsole.runcode, self, code)
    return InteractiveConsole.runcode(self, code)

def namespace(self):
    """
    Return a dictionary representing the namespace which should be
    available to the user.
    """
    self._ns = {
        'db': self.store,
        'store': self.store,
        'autocommit': False,
        }
    return self._ns

def addAccount(self, siteStore, username, domain, password):
    """
    Create a new account in the given store.

    @param siteStore: A site Store to which login credentials will be
        added.
    @param username: Local part of the username for the credentials to
        add.
    @param domain: Domain part of the username for the credentials to add.
    @param password: Password for the credentials to add.

    @rtype: L{LoginAccount}
    @return: The added account.
    """
    for ls in siteStore.query(userbase.LoginSystem):
        break
    else:
        ls = self.installOn(siteStore)
    try:
        acc = ls.addAccount(username, domain, password)
    except userbase.DuplicateUser:
        raise usage.UsageError("An account by that name already exists.")
    return acc

def itemTypeWithSomeAttributes(attributeTypes):
    """
    Create a new L{Item} subclass with the given attribute types in its
    schema.
    """
    class SomeItem(Item):
        typeName = 'someitem_' + str(typeNameCounter())
        for i, attributeType in enumerate(attributeTypes):
            locals()['attr_' + str(i)] = attributeType()
    return SomeItem

def createSomeItems(store, itemType, values, counter):
    """
    Create some instances of a particular type in a store.
    """
    for i in counter:
        itemType(store=store, **values)

def save(self, commit=True):
    """
    Save the instance or create a new one.
    """
    # walk through the document fields
    for field_name, field in iter_valid_fields(self._meta):
        setattr(self.instance, field_name,
                self.cleaned_data.get(field_name))
    if commit:
        self.instance.save()
    return self.instance

def transacted(func):
    """
    Return a callable which will invoke C{func} in a transaction using the
    C{store} attribute of the first parameter passed to it.  Typically this
    is used to create Item methods which are automatically run in a
    transaction.

    The attributes of the returned callable will resemble those of C{func}
    as closely as L{twisted.python.util.mergeFunctionMetadata} can make
    them.
    """
    def transactionified(item, *a, **kw):
        return item.store.transact(func, item, *a, **kw)
    return mergeFunctionMetadata(func, transactionified)

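For example, the wrapper might be applied to an Item method like this (a
minimal sketch; the Counter class is hypothetical):

from axiom.item import Item, transacted
from axiom.attributes import integer

class Counter(Item):
    value = integer(default=0)

    def increment(self):
        # Runs inside self.store.transact(); rolled back on exception.
        self.value += 1
    increment = transacted(increment)
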
def dependentItems(store, tableClass, comparisonFactory):
    """
    Collect all the items that should be deleted when an item or items of a
    particular item type are deleted.

    @param tableClass: An L{Item} subclass.

    @param comparisonFactory: A one-argument callable taking an attribute
        and returning an L{iaxiom.IComparison} describing the items to
        collect.

    @return: An iterable of items to delete.
    """
    for cascadingAttr in (_cascadingDeletes.get(tableClass, []) +
                          _cascadingDeletes.get(None, [])):
        for cascadedItem in store.query(cascadingAttr.type,
                                        comparisonFactory(cascadingAttr)):
            yield cascadedItem

def allowDeletion(store, tableClass, comparisonFactory):
    """
    Return a C{bool} indicating whether deletion of an item or items of a
    particular item type should be allowed to proceed.

    @param tableClass: An L{Item} subclass.

    @param comparisonFactory: A one-argument callable taking an attribute
        and returning an L{iaxiom.IComparison} describing the items to
        collect.

    @return: A C{bool} indicating whether deletion should be allowed.
    """
    for cascadingAttr in (_disallows.get(tableClass, []) +
                          _disallows.get(None, [])):
        for cascadedItem in store.query(cascadingAttr.type,
                                        comparisonFactory(cascadingAttr),
                                        limit=1):
            return False
    return True

def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
    """
    Generate a dummy subclass of Item that will have the given attributes,
    and the base Item methods, but no methods of its own.  This is for use
    with upgrading.

    @param typeName: a string, the Axiom TypeName to have attributes for.

    @param schemaVersion: an int, the (old) version of the schema this is a
        proxy for.

    @param attributes: a dict mapping {columnName: attr instance} describing
        the schema of C{typeName} at C{schemaVersion}.

    @param dummyBases: a sequence of 4-tuples of (baseTypeName,
        baseSchemaVersion, baseAttributes, baseBases) representing the dummy
        bases of this legacy class.
    """
    if (typeName, schemaVersion) in _legacyTypes:
        return _legacyTypes[typeName, schemaVersion]
    if dummyBases:
        # type() requires a tuple of bases, not a list.
        realBases = tuple(declareLegacyItem(*A) for A in dummyBases)
    else:
        realBases = (Item,)
    attributes = attributes.copy()
    attributes['__module__'] = 'item_dummy'
    attributes['__legacy__'] = True
    attributes['typeName'] = typeName
    attributes['schemaVersion'] = schemaVersion
    result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)),
                  realBases,
                  attributes)
    assert result is not None, 'wtf, %r' % (type,)
    _legacyTypes[(typeName, schemaVersion)] = result
    return result

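A typical use is to declare the old shape of a type before registering its
upgrader.  A minimal sketch, where the 'photo' type and its attributes are
hypothetical:

from axiom.attributes import integer, text

# Declare what version 1 of the (hypothetical) 'photo' type looked like,
# so the upgrade machinery can instantiate old rows.
declareLegacyItem('photo', 1, dict(path=text(), size=integer()))
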
def empowerment(iface, priority=0):
    """
    Class decorator for indicating a powerup's powerup interfaces.

    The class will also be declared as implementing the interface.

    @type iface: L{zope.interface.Interface}
    @param iface: The powerup interface.

    @type priority: int
    @param priority: The priority the powerup will be installed at.
    """
    def _deco(cls):
        cls.powerupInterfaces = (
            tuple(getattr(cls, 'powerupInterfaces', ())) +
            ((iface, priority),))
        implementer(iface)(cls)
        return cls
    return _deco

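A minimal sketch of how the decorator might be applied; the IGreeter
interface and Greeter item are hypothetical:

from zope.interface import Interface
from axiom.item import Item
from axiom.attributes import integer

class IGreeter(Interface):
    def greet():
        """Say hello."""

@empowerment(IGreeter)
class Greeter(Item):
    dummy = integer()

    def greet(self):
        return 'hello'
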
def powerUp(self, powerup, interface=None, priority=0):
    """
    Install a powerup (e.g. plugin) on an item or store.

    Powerups will be returned in an iterator when queried for using the
    'powerupsFor' method.  Normally they will be returned in order of
    installation [this may change in future versions, so please don't
    depend on it].  Higher priorities are returned first.  If you have
    something that should run before "normal" powerups, pass
    POWERUP_BEFORE; if you have something that should run after, pass
    POWERUP_AFTER.  We suggest not depending too heavily on order of
    execution of your powerups, but if finer-grained control is necessary
    you may pass any integer.  Normal (unspecified) priority is zero.

    Powerups will only be installed once on a given item.  If you install a
    powerup for a given interface with priority 1, then again with priority
    30, the powerup will be adjusted to priority 30, but future calls to
    powerupsFor will still only return that powerup once.

    If no interface or priority are specified, and the class of the powerup
    has a "powerupInterfaces" attribute (containing either a sequence of
    interfaces, or a sequence of (interface, priority) tuples), this object
    will be powered up with the powerup object on those interfaces.

    If no interface or priority are specified and the powerup has a
    "__getPowerupInterfaces__" method, it will be called with an iterable
    of (interface, priority) tuples, collected from the "powerupInterfaces"
    attribute described above.  The iterable of (interface, priority)
    tuples it returns will then be installed.

    @param powerup: an Item that implements C{interface} (if specified)
    @param interface: a zope interface, or None
    @param priority: An int; preferably either POWERUP_BEFORE,
        POWERUP_AFTER, or unspecified.

    @raise TypeError: if C{interface} is IPowerupIndirector.  You may not
        install a powerup for IPowerupIndirector because that would be
        nonsensical.
    """
    if interface is None:
        for iface, priority in powerup._getPowerupInterfaces():
            self.powerUp(powerup, iface, priority)
    elif interface is IPowerupIndirector:
        raise TypeError(
            "You cannot install a powerup for IPowerupIndirector: %r"
            % (powerup,))
    else:
        forc = self.store.findOrCreate(
            _PowerupConnector,
            item=self,
            interface=unicode(qual(interface)),
            powerup=powerup)
        forc.priority = priority

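Continuing the hypothetical Greeter example from above, installation and
lookup might look like this sketch:

from axiom.store import Store

s = Store()
g = Greeter(store=s)
s.powerUp(g, IGreeter)   # or just s.powerUp(g), using powerupInterfaces
for powerup in s.powerupsFor(IGreeter):
    print powerup.greet()
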
def powerDown(self, powerup, interface=None):
    """
    Remove a powerup.

    If no interface is specified, and the type of the object being
    installed has a "powerupInterfaces" attribute (containing either a
    sequence of interfaces, or a sequence of (interface, priority) tuples),
    the target will be powered down with this object on those interfaces.

    If this object has a "__getPowerupInterfaces__" method, it will be
    called with an iterable of (interface, priority) tuples.  The iterable
    of (interface, priority) tuples it returns will then be uninstalled.

    (Note particularly that if powerups are added or removed from the
    collection described above between calls to powerUp and powerDown, more
    or fewer powerups will be removed than were installed.)
    """
    if interface is None:
        for interface, priority in powerup._getPowerupInterfaces():
            self.powerDown(powerup, interface)
    else:
        for cable in self.store.query(
                _PowerupConnector,
                AND(_PowerupConnector.item == self,
                    _PowerupConnector.interface == unicode(qual(interface)),
                    _PowerupConnector.powerup == powerup)):
            cable.deleteFromStore()
            return
        raise ValueError("Not powered up for %r with %r"
                         % (interface, powerup))

def powerupsFor(self, interface):
    """
    Return powerups installed using C{powerUp}, in order of descending
    priority.

    Powerups found to have been deleted, either during the course of this
    powerupsFor iteration, during an upgrader, or previously, will not be
    returned.
    """
    inMemoryPowerup = self._inMemoryPowerups.get(interface, None)
    if inMemoryPowerup is not None:
        yield inMemoryPowerup
    if self.store is None:
        return
    name = unicode(qual(interface), 'ascii')
    for cable in self.store.query(
            _PowerupConnector,
            AND(_PowerupConnector.interface == name,
                _PowerupConnector.item == self),
            sort=_PowerupConnector.priority.descending):
        pup = cable.powerup
        if pup is None:
            # this powerup was probably deleted during an upgrader.
            cable.deleteFromStore()
        else:
            indirector = IPowerupIndirector(pup, None)
            if indirector is not None:
                yield indirector.indirect(interface)
            else:
                yield pup

def interfacesFor(self, powerup):
    """
    Return an iterator of the interfaces for which the given powerup is
    installed on this object.

    This is not implemented for in-memory powerups.  It will probably fail
    in an unpredictable, implementation-dependent way if used on one.
    """
    pc = _PowerupConnector
    for iface in self.store.query(pc,
                                  AND(pc.item == self,
                                      pc.powerup == powerup)
                                  ).getColumn('interface'):
        yield namedAny(iface)

def _getPowerupInterfaces(self):
    """
    Collect powerup interfaces this object declares that it can be
    installed on.
    """
    powerupInterfaces = getattr(self.__class__, "powerupInterfaces", ())
    pifs = []
    for x in powerupInterfaces:
        if isinstance(x, type(Interface)):
            # just an interface
            pifs.append((x, 0))
        else:
            # an interface and a priority
            pifs.append(x)
    m = getattr(self, "__getPowerupInterfaces__", None)
    if m is not None:
        pifs = m(pifs)
        try:
            pifs = [(i, p) for (i, p) in pifs]
        except ValueError:
            raise ValueError("return value from %r.__getPowerupInterfaces__"
                             " not an iterable of 2-tuples" % (self,))
    return pifs

def _currentlyValidAsReferentFor(self, store):
    """
    Is this object currently valid as a reference?  Objects which will be
    deleted in this transaction, or objects which are not in the same store
    are not valid.  See attributes.reference.__get__.
    """
    if store is None:
        # If your store is None, you can refer to whoever you want.  I'm
        # in a store but it doesn't matter that you're not.
        return True
    if self.store is not store:
        return False
    if self.__deletingObject:
        return False
    return True

def _schemaPrepareInsert(self, store):
    """
    Prepare each attribute in my schema for insertion into a given store,
    either by upgrade or by creation.  This makes sure all references point
    to this store and all relative paths point to this store's files
    directory.
    """
    for name, atr in self.getSchema():
        atr.prepareInsert(self, store)

def existingInStore(cls, store, storeID, attrs):
    """
    Create and return a new instance from a row from the store.
    """
    self = cls.__new__(cls)
    self.__justCreated = False
    self.__subinit__(__store=store,
                     storeID=storeID,
                     __everInserted=True)
    schema = self.getSchema()
    assert len(schema) == len(attrs), "invalid number of attributes"
    for data, (name, attr) in zip(attrs, schema):
        attr.loaded(self, data)
    self.activate()
    return self

def getSchema(cls):
    """
    Return all persistent class attributes.
    """
    schema = []
    for name, atr in cls.__attributes__:
        atr = atr.__get__(None, cls)
        if isinstance(atr, SQLAttribute):
            schema.append((name, atr))
    # Cache the computed schema on the class so subsequent calls are cheap.
    cls.getSchema = staticmethod(lambda schema=schema: schema)
    return schema

def persistentValues(self):
    """
    Return a dictionary of all attributes which will be/have been/are being
    stored in the database.
    """
    return dict((k, getattr(self, k)) for (k, attr) in self.getSchema())

def committed(self):
    """
    Called after the database is brought into a consistent state with this
    object.
    """
    if self.__deleting:
        self.deleted()
        if not self.__legacy__:
            self.store.objectCache.uncache(self.storeID, self)
        self.__store = None
    self.__justCreated = False

def checkpoint(self):
    """
    Update the database to reflect in-memory changes made to this item; for
    example, to make it show up in store.query() calls where it is now
    valid, but was not the last time it was persisted to the database.

    This is called automatically when in 'autocommit mode' (i.e. not in a
    transaction) and at the end of each transaction for every object that
    has been changed.
    """
    if self.store is None:
        raise NotInStore("You can't checkpoint %r: not in a store" % (self,))

    if self.__deleting:
        if not self.__everInserted:
            # don't issue duplicate SQL and crap; we were created, then
            # destroyed immediately.
            return
        self.store.executeSQL(self._baseDeleteSQL(self.store),
                              [self.storeID])
        # re-using OIDs plays havoc with the cache, and with other things
        # as well.  We need to make sure that we leave a placeholder row at
        # the end of the table.
        if self.__deletingObject:
            # Mark this object as dead.
            self.store.executeSchemaSQL(_schema.CHANGE_TYPE,
                                        [-1, self.storeID])

            # Can't do this any more:
            # self.store.executeSchemaSQL(_schema.DELETE_OBJECT,
            #                             [self.storeID])

            # TODO: need to measure the performance impact of this, then do
            # it to make sure things are in fact deleted:
            # self.store.executeSchemaSQL(_schema.APP_VACUUM)
        else:
            assert self.__legacy__

        # we're done...
        if self.store.autocommit:
            self.committed()
        return

    if self.__everInserted:
        # case 1: we've been inserted before, either previously in this
        # transaction or we were loaded from the db
        if not self.__dirty__:
            # we might have been checkpointed twice within the same
            # transaction; just don't do anything.
            return
        self.store.executeSQL(*self._updateSQL())
    else:
        # case 2: we are in the middle of creating the object, we've never
        # been inserted into the db before
        schemaAttrs = self.getSchema()
        insertArgs = [self.storeID]
        for (ignoredName, attrObj) in schemaAttrs:
            attrObjDuplicate, attributeValue = self.__dirty__[attrObj.attrname]
            # assert attrObjDuplicate is attrObj
            insertArgs.append(attributeValue)
        # XXX this isn't atomic, gross.
        self.store.executeSQL(self._baseInsertSQL(self.store), insertArgs)
        self.__everInserted = True
    # In case 1, we're dirty but we did an update, synchronizing the
    # database, in case 2, we haven't been created but we issue an insert.
    # In either case, the code in attributes.py sets the attribute *as well
    # as* populating __dirty__, so we clear out dirty and we keep the same
    # value, knowing it's the same as what's in the db.
    self.__dirty__.clear()

    if self.store.autocommit:
        self.committed()

def registerUpgrader(upgrader, typeName, oldVersion, newVersion):
    """
    Register a callable which can perform a schema upgrade between two
    particular versions.

    @param upgrader: A one-argument callable which will upgrade an object.
        It is invoked with an instance of the old version of the object.
    @param typeName: The database typename for which this is an upgrader.
    @param oldVersion: The version from which this will upgrade.
    @param newVersion: The version to which this will upgrade.  This must
        be exactly one greater than C{oldVersion}.
    """
    # assert (typeName, oldVersion, newVersion) not in _upgradeRegistry, \
    #     "duplicate upgrader"
    # ^ this makes the tests blow up so it's just disabled for now; perhaps
    # we should have a specific test mode
    # assert newVersion - oldVersion == 1, "read the doc string"
    assert isinstance(typeName, str), "read the doc string"
    _upgradeRegistry[typeName, oldVersion] = upgrader

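Continuing the hypothetical 'photo' type from the declareLegacyItem sketch
above, a one-step upgrader might be registered like this:

def upgradePhoto1to2(old):
    # Copy the version-1 attributes and give the new column a default.
    return old.upgradeVersion(
        'photo', 1, 2,
        path=old.path,
        size=old.size,
        width=None)

registerUpgrader(upgradePhoto1to2, 'photo', 1, 2)
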
def registerAttributeCopyingUpgrader(itemType, fromVersion, toVersion,
                                     postCopy=None):
    """
    Register an upgrader for C{itemType}, from C{fromVersion} to
    C{toVersion}, which will copy all attributes from the legacy item to
    the new item.  If C{postCopy} is provided, it will be called with the
    new item after upgrading.

    @param itemType: L{axiom.item.Item} subclass
    @param postCopy: a callable of one argument
    @return: None
    """
    def upgrader(old):
        newitem = old.upgradeVersion(
            itemType.typeName, fromVersion, toVersion,
            **dict((str(name), getattr(old, name))
                   for (name, _) in old.getSchema()))
        if postCopy is not None:
            postCopy(newitem)
        return newitem
    registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion)

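When a schema bump adds nothing that needs hand-written logic, the same
hypothetical Photo type could be upgraded with a pure attribute copy; the
stamp hook and its 'upgraded' attribute are likewise hypothetical:

def stamp(new):
    # Called with the freshly upgraded item after all attributes are copied.
    new.upgraded = True

registerAttributeCopyingUpgrader(Photo, 2, 3, postCopy=stamp)
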
def registerDeletionUpgrader(itemType, fromVersion, toVersion):
    """
    Register an upgrader for C{itemType}, from C{fromVersion} to
    C{toVersion}, which will delete the item from the database.

    @param itemType: L{axiom.item.Item} subclass
    @return: None
    """
    # XXX This should actually do something more special so that a new
    # table is not created and such.
    def upgrader(old):
        old.deleteFromStore()
        return None
    registerUpgrader(upgrader, itemType.typeName, fromVersion, toVersion)

def _hasExplicitOid(store, table):
    """
    Does the given table have an explicit oid column?
    """
    return any(info[1] == 'oid'
               for info in store.querySchemaSQL(
                   'PRAGMA *DATABASE*.table_info({})'.format(table)))

def _upgradeTableOid(store, table, createTable, postCreate=lambda: None):
    """
    Upgrade a table to have an explicit oid.

    Must be called in a transaction to avoid corrupting the database.
    """
    if _hasExplicitOid(store, table):
        return
    store.executeSchemaSQL(
        'ALTER TABLE *DATABASE*.{0} RENAME TO {0}_temp'.format(table))
    createTable()
    store.executeSchemaSQL(
        'INSERT INTO *DATABASE*.{0} '
        'SELECT oid, * FROM *DATABASE*.{0}_temp'.format(table))
    store.executeSchemaSQL('DROP TABLE *DATABASE*.{0}_temp'.format(table))
    postCreate()

def upgradeSystemOid(store):
    """
    Upgrade the system tables to use explicit oid columns.
    """
    store.transact(
        _upgradeTableOid, store, 'axiom_types',
        lambda: store.executeSchemaSQL(CREATE_TYPES))
    store.transact(
        _upgradeTableOid, store, 'axiom_objects',
        lambda: store.executeSchemaSQL(CREATE_OBJECTS),
        lambda: store.executeSchemaSQL(CREATE_OBJECTS_IDX))

def upgradeExplicitOid(store):
    """
    Upgrade a store to use explicit oid columns.

    This allows VACUUMing the database without corrupting it.  This
    requires copying all of axiom_objects and axiom_types, as well as all
    item tables that have not yet been upgraded.  Consider VACUUMing the
    database afterwards to reclaim space.
    """
    upgradeSystemOid(store)
    for typename, version in store.querySchemaSQL(LATEST_TYPES):
        cls = _typeNameToMostRecentClass[typename]
        if cls.schemaVersion != version:
            remaining = store.querySQL(
                'SELECT oid FROM {} LIMIT 1'.format(
                    store._tableNameFor(typename, version)))
            if len(remaining) == 0:
                # Nothing to upgrade
                continue
            else:
                raise RuntimeError(
                    '{}:{} not fully upgraded to {}'.format(
                        typename, version, cls.schemaVersion))
        store.transact(
            _upgradeTableOid,
            store,
            store._tableNameOnlyFor(typename, version),
            lambda: store._justCreateTable(cls),
            lambda: store._createIndexesFor(cls, []))

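A minimal usage sketch, assuming an existing on-disk store at a
hypothetical path:

from axiom.store import Store

s = Store('app.axiom')   # hypothetical database directory
upgradeExplicitOid(s)
# Per the docstring, consider VACUUMing afterwards to reclaim space.
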
def checkUpgradePaths(self):
    """
    Check that all of the accumulated old Item types have a way to get from
    their current version to the latest version.

    @raise axiom.errors.NoUpgradePathAvailable: for any, and all, Items
        that do not have a valid upgrade path
    """
    cantUpgradeErrors = []

    for oldVersion in self._oldTypesRemaining:
        # We have to be able to get from oldVersion.schemaVersion to
        # the most recent type.
        currentType = _typeNameToMostRecentClass.get(
            oldVersion.typeName, None)

        if currentType is None:
            # There isn't a current version of this type; it's entirely
            # legacy, will be upgraded by deleting and replacing with
            # something else.
            continue

        typeInQuestion = oldVersion.typeName
        upgver = oldVersion.schemaVersion

        while upgver < currentType.schemaVersion:
            # Do we have enough of the schema present to upgrade?
            if ((typeInQuestion, upgver) not in _upgradeRegistry):
                cantUpgradeErrors.append(
                    "No upgrader present for %s (%s) from %d to %d" % (
                        typeInQuestion, qual(currentType), upgver,
                        upgver + 1))

            # Is there a type available for each upgrader version?
            if upgver + 1 != currentType.schemaVersion:
                if (typeInQuestion, upgver + 1) not in _legacyTypes:
                    cantUpgradeErrors.append(
                        "Type schema required for upgrade missing:"
                        " %s version %d" % (
                            typeInQuestion, upgver + 1))
            upgver += 1

    if cantUpgradeErrors:
        raise NoUpgradePathAvailable('\n '.join(cantUpgradeErrors))

def queueTypeUpgrade(self, oldtype):
    """
    Queue a type upgrade for C{oldtype}.
    """
    if oldtype not in self._oldTypesRemaining:
        self._oldTypesRemaining.append(oldtype)

def upgradeItem(self, thisItem):
    """
    Upgrade a legacy item.

    @raise axiom.errors.UpgraderRecursion: If the given item is already in
        the process of being upgraded.
    """
    sid = thisItem.storeID
    if sid in self._currentlyUpgrading:
        raise UpgraderRecursion()
    self._currentlyUpgrading[sid] = thisItem
    try:
        return upgradeAllTheWay(thisItem)
    finally:
        self._currentlyUpgrading.pop(sid)

def upgradeBatch(self, n):
    """
    Upgrade the entire store in batches, yielding after each batch.

    @param n: Number of upgrades to perform per transaction
    @type n: C{int}

    @raise axiom.errors.ItemUpgradeError: if an item upgrade failed

    @return: A generator that yields after each batch upgrade.  This needs
        to be consumed for upgrading to actually take place.
    """
    store = self.store

    def _doBatch(itemType):
        upgradedAnything = False
        for theItem in store.query(itemType, limit=n):
            upgradedAnything = True
            try:
                self.upgradeItem(theItem)
            except:
                f = Failure()
                raise ItemUpgradeError(
                    f, theItem.storeID, itemType,
                    _typeNameToMostRecentClass[itemType.typeName])
        return upgradedAnything

    if self.upgradesPending:
        didAny = False
        while self._oldTypesRemaining:
            t0 = self._oldTypesRemaining[0]
            upgradedAnything = store.transact(_doBatch, t0)
            if not upgradedAnything:
                self._oldTypesRemaining.pop(0)
                if didAny:
                    msg("%s finished upgrading %s" % (store.dbdir.path,
                                                      qual(t0)))
                continue
            elif not didAny:
                didAny = True
                msg("%s beginning upgrade..." % (store.dbdir.path,))
            yield None
        if didAny:
            msg("%s completely upgraded." % (store.dbdir.path,))

def open(self):
    """
    Obtains the lvm, vg_t and lv_t handle.  Usually you would never need to
    use this method unless you are doing operations using the ctypes
    function wrappers in conversion.py

    *Raises:*

    * HandleError
    """
    self.vg.open()
    self.__lvh = lvm_lv_from_uuid(self.vg.handle, self.uuid)
    if not bool(self.__lvh):
        raise HandleError("Failed to initialize LV Handle.")

def name(self):
    """
    Returns the logical volume name.
    """
    self.open()
    name = lvm_lv_get_name(self.__lvh)
    self.close()
    return name

def is_active(self):
    """
    Returns True if the logical volume is active, False otherwise.
    """
    self.open()
    active = lvm_lv_is_active(self.__lvh)
    self.close()
    return bool(active)

def is_suspended(self):
    """
    Returns True if the logical volume is suspended, False otherwise.
    """
    self.open()
    susp = lvm_lv_is_suspended(self.__lvh)
    self.close()
    return bool(susp)

def size(self, units="MiB"):
    """
    Returns the logical volume size in the given units.  Default units are
    MiB.

    *Args:*

    * units (str): Unit label ('MiB', 'GiB', etc...).  Default is MiB.
    """
    self.open()
    size = lvm_lv_get_size(self.__lvh)
    self.close()
    return size_convert(size, units)

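A short usage sketch, assuming an lv handle already obtained from a volume
group (the variable name is hypothetical):

print lv.size()        # size in MiB, the default
print lv.size("GiB")   # same size expressed in GiB
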
def activate(self):
    """
    Activates the logical volume.

    *Raises:*

    * HandleError, CommitError
    """
    self.open()
    a = lvm_lv_activate(self.handle)
    self.close()
    if a != 0:
        raise CommitError("Failed to activate LV.")

def deactivate(self):
    """
    Deactivates the logical volume.

    *Raises:*

    * HandleError, CommitError
    """
    self.open()
    d = lvm_lv_deactivate(self.handle)
    self.close()
    if d != 0:
        raise CommitError("Failed to deactivate LV.")

def open(self):
    """
    Obtains the lvm, vg_t and pv_t handle.  Usually you would never need to
    use this method unless you are doing operations using the ctypes
    function wrappers in conversion.py

    *Raises:*

    * HandleError
    """
    self.vg.open()
    self.__pvh = lvm_pv_from_uuid(self.vg.handle, self.uuid)
    if not bool(self.__pvh):
        raise HandleError("Failed to initialize PV Handle.")

def name(self):
    """
    Returns the physical volume device path.
    """
    self.open()
    name = lvm_pv_get_name(self.handle)
    self.close()
    return name

def mda_count(self):
    """
    Returns the physical volume mda count.
    """
    self.open()
    mda = lvm_pv_get_mda_count(self.handle)
    self.close()
    return mda

def size(self, units="MiB"):
    """
    Returns the physical volume size in the given units.  Default units are
    MiB.

    *Args:*

    * units (str): Unit label ('MiB', 'GiB', etc...).  Default is MiB.
    """
    self.open()
    size = lvm_pv_get_size(self.handle)
    self.close()
    return size_convert(size, units)

def dev_size(self, units="MiB"):
    """
    Returns the device size in the given units.  Default units are MiB.

    *Args:*

    * units (str): Unit label ('MiB', 'GiB', etc...).  Default is MiB.
    """
    self.open()
    size = lvm_pv_get_dev_size(self.handle)
    self.close()
    return size_convert(size, units)

def free(self, units="MiB"):
    """
    Returns the free size in the given units.  Default units are MiB.

    *Args:*

    * units (str): Unit label ('MiB', 'GiB', etc...).  Default is MiB.
    """
    self.open()
    size = lvm_pv_get_free(self.handle)
    self.close()
    return size_convert(size, units)

def mongoengine_validate_wrapper(old_clean, new_clean):
    """
    A wrapper function to validate formdata against mongoengine-field
    validator and raise a proper django.forms ValidationError if there are
    any problems.
    """
    def inner_validate(value):
        value = old_clean(value)
        try:
            new_clean(value)
            return value
        except ValidationError, e:
            raise forms.ValidationError(e)
    return inner_validate

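A hypothetical wiring sketch: chain a Django form field's clean() with the
corresponding mongoengine field's validate(), so mongoengine validation
errors surface as form errors (both variable names are assumptions):

formfield.clean = mongoengine_validate_wrapper(
    formfield.clean, mongofield.validate)
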
def iter_valid_fields(meta):
    """
    Walk through the available valid fields.
    """
    # fetch field configuration and always add the id_field as exclude
    meta_fields = getattr(meta, 'fields', ())
    meta_exclude = getattr(meta, 'exclude', ())
    meta_exclude += (meta.document._meta.get('id_field'),)
    # walk through meta_fields or through the document fields to keep
    # meta_fields order in the form
    if meta_fields:
        for field_name in meta_fields:
            field = meta.document._fields.get(field_name)
            if field:
                yield (field_name, field)
    else:
        for field_name, field in meta.document._fields.iteritems():
            # skip excluded fields
            if field_name not in meta_exclude:
                yield (field_name, field)

def dependsOn(itemType, itemCustomizer=None, doc='',
              indexed=True, whenDeleted=reference.NULLIFY):
    """
    This function behaves like L{axiom.attributes.reference} but with an
    extra behaviour: when this item is installed (via
    L{axiom.dependency.installOn}) on a target item, the type named here
    will be instantiated and installed on the target as well.

    For example::

        class Foo(Item):
            counter = integer()
            thingIDependOn = dependsOn(Baz, lambda baz: baz.setup())

    @param itemType: The Item class to instantiate and install.
    @param itemCustomizer: A callable that accepts the item installed as a
        dependency as its first argument.  It will be called only if an
        item is created to satisfy this dependency.

    @return: An L{axiom.attributes.reference} instance.
    """
    frame = sys._getframe(1)
    locals = frame.f_locals

    # Try to make sure we were called from a class def.
    if (locals is frame.f_globals) or ('__module__' not in locals):
        raise TypeError("dependsOn can be used only from a class definition.")
    ref = reference(reftype=itemType, doc=doc, indexed=indexed,
                    allowNone=True, whenDeleted=whenDeleted)
    if "__dependsOn_advice_data__" not in locals:
        addClassAdvisor(_dependsOn_advice)
    locals.setdefault('__dependsOn_advice_data__', []).append(
        (itemType, itemCustomizer, ref))
    return ref

def uninstallFrom(self, target):
    """
    Remove this object from the target, as well as any dependencies that it
    automatically installed which were not explicitly "pinned" by calling
    "install".  An exception is raised if anything still depends on this.
    """
    # did this class power up on any interfaces?  power down if so.
    target.powerDown(self)

    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        if dc.installee is self:
            dc.deleteFromStore()

    for item in installedUniqueRequirements(self, target):
        uninstallFrom(item, target)

    callback = getattr(self, "uninstalled", None)
    if callback is not None:
        callback()

def installedOn(self):
    """
    If this item is installed on another item, return the install target.
    Otherwise return None.
    """
    try:
        return self.store.findUnique(
            _DependencyConnector,
            _DependencyConnector.installee == self).target
    except ItemNotFound:
        return None

def installedDependents(self, target):
    """
    Return an iterable of things installed on the target that require this
    item.
    """
    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        depends = dependentsOf(dc.installee.__class__)
        if self.__class__ in depends:
            yield dc.installee

def installedUniqueRequirements(self, target):
    """
    Return an iterable of things installed on the target that this item
    requires and are not required by anything else.
    """
    myDepends = dependentsOf(self.__class__)
    # XXX optimize?
    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        if dc.installee is self:
            # we're checking all the others not ourself
            continue
        depends = dependentsOf(dc.installee.__class__)
        if self.__class__ in depends:
            raise DependencyError(
                "%r cannot be uninstalled from %r, "
                "%r still depends on it" % (self, target, dc.installee))

        for cls in myDepends[:]:
            # If one of my dependencies is required by somebody else,
            # leave it alone
            if cls in depends:
                myDepends.remove(cls)

    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        if (dc.installee.__class__ in myDepends
                and not dc.explicitlyInstalled):
            yield dc.installee

def installedRequirements(self, target):
    """
    Return an iterable of things installed on the target that this item
    requires.
    """
    myDepends = dependentsOf(self.__class__)
    for dc in self.store.query(_DependencyConnector,
                               _DependencyConnector.target == target):
        if dc.installee.__class__ in myDepends:
            yield dc.installee

def storeServiceSpecialCase(st, pups):
    """
    Adapt a store to L{IServiceCollection}.

    @param st: The L{Store} to adapt.
    @param pups: A list of L{IServiceCollection} powerups on C{st}.

    @return: An L{IServiceCollection} which has all of C{pups} as children.
    """
    if st.parent is not None:
        # If for some bizarre reason we're starting a substore's service,
        # let's just assume that its parent is running its upgraders,
        # rather than risk starting the upgrader run twice. (XXX: it *IS*
        # possible to figure out whether we need to or not, I just doubt
        # this will ever even happen in practice -- fix here if it does)
        return serviceSpecialCase(st, pups)
    if st._axiom_service is not None:
        # not new, don't add twice.
        return st._axiom_service

    collection = serviceSpecialCase(st, pups)

    st._upgradeService.setServiceParent(collection)

    if st.dbdir is not None:
        from axiom import batch
        batcher = batch.BatchProcessingControllerService(st)
        batcher.setServiceParent(collection)

    scheduler = iaxiom.IScheduler(st)
    # If it's an old database, we might get a SubScheduler instance.  It
    # has no setServiceParent method.
    setServiceParent = getattr(scheduler, 'setServiceParent', None)
    if setServiceParent is not None:
        setServiceParent(collection)

    return collection

def _schedulerServiceSpecialCase(empowered, pups):
    """
    This function creates (or returns a previously created) L{IScheduler}
    powerup.

    If L{IScheduler} powerups were found on C{empowered}, the first of
    those is given priority.  Otherwise, a site L{Store} or a user L{Store}
    will have any pre-existing L{IScheduler} powerup associated with them
    (on the hackish cache attribute C{_schedulerService}) returned, or a
    new one created if none exists already.
    """
    from axiom.scheduler import _SiteScheduler, _UserScheduler
    # Give precedence to anything found in the store
    for pup in pups:
        return pup
    # If the empowered is a store, construct a scheduler for it.
    if isinstance(empowered, Store):
        if getattr(empowered, '_schedulerService', None) is None:
            if empowered.parent is None:
                sched = _SiteScheduler(empowered)
            else:
                sched = _UserScheduler(empowered)
            empowered._schedulerService = sched
        return empowered._schedulerService
    return None

def _diffSchema(diskSchema, memorySchema):
    """
    Format a schema mismatch for human consumption.

    @param diskSchema: The on-disk schema.
    @param memorySchema: The in-memory schema.

    @rtype: L{bytes}
    @return: A description of the schema differences.
    """
    diskSchema = set(diskSchema)
    memorySchema = set(memorySchema)
    diskOnly = diskSchema - memorySchema
    memoryOnly = memorySchema - diskSchema
    diff = []
    if diskOnly:
        diff.append('Only on disk:')
        diff.extend(map(repr, diskOnly))
    if memoryOnly:
        diff.append('Only in memory:')
        diff.extend(map(repr, memoryOnly))
    return '\n'.join(diff)

def close(self):
    """
    Close this file and commit it to its permanent location.

    @return: a Deferred which fires when the file has been moved (and
        backed up to tertiary storage, if necessary).
    """
    now = time.time()
    try:
        file.close(self)
        _mkdirIfNotExists(self._destpath.dirname())
        self.finalpath = self._destpath
        os.rename(self.name, self.finalpath.path)
        os.utime(self.finalpath.path, (now, now))
    except:
        return defer.fail()
    return defer.succeed(self.finalpath)

def explain(self):
    """
    A debugging API, exposing SQLite's I{EXPLAIN} statement.

    While this is not a private method, you also probably don't have any
    use for it unless you understand U{SQLite
    opcodes<http://www.sqlite.org/opcode.html>} very well.

    Once you do, it can be handy to call this interactively to get a sense
    of the complexity of a query.

    @return: a list, the first element of which is a L{str} (the SQL
        statement which will be run), and the remainder of which is
        3-tuples resulting from the I{EXPLAIN} of that statement.
    """
    return ([self._sqlAndArgs('SELECT', self._queryTarget)[0]] +
            self._runQuery('EXPLAIN SELECT', self._queryTarget))

def _computeFromClause(self, tables):
    """
    Generate the SQL string which follows the "FROM" string and before the
    "WHERE" string in the final SQL statement.
    """
    tableAliases = []
    self.fromClauseParts = []
    for table in tables:
        # The indirect calls to store.getTableName() will create the
        # tables if needed. (XXX That's bad, actually.  They should get
        # created some other way if necessary. -exarkun)
        tableName = table.getTableName(self.store)
        tableAlias = table.getTableAlias(self.store, tuple(tableAliases))
        if tableAlias is None:
            self.fromClauseParts.append(tableName)
        else:
            tableAliases.append(tableAlias)
            self.fromClauseParts.append('%s AS %s' % (tableName,
                                                      tableAlias))

    self.sortClauseParts = []
    for attr, direction in self.sort.orderColumns():
        assert direction in ('ASC', 'DESC'), \
            "%r not in ASC,DESC" % (direction,)
        if attr.type not in tables:
            raise ValueError(
                "Ordering references type excluded from comparison")
        self.sortClauseParts.append(
            '%s %s' % (attr.getColumnName(self.store), direction))

def _selectStuff(self, verb='SELECT'):
    """
    Return a generator which yields the massaged results of this query with
    a particular SQL verb.

    For an attribute query, massaged results are of the type of that
    attribute.  For an item query, they are items of the type the query is
    supposed to return.

    @param verb: a str containing the SQL verb to execute.  This really
        must be some variant of 'SELECT', the only two currently
        implemented being 'SELECT' and 'SELECT DISTINCT'.
    """
    sqlResults = self._runQuery(verb, self._queryTarget)
    for row in sqlResults:
        yield self._massageData(row)

def next(self):
    """
    This method is deprecated, a holdover from when queries were iterators,
    rather than iterables.

    @return: one element of massaged data.
    """
    if self._selfiter is None:
        warnings.warn(
            "Calling 'next' directly on a query is deprecated. "
            "Perhaps you want to use iter(query).next(), or something "
            "more expressive like store.findFirst or store.findOrCreate?",
            DeprecationWarning, stacklevel=2)
        self._selfiter = self.__iter__()
    return self._selfiter.next()

def paginate(self, pagesize=20):
    """
    Split up the work of gathering a result set into multiple smaller
    'pages', allowing very large queries to be iterated without blocking
    for long periods of time.

    While simply iterating C{paginate()} is very similar to iterating a
    query directly, using this method allows the work to obtain the results
    to be performed on demand, over a series of different transactions.

    @param pagesize: the number of results to gather in each chunk of work.
        (This is mostly for testing paginate's implementation.)
    @type pagesize: L{int}

    @return: an iterable which yields all the results of this query.
    """
    sort = self.sort
    oc = list(sort.orderColumns())
    if not oc:
        # You can't have an unsorted pagination.
        sort = self.tableClass.storeID.ascending
        oc = list(sort.orderColumns())
    if len(oc) != 1:
        raise RuntimeError("%d-column sorts not supported yet with paginate"
                           % (len(oc),))
    sortColumn = oc[0][0]
    if oc[0][1] == 'ASC':
        sortOp = operator.gt
    else:
        sortOp = operator.lt

    if _isColumnUnique(sortColumn):
        # This is the easy case.  There is never a tie to be broken, so we
        # can just remember our last value and yield from there.  Right now
        # this only happens when the column is a storeID, but hopefully in
        # the future we will have more of this.
        tiebreaker = None
    else:
        tiebreaker = self.tableClass.storeID

    tied = lambda a, b: (sortColumn.__get__(a) == sortColumn.__get__(b))

    def _AND(a, b):
        if a is None:
            return b
        return attributes.AND(a, b)

    results = list(self.store.query(self.tableClass, self.comparison,
                                    sort=sort, limit=pagesize + 1))
    while results:
        if len(results) == 1:
            # XXX TODO: reject 0 pagesize.  If the length of the result
            # set is 1, there's no next result to test for a tie with, so
            # we must be at the end, and we should just yield the result
            # and finish.
            yield results[0]
            return
        for resultidx in range(len(results) - 1):
            # check for a tie.
            result = results[resultidx]
            nextResult = results[resultidx + 1]
            if tied(result, nextResult):
                # Yield any ties first, in the appropriate order.
                lastTieBreaker = tiebreaker.__get__(result)
                # Note that this query is _NOT_ limited: currently large
                # ties will generate arbitrarily large amounts of work.
                trq = self.store.query(
                    self.tableClass,
                    _AND(self.comparison,
                         sortColumn == sortColumn.__get__(result)))
                tiedResults = list(trq)
                # Sort each tied row by its own key values (the original
                # keyed on the loop variable by mistake, making the key
                # constant).
                tiedResults.sort(key=lambda rslt:
                                 (sortColumn.__get__(rslt),
                                  tiebreaker.__get__(rslt)))
                for result in tiedResults:
                    yield result
                # re-start the query here ('result' is set to the
                # appropriate value by the inner loop)
                break
            else:
                yield result

        lastSortValue = sortColumn.__get__(result)  # hooray namespace pollution
        results = list(self.store.query(
            self.tableClass,
            _AND(self.comparison,
                 sortOp(sortColumn, sortColumn.__get__(result))),
            sort=sort, limit=pagesize + 1))

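A minimal usage sketch, reusing the hypothetical Photo item type: iterate a
large table in pages of 100, so each page is fetched by its own query and
no single pass has to hold the whole result set.

for photo in store.query(Photo,
                         sort=Photo.storeID.ascending).paginate(pagesize=100):
    process(photo)   # hypothetical per-item work
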
def _massageData(self, row):
    """
    Convert a row into an Item instance by loading cached items or creating
    new ones based on query results.

    @param row: an n-tuple, where n is the number of columns specified by
        my item type.

    @return: an instance of the type specified by this query.
    """
    result = self.store._loadedItem(self.tableClass, row[0], row[1:])
    assert result.store is not None, "result %r has funky store" % (result,)
    return result

def getColumn(self, attributeName, raw=False):
    """
    Get an L{iaxiom.IQuery} whose results will be values of a single
    attribute rather than an Item.

    @param attributeName: a L{str}, the name of a Python attribute, that
        describes a column on the Item subclass that this query was
        specified for.

    @return: an L{AttributeQuery} for the column described by the attribute
        named C{attributeName} on the item class that this query's results
        will be instances of.
    """
    # XXX: 'raw' is undocumented because I think it's completely unused,
    # and it's definitely untested.  It should probably be removed when
    # someone has the time. -glyph

    # Quotient POP3 server uses it.  Not that it shouldn't be removed.
    # ;) -exarkun
    attr = getattr(self.tableClass, attributeName)
    return AttributeQuery(self.store,
                          self.tableClass,
                          self.comparison,
                          self.limit,
                          self.offset,
                          self.sort,
                          attr,
                          raw)

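A short sketch, again with the hypothetical Photo type: fetch just one
column instead of whole Items, yielding plain attribute values.

for sid in store.query(Photo).getColumn('storeID'):
    print sid
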
def deleteFromStore(self): """ Delete all the Items which are found by this query. """ if (self.limit is None and not isinstance(self.sort, attributes.UnspecifiedOrdering)): # The ORDER BY is pointless here, and SQLite complains about it. return self.cloneQuery(sort=None).deleteFromStore() #We can do this the fast way or the slow way. # If there's a 'deleted' callback on the Item type or 'deleteFromStore' # is overridden, we have to do it the slow way. deletedOverridden = ( self.tableClass.deleted.im_func is not item.Item.deleted.im_func) deleteFromStoreOverridden = ( self.tableClass.deleteFromStore.im_func is not item.Item.deleteFromStore.im_func) if deletedOverridden or deleteFromStoreOverridden: for it in self: it.deleteFromStore() else: # Find other item types whose instances need to be deleted # when items of the type in this query are deleted, and # remove them from the store. def itemsToDelete(attr): return attr.oneOf(self.getColumn("storeID")) if not item.allowDeletion(self.store, self.tableClass, itemsToDelete): raise errors.DeletionDisallowed( 'Cannot delete item; ' 'has referents with whenDeleted == reference.DISALLOW') for it in item.dependentItems(self.store, self.tableClass, itemsToDelete): it.deleteFromStore() # actually run the DELETE for the items in this query. self._runQuery('DELETE', "")
Delete all the Items which are found by this query.
entailment
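A minimal sketch of the fast path, again using the hypothetical C{Employee} type: since C{Employee} overrides neither C{deleted} nor C{deleteFromStore}, this should issue a single DELETE statement rather than loading each item.

# Bulk-delete every Employee with a nonpositive salary.
s.query(Employee, Employee.salary <= 0).deleteFromStore()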
def _involvedTables(self):
        """
        Return a list of tables involved in this query, first checking that no
        required tables (those in the query target) have been omitted from the
        comparison.
        """
        # SQL and arguments
        if self.comparison is not None:
            tables = self.comparison.getInvolvedTables()
            self.args = self.comparison.getArgs(self.store)
        else:
            tables = list(self.tableClass)
            self.args = []

        for tableClass in self.tableClass:
            if tableClass not in tables:
                raise ValueError(
                    "Comparison omits required reference to result type %s"
                    % tableClass.typeName)

        return tables
Return a list of tables involved in this query, first checking that no required tables (those in the query target) have been omitted from the comparison.
entailment
def _massageData(self, row):
        """
        Convert a row into a tuple of Item instances, by slicing it
        according to the number of columns for each instance, and then
        proceeding as for ItemQuery._massageData.

        @param row: an n-tuple, where n is the total number of columns
        specified by all the item types in this query.

        @return: a tuple of instances of the types specified by this query.
        """
        offset = 0
        resultBits = []

        for i, tableClass in enumerate(self.tableClass):
            numAttrs = self.schemaLengths[i]

            result = self.store._loadedItem(self.tableClass[i],
                                            row[offset],
                                            row[offset+1:offset+numAttrs])
            assert result.store is not None, "result %r has funky store" % (result,)
            resultBits.append(result)

            offset += numAttrs

        return tuple(resultBits)
Convert a row into a tuple of Item instances, by slicing it according to the number of columns for each instance, and then proceeding as for ItemQuery._massageData. @param row: an n-tuple, where n is the total number of columns specified by all the item types in this query. @return: a tuple of instances of the types specified by this query.
entailment
def cloneQuery(self, limit=_noItem, sort=_noItem):
        """
        Clone the original query which this distinct query wraps, and return
        a new wrapper around that clone.
        """
        newq = self.query.cloneQuery(limit=limit, sort=sort)
        return self.__class__(newq)
Clone the original query which this distinct query wraps, and return a new wrapper around that clone.
entailment
def count(self):
        """
        Count the number of distinct results of the wrapped query.

        @return: an L{int} representing the number of distinct results.
        """
        if not self.query.store.autocommit:
            self.query.store.checkpoint()
        target = ', '.join([
            tableClass.storeID.getColumnName(self.query.store)
            for tableClass in self.query.tableClass
            ])
        sql, args = self.query._sqlAndArgs(
            'SELECT DISTINCT',
            target)
        sql = 'SELECT COUNT(*) FROM (' + sql + ')'
        result = self.query.store.querySQL(sql, args)
        assert len(result) == 1, 'more than one result: %r' % (result,)
        return result[0][0] or 0
Count the number of distinct results of the wrapped query. @return: an L{int} representing the number of distinct results.
entailment
def _massageData(self, row):
        """
        Convert a raw database row to the type described by an attribute.
        For example, convert a database integer into an L{extime.Time}
        instance for an L{attributes.timestamp} attribute.

        @param row: a 1-tuple, containing the in-database value from my
        attribute.

        @return: a value of the type described by my attribute.
        """
        if self.raw:
            return row[0]
        return self.attribute.outfilter(row[0], _FakeItemForFilter(self.store))
Convert a raw database row to the type described by an attribute. For example, convert a database integer into an L{extime.Time} instance for an L{attributes.timestamp} attribute. @param row: a 1-tuple, containing the in-database value from my attribute. @return: a value of the type described by my attribute.
entailment
def sum(self):
        """
        Return the sum of all the values returned by this query.  If the
        query matches no rows, the sum is zero (converted back through this
        column's attribute).

        Note: for non-numeric column types the result of this method will be
        nonsensical.

        @return: a number.
        """
        res = self._runQuery('SELECT', 'SUM(%s)' % (self._queryTarget,)) or [(0,)]
        assert len(res) == 1, "more than one result: %r" % (res,)
        dbval = res[0][0] or 0
        return self.attribute.outfilter(dbval, _FakeItemForFilter(self.store))
Return the sum of all the values returned by this query. If the query matches no rows, the sum is zero (converted back through this column's attribute). Note: for non-numeric column types the result of this method will be nonsensical. @return: a number.
entailment
def average(self):
        """
        Return the average value (as defined by the AVG implementation in the
        database) of the values specified by this query.

        Note: for non-numeric column types the result of this method will be
        nonsensical.

        @return: a L{float} representing the 'average' value of this column.
        """
        rslt = self._runQuery('SELECT', 'AVG(%s)' % (self._queryTarget,)) or [(0,)]
        assert len(rslt) == 1, 'more than one result: %r' % (rslt,)
        return rslt[0][0]
Return the average value (as defined by the AVG implementation in the database) of the values specified by this query. Note: for non-numeric column types the result of this method will be nonsensical. @return: a L{float} representing the 'average' value of this column.
entailment
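A small sketch combining the two aggregates above, continuing the hypothetical C{Employee} type; both operate on the AttributeQuery returned by C{getColumn()}:

total = s.query(Employee).getColumn("salary").sum()
mean = s.query(Employee).getColumn("salary").average()
# sum() is converted back through the attribute (an int here, a Time for
# a timestamp column); average() is always the raw float AVG() that
# SQLite computes.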
def _attachChild(self, child):
        "attach a child database, returning an identifier for it"
        self._childCounter += 1
        databaseName = 'child_db_%d' % (self._childCounter,)
        self._attachedChildren[databaseName] = child
        # ATTACH DATABASE statements can't use bind parameters, blech.
        self.executeSQL("ATTACH DATABASE '%s' AS %s" % (
                child.dbdir.child('db.sqlite').path,
                databaseName,))
        return databaseName
attach a child database, returning an identifier for it
entailment
def _startup(self):
        """
        Called during __init__.  Check consistency of schema in database with
        classes in memory.  Load all Python modules for stored items, and load
        version information for upgrader service to run later.
        """
        typesToCheck = []

        for oid, module, typename, version in self.querySchemaSQL(_schema.ALL_TYPES):
            if self.debug:
                print
                print 'SCHEMA:', oid, module, typename, version
            if typename not in _typeNameToMostRecentClass:
                try:
                    namedAny(module)
                except ValueError as err:
                    raise ImportError('cannot find module ' + module, str(err))
            self.typenameAndVersionToID[typename, version] = oid

        # Can't call this until typenameAndVersionToID is populated, since
        # this depends on building a reverse map of that.
        persistedSchema = self._loadTypeSchema()

        # Now that we have persistedSchema, loop over everything again and
        # prepare old types.
        for (typename, version), typeID in self.typenameAndVersionToID.iteritems():
            cls = _typeNameToMostRecentClass.get(typename)

            if cls is not None:
                if version != cls.schemaVersion:
                    typesToCheck.append(
                        self._prepareOldVersionOf(
                            typename, version, persistedSchema))
                else:
                    typesToCheck.append(cls)

        for cls in typesToCheck:
            self._checkTypeSchemaConsistency(cls, persistedSchema)

        # Schema is consistent!  Now, if I forgot to create any indexes last
        # time I saw this table, do it now...
        extantIndexes = self._loadExistingIndexes()
        for cls in typesToCheck:
            self._createIndexesFor(cls, extantIndexes)

        self._upgradeManager.checkUpgradePaths()
Called during __init__. Check consistency of schema in database with classes in memory. Load all Python modules for stored items, and load version information for upgrader service to run later.
entailment
def findOrCreate(self, userItemClass, __ifnew=None, **attrs):
        """
        Usage::

            s.findOrCreate(userItemClass [, function] [, x=1, y=2, ...])

        Example::

            class YourItemType(Item):
                a = integer()
                b = text()
                c = integer()

            def f(x):
                print x, "-- it's new!"

            s.findOrCreate(YourItemType, f, a=1, b=u'2')

        Search for an item with columns in the database that match the passed
        set of keyword arguments, returning the first match if one is found,
        creating one with the given attributes if not.  Takes an optional
        positional argument function to call on the new item if it is new.
        """
        andargs = []
        for k, v in attrs.iteritems():
            col = getattr(userItemClass, k)
            andargs.append(col == v)

        if len(andargs) == 0:
            cond = []
        elif len(andargs) == 1:
            cond = [andargs[0]]
        else:
            cond = [attributes.AND(*andargs)]

        for result in self.query(userItemClass, *cond):
            return result

        newItem = userItemClass(store=self, **attrs)
        if __ifnew is not None:
            __ifnew(newItem)
        return newItem
Usage:: s.findOrCreate(userItemClass [, function] [, x=1, y=2, ...]) Example:: class YourItemType(Item): a = integer() b = text() c = integer() def f(x): print x, "-- it's new!" s.findOrCreate(YourItemType, f, a=1, b=u'2') Search for an item with columns in the database that match the passed set of keyword arguments, returning the first match if one is found, creating one with the given attributes if not. Takes an optional positional argument function to call on the new item if it is new.
entailment
def newFile(self, *path):
        """
        Open a new file somewhere in this Store's file area.

        @param path: a sequence of path segments.

        @return: an L{AtomicFile}.
        """
        assert len(path) > 0, "newFile requires a nonzero number of segments"

        if self.dbdir is None:
            if self.filesdir is None:
                raise RuntimeError("This in-memory store has no file directory")
            else:
                tmpbase = self.filesdir
        else:
            tmpbase = self.dbdir
        tmpname = tmpbase.child('temp').child(str(tempCounter.next()) + ".tmp")
        return AtomicFile(tmpname.path, self.newFilePath(*path))
Open a new file somewhere in this Store's file area. @param path: a sequence of path segments. @return: an L{AtomicFile}.
entailment
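A hedged sketch of C{newFile()} in use. It needs a file area, so this uses an on-disk store rather than the in-memory one from the earlier sketch, and it assumes L{AtomicFile} follows the ordinary write/close file protocol, with C{close()} moving the temporary file into its final location:

onDisk = Store('example.axiom')  # hypothetical on-disk store
f = onDisk.newFile('reports', '2024', 'summary.txt')
f.write('quarterly summary\n')
f.close()  # assumed to atomically rename the temp file into place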
def _loadTypeSchema(self):
        """
        Load all of the stored schema information for all types known by this
        store.  It's important to load everything all at once (rather than
        loading the schema for each type separately as it is needed) to keep
        store opening fast.  A single query with many results is much faster
        than many queries with a few results each.

        @return: A dict with two-tuples of item type name and schema version
            as keys and lists of five-tuples of attribute schema information
            for that type.  The elements of the five-tuple are::

              - a string giving the name of the Python attribute
              - a string giving the SQL type
              - a boolean indicating whether the attribute is indexed
              - the Python attribute type object (eg, axiom.attributes.integer)
              - a string giving documentation for the attribute
        """
        # Oops, need an index going the other way.  This only happens once
        # per store open, and it's based on data queried from the store, so
        # there doesn't seem to be any broader way to cache and re-use the
        # result.  However, if we keyed the resulting dict on the database
        # typeID rather than (typeName, schemaVersion), we wouldn't need the
        # information this dict gives us.  That would mean changing the
        # callers of this function to use typeID instead of that tuple, which
        # may be possible.  Probably only represents a very tiny possible
        # speedup.
        typeIDToNameAndVersion = {}
        for key, value in self.typenameAndVersionToID.iteritems():
            typeIDToNameAndVersion[value] = key

        # Indexing attribute, ordering by it, and getting rid of row_offset
        # from the schema and the sorted() here doesn't seem to be any faster
        # than doing this.
        persistedSchema = sorted(self.querySchemaSQL(
                "SELECT attribute, type_id, sqltype, indexed, "
                "pythontype, docstring FROM *DATABASE*.axiom_attributes "))

        # This is trivially (but measurably!) faster than getattr(attributes,
        # pythontype).
        getAttribute = attributes.__dict__.__getitem__

        result = {}
        for (attribute, typeID, sqltype, indexed,
             pythontype, docstring) in persistedSchema:
            key = typeIDToNameAndVersion[typeID]
            if key not in result:
                result[key] = []
            result[key].append((
                    attribute, sqltype, indexed,
                    getAttribute(pythontype), docstring))
        return result
Load all of the stored schema information for all types known by this store. It's important to load everything all at once (rather than loading the schema for each type separately as it is needed) to keep store opening fast. A single query with many results is much faster than many queries with a few results each. @return: A dict with two-tuples of item type name and schema version as keys and lists of five-tuples of attribute schema information for that type. The elements of the five-tuple are:: - a string giving the name of the Python attribute - a string giving the SQL type - a boolean indicating whether the attribute is indexed - the Python attribute type object (eg, axiom.attributes.integer) - a string giving documentation for the attribute
entailment
def _checkTypeSchemaConsistency(self, actualType, onDiskSchema):
        """
        Called for all known types at database startup: make sure that what
        we know (in memory) about this type agrees with what is stored about
        this type in the database.

        @param actualType: A L{MetaItem} instance which is associated with a
            table in this store.  The schema it defines in memory will be
            checked against the schema known in the database to ensure they
            agree.

        @param onDiskSchema: A mapping from L{MetaItem} instances (such as
            C{actualType}) to the schema known in the database and associated
            with C{actualType}.

        @raise RuntimeError: if the schema defined by C{actualType} does not
            match the database-present schema given in C{onDiskSchema} or if
            C{onDiskSchema} contains a newer version of the schema associated
            with C{actualType} than C{actualType} represents.
        """
        # make sure that both the runtime and the database know about this
        # type; if they don't both know, we can't check that their views are
        # consistent
        try:
            inMemorySchema = _inMemorySchemaCache[actualType]
        except KeyError:
            inMemorySchema = _inMemorySchemaCache[actualType] = [
                (storedAttribute.attrname, storedAttribute.sqltype)
                for (name, storedAttribute) in actualType.getSchema()]

        key = (actualType.typeName, actualType.schemaVersion)
        persistedSchema = [(storedAttribute[0], storedAttribute[1])
                           for storedAttribute in onDiskSchema[key]]

        if inMemorySchema != persistedSchema:
            raise RuntimeError(
                "Schema mismatch on already-loaded %r <%r> object version %d:\n%s"
                % (actualType, actualType.typeName, actualType.schemaVersion,
                   _diffSchema(persistedSchema, inMemorySchema)))

        if actualType.__legacy__:
            return

        if (key[0], key[1] + 1) in onDiskSchema:
            raise RuntimeError(
                "Memory version of %r is %d; database has newer" % (
                    actualType.typeName, key[1]))
Called for all known types at database startup: make sure that what we know (in memory) about this type agrees with what is stored about this type in the database. @param actualType: A L{MetaItem} instance which is associated with a table in this store. The schema it defines in memory will be checked against the schema known in the database to ensure they agree. @param onDiskSchema: A mapping from L{MetaItem} instances (such as C{actualType}) to the schema known in the database and associated with C{actualType}. @raise RuntimeError: if the schema defined by C{actualType} does not match the database-present schema given in C{onDiskSchema} or if C{onDiskSchema} contains a newer version of the schema associated with C{actualType} than C{actualType} represents.
entailment
def _prepareOldVersionOf(self, typename, version, persistedSchema):
        """
        Note that this database contains old versions of a particular type.
        Create the appropriate dummy item subclass and queue the type to be
        upgraded.

        @param typename: The I{typeName} associated with the schema for which
            to create a dummy item class.

        @param version: The I{schemaVersion} of the old version of the schema
            for which to create a dummy item class.

        @param persistedSchema: A mapping giving information about all
            schemas stored in the database, used to create the attributes of
            the dummy item class.
        """
        appropriateSchema = persistedSchema[typename, version]
        # create actual attribute objects
        dummyAttributes = {}
        for (attribute, sqlType, indexed, pythontype,
             docstring) in appropriateSchema:
            atr = pythontype(indexed=indexed, doc=docstring)
            dummyAttributes[attribute] = atr
        dummyBases = []
        oldType = declareLegacyItem(
            typename, version, dummyAttributes, dummyBases)
        self._upgradeManager.queueTypeUpgrade(oldType)
        return oldType
Note that this database contains old versions of a particular type. Create the appropriate dummy item subclass and queue the type to be upgraded. @param typename: The I{typeName} associated with the schema for which to create a dummy item class. @param version: The I{schemaVersion} of the old version of the schema for which to create a dummy item class. @param persistedSchema: A mapping giving information about all schemas stored in the database, used to create the attributes of the dummy item class.
entailment
def findUnique(self, tableClass, comparison=None, default=_noItem):
        """
        Find an Item in the database which should be unique.  If it is found,
        return it.  If it is not found, return 'default' if it was passed,
        otherwise raise L{errors.ItemNotFound}.  If more than one item is
        found, raise L{errors.DuplicateUniqueItem}.

        @param comparison: implementor of L{iaxiom.IComparison}.

        @param default: value to use if the item is not found.
        """
        results = list(self.query(tableClass, comparison, limit=2))
        lr = len(results)

        if lr == 0:
            if default is _noItem:
                raise errors.ItemNotFound(comparison)
            else:
                return default
        elif lr == 2:
            raise errors.DuplicateUniqueItem(comparison, results)
        elif lr == 1:
            return results[0]
        else:
            raise AssertionError("limit=2 database query returned 3+ results: ",
                                 comparison, results)
Find an Item in the database which should be unique. If it is found, return it. If it is not found, return 'default' if it was passed, otherwise raise L{errors.ItemNotFound}. If more than one item is found, raise L{errors.DuplicateUniqueItem}. @param comparison: implementor of L{iaxiom.IComparison}. @param default: value to use if the item is not found.
entailment
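A minimal sketch of C{findUnique()} for a singleton item; the C{SiteConfig} type is hypothetical and reuses the imports from the earlier C{Employee} sketch:

class SiteConfig(Item):
    # Hypothetical: at most one of these should exist per store.
    hostname = text()

conf = s.findUnique(SiteConfig, default=None)
if conf is None:
    conf = SiteConfig(store=s, hostname=u'example.com')
# Without a default, a missing item raises errors.ItemNotFound, and two
# matching items raise errors.DuplicateUniqueItem.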
def findFirst(self, tableClass, comparison=None,
              offset=None, sort=None, default=None):
        """
        Usage::

            s.findFirst(tableClass [, query arguments except 'limit'])

        Example::

            class YourItemType(Item):
                a = integer()
                b = text()
                c = integer()
            ...
            it = s.findFirst(YourItemType,
                             AND(YourItemType.a == 1,
                                 YourItemType.b == u'2'),
                             sort=YourItemType.c.descending)

        Search for an item with columns in the database that match the passed
        comparison, offset and sort, returning the first match if one is
        found, or the passed default (None if none is passed) if one is not
        found.
        """
        limit = 1
        for item in self.query(tableClass, comparison, limit, offset, sort):
            return item
        return default
Usage:: s.findFirst(tableClass [, query arguments except 'limit']) Example:: class YourItemType(Item): a = integer() b = text() c = integer() ... it = s.findFirst(YourItemType, AND(YourItemType.a == 1, YourItemType.b == u'2'), sort=YourItemType.c.descending) Search for an item with columns in the database that match the passed comparison, offset and sort, returning the first match if one is found, or the passed default (None if none is passed) if one is not found.
entailment
def query(self, tableClass, comparison=None,
          limit=None, offset=None, sort=None):
        """
        Return a generator of instances of C{tableClass}, or tuples of
        instances if C{tableClass} is a tuple of classes.

        Examples::

            fastCars = s.query(Vehicle,
                axiom.attributes.AND(
                    Vehicle.wheels == 4,
                    Vehicle.maxKPH > 200),
                limit=100,
                sort=Vehicle.maxKPH.descending)

            quotesByClient = s.query(
                (Client, Quote),
                axiom.attributes.AND(
                    Client.active == True,
                    Quote.client == Client.storeID,
                    Quote.created >= someDate),
                limit=10,
                sort=(Client.name.ascending,
                      Quote.created.descending))

        @param tableClass: a subclass of Item to look for instances of, or a
        tuple of subclasses.

        @param comparison: a provider of L{IComparison}, or None, to match
        all items available in the store.  If tableClass is a tuple, then the
        comparison must refer to all Item subclasses in that tuple, and
        specify the relationships between them.

        @param limit: an int to limit the total length of the results, or
        None for all available results.

        @param offset: an int to specify a starting point within the
        available results, or None to start at 0.

        @param sort: an L{ISort}, something that comes from an SQLAttribute's
        'ascending' or 'descending' attribute.

        @return: an L{ItemQuery} object, which is an iterable of Items or
        tuples of Items, according to tableClass.
        """
        if isinstance(tableClass, tuple):
            queryClass = MultipleItemQuery
        else:
            queryClass = ItemQuery

        return queryClass(self, tableClass, comparison, limit, offset, sort)
Return a generator of instances of C{tableClass}, or tuples of instances if C{tableClass} is a tuple of classes. Examples:: fastCars = s.query(Vehicle, axiom.attributes.AND( Vehicle.wheels == 4, Vehicle.maxKPH > 200), limit=100, sort=Vehicle.maxKPH.descending) quotesByClient = s.query( (Client, Quote), axiom.attributes.AND( Client.active == True, Quote.client == Client.storeID, Quote.created >= someDate), limit=10, sort=(Client.name.ascending, Quote.created.descending)) @param tableClass: a subclass of Item to look for instances of, or a tuple of subclasses. @param comparison: a provider of L{IComparison}, or None, to match all items available in the store. If tableClass is a tuple, then the comparison must refer to all Item subclasses in that tuple, and specify the relationships between them. @param limit: an int to limit the total length of the results, or None for all available results. @param offset: an int to specify a starting point within the available results, or None to start at 0. @param sort: an L{ISort}, something that comes from an SQLAttribute's 'ascending' or 'descending' attribute. @return: an L{ItemQuery} object, which is an iterable of Items or tuples of Items, according to tableClass.
entailment
def batchInsert(self, itemType, itemAttributes, dataRows):
        """
        Create multiple items in the store without loading corresponding
        Python objects into memory.  The items' C{stored} callback will not
        be called.

        Example::

            myData = [(37, u"Fred", u"Wichita"),
                      (28, u"Jim", u"Fresno"),
                      (43, u"Betty", u"Dubuque")]
            myStore.batchInsert(FooItem,
                                [FooItem.age, FooItem.name, FooItem.city],
                                myData)

        @param itemType: an Item subclass to create instances of.

        @param itemAttributes: an iterable of attributes on the Item subclass.

        @param dataRows: an iterable of iterables, each the same length as
        C{itemAttributes} and containing data corresponding to each attribute
        in it.

        @return: None.
        """
        class FakeItem:
            pass
        _NEEDS_DEFAULT = object()  # token for lookup failure
        fakeOSelf = FakeItem()
        fakeOSelf.store = self
        sql = itemType._baseInsertSQL(self)
        indices = {}
        schema = [attr for (name, attr) in itemType.getSchema()]
        for i, attr in enumerate(itemAttributes):
            indices[attr] = i
        for row in dataRows:
            oid = self.store.executeSchemaSQL(
                _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)])
            insertArgs = [oid]
            for attr in schema:
                i = indices.get(attr, _NEEDS_DEFAULT)
                if i is _NEEDS_DEFAULT:
                    pyval = attr.default
                else:
                    pyval = row[i]
                dbval = attr._convertPyval(fakeOSelf, pyval)
                insertArgs.append(dbval)
            self.executeSQL(sql, insertArgs)
Create multiple items in the store without loading corresponding Python objects into memory. The items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None.
entailment
def changed(self, item):
        """
        An item in this store was changed.  Add it to the current
        transaction's list of changed items, if a transaction is currently
        underway, or raise an exception if this L{Store} is currently in a
        state which does not allow changes.
        """
        if self._rejectChanges:
            raise errors.ChangeRejected()
        if self.transaction is not None:
            self.transaction.add(item)
            self.touched.add(item)
An item in this store was changed. Add it to the current transaction's list of changed items, if a transaction is currently underway, or raise an exception if this L{Store} is currently in a state which does not allow changes.
entailment
def transact(self, f, *a, **k):
        """
        Execute C{f(*a, **k)} in the context of a database transaction.

        Any changes made to this L{Store} by C{f} will be committed when C{f}
        returns.  If C{f} raises an exception, those changes will be reverted
        instead.

        If a transaction is already in progress (in this thread - ie, if a
        frame executing L{Store.transact} is already on the call stack), this
        will B{not} start a nested transaction.  Changes will not be
        committed until the existing transaction completes, and an exception
        raised by C{f} will not revert changes made by C{f}.  You probably
        don't want to ever call this if another transaction is in progress.

        @return: Whatever C{f(*a, **kw)} returns.

        @raise: Whatever C{f(*a, **kw)} raises, or a database exception.
        """
        if self.transaction is not None:
            return f(*a, **k)

        if self.attachedToParent:
            return self.parent.transact(f, *a, **k)

        try:
            self._begin()
            try:
                result = f(*a, **k)
                self.checkpoint()
            except:
                exc = Failure()
                try:
                    self.revert()
                except:
                    log.err(exc)
                    raise
                raise
            else:
                self._commit()
            return result
        finally:
            self._cleanupTxnState()
Execute C{f(*a, **k)} in the context of a database transaction. Any changes made to this L{Store} by C{f} will be committed when C{f} returns. If C{f} raises an exception, those changes will be reverted instead. If a transaction is already in progress (in this thread - ie, if a frame executing L{Store.transact} is already on the call stack), this will B{not} start a nested transaction. Changes will not be committed until the existing transaction completes, and an exception raised by C{f} will not revert changes made by C{f}. You probably don't want to ever call this if another transaction is in progress. @return: Whatever C{f(*a, **kw)} returns. @raise: Whatever C{f(*a, **kw)} raises, or a database exception.
entailment
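A short sketch of C{transact()} in use, continuing the hypothetical C{Employee} type: the read and write inside C{giveRaise} commit together, and an exception would revert the write instead.

def giveRaise(store, storeID, amount):
    emp = store.getItemByID(storeID)
    emp.salary += amount
    return emp.salary

alice = Employee(store=s, name=u'Alice', salary=100)
newSalary = s.transact(giveRaise, s, alice.storeID, 10)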
def _indexNameOf(self, tableClass, attrname):
        """
        Return the unqualified (ie, no database name) name of the given
        attribute of the given table.

        @type tableClass: L{MetaItem}
        @param tableClass: The Python class associated with a table in the
            database.

        @param attrname: A sequence of the names of the columns of the
            indicated table which will be included in the named index.

        @return: A C{str} giving the name of the index which will index the
            given attributes of the given table.
        """
        return "axiomidx_%s_v%d_%s" % (tableClass.typeName,
                                       tableClass.schemaVersion,
                                       '_'.join(attrname))
Return the unqualified (ie, no database name) name of the given attribute of the given table. @type tableClass: L{MetaItem} @param tableClass: The Python class associated with a table in the database. @param attrname: A sequence of the names of the columns of the indicated table which will be included in the named index. @return: A C{str} giving the name of the index which will index the given attributes of the given table.
entailment
def getTableName(self, tableClass):
        """
        Retrieve the fully qualified name of the table holding items of a
        particular class in this store.  If the table does not exist in the
        database, it will be created as a side-effect.

        @param tableClass: an Item subclass

        @raises axiom.errors.ItemClassesOnly: if an object other than a
            subclass of Item is passed.

        @return: a string
        """
        if not (isinstance(tableClass, type)
                and issubclass(tableClass, item.Item)):
            raise errors.ItemClassesOnly(
                "Only subclasses of Item have table names.")

        if tableClass not in self.typeToTableNameCache:
            self.typeToTableNameCache[tableClass] = self._tableNameFor(
                tableClass.typeName, tableClass.schemaVersion)
            # make sure the table exists
            self.getTypeID(tableClass)
        return self.typeToTableNameCache[tableClass]
Retrieve the fully qualified name of the table holding items of a particular class in this store. If the table does not exist in the database, it will be created as a side-effect. @param tableClass: an Item subclass @raises axiom.errors.ItemClassesOnly: if an object other than a subclass of Item is passed. @return: a string
entailment
def getColumnName(self, attribute):
        """
        Retrieve the fully qualified column name for a particular attribute
        in this store.  The attribute must be bound to an Item subclass (its
        type must be valid).  If the underlying table does not exist in the
        database, it will be created as a side-effect.

        @param attribute: an SQLAttribute bound to an Item subclass

        @return: a string
        """
        if attribute not in self.attrToColumnNameCache:
            self.attrToColumnNameCache[attribute] = '.'.join(
                (self.getTableName(attribute.type),
                 self.getShortColumnName(attribute)))
        return self.attrToColumnNameCache[attribute]
Retrieve the fully qualified column name for a particular attribute in this store. The attribute must be bound to an Item subclass (its type must be valid). If the underlying table does not exist in the database, it will be created as a side-effect. @param attribute: an SQLAttribute bound to an Item subclass @return: a string
entailment
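Continuing the hypothetical C{Employee} sketch, both name lookups together; the exact spelling of the output is illustrative only, not a guarantee of the naming scheme:

print s.getTableName(Employee)          # e.g. something like main.item_employee_v1
print s.getColumnName(Employee.salary)  # the table name plus a qualified column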
def getTypeID(self, tableClass):
        """
        Retrieve the typeID associated with a particular table in the
        in-database schema for this Store.  A typeID is an opaque integer
        representing the Item subclass, and the associated table in this
        Store's SQLite database.

        @param tableClass: a subclass of Item

        @return: an integer
        """
        key = (tableClass.typeName,
               tableClass.schemaVersion)
        if key in self.typenameAndVersionToID:
            return self.typenameAndVersionToID[key]
        return self.transact(self._maybeCreateTable, tableClass, key)
Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass, and the associated table in this Store's SQLite database. @param tableClass: a subclass of Item @return: an integer
entailment
def _justCreateTable(self, tableClass):
        """
        Execute the table creation DDL for an Item subclass.

        Indexes are *not* created.

        @type tableClass: type
        @param tableClass: an Item subclass
        """
        sqlstr = []
        sqlarg = []

        # needs to be calculated including version
        tableName = self._tableNameFor(tableClass.typeName,
                                       tableClass.schemaVersion)

        sqlstr.append("CREATE TABLE %s (" % tableName)

        # The column is named "oid" instead of "storeID" for backwards
        # compatibility with the implicit oid/rowid column in old Stores.
        sqlarg.append("oid INTEGER PRIMARY KEY")

        for nam, atr in tableClass.getSchema():
            sqlarg.append("\n%s %s" % (atr.getShortColumnName(self),
                                       atr.sqltype))

        sqlstr.append(', '.join(sqlarg))
        sqlstr.append(')')

        self.createSQL(''.join(sqlstr))
Execute the table creation DDL for an Item subclass. Indexes are *not* created. @type tableClass: type @param tableClass: an Item subclass
entailment
def _maybeCreateTable(self, tableClass, key):
        """
        A type ID has been requested for an Item subclass whose table was not
        present when this Store was opened.  Attempt to create the table, and
        if that fails because another Store object (perhaps in another
        process) has created the table, re-read the schema.  When that's
        done, return the typeID.

        This method is internal to the implementation of getTypeID.  It must
        be run in a transaction.

        @param tableClass: an Item subclass
        @param key: a 2-tuple of the tableClass's typeName and schemaVersion

        @return: a typeID for the table; a new one if no table exists, or the
        existing one if the table was created by another Store object
        referencing this database.
        """
        try:
            self._justCreateTable(tableClass)
        except errors.TableAlreadyExists:
            # Although we don't have a memory of this table from the last
            # time we called "_startup()", another process has updated the
            # schema since then.
            self._startup()
            return self.typenameAndVersionToID[key]

        typeID = self.executeSchemaSQL(_schema.CREATE_TYPE,
                                       [tableClass.typeName,
                                        tableClass.__module__,
                                        tableClass.schemaVersion])

        self.typenameAndVersionToID[key] = typeID

        if self.tablesCreatedThisTransaction is not None:
            self.tablesCreatedThisTransaction.append(tableClass)

        # If the new type is a legacy type (not the current version), we need
        # to queue it for upgrade to ensure that if we are in the middle of
        # an upgrade, legacy items of this version get upgraded.
        cls = _typeNameToMostRecentClass.get(tableClass.typeName)
        if cls is not None and tableClass.schemaVersion != cls.schemaVersion:
            self._upgradeManager.queueTypeUpgrade(tableClass)

        # We can pass () for extantIndexes here because since the table
        # didn't exist for tableClass, none of its indexes could have either.
        # Whatever checks _createIndexesFor will make would give the same
        # result against the actual set of existing indexes as they will
        # against ().
        self._createIndexesFor(tableClass, ())

        for n, (name, storedAttribute) in enumerate(tableClass.getSchema()):
            self.executeSchemaSQL(
                _schema.ADD_SCHEMA_ATTRIBUTE,
                [typeID, n,
                 storedAttribute.indexed, storedAttribute.sqltype,
                 storedAttribute.allowNone, storedAttribute.attrname,
                 storedAttribute.doc, storedAttribute.__class__.__name__])
            # XXX probably need something better for pythontype eventually,
            # when we figure out a good way to do user-defined attributes or
            # we start parameterizing references.

        return typeID
A type ID has been requested for an Item subclass whose table was not present when this Store was opened. Attempt to create the table, and if that fails because another Store object (perhaps in another process) has created the table, re-read the schema. When that's done, return the typeID. This method is internal to the implementation of getTypeID. It must be run in a transaction. @param tableClass: an Item subclass @param key: a 2-tuple of the tableClass's typeName and schemaVersion @return: a typeID for the table; a new one if no table exists, or the existing one if the table was created by another Store object referencing this database.
entailment
def _createIndexesFor(self, tableClass, extantIndexes):
        """
        Create any indexes which don't exist and are required by the schema
        defined by C{tableClass}.

        @param tableClass: A L{MetaItem} instance which may define a schema
            which includes indexes.

        @param extantIndexes: A container (anything which can be the
            right-hand argument to the C{in} operator) which contains the
            unqualified names of all indexes which already exist in the
            underlying database and do not need to be created.
        """
        try:
            indexes = _requiredTableIndexes[tableClass]
        except KeyError:
            indexes = set()
            for nam, atr in tableClass.getSchema():
                if atr.indexed:
                    indexes.add(((atr.getShortColumnName(self),),
                                 (atr.attrname,)))
                for compound in atr.compoundIndexes:
                    indexes.add((tuple(inatr.getShortColumnName(self)
                                       for inatr in compound),
                                 tuple(inatr.attrname
                                       for inatr in compound)))
            _requiredTableIndexes[tableClass] = indexes

        # _ZOMFG_ SQL is such a piece of _shit_: you can't fully qualify the
        # table name in CREATE INDEX statements because the _INDEX_ is fully
        # qualified!
        indexColumnPrefix = '.'.join(
            self.getTableName(tableClass).split(".")[1:])

        for (indexColumns, indexAttrs) in indexes:
            nameOfIndex = self._indexNameOf(tableClass, indexAttrs)
            if nameOfIndex in extantIndexes:
                continue
            csql = 'CREATE INDEX %s.%s ON %s(%s)' % (
                self.databaseName, nameOfIndex,
                indexColumnPrefix, ', '.join(indexColumns))
            self.createSQL(csql)
Create any indexes which don't exist and are required by the schema defined by C{tableClass}. @param tableClass: A L{MetaItem} instance which may define a schema which includes indexes. @param extantIndexes: A container (anything which can be the right-hand argument to the C{in} operator) which contains the unqualified names of all indexes which already exist in the underlying database and do not need to be created.
entailment
def getItemByID(self, storeID, default=_noItem, autoUpgrade=True):
        """
        Retrieve an item by its storeID, and return it.

        Note: most of the failure modes of this method are catastrophic and
        should not be handled by application code.  The only one that
        application programmers should be concerned with is KeyError.  They
        are listed for educational purposes.

        @param storeID: an L{int} which refers to the store.

        @param default: if passed, return this value rather than raising in
        the case where no Item is found.

        @raise TypeError: if storeID is not an integer.

        @raise UnknownItemType: if the storeID refers to an item row in the
        database, but the corresponding type information is not available to
        Python.

        @raise RuntimeError: if the found item's class version is higher than
        the current application is aware of.  (In other words, if you have
        upgraded a database to a new schema and then attempt to open it with
        a previous version of the code.)

        @raise errors.ItemNotFound: if no item existed with the given
        storeID.

        @return: an Item, or the given default, if it was passed and no row
        corresponding to the given storeID can be located in the database.
        """
        if not isinstance(storeID, (int, long)):
            raise TypeError("storeID *must* be an int or long, not %r" % (
                    type(storeID).__name__,))
        if storeID == STORE_SELF_ID:
            return self
        try:
            return self.objectCache.get(storeID)
        except KeyError:
            pass
        log.msg(interface=iaxiom.IStatEvent,
                stat_cache_misses=1, key=storeID)
        results = self.querySchemaSQL(_schema.TYPEOF_QUERY, [storeID])
        assert (len(results) in [1, 0]),\
            "Database panic: more than one result for TYPEOF!"
        if results:
            typename, module, version = results[0]

            useMostRecent = False
            moreRecentAvailable = False

            # The schema may have changed since the last time I saw the
            # database.  Let's look to see if this is suspiciously broken...
            if _typeIsTotallyUnknown(typename, version):
                # Another process may have created it - let's re-up the
                # schema and see what we get.
                self._startup()

                # OK, all the modules have been loaded now, everything
                # verified.
                if _typeIsTotallyUnknown(typename, version):
                    # If there is STILL no inkling of it anywhere, we are
                    # almost certainly boned.  Let's tell the user in a
                    # structured way, at least.
                    raise errors.UnknownItemType(
                        "cannot load unknown schema/version pair: %r %r - id: %r"
                        % (typename, version, storeID))

            if typename in _typeNameToMostRecentClass:
                moreRecentAvailable = True
                mostRecent = _typeNameToMostRecentClass[typename]

                if mostRecent.schemaVersion < version:
                    raise RuntimeError(
                        "%s:%d - was found in the database and most recent %s is %d"
                        % (typename, version, typename,
                           mostRecent.schemaVersion))
                if mostRecent.schemaVersion == version:
                    useMostRecent = True

            if useMostRecent:
                T = mostRecent
            else:
                T = self.getOldVersionOf(typename, version)

            # for the moment we're going to assume no inheritance
            attrs = self.querySQL(T._baseSelectSQL(self), [storeID])
            if len(attrs) == 0:
                if default is _noItem:
                    raise errors.ItemNotFound(
                        'No results for known-to-be-good object')
                return default
            elif len(attrs) > 1:
                raise errors.DataIntegrityError(
                    'Too many results for {:d}'.format(storeID))
            attrs = attrs[0]
            x = T.existingInStore(self, storeID, attrs)
            if moreRecentAvailable and (not useMostRecent) and autoUpgrade:
                # upgradeVersion will do caching as necessary, we don't have
                # to cache here.  (It must, so that app code can safely call
                # upgradeVersion and get a consistent object out of it.)
                x = self.transact(self._upgradeManager.upgradeItem, x)
            elif not x.__legacy__:
                # We loaded the most recent version of an object
                self.objectCache.cache(storeID, x)
            return x
        if default is _noItem:
            raise errors.ItemNotFound(storeID)
        return default
Retrieve an item by its storeID, and return it. Note: most of the failure modes of this method are catastrophic and should not be handled by application code. The only one that application programmers should be concerned with is KeyError. They are listed for educational purposes. @param storeID: an L{int} which refers to the store. @param default: if passed, return this value rather than raising in the case where no Item is found. @raise TypeError: if storeID is not an integer. @raise UnknownItemType: if the storeID refers to an item row in the database, but the corresponding type information is not available to Python. @raise RuntimeError: if the found item's class version is higher than the current application is aware of. (In other words, if you have upgraded a database to a new schema and then attempt to open it with a previous version of the code.) @raise errors.ItemNotFound: if no item existed with the given storeID. @return: an Item, or the given default, if it was passed and no row corresponding to the given storeID can be located in the database.
entailment
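Continuing the hypothetical sketch above, the common application-level uses of C{getItemByID()}: dereferencing a stashed storeID and supplying a default for a missing row.

empID = alice.storeID  # storeIDs are stable handles
again = s.getItemByID(empID)
assert again is alice  # live items come back from the object cache
missing = s.getItemByID(2 ** 31, default=None)  # no such row -> default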
def querySQL(self, sql, args=()):
        """For use with SELECT (or SELECT-like PRAGMA) statements."""
        if self.debug:
            result = timeinto(self.queryTimes, self._queryandfetch, sql, args)
        else:
            result = self._queryandfetch(sql, args)
        return result
For use with SELECT (or SELECT-like PRAGMA) statements.
entailment
def createSQL(self, sql, args=()):
        """
        For use with auto-committing statements such as CREATE TABLE or
        CREATE INDEX.
        """
        before = time.time()
        self._execSQL(sql, args)
        after = time.time()
        if after - before > 2.0:
            log.msg('Extremely long CREATE: %s' % (after - before,))
            log.msg(sql)
For use with auto-committing statements such as CREATE TABLE or CREATE INDEX.
entailment
def executeSQL(self, sql, args=()):
        """
        For use with UPDATE or INSERT statements.
        """
        sql = self._execSQL(sql, args)
        result = self.cursor.lastRowID()
        if self.executedThisTransaction is not None:
            self.executedThisTransaction.append((result, sql, args))
        return result
For use with UPDATE or INSERT statements.
entailment
def updateVersion(fname):
    """
    Given a filename of a file containing a __counter__ variable, open it,
    read the count, add one, and rewrite the file.

    This:
        __counter__=123
    Becomes:
        __counter__=124
    """
    fname = os.path.abspath(fname)
    if not os.path.exists(fname):
        print("can not update version! file doesn't exist:\n", fname)
        return
    with open(fname) as f:
        raw = f.read().split("\n")
    for i, line in enumerate(raw):
        if line.startswith("__counter__="):
            version = int(line.split("=")[1])
            raw[i] = "__counter__=%d" % (version + 1)
    with open(fname, 'w') as f:
        f.write("\n".join(raw))
    print("upgraded version %d -> %d" % (version, version + 1))
    sys.path.insert(0, os.path.dirname(fname))
    import version
    print("New version:", version.__version__)
    with open('version.txt', 'w') as f:
        f.write(str(version.__version__))
Given a filename of a file containing a __counter__ variable, open it, read the count, add one, and rewrite the file. This: __counter__=123 Becomes: __counter__=124
entailment
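A hedged sketch of the version file this function appears to expect: a hypothetical version.py containing a '__counter__=' line for the function to rewrite, plus a __version__ that it then re-imports and copies into version.txt. The file layout is an assumption, not spelled out by the source.

# version.py (hypothetical layout):
#
#   __counter__=123
#   __version__ = "1.0.%d" % (__counter__,)

updateVersion("version.py")  # rewrites __counter__=123 -> __counter__=124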