Dataset columns: idx (int64, values 0 to 63k), question (string, lengths 61 to 4.03k), target (string, lengths 6 to 1.23k).
5,900
def refresh_api_secret ( user , resource , table ) : resource_name = table . name [ 0 : - 1 ] where_clause = sql . and_ ( table . c . etag == resource [ 'etag' ] , table . c . id == resource [ 'id' ] , ) values = { 'api_secret' : signature . gen_secret ( ) , 'etag' : utils . gen_etag ( ) } query = table . update ( ) . where ( where_clause ) . values ( ** values ) result = flask . g . db_conn . execute ( query ) if not result . rowcount : raise dci_exc . DCIConflict ( resource_name , resource [ 'id' ] ) res = flask . jsonify ( ( { 'id' : resource [ 'id' ] , 'etag' : resource [ 'etag' ] , 'api_secret' : values [ 'api_secret' ] } ) ) res . headers . add_header ( 'ETag' , values [ 'etag' ] ) return res
Refresh the resource's API secret.
5,901
def npm ( package_json , output_file , pinned_file ) : amd_build_deprecation_warning ( ) try : version = get_distribution ( current_app . name ) . version except DistributionNotFound : version = '' output = { 'name' : current_app . name , 'version' : make_semver ( version ) if version else version , 'dependencies' : { } , } if package_json : output = dict ( output , ** json . load ( package_json ) ) deps = extract_deps ( current_app . extensions [ 'invenio-assets' ] . env , click . echo ) output [ 'dependencies' ] . update ( deps ) if pinned_file : output [ 'dependencies' ] . update ( json . load ( pinned_file ) . get ( 'dependencies' , { } ) ) if output_file is None : if not os . path . exists ( current_app . static_folder ) : os . makedirs ( current_app . static_folder ) output_file = open ( os . path . join ( current_app . static_folder , 'package.json' ) , 'w' ) click . echo ( 'Writing {0}' . format ( output_file . name ) ) json . dump ( output , output_file , indent = 4 ) output_file . close ( )
Generate a package.json file.
5,902
def getAnalyses ( self , ** kwargs ) : if kwargs . get ( 'time_from' ) : kwargs [ 'from' ] = kwargs . get ( 'time_from' ) del kwargs [ 'time_from' ] if kwargs . get ( 'time_to' ) : kwargs [ 'to' ] = kwargs . get ( 'time_to' ) del kwargs [ 'time_to' ] for key in kwargs : if key not in [ 'limit' , 'offset' , 'from' , 'to' ] : sys . stderr . write ( '%s not a valid argument for analysis()\n' % key ) response = self . pingdom . request ( 'GET' , 'analysis/%s' % self . id , kwargs ) return [ PingdomAnalysis ( self , x ) for x in response . json ( ) [ 'analysis' ] ]
Returns a list of the latest root cause analysis results for a specified check.
5,903
def getDetails ( self ) : response = self . pingdom . request ( 'GET' , 'checks/%s' % self . id ) self . __addDetails__ ( response . json ( ) [ 'check' ] ) return response . json ( ) [ 'check' ]
Update the check's details and return a dictionary of those details.
5,904
def modify ( self , ** kwargs ) : for key in kwargs : if key not in [ 'paused' , 'resolution' , 'contactids' , 'sendtoemail' , 'sendtosms' , 'sendtotwitter' , 'sendtoiphone' , 'sendnotificationwhendown' , 'notifyagainevery' , 'notifywhenbackup' , 'created' , 'type' , 'hostname' , 'status' , 'lasterrortime' , 'lasttesttime' , 'url' , 'encryption' , 'port' , 'auth' , 'shouldcontain' , 'shouldnotcontain' , 'postdata' , 'additionalurls' , 'stringtosend' , 'stringtoexpect' , 'expectedip' , 'nameserver' , 'use_legacy_notifications' , 'host' , 'alert_policy' , 'autoresolve' , 'probe_filters' ] : sys . stderr . write ( "'%s'" % key + ' is not a valid argument of' + '<PingdomCheck>.modify()\n' ) if any ( [ k for k in kwargs if k in legacy_notification_parameters ] ) : if "use_legacy_notifications" in kwargs and kwargs [ "use_legacy_notifications" ] != True : raise Exception ( "Cannot set legacy parameter when use_legacy_notifications is not True" ) kwargs [ "use_legacy_notifications" ] = True response = self . pingdom . request ( "PUT" , 'checks/%s' % self . id , kwargs ) return response . json ( ) [ 'message' ]
Modify settings for a check. The provided settings will overwrite previous values. Settings not provided will stay the same as before the update. To clear an existing value, provide an empty value. Please note that you cannot change the type of a check once it has been created.
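A minimal usage sketch for modify(), assuming an already-constructed check object from the surrounding Pingdom client (the settings shown are illustrative):

# Hypothetical: pause the check and lower its polling resolution.
# Settings not passed keep their previous values, per the docstring above.
message = check.modify(paused=True, resolution=5)
print(message)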
5,905
def probes ( self , fromtime , totime = None ) : args = { 'from' : fromtime } if totime : args [ 'to' ] = totime response = self . pingdom . request ( 'GET' , 'summary.probes/%s' % self . id , args ) return response . json ( ) [ 'probes' ]
Get a list of probes that performed tests for a specified check during a specified period.
5,906
def publishPublicReport ( self ) : response = self . pingdom . request ( 'PUT' , 'reports.public/%s' % self . id ) return response . json ( ) [ 'message' ]
Activate the public report for this check.
5,907
def removePublicReport ( self ) : response = self . pingdom . request ( 'DELETE' , 'reports.public/%s' % self . id ) return response . json ( ) [ 'message' ]
Deactivate the public report for this check.
5,908
def extract_deps ( bundles , log = None ) : def _flatten ( bundle ) : deps = [ ] if hasattr ( bundle , 'npm' ) : deps . append ( bundle . npm ) for content in bundle . contents : if isinstance ( content , BundleBase ) : deps . extend ( _flatten ( content ) ) return deps flatten_deps = [ ] for bundle in bundles : flatten_deps . extend ( _flatten ( bundle ) ) packages = defaultdict ( list ) for dep in flatten_deps : for pkg , version in dep . items ( ) : packages [ pkg ] . append ( version ) deps = { } for package , versions in packages . items ( ) : deps [ package ] = semver . max_satisfying ( versions , '*' , True ) if log and len ( versions ) > 1 : log ( 'Warn: {0} version {1} resolved to: {2}' . format ( repr ( package ) , versions , repr ( deps [ package ] ) ) ) return deps
Extract the dependencies from the bundle and its sub-bundles.
5,909
def make_semver ( version_str ) : v = parse_version ( version_str ) major = v . _version . release [ 0 ] try : minor = v . _version . release [ 1 ] except IndexError : minor = 0 try : patch = v . _version . release [ 2 ] except IndexError : patch = 0 prerelease = [ ] if v . _version . pre : prerelease . append ( '' . join ( str ( x ) for x in v . _version . pre ) ) if v . _version . dev : prerelease . append ( '' . join ( str ( x ) for x in v . _version . dev ) ) prerelease = '.' . join ( prerelease ) version = '{0}.{1}.{2}' . format ( major , minor , patch ) if prerelease : version += '-{0}' . format ( prerelease ) if v . local : version += '+{0}' . format ( v . local ) return version
Make a semantic version from a Python PEP 440 version.
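For intuition, a few conversions inferred from the body of make_semver() above (parse_version is the PEP 440 parser from pkg_resources):

print(make_semver('1.2.3'))      # -> '1.2.3'
print(make_semver('1.0'))        # -> '1.0.0'       (missing patch defaults to 0)
print(make_semver('1.0b2'))      # -> '1.0.0-b2'    (pre-release joins with '-')
print(make_semver('1.0.dev3'))   # -> '1.0.0-dev3'  (dev segment treated as pre-release)
print(make_semver('1.0+local'))  # -> '1.0.0+local' (local version becomes build metadata)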
5,910
def get_max_size ( pool , num_option , item_length ) : max_items = POOL_SIZE / item_length existing = POOL_OPTION_MIN_SIZE * num_option + sum ( [ max ( 0 , len ( pool . get ( i , { } ) ) - 5 ) for i in xrange ( num_option ) ] ) return int ( max_items - existing )
Calculate the maximum number of items that an option can store in the pool at a given time.
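A worked example of the sizing arithmetic with made-up constants (POOL_SIZE and POOL_OPTION_MIN_SIZE live elsewhere in the module; the overflow threshold of 5 is hardcoded in the body above):

# Hypothetical constants for illustration only.
POOL_SIZE = 50
POOL_OPTION_MIN_SIZE = 5
# With num_option=4 and item_length=2: max_items = 50 / 2 = 25 slots.
# An empty pool reserves 5 slots per option (4 * 5 = 20) and has no overflow,
# so get_max_size(pool, 4, 2) returns 25 - 20 = 5 free slots.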
5,911
def offer_answer ( pool , answer , rationale , student_id , algo , options ) : if algo [ 'name' ] == 'simple' : offer_simple ( pool , answer , rationale , student_id , options ) elif algo [ 'name' ] == 'random' : offer_random ( pool , answer , rationale , student_id , options ) else : raise UnknownChooseAnswerAlgorithm ( )
Submit a student answer to the answer pool.
5,912
def offer_simple ( pool , answer , rationale , student_id , options ) : existing = pool . setdefault ( answer , { } ) if len ( existing ) >= get_max_size ( pool , len ( options ) , POOL_ITEM_LENGTH_SIMPLE ) : student_id_to_remove = random . choice ( existing . keys ( ) ) del existing [ student_id_to_remove ] existing [ student_id ] = { } pool [ answer ] = existing
The simple selection algorithm.
5,913
def offer_random ( pool , answer , rationale , student_id , options ) : offer_simple ( pool , answer , rationale , student_id , options )
The random selection algorithm. Currently the same as the simple algorithm.
5,914
def validate_seeded_answers_simple ( answers , options , algo ) : seen_options = { } for answer in answers : if answer : key = options [ answer [ 'answer' ] ] . get ( 'text' ) if options [ answer [ 'answer' ] ] . get ( 'image_url' ) : key += options [ answer [ 'answer' ] ] . get ( 'image_url' ) seen_options . setdefault ( key , 0 ) seen_options [ key ] += 1 missing_options = [ ] index = 1 for option in options : key = option . get ( 'text' ) + option . get ( 'image_url' ) if option . get ( 'image_url' ) else option . get ( 'text' ) if option . get ( 'text' ) != 'n/a' : if seen_options . get ( key , 0 ) == 0 : missing_options . append ( _ ( 'Option ' ) + str ( index ) ) index += 1 if missing_options : return { 'seed_error' : _ ( 'Missing option seed(s): ' ) + ', ' . join ( missing_options ) } return None
This validator checks whether the answers include all possible options.
5,915
def validate_seeded_answers ( answers , options , algo ) : if algo [ 'name' ] == 'simple' : return validate_seeded_answers_simple ( answers , options , algo ) elif algo [ 'name' ] == 'random' : return validate_seeded_answers_random ( answers ) else : raise UnknownChooseAnswerAlgorithm ( )
Validate the answers based on the selection algorithm.
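A sketch of the data shapes these validators expect, with made-up values (field names come from validate_seeded_answers_simple above):

answers = [
    {'answer': 0, 'rationale': 'Energy is conserved.'},
    {'answer': 1, 'rationale': 'The data support option two.'},
]
options = [{'text': 'Option A'}, {'text': 'Option B'}]
# Every option is covered by a seeded answer, so no 'seed_error' is returned.
assert validate_seeded_answers(answers, options, {'name': 'simple'}) is None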
5,916
def get_other_answers ( pool , seeded_answers , get_student_item_dict , algo , options ) : num_responses = len ( options ) if 'num_responses' not in algo or algo [ 'num_responses' ] == "#" else int ( algo [ 'num_responses' ] ) if algo [ 'name' ] == 'simple' : return get_other_answers_simple ( pool , seeded_answers , get_student_item_dict , num_responses ) elif algo [ 'name' ] == 'random' : return get_other_answers_random ( pool , seeded_answers , get_student_item_dict , num_responses ) else : raise UnknownChooseAnswerAlgorithm ( )
Select other students' answers from the answer pool or seeded answers, based on the selection algorithm.
5,917
def get_other_answers_simple ( pool , seeded_answers , get_student_item_dict , num_responses ) : ret = [ ] pool = { int ( k ) : v for k , v in pool . items ( ) } total_in_pool = len ( seeded_answers ) merged_pool = convert_seeded_answers ( seeded_answers ) student_id = get_student_item_dict ( ) [ 'student_id' ] for key in pool : total_in_pool += len ( pool [ key ] ) if student_id in pool [ key ] . keys ( ) : total_in_pool -= 1 if key in merged_pool : merged_pool [ key ] . update ( pool [ key ] . items ( ) ) else : merged_pool [ key ] = pool [ key ] selected = [ ] while len ( ret ) < min ( num_responses , total_in_pool ) : for option , students in merged_pool . items ( ) : student = student_id i = 0 while ( student == student_id or i > 100 ) and ( str ( option ) + student ) not in selected : student = random . choice ( students . keys ( ) ) i += 1 selected . append ( str ( option ) + student ) if student . startswith ( 'seeded' ) : rationale = students [ student ] else : student_item = get_student_item_dict ( student ) submission = sas_api . get_answers_for_student ( student_item ) rationale = submission . get_rationale ( 0 ) ret . append ( { 'option' : option , 'rationale' : rationale } ) if len ( ret ) >= min ( num_responses , total_in_pool ) : break return { "answers" : ret }
Get answers from others with the simple algorithm, which picks one answer for each option.
5,918
def get_other_answers_random ( pool , seeded_answers , get_student_item_dict , num_responses ) : ret = [ ] pool = { int ( k ) : v for k , v in pool . items ( ) } seeded = { 'seeded' + str ( index ) : answer for index , answer in enumerate ( seeded_answers ) } merged_pool = seeded . keys ( ) for key in pool : merged_pool += pool [ key ] . keys ( ) random . shuffle ( merged_pool ) student_id = get_student_item_dict ( ) [ 'student_id' ] for student in merged_pool : if len ( ret ) >= num_responses : break elif student == student_id : continue if student . startswith ( 'seeded' ) : option = seeded [ student ] [ 'answer' ] rationale = seeded [ student ] [ 'rationale' ] else : student_item = get_student_item_dict ( student ) submission = sas_api . get_answers_for_student ( student_item ) rationale = submission . get_rationale ( 0 ) option = submission . get_vote ( 0 ) ret . append ( { 'option' : option , 'rationale' : rationale } ) return { "answers" : ret }
Get answers from others with the random algorithm, which randomly selects answers from the pool.
5,919
def convert_seeded_answers ( answers ) : converted = { } for index , answer in enumerate ( answers ) : converted . setdefault ( answer [ 'answer' ] , { } ) converted [ answer [ 'answer' ] ] [ 'seeded' + str ( index ) ] = answer [ 'rationale' ] return converted
Convert seeded answers into the format that can be merged into student answers.
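An input/output example derived directly from the loop above:

seeded = [
    {'answer': 0, 'rationale': 'First rationale'},
    {'answer': 1, 'rationale': 'Second rationale'},
]
# convert_seeded_answers(seeded) ==
#     {0: {'seeded0': 'First rationale'}, 1: {'seeded1': 'Second rationale'}}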
5,920
def mark ( self ) : self . reliableListener . lastRun = extime . Time ( ) BatchProcessingError ( store = self . reliableListener . store , processor = self . reliableListener . processor , listener = self . reliableListener . listener , item = self . workUnit , error = self . failure . getErrorMessage ( ) )
Mark the unit of work as failed in the database and update the listener so as to skip it next time.
5,921
def addReliableListener ( self , listener , style = iaxiom . LOCAL ) : existing = self . store . findUnique ( _ReliableListener , attributes . AND ( _ReliableListener . processor == self , _ReliableListener . listener == listener ) , default = None ) if existing is not None : return existing for work in self . store . query ( self . workUnitType , sort = self . workUnitType . storeID . descending , limit = 1 ) : forwardMark = work . storeID backwardMark = work . storeID + 1 break else : forwardMark = 0 backwardMark = 0 if self . scheduled is None : self . scheduled = extime . Time ( ) iaxiom . IScheduler ( self . store ) . schedule ( self , self . scheduled ) return _ReliableListener ( store = self . store , processor = self , listener = listener , forwardMark = forwardMark , backwardMark = backwardMark , style = style )
Add the given Item to the set which will be notified of Items available for processing.
5,922
def removeReliableListener ( self , listener ) : self . store . query ( _ReliableListener , attributes . AND ( _ReliableListener . processor == self , _ReliableListener . listener == listener ) ) . deleteFromStore ( ) self . store . query ( BatchProcessingError , attributes . AND ( BatchProcessingError . processor == self , BatchProcessingError . listener == listener ) ) . deleteFromStore ( )
Remove a previously added listener.
5,923
def getReliableListeners ( self ) : for rellist in self . store . query ( _ReliableListener , _ReliableListener . processor == self ) : yield rellist . listener
Return an iterable of the listeners which have been added to this batch processor.
5,924
def itemAdded ( self ) : localCount = self . store . query ( _ReliableListener , attributes . AND ( _ReliableListener . processor == self , _ReliableListener . style == iaxiom . LOCAL ) , limit = 1 ) . count ( ) remoteCount = self . store . query ( _ReliableListener , attributes . AND ( _ReliableListener . processor == self , _ReliableListener . style == iaxiom . REMOTE ) , limit = 1 ) . count ( ) if localCount and self . scheduled is None : self . scheduled = extime . Time ( ) iaxiom . IScheduler ( self . store ) . schedule ( self , self . scheduled ) if remoteCount : batchService = iaxiom . IBatchService ( self . store , None ) if batchService is not None : batchService . start ( )
Called to indicate that a new item of the type monitored by this batch processor is being added to the database.
5,925
def call ( self , itemMethod ) : item = itemMethod . im_self method = itemMethod . im_func . func_name return self . batchController . getProcess ( ) . addCallback ( CallItemMethod ( storepath = item . store . dbdir , storeid = item . storeID , method = method ) . do )
Invoke the given bound item method in the batch process.
5,926
def processWhileRunning ( self ) : work = self . step ( ) for result , more in work : yield result if not self . running : break if more : delay = 0.1 else : delay = 10.0 yield task . deferLater ( reactor , delay , lambda : None )
Run tasks until stopService is called.
5,927
def getcols ( sheetMatch = None , colMatch = "Decay" ) : book = BOOK ( ) if sheetMatch is None : matchingSheets = book . sheetNames print ( 'all %d sheets selected ' % ( len ( matchingSheets ) ) ) else : matchingSheets = [ x for x in book . sheetNames if sheetMatch in x ] print ( '%d of %d sheets selected matching "%s"' % ( len ( matchingSheets ) , len ( book . sheetNames ) , sheetMatch ) ) matchingSheetsWithCol = [ ] for sheetName in matchingSheets : i = book . sheetNames . index ( sheetName ) for j , colName in enumerate ( book . sheets [ i ] . colDesc ) : if colMatch in colName : matchingSheetsWithCol . append ( ( sheetName , j ) ) break else : print ( " no match in [%s]%s" % ( book . bookName , sheetName ) ) print ( "%d of %d of those have your column" % ( len ( matchingSheetsWithCol ) , len ( matchingSheets ) ) ) for item in matchingSheetsWithCol : print ( item , item [ 0 ] , item [ 1 ] )
Find every matching column in every sheet and put it in a new sheet or book.
5,928
def namespace ( self ) : self . _ns = { 'db' : self . store , 'store' : store , 'autocommit' : False , } return self . _ns
Return a dictionary representing the namespace which should be available to the user.
5,929
def addAccount ( self , siteStore , username , domain , password ) : for ls in siteStore . query ( userbase . LoginSystem ) : break else : ls = self . installOn ( siteStore ) try : acc = ls . addAccount ( username , domain , password ) except userbase . DuplicateUser : raise usage . UsageError ( "An account by that name already exists." ) return acc
Create a new account in the given store.
5,930
def createSomeItems ( store , itemType , values , counter ) : for i in counter : itemType ( store = store , ** values )
Create some instances of a particular type in a store.
5,931
def save ( self , commit = True ) : for field_name , field in iter_valid_fields ( self . _meta ) : setattr ( self . instance , field_name , self . cleaned_data . get ( field_name ) ) if commit : self . instance . save ( ) return self . instance
Save the instance, or create a new one.
5,932
def dependentItems ( store , tableClass , comparisonFactory ) : for cascadingAttr in ( _cascadingDeletes . get ( tableClass , [ ] ) + _cascadingDeletes . get ( None , [ ] ) ) : for cascadedItem in store . query ( cascadingAttr . type , comparisonFactory ( cascadingAttr ) ) : yield cascadedItem
Collect all the items that should be deleted when an item or items of a particular item type are deleted.
5,933
def declareLegacyItem ( typeName , schemaVersion , attributes , dummyBases = ( ) ) : if ( typeName , schemaVersion ) in _legacyTypes : return _legacyTypes [ typeName , schemaVersion ] if dummyBases : realBases = [ declareLegacyItem ( * A ) for A in dummyBases ] else : realBases = ( Item , ) attributes = attributes . copy ( ) attributes [ '__module__' ] = 'item_dummy' attributes [ '__legacy__' ] = True attributes [ 'typeName' ] = typeName attributes [ 'schemaVersion' ] = schemaVersion result = type ( str ( 'DummyItem<%s,%d>' % ( typeName , schemaVersion ) ) , realBases , attributes ) assert result is not None , 'wtf, %r' % ( type , ) _legacyTypes [ ( typeName , schemaVersion ) ] = result return result
Generate a dummy subclass of Item that will have the given attributes and the base Item methods, but no methods of its own. This is for use with upgrading.
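A hedged usage sketch: declaring the shape of an old schema version so stored rows can still be loaded during upgrade (the 'person' type and its attribute are hypothetical; attribute constructors come from axiom.attributes):

from axiom import attributes

# Dummy class standing in for version 1 of a hypothetical 'person' item.
OldPerson = declareLegacyItem(
    typeName='person',
    schemaVersion=1,
    attributes={'name': attributes.text()},
)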
5,934
def empowerment ( iface , priority = 0 ) : def _deco ( cls ) : cls . powerupInterfaces = ( tuple ( getattr ( cls , 'powerupInterfaces' , ( ) ) ) + ( ( iface , priority ) , ) ) implementer ( iface ) ( cls ) return cls return _deco
Class decorator for indicating a powerup's powerup interfaces.
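A short sketch of the decorator in use (IFoo and FooItem are made up):

from zope.interface import Interface

class IFoo(Interface):
    """A hypothetical powerup interface."""

@empowerment(IFoo, priority=10)
class FooItem(object):
    pass

# FooItem.powerupInterfaces now ends with (IFoo, 10), and zope.interface
# records FooItem as an implementer of IFoo.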
5,935
def powerDown ( self , powerup , interface = None ) : if interface is None : for interface , priority in powerup . _getPowerupInterfaces ( ) : self . powerDown ( powerup , interface ) else : for cable in self . store . query ( _PowerupConnector , AND ( _PowerupConnector . item == self , _PowerupConnector . interface == unicode ( qual ( interface ) ) , _PowerupConnector . powerup == powerup ) ) : cable . deleteFromStore ( ) return raise ValueError ( "Not powered up for %r with %r" % ( interface , powerup ) )
Remove a powerup.
5,936
def interfacesFor ( self , powerup ) : pc = _PowerupConnector for iface in self . store . query ( pc , AND ( pc . item == self , pc . powerup == powerup ) ) . getColumn ( 'interface' ) : yield namedAny ( iface )
Return an iterator of the interfaces for which the given powerup is installed on this object.
5,937
def _getPowerupInterfaces ( self ) : powerupInterfaces = getattr ( self . __class__ , "powerupInterfaces" , ( ) ) pifs = [ ] for x in powerupInterfaces : if isinstance ( x , type ( Interface ) ) : pifs . append ( ( x , 0 ) ) else : pifs . append ( x ) m = getattr ( self , "__getPowerupInterfaces__" , None ) if m is not None : pifs = m ( pifs ) try : pifs = [ ( i , p ) for ( i , p ) in pifs ] except ValueError : raise ValueError ( "return value from %r.__getPowerupInterfaces__" " not an iterable of 2-tuples" % ( self , ) ) return pifs
Collect the powerup interfaces that this object declares it can be installed on.
5,938
def _schemaPrepareInsert ( self , store ) : for name , atr in self . getSchema ( ) : atr . prepareInsert ( self , store )
Prepare each attribute in my schema for insertion into a given store, either by upgrade or by creation. This makes sure all references point to this store and all relative paths point to this store's files directory.
5,939
def existingInStore ( cls , store , storeID , attrs ) : self = cls . __new__ ( cls ) self . __justCreated = False self . __subinit__ ( __store = store , storeID = storeID , __everInserted = True ) schema = self . getSchema ( ) assert len ( schema ) == len ( attrs ) , "invalid number of attributes" for data , ( name , attr ) in zip ( attrs , schema ) : attr . loaded ( self , data ) self . activate ( ) return self
Create and return a new instance from a row from the store.
5,940
def getSchema ( cls ) : schema = [ ] for name , atr in cls . __attributes__ : atr = atr . __get__ ( None , cls ) if isinstance ( atr , SQLAttribute ) : schema . append ( ( name , atr ) ) cls . getSchema = staticmethod ( lambda schema = schema : schema ) return schema
Return all persistent class attributes.
5,941
def committed ( self ) : if self . __deleting : self . deleted ( ) if not self . __legacy__ : self . store . objectCache . uncache ( self . storeID , self ) self . __store = None self . __justCreated = False
Called after the database is brought into a consistent state with this object.
5,942
def registerUpgrader ( upgrader , typeName , oldVersion , newVersion ) : assert isinstance ( typeName , str ) , "read the doc string" _upgradeRegistry [ typeName , oldVersion ] = upgrader
Register a callable which can perform a schema upgrade between two particular versions.
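A minimal registration sketch, assuming the usual Axiom idiom of Item.upgradeVersion for the copy step (the 'person' type and its name attribute are hypothetical):

def upgradePerson1to2(old):
    # Copy the surviving attribute onto the version-2 revision.
    return old.upgradeVersion('person', 1, 2, name=old.name)

registerUpgrader(upgradePerson1to2, 'person', 1, 2)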
5,943
def _hasExplicitOid ( store , table ) : return any ( info [ 1 ] == 'oid' for info in store . querySchemaSQL ( 'PRAGMA *DATABASE*.table_info({})' . format ( table ) ) )
Does the given table have an explicit oid column?
5,944
def _upgradeTableOid ( store , table , createTable , postCreate = lambda : None ) : if _hasExplicitOid ( store , table ) : return store . executeSchemaSQL ( 'ALTER TABLE *DATABASE*.{0} RENAME TO {0}_temp' . format ( table ) ) createTable ( ) store . executeSchemaSQL ( 'INSERT INTO *DATABASE*.{0} ' 'SELECT oid, * FROM *DATABASE*.{0}_temp' . format ( table ) ) store . executeSchemaSQL ( 'DROP TABLE *DATABASE*.{0}_temp' . format ( table ) ) postCreate ( )
Upgrade a table to have an explicit oid.
5,945
def upgradeSystemOid ( store ) : store . transact ( _upgradeTableOid , store , 'axiom_types' , lambda : store . executeSchemaSQL ( CREATE_TYPES ) ) store . transact ( _upgradeTableOid , store , 'axiom_objects' , lambda : store . executeSchemaSQL ( CREATE_OBJECTS ) , lambda : store . executeSchemaSQL ( CREATE_OBJECTS_IDX ) )
Upgrade the system tables to use explicit oid columns.
5,946
def upgradeExplicitOid ( store ) : upgradeSystemOid ( store ) for typename , version in store . querySchemaSQL ( LATEST_TYPES ) : cls = _typeNameToMostRecentClass [ typename ] if cls . schemaVersion != version : remaining = store . querySQL ( 'SELECT oid FROM {} LIMIT 1' . format ( store . _tableNameFor ( typename , version ) ) ) if len ( remaining ) == 0 : continue else : raise RuntimeError ( '{}:{} not fully upgraded to {}' . format ( typename , version , cls . schemaVersion ) ) store . transact ( _upgradeTableOid , store , store . _tableNameOnlyFor ( typename , version ) , lambda : store . _justCreateTable ( cls ) , lambda : store . _createIndexesFor ( cls , [ ] ) )
Upgrade a store to use explicit oid columns. This allows VACUUMing the database without corrupting it.
5,947
def checkUpgradePaths ( self ) : cantUpgradeErrors = [ ] for oldVersion in self . _oldTypesRemaining : currentType = _typeNameToMostRecentClass . get ( oldVersion . typeName , None ) if currentType is None : continue typeInQuestion = oldVersion . typeName upgver = oldVersion . schemaVersion while upgver < currentType . schemaVersion : if ( ( typeInQuestion , upgver ) not in _upgradeRegistry ) : cantUpgradeErrors . append ( "No upgrader present for %s (%s) from %d to %d" % ( typeInQuestion , qual ( currentType ) , upgver , upgver + 1 ) ) if upgver + 1 != currentType . schemaVersion : if ( typeInQuestion , upgver + 1 ) not in _legacyTypes : cantUpgradeErrors . append ( "Type schema required for upgrade missing:" " %s version %d" % ( typeInQuestion , upgver + 1 ) ) upgver += 1 if cantUpgradeErrors : raise NoUpgradePathAvailable ( '\n ' . join ( cantUpgradeErrors ) )
Check that all of the accumulated old Item types have a way to get from their current version to the latest version.
5,948
def upgradeItem ( self , thisItem ) : sid = thisItem . storeID if sid in self . _currentlyUpgrading : raise UpgraderRecursion ( ) self . _currentlyUpgrading [ sid ] = thisItem try : return upgradeAllTheWay ( thisItem ) finally : self . _currentlyUpgrading . pop ( sid )
Upgrade a legacy item.
5,949
def upgradeBatch ( self , n ) : store = self . store def _doBatch ( itemType ) : upgradedAnything = False for theItem in store . query ( itemType , limit = n ) : upgradedAnything = True try : self . upgradeItem ( theItem ) except : f = Failure ( ) raise ItemUpgradeError ( f , theItem . storeID , itemType , _typeNameToMostRecentClass [ itemType . typeName ] ) return upgradedAnything if self . upgradesPending : didAny = False while self . _oldTypesRemaining : t0 = self . _oldTypesRemaining [ 0 ] upgradedAnything = store . transact ( _doBatch , t0 ) if not upgradedAnything : self . _oldTypesRemaining . pop ( 0 ) if didAny : msg ( "%s finished upgrading %s" % ( store . dbdir . path , qual ( t0 ) ) ) continue elif not didAny : didAny = True msg ( "%s beginning upgrade..." % ( store . dbdir . path , ) ) yield None if didAny : msg ( "%s completely upgraded." % ( store . dbdir . path , ) )
Upgrade the entire store in batches, yielding after each batch.
5,950
def open ( self ) : self . vg . open ( ) self . __lvh = lvm_lv_from_uuid ( self . vg . handle , self . uuid ) if not bool ( self . __lvh ) : raise HandleError ( "Failed to initialize LV Handle." )
Obtains the lvm vg_t and lv_t handles. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py.
5,951
def name ( self ) : self . open ( ) name = lvm_lv_get_name ( self . __lvh ) self . close ( ) return name
Returns the logical volume name.
5,952
def is_active ( self ) : self . open ( ) active = lvm_lv_is_active ( self . __lvh ) self . close ( ) return bool ( active )
Returns True if the logical volume is active, False otherwise.
5,953
def is_suspended ( self ) : self . open ( ) susp = lvm_lv_is_suspended ( self . __lvh ) self . close ( ) return bool ( susp )
Returns True if the logical volume is suspended, False otherwise.
5,954
def size ( self , units = "MiB" ) : self . open ( ) size = lvm_lv_get_size ( self . __lvh ) self . close ( ) return size_convert ( size , units )
Returns the logical volume size in the given units. Default units are MiB.
5,955
def activate ( self ) : self . open ( ) a = lvm_lv_activate ( self . handle ) self . close ( ) if a != 0 : raise CommitError ( "Failed to activate LV." )
Activates the logical volume.
5,956
def deactivate ( self ) : self . open ( ) d = lvm_lv_deactivate ( self . handle ) self . close ( ) if d != 0 : raise CommitError ( "Failed to deactivate LV." )
Deactivates the logical volume.
5,957
def open ( self ) : self . vg . open ( ) self . __pvh = lvm_pv_from_uuid ( self . vg . handle , self . uuid ) if not bool ( self . __pvh ) : raise HandleError ( "Failed to initialize PV Handle." )
Obtains the lvm vg_t and pv_t handles. Usually you would never need to use this method unless you are doing operations using the ctypes function wrappers in conversion.py.
5,958
def name ( self ) : self . open ( ) name = lvm_pv_get_name ( self . handle ) self . close ( ) return name
Returns the physical volume device path.
5,959
def mda_count ( self ) : self . open ( ) mda = lvm_pv_get_mda_count ( self . handle ) self . close ( ) return mda
Returns the physical volume mda count.
5,960
def size ( self , units = "MiB" ) : self . open ( ) size = lvm_pv_get_size ( self . handle ) self . close ( ) return size_convert ( size , units )
Returns the physical volume size in the given units. Default units are MiB.
5,961
def dev_size ( self , units = "MiB" ) : self . open ( ) size = lvm_pv_get_dev_size ( self . handle ) self . close ( ) return size_convert ( size , units )
Returns the device size in the given units. Default units are MiB.
5,962
def free ( self , units = "MiB" ) : self . open ( ) size = lvm_pv_get_free ( self . handle ) self . close ( ) return size_convert ( size , units )
Returns the free size in the given units. Default units are MiB.
5,963
def mongoengine_validate_wrapper ( old_clean , new_clean ) : def inner_validate ( value ) : value = old_clean ( value ) try : new_clean ( value ) return value except ValidationError , e : raise forms . ValidationError ( e ) return inner_validate
A wrapper function to validate form data against a mongoengine field validator and raise a proper django.forms ValidationError if there are any problems.
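A hedged sketch of wiring a Django form field to a mongoengine validator with this wrapper (the specific fields are illustrative):

from django import forms
from mongoengine import StringField

mongo_field = StringField(max_length=10)
form_field = forms.CharField()
# Run Django's own cleaning first, then mongoengine's validation; a
# mongoengine ValidationError is re-raised as django.forms.ValidationError.
form_field.clean = mongoengine_validate_wrapper(
    form_field.clean, mongo_field.validate)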
5,964
def iter_valid_fields ( meta ) : meta_fields = getattr ( meta , 'fields' , ( ) ) meta_exclude = getattr ( meta , 'exclude' , ( ) ) meta_exclude += ( meta . document . _meta . get ( 'id_field' ) , ) if meta_fields : for field_name in meta_fields : field = meta . document . _fields . get ( field_name ) if field : yield ( field_name , field ) else : for field_name , field in meta . document . _fields . iteritems ( ) : if field_name not in meta_exclude : yield ( field_name , field )
Walk through the available valid fields.
5,965
def uninstallFrom ( self , target ) : target . powerDown ( self ) for dc in self . store . query ( _DependencyConnector , _DependencyConnector . target == target ) : if dc . installee is self : dc . deleteFromStore ( ) for item in installedUniqueRequirements ( self , target ) : uninstallFrom ( item , target ) callback = getattr ( self , "uninstalled" , None ) if callback is not None : callback ( )
Remove this object from the target, as well as any dependencies that it automatically installed which were not explicitly pinned by calling install, raising an exception if anything still depends on this.
5,966
def installedOn ( self ) : try : return self . store . findUnique ( _DependencyConnector , _DependencyConnector . installee == self ) . target except ItemNotFound : return None
If this item is installed on another item, return the install target. Otherwise return None.
5,967
def installedDependents ( self , target ) : for dc in self . store . query ( _DependencyConnector , _DependencyConnector . target == target ) : depends = dependentsOf ( dc . installee . __class__ ) if self . __class__ in depends : yield dc . installee
Return an iterable of things installed on the target that require this item.
5,968
def installedUniqueRequirements ( self , target ) : myDepends = dependentsOf ( self . __class__ ) for dc in self . store . query ( _DependencyConnector , _DependencyConnector . target == target ) : if dc . installee is self : continue depends = dependentsOf ( dc . installee . __class__ ) if self . __class__ in depends : raise DependencyError ( "%r cannot be uninstalled from %r, " "%r still depends on it" % ( self , target , dc . installee ) ) for cls in myDepends [ : ] : if cls in depends : myDepends . remove ( cls ) for dc in self . store . query ( _DependencyConnector , _DependencyConnector . target == target ) : if ( dc . installee . __class__ in myDepends and not dc . explicitlyInstalled ) : yield dc . installee
Return an iterable of things installed on the target that this item requires and that are not required by anything else.
5,969
def installedRequirements ( self , target ) : myDepends = dependentsOf ( self . __class__ ) for dc in self . store . query ( _DependencyConnector , _DependencyConnector . target == target ) : if dc . installee . __class__ in myDepends : yield dc . installee
Return an iterable of things installed on the target that this item requires.
5,970
def _diffSchema ( diskSchema , memorySchema ) : diskSchema = set ( diskSchema ) memorySchema = set ( memorySchema ) diskOnly = diskSchema - memorySchema memoryOnly = memorySchema - diskSchema diff = [ ] if diskOnly : diff . append ( 'Only on disk:' ) diff . extend ( map ( repr , diskOnly ) ) if memoryOnly : diff . append ( 'Only in memory:' ) diff . extend ( map ( repr , memoryOnly ) ) return '\n' . join ( diff )
Format a schema mismatch for human consumption.
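An input/output example (schema entries shown as simple tuples; real entries are attribute descriptions):

print(_diffSchema(
    diskSchema=[('name', 'TEXT'), ('age', 'INTEGER')],
    memorySchema=[('name', 'TEXT')],
))
# Only on disk:
# ('age', 'INTEGER')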
5,971
def close ( self ) : now = time . time ( ) try : file . close ( self ) _mkdirIfNotExists ( self . _destpath . dirname ( ) ) self . finalpath = self . _destpath os . rename ( self . name , self . finalpath . path ) os . utime ( self . finalpath . path , ( now , now ) ) except : return defer . fail ( ) return defer . succeed ( self . finalpath )
Close this file and commit it to its permanent location.
5,972
def _computeFromClause ( self , tables ) : tableAliases = [ ] self . fromClauseParts = [ ] for table in tables : tableName = table . getTableName ( self . store ) tableAlias = table . getTableAlias ( self . store , tuple ( tableAliases ) ) if tableAlias is None : self . fromClauseParts . append ( tableName ) else : tableAliases . append ( tableAlias ) self . fromClauseParts . append ( '%s AS %s' % ( tableName , tableAlias ) ) self . sortClauseParts = [ ] for attr , direction in self . sort . orderColumns ( ) : assert direction in ( 'ASC' , 'DESC' ) , "%r not in ASC,DESC" % ( direction , ) if attr . type not in tables : raise ValueError ( "Ordering references type excluded from comparison" ) self . sortClauseParts . append ( '%s %s' % ( attr . getColumnName ( self . store ) , direction ) )
Generate the SQL string which follows the FROM string and precedes the WHERE string in the final SQL statement.
5,973
def _selectStuff ( self , verb = 'SELECT' ) : sqlResults = self . _runQuery ( verb , self . _queryTarget ) for row in sqlResults : yield self . _massageData ( row )
Return a generator which yields the massaged results of this query with a particular SQL verb.
5,974
def next ( self ) : if self . _selfiter is None : warnings . warn ( "Calling 'next' directly on a query is deprecated. " "Perhaps you want to use iter(query).next(), or something " "more expressive like store.findFirst or store.findOrCreate?" , DeprecationWarning , stacklevel = 2 ) self . _selfiter = self . __iter__ ( ) return self . _selfiter . next ( )
This method is deprecated, a holdover from when queries were iterators rather than iterables.
5,975
def paginate ( self , pagesize = 20 ) : sort = self . sort oc = list ( sort . orderColumns ( ) ) if not oc : sort = self . tableClass . storeID . ascending oc = list ( sort . orderColumns ( ) ) if len ( oc ) != 1 : raise RuntimeError ( "%d-column sorts not supported yet with paginate" % ( len ( oc ) , ) ) sortColumn = oc [ 0 ] [ 0 ] if oc [ 0 ] [ 1 ] == 'ASC' : sortOp = operator . gt else : sortOp = operator . lt if _isColumnUnique ( sortColumn ) : tiebreaker = None else : tiebreaker = self . tableClass . storeID tied = lambda a , b : ( sortColumn . __get__ ( a ) == sortColumn . __get__ ( b ) ) def _AND ( a , b ) : if a is None : return b return attributes . AND ( a , b ) results = list ( self . store . query ( self . tableClass , self . comparison , sort = sort , limit = pagesize + 1 ) ) while results : if len ( results ) == 1 : yield results [ 0 ] return for resultidx in range ( len ( results ) - 1 ) : result = results [ resultidx ] nextResult = results [ resultidx + 1 ] if tied ( result , nextResult ) : lastTieBreaker = tiebreaker . __get__ ( result ) trq = self . store . query ( self . tableClass , _AND ( self . comparison , sortColumn == sortColumn . __get__ ( result ) ) ) tiedResults = list ( trq ) tiedResults . sort ( key = lambda rslt : ( sortColumn . __get__ ( result ) , tiebreaker . __get__ ( result ) ) ) for result in tiedResults : yield result break else : yield result lastSortValue = sortColumn . __get__ ( result ) results = list ( self . store . query ( self . tableClass , _AND ( self . comparison , sortOp ( sortColumn , sortColumn . __get__ ( result ) ) ) , sort = sort , limit = pagesize + 1 ) )
Split up the work of gathering a result set into multiple smaller pages, allowing very large queries to be iterated without blocking for long periods of time.
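A usage sketch with a hypothetical Employee item type; each page issues its own bounded SQL query instead of materialising the whole result set at once:

for employee in store.query(Employee, Employee.active == True).paginate(pagesize=50):
    handle(employee)  # handle() is a placeholder for per-item work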
5,976
def _massageData ( self , row ) : result = self . store . _loadedItem ( self . tableClass , row [ 0 ] , row [ 1 : ] ) assert result . store is not None , "result %r has funky store" % ( result , ) return result
Convert a row into an Item instance by loading cached items or creating new ones based on query results.
5,977
def deleteFromStore ( self ) : if ( self . limit is None and not isinstance ( self . sort , attributes . UnspecifiedOrdering ) ) : return self . cloneQuery ( sort = None ) . deleteFromStore ( ) deletedOverridden = ( self . tableClass . deleted . im_func is not item . Item . deleted . im_func ) deleteFromStoreOverridden = ( self . tableClass . deleteFromStore . im_func is not item . Item . deleteFromStore . im_func ) if deletedOverridden or deleteFromStoreOverridden : for it in self : it . deleteFromStore ( ) else : def itemsToDelete ( attr ) : return attr . oneOf ( self . getColumn ( "storeID" ) ) if not item . allowDeletion ( self . store , self . tableClass , itemsToDelete ) : raise errors . DeletionDisallowed ( 'Cannot delete item; ' 'has referents with whenDeleted == reference.DISALLOW' ) for it in item . dependentItems ( self . store , self . tableClass , itemsToDelete ) : it . deleteFromStore ( ) self . _runQuery ( 'DELETE' , "" )
Delete all the Items which are found by this query.
5,978
def _massageData ( self , row ) : offset = 0 resultBits = [ ] for i , tableClass in enumerate ( self . tableClass ) : numAttrs = self . schemaLengths [ i ] result = self . store . _loadedItem ( self . tableClass [ i ] , row [ offset ] , row [ offset + 1 : offset + numAttrs ] ) assert result . store is not None , "result %r has funky store" % ( result , ) resultBits . append ( result ) offset += numAttrs return tuple ( resultBits )
Convert a row into a tuple of Item instances by slicing it according to the number of columns for each instance, and then proceeding as for ItemQuery._massageData.
5,979
def cloneQuery ( self , limit = _noItem , sort = _noItem ) : newq = self . query . cloneQuery ( limit = limit , sort = sort ) return self . __class__ ( newq )
Clone the original query which this distinct query wraps, and return a new wrapper around that clone.
5,980
def count ( self ) : if not self . query . store . autocommit : self . query . store . checkpoint ( ) target = ', ' . join ( [ tableClass . storeID . getColumnName ( self . query . store ) for tableClass in self . query . tableClass ] ) sql , args = self . query . _sqlAndArgs ( 'SELECT DISTINCT' , target ) sql = 'SELECT COUNT(*) FROM (' + sql + ')' result = self . query . store . querySQL ( sql , args ) assert len ( result ) == 1 , 'more than one result: %r' % ( result , ) return result [ 0 ] [ 0 ] or 0
Count the number of distinct results of the wrapped query.
5,981
def sum ( self ) : res = self . _runQuery ( 'SELECT' , 'SUM(%s)' % ( self . _queryTarget , ) ) or [ ( 0 , ) ] assert len ( res ) == 1 , "more than one result: %r" % ( res , ) dbval = res [ 0 ] [ 0 ] or 0 return self . attribute . outfilter ( dbval , _FakeItemForFilter ( self . store ) )
Return the sum of all the values returned by this query, or 0 if there are no results.
5,982
def _attachChild ( self , child ) : "attach a child database, returning an identifier for it" self . _childCounter += 1 databaseName = 'child_db_%d' % ( self . _childCounter , ) self . _attachedChildren [ databaseName ] = child self . executeSQL ( "ATTACH DATABASE '%s' AS %s" % ( child . dbdir . child ( 'db.sqlite' ) . path , databaseName , ) ) return databaseName
Attach a child database, returning an identifier for it.
5,983
def newFile ( self , * path ) : assert len ( path ) > 0 , "newFile requires a nonzero number of segments" if self . dbdir is None : if self . filesdir is None : raise RuntimeError ( "This in-memory store has no file directory" ) else : tmpbase = self . filesdir else : tmpbase = self . dbdir tmpname = tmpbase . child ( 'temp' ) . child ( str ( tempCounter . next ( ) ) + ".tmp" ) return AtomicFile ( tmpname . path , self . newFilePath ( * path ) )
Open a new file somewhere in this Store's file area.
5,984
def _prepareOldVersionOf ( self , typename , version , persistedSchema ) : appropriateSchema = persistedSchema [ typename , version ] dummyAttributes = { } for ( attribute , sqlType , indexed , pythontype , docstring ) in appropriateSchema : atr = pythontype ( indexed = indexed , doc = docstring ) dummyAttributes [ attribute ] = atr dummyBases = [ ] oldType = declareLegacyItem ( typename , version , dummyAttributes , dummyBases ) self . _upgradeManager . queueTypeUpgrade ( oldType ) return oldType
Note that this database contains old versions of a particular type. Create the appropriate dummy item subclass and queue the type to be upgraded.
5,985
def batchInsert ( self , itemType , itemAttributes , dataRows ) : class FakeItem : pass _NEEDS_DEFAULT = object ( ) fakeOSelf = FakeItem ( ) fakeOSelf . store = self sql = itemType . _baseInsertSQL ( self ) indices = { } schema = [ attr for ( name , attr ) in itemType . getSchema ( ) ] for i , attr in enumerate ( itemAttributes ) : indices [ attr ] = i for row in dataRows : oid = self . store . executeSchemaSQL ( _schema . CREATE_OBJECT , [ self . store . getTypeID ( itemType ) ] ) insertArgs = [ oid ] for attr in schema : i = indices . get ( attr , _NEEDS_DEFAULT ) if i is _NEEDS_DEFAULT : pyval = attr . default else : pyval = row [ i ] dbval = attr . _convertPyval ( fakeOSelf , pyval ) insertArgs . append ( dbval ) self . executeSQL ( sql , insertArgs )
Create multiple items in the store without loading corresponding Python objects into memory.
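A bulk-insert sketch with a hypothetical Person item type; attributes omitted from itemAttributes fall back to their declared defaults, as in the loop above:

store.batchInsert(
    Person,                      # hypothetical Item subclass
    [Person.name, Person.age],   # attribute objects matching each row's columns
    [(u'alice', 30), (u'bob', 25)],
)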
5,986
def getTableName ( self , tableClass ) : if not ( isinstance ( tableClass , type ) and issubclass ( tableClass , item . Item ) ) : raise errors . ItemClassesOnly ( "Only subclasses of Item have table names." ) if tableClass not in self . typeToTableNameCache : self . typeToTableNameCache [ tableClass ] = self . _tableNameFor ( tableClass . typeName , tableClass . schemaVersion ) self . getTypeID ( tableClass ) return self . typeToTableNameCache [ tableClass ]
Retrieve the fully qualified name of the table holding items of a particular class in this store. If the table does not exist in the database, it will be created as a side-effect.
5,987
def getTypeID ( self , tableClass ) : key = ( tableClass . typeName , tableClass . schemaVersion ) if key in self . typenameAndVersionToID : return self . typenameAndVersionToID [ key ] return self . transact ( self . _maybeCreateTable , tableClass , key )
Retrieve the typeID associated with a particular table in the in-database schema for this Store. A typeID is an opaque integer representing the Item subclass and the associated table in this Store's SQLite database.
5,988
def _justCreateTable ( self , tableClass ) : sqlstr = [ ] sqlarg = [ ] tableName = self . _tableNameFor ( tableClass . typeName , tableClass . schemaVersion ) sqlstr . append ( "CREATE TABLE %s (" % tableName ) sqlarg . append ( "oid INTEGER PRIMARY KEY" ) for nam , atr in tableClass . getSchema ( ) : sqlarg . append ( "\n%s %s" % ( atr . getShortColumnName ( self ) , atr . sqltype ) ) sqlstr . append ( ', ' . join ( sqlarg ) ) sqlstr . append ( ')' ) self . createSQL ( '' . join ( sqlstr ) )
Execute the table creation DDL for an Item subclass.
5,989
def getItemByID ( self , storeID , default = _noItem , autoUpgrade = True ) : if not isinstance ( storeID , ( int , long ) ) : raise TypeError ( "storeID *must* be an int or long, not %r" % ( type ( storeID ) . __name__ , ) ) if storeID == STORE_SELF_ID : return self try : return self . objectCache . get ( storeID ) except KeyError : pass log . msg ( interface = iaxiom . IStatEvent , stat_cache_misses = 1 , key = storeID ) results = self . querySchemaSQL ( _schema . TYPEOF_QUERY , [ storeID ] ) assert ( len ( results ) in [ 1 , 0 ] ) , "Database panic: more than one result for TYPEOF!" if results : typename , module , version = results [ 0 ] useMostRecent = False moreRecentAvailable = False if _typeIsTotallyUnknown ( typename , version ) : self . _startup ( ) if _typeIsTotallyUnknown ( typename , version ) : raise errors . UnknownItemType ( "cannot load unknown schema/version pair: %r %r - id: %r" % ( typename , version , storeID ) ) if typename in _typeNameToMostRecentClass : moreRecentAvailable = True mostRecent = _typeNameToMostRecentClass [ typename ] if mostRecent . schemaVersion < version : raise RuntimeError ( "%s:%d - was found in the database and most recent %s is %d" % ( typename , version , typename , mostRecent . schemaVersion ) ) if mostRecent . schemaVersion == version : useMostRecent = True if useMostRecent : T = mostRecent else : T = self . getOldVersionOf ( typename , version ) attrs = self . querySQL ( T . _baseSelectSQL ( self ) , [ storeID ] ) if len ( attrs ) == 0 : if default is _noItem : raise errors . ItemNotFound ( 'No results for known-to-be-good object' ) return default elif len ( attrs ) > 1 : raise errors . DataIntegrityError ( 'Too many results for {:d}' . format ( storeID ) ) attrs = attrs [ 0 ] x = T . existingInStore ( self , storeID , attrs ) if moreRecentAvailable and ( not useMostRecent ) and autoUpgrade : x = self . transact ( self . _upgradeManager . upgradeItem , x ) elif not x . __legacy__ : self . objectCache . cache ( storeID , x ) return x if default is _noItem : raise errors . ItemNotFound ( storeID ) return default
Retrieve an item by its storeID and return it.
5,990
def createSQL ( self , sql , args = ( ) ) : before = time . time ( ) self . _execSQL ( sql , args ) after = time . time ( ) if after - before > 2.0 : log . msg ( 'Extremely long CREATE: %s' % ( after - before , ) ) log . msg ( sql )
For use with auto-committing statements such as CREATE TABLE or CREATE INDEX.
5,991
def executeSQL ( self , sql , args = ( ) ) : sql = self . _execSQL ( sql , args ) result = self . cursor . lastRowID ( ) if self . executedThisTransaction is not None : self . executedThisTransaction . append ( ( result , sql , args ) ) return result
For use with UPDATE or INSERT statements.
5,992
def invokeRunnable ( self ) : runnable = self . runnable if runnable is None : self . deleteFromStore ( ) else : try : self . running = True newTime = runnable . run ( ) finally : self . running = False self . _rescheduleFromRun ( newTime )
Run my runnable, and reschedule or delete myself based on its result. Must be run in a transaction.
5,993
def unscheduleFirst ( self , runnable ) : for evt in self . store . query ( TimedEvent , TimedEvent . runnable == runnable , sort = TimedEvent . time . ascending ) : evt . deleteFromStore ( ) break
Remove the given item from the schedule.
5,994
def scheduledTimes ( self , runnable ) : events = self . store . query ( TimedEvent , TimedEvent . runnable == runnable ) return ( event . time for event in events if not event . running )
Return an iterable of the times at which the given item is scheduled to run.
5,995
def startService ( self ) : super ( _SiteScheduler , self ) . startService ( ) self . _transientSchedule ( self . now ( ) , self . now ( ) )
Start calling persistent timed events whose time has come.
5,996
def stopService ( self ) : super ( _SiteScheduler , self ) . stopService ( ) if self . timer is not None : self . timer . cancel ( ) self . timer = None
Stop calling persistent timed events.
5,997
def _transientSchedule ( self , when , now ) : if self . store . parent is not None : subStore = self . store . parent . getItemByID ( self . store . idInParent ) hook = self . store . parent . findOrCreate ( _SubSchedulerParentHook , subStore = subStore ) hook . _schedule ( when )
If this service's store is attached to its parent, ask the parent to schedule this substore to tick at the given time.
5,998
def migrateDown ( self ) : subStore = self . store . parent . getItemByID ( self . store . idInParent ) ssph = self . store . parent . findUnique ( _SubSchedulerParentHook , _SubSchedulerParentHook . subStore == subStore , default = None ) if ssph is not None : te = self . store . parent . findUnique ( TimedEvent , TimedEvent . runnable == ssph , default = None ) if te is not None : te . deleteFromStore ( ) ssph . deleteFromStore ( )
Remove the components in the site store for this SubScheduler.
5,999
def migrateUp ( self ) : te = self . store . findFirst ( TimedEvent , sort = TimedEvent . time . descending ) if te is not None : self . _transientSchedule ( te . time , None )
Recreate the hooks in the site store to trigger this SubScheduler.