idx
int64
0
165k
question
stringlengths
73
5.81k
target
stringlengths
5
918
19,400
/**
 * Returns true if this file has deleted data records, i.e. the header's
 * last-deleted-slot block id points at a real block.
 *
 * @return whether a chain of deleted slots exists
 */
public boolean hasDeletedSlots() {
    long blkNum = (Long) getVal(OFFSET_LDS_BLOCKID, BIGINT).asJavaVal();
    // NO_SLOT_BLOCKID marks an empty deleted-slot chain.
    return blkNum != NO_SLOT_BLOCKID;
}
Return true if this file has deleted data records .
19,401
/**
 * Returns the id of the last deleted record, reconstructed from the block
 * number and slot id stored in this header page.
 *
 * @return the record id of the most recently deleted record
 */
public RecordId getLastDeletedSlot() {
    long blkNum = (Long) getVal(OFFSET_LDS_BLOCKID, BIGINT).asJavaVal();
    int slotId = (Integer) getVal(OFFSET_LDS_RID, INTEGER).asJavaVal();
    return new RecordId(new BlockId(fileName, blkNum), slotId);
}
Returns the id of last deleted record .
19,402
/**
 * Returns the id of the tail slot stored in this header page.
 * (Name keeps the historical "Solt" spelling because it is public API.)
 *
 * @return the record id of the tail slot
 */
public RecordId getTailSolt() {
    long blkNum = (Long) getVal(OFFSET_TS_BLOCKID, BIGINT).asJavaVal();
    int slotId = (Integer) getVal(OFFSET_TS_RID, INTEGER).asJavaVal();
    return new RecordId(new BlockId(fileName, blkNum), slotId);
}
Returns the id of tail slot .
19,403
/**
 * Records the id of the last deleted record in this header page.
 *
 * @param rid the record id to remember as the last deleted slot
 */
public void setLastDeletedSlot(RecordId rid) {
    Constant blkVal = new BigIntConstant(rid.block().number());
    Constant slotVal = new IntegerConstant(rid.id());
    setVal(OFFSET_LDS_BLOCKID, blkVal);
    setVal(OFFSET_LDS_RID, slotVal);
}
Set the id of last deleted record .
19,404
/**
 * Records the id of the tail slot in this header page.
 * (Name keeps the historical "Solt" spelling because it is public API.)
 *
 * @param rid the record id to remember as the tail slot
 */
public void setTailSolt(RecordId rid) {
    Constant blkVal = new BigIntConstant(rid.block().number());
    Constant slotVal = new IntegerConstant(rid.id());
    setVal(OFFSET_TS_BLOCKID, blkVal);
    setVal(OFFSET_TS_RID, slotVal);
}
Sets the id of the tail slot.
19,405
/**
 * Converts the given object to an Integer when possible.
 *
 * @param object an Integer, a parsable String, or anything else
 * @return the Integer value, or null if the object is neither an Integer
 *         nor a String holding a valid integer
 */
public static Integer getInteger(Object object) {
    if (object instanceof Integer)
        return (Integer) object;
    if (object instanceof String) {
        try {
            return Integer.valueOf((String) object);
        } catch (NumberFormatException ignored) {
            // unparsable string: fall through to null, same as other types
        }
    }
    return null;
}
Return an integer if it is an Integer or can get Integer from String else null
19,406
/**
 * Creates a plan for an SQL select statement: parses the query text,
 * verifies the parsed data against the catalog, then delegates to the
 * supplied query planner.
 *
 * @param qry the SQL select statement
 * @param tx  the calling transaction
 * @return the query plan
 */
public Plan createQueryPlan(String qry, Transaction tx) {
    QueryData data = new Parser(qry).queryCommand();
    Verifier.verifyQueryData(data, tx);
    return qPlanner.createPlan(data, tx);
}
Creates a plan for an SQL select statement using the supplied planner .
19,407
/**
 * Executes an SQL insert, delete, modify, create or drop statement. The
 * parsed command object is verified and then dispatched to the matching
 * method of the supplied update planner, based on its exact class.
 *
 * @param cmd the SQL update statement
 * @param tx  the calling transaction
 * @return the number of affected records (planner-defined)
 * @throws UnsupportedOperationException if the transaction is read-only or
 *         the parser returns an unrecognized command object
 */
public int executeUpdate(String cmd, Transaction tx) {
    // Updates are forbidden inside read-only transactions.
    if (tx.isReadOnly())
        throw new UnsupportedOperationException();
    Parser parser = new Parser(cmd);
    Object obj = parser.updateCommand();
    // Exact-class dispatch (getClass().equals, not instanceof) on the
    // parsed command; each branch verifies before executing.
    if (obj.getClass().equals(InsertData.class)) {
        Verifier.verifyInsertData((InsertData) obj, tx);
        return uPlanner.executeInsert((InsertData) obj, tx);
    } else if (obj.getClass().equals(DeleteData.class)) {
        Verifier.verifyDeleteData((DeleteData) obj, tx);
        return uPlanner.executeDelete((DeleteData) obj, tx);
    } else if (obj.getClass().equals(ModifyData.class)) {
        Verifier.verifyModifyData((ModifyData) obj, tx);
        return uPlanner.executeModify((ModifyData) obj, tx);
    } else if (obj.getClass().equals(CreateTableData.class)) {
        Verifier.verifyCreateTableData((CreateTableData) obj, tx);
        return uPlanner.executeCreateTable((CreateTableData) obj, tx);
    } else if (obj.getClass().equals(CreateViewData.class)) {
        Verifier.verifyCreateViewData((CreateViewData) obj, tx);
        return uPlanner.executeCreateView((CreateViewData) obj, tx);
    } else if (obj.getClass().equals(CreateIndexData.class)) {
        Verifier.verifyCreateIndexData((CreateIndexData) obj, tx);
        return uPlanner.executeCreateIndex((CreateIndexData) obj, tx);
    } else if (obj.getClass().equals(DropTableData.class)) {
        Verifier.verifyDropTableData((DropTableData) obj, tx);
        return uPlanner.executeDropTable((DropTableData) obj, tx);
    } else if (obj.getClass().equals(DropViewData.class)) {
        Verifier.verifyDropViewData((DropViewData) obj, tx);
        return uPlanner.executeDropView((DropViewData) obj, tx);
    } else if (obj.getClass().equals(DropIndexData.class)) {
        Verifier.verifyDropIndexData((DropIndexData) obj, tx);
        return uPlanner.executeDropIndex((DropIndexData) obj, tx);
    } else
        throw new UnsupportedOperationException();
}
Executes an SQL insert delete modify or create statement . The method dispatches to the appropriate method of the supplied update planner depending on what the parser returns .
19,408
/**
 * Adds a field with the specified name and type to this schema, keeping
 * the cached sorted field set (if materialized) in sync.
 *
 * @param fldName the name of the field
 * @param type    the type of the field
 */
public void addField(String fldName, Type type) {
    fields.put(fldName, type);
    // Update the lazily-built sorted view only when it already exists.
    if (myFieldSet != null)
        myFieldSet.add(fldName);
}
Adds a field to this schema having a specified name and type .
19,409
/**
 * Adds to this schema the field of the given name, copying its type from
 * another schema.
 *
 * @param fldName the name of the field to copy
 * @param sch     the schema that defines the field's type
 */
public void add(String fldName, Schema sch) {
    addField(fldName, sch.type(fldName));
}
Adds a field in another schema having the specified name to this schema .
19,410
/**
 * Adds every field of the specified schema to this schema and rebuilds the
 * cached sorted field set if it was already materialized.
 *
 * @param sch the schema whose fields are copied in
 */
public void addAll(Schema sch) {
    fields.putAll(sch.fields);
    if (myFieldSet != null) {
        // Rebuild rather than merge: the key set may overlap arbitrarily.
        myFieldSet = new TreeSet<String>(fields.keySet());
    }
}
Adds all of the fields in the specified schema to this schema .
19,411
/**
 * Returns a sorted set of this schema's field names (natural ordering).
 * The set is built lazily on first access and cached afterwards.
 *
 * @return the sorted field names
 */
public SortedSet<String> fields() {
    if (myFieldSet == null) {
        // Materialize the sorted view once; later mutators keep it in sync.
        myFieldSet = new TreeSet<String>(fields.keySet());
    }
    return myFieldSet;
}
Returns a sorted set containing the field names in this schema sorted by their natural ordering .
19,412
/**
 * Opens an index-join scan for this query by opening the LHS plan, the RHS
 * table plan, and the index.
 *
 * @return the index-join scan
 */
public Scan open() {
    Scan lhs = p1.open();
    TableScan rhs = (TableScan) tp2.open();
    Index index = ii.open(tx);
    return new IndexJoinScan(lhs, index, joinFields, rhs);
}
Opens an index - join scan for this query
19,413
/**
 * Creates a new index-select scan for this query, combining the underlying
 * table scan, the opened index, and the search range built from the
 * previously supplied ranges.
 *
 * @return the index-select scan
 */
public Scan open() {
    TableScan tableScan = (TableScan) tp.open();
    Index index = ii.open(tx);
    SearchRange range = new SearchRange(ii.fieldNames(), schema(), searchRanges);
    return new IndexSelectScan(index, range, tableScan);
}
Creates a new index - select scan for this query
19,414
/**
 * Estimates the number of block accesses for the index selection: the
 * index traversal cost plus one access per matching data record.
 *
 * @return the estimated block access count
 */
public long blocksAccessed() {
    SearchKeyType keyType = new SearchKeyType(schema(), ii.fieldNames());
    long traversalCost = Index.searchCost(ii.indexType(), keyType,
            tp.recordsOutput(), recordsOutput());
    return traversalCost + recordsOutput();
}
Estimates the number of block accesses to compute the index selection which is the same as the index traversal cost plus the number of matching data records .
19,415
/**
 * Initializes the system. This method is called during system startup;
 * a second call is ignored with a warning.
 *
 * @param dirName the name of the database directory
 * @param factory the stored-procedure factory to install
 */
public static void init(String dirName, StoredProcedureFactory factory) {
    // Idempotence guard: only the first init request takes effect.
    if (inited) {
        if (logger.isLoggable(Level.WARNING))
            logger.warning("discarding duplicated init request");
        return;
    }
    spFactory = factory;
    // Resolve planner implementations from configuration, defaulting to the
    // heuristic query planner and the index update planner.
    queryPlannerCls = CoreProperties.getLoader().getPropertyAsClass(
            VanillaDb.class.getName() + ".QUERYPLANNER",
            HeuristicQueryPlanner.class, QueryPlanner.class);
    updatePlannerCls = CoreProperties.getLoader().getPropertyAsClass(
            VanillaDb.class.getName() + ".UPDATEPLANNER",
            IndexUpdatePlanner.class, UpdatePlanner.class);
    // Initialization order matters: file/log managers first, then the task
    // manager, then the transaction manager that depends on them.
    initFileAndLogMgr(dirName);
    initTaskMgr();
    initTxMgr();
    // A serializable read-write transaction drives catalog setup and
    // (for existing databases) recovery.
    Transaction initTx = txMgr.newTransaction(
            Connection.TRANSACTION_SERIALIZABLE, false);
    boolean isDbNew = fileMgr.isNew();
    initCatalogMgr(isDbNew, initTx);
    if (isDbNew) {
        if (logger.isLoggable(Level.INFO))
            logger.info("creating new database...");
    } else {
        if (logger.isLoggable(Level.INFO))
            logger.info("recovering existing database");
        // Existing database: run recovery before serving any work.
        RecoveryMgr.initializeSystem(initTx);
    }
    initStatMgr(initTx);
    // Checkpoint so the log does not need to be replayed past this point.
    txMgr.createCheckpoint(initTx);
    initTx.commit();
    // Periodic checkpointing is enabled by default via configuration.
    boolean doCheckpointing = CoreProperties.getLoader().getPropertyAsBoolean(
            VanillaDb.class.getName() + ".DO_CHECKPOINT", true);
    if (doCheckpointing)
        initCheckpointingTask();
    inited = true;
}
Initializes the system . This method is called during system startup .
19,416
/**
 * Creates a planner for SQL commands by instantiating the configured query
 * and update planner classes. To change how planning works, modify the
 * configured classes or this method.
 *
 * @return a new planner, or null if the planner classes cannot be
 *         instantiated reflectively
 */
public static Planner newPlanner() {
    try {
        // Class.newInstance() is deprecated; go through the no-arg
        // constructor so constructor exceptions are not thrown unchecked.
        QueryPlanner qplanner = (QueryPlanner) queryPlannerCls
                .getDeclaredConstructor().newInstance();
        UpdatePlanner uplanner = (UpdatePlanner) updatePlannerCls
                .getDeclaredConstructor().newInstance();
        return new Planner(qplanner, uplanner);
    } catch (ReflectiveOperationException e) {
        e.printStackTrace();
        return null;
    }
}
Creates a planner for SQL commands . To change how the planner works modify this method .
19,417
/**
 * Stops the profiler and writes a report file containing the top packages,
 * methods and lines to the configured output directory (defaulting to the
 * user's home directory).
 */
public static void stopProfilerAndReport() {
    profiler.stopCollecting();
    String path = CoreProperties.getLoader().getPropertyAsString(
            VanillaDb.class.getName() + ".PROFILE_OUTPUT_DIR",
            System.getProperty("user.home"));
    File out = new File(path, System.currentTimeMillis() + "_profile.txt");
    // try-with-resources: the original leaked the writer if any write threw.
    try (BufferedWriter bwrFile = new BufferedWriter(new FileWriter(out))) {
        bwrFile.write(profiler.getTopPackages(30));
        bwrFile.newLine();
        bwrFile.write(profiler.getTopMethods(30));
        bwrFile.newLine();
        bwrFile.write(profiler.getTopLines(30));
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Stop profiler and generate report file .
19,418
/**
 * Reads the contents of the given disk block into the byte buffer.
 *
 * @param blk    the block to read
 * @param buffer the destination buffer (cleared before reading)
 * @throws RuntimeException if the block cannot be read
 */
void read(BlockId blk, IoBuffer buffer) {
    try {
        IoChannel fileChannel = getFileChannel(blk.fileName());
        buffer.clear();
        fileChannel.read(buffer, blk.number() * BLOCK_SIZE);
    } catch (IOException e) {
        // Preserve the cause instead of printStackTrace + bare rethrow.
        throw new RuntimeException("cannot read block " + blk, e);
    }
}
Reads the contents of a disk block into a byte buffer .
19,419
/**
 * Writes the contents of the byte buffer into the given disk block.
 *
 * @param blk    the block to write
 * @param buffer the source buffer (rewound before writing)
 * @throws RuntimeException if the block cannot be written
 */
void write(BlockId blk, IoBuffer buffer) {
    try {
        IoChannel fileChannel = getFileChannel(blk.fileName());
        buffer.rewind();
        fileChannel.write(buffer, blk.number() * BLOCK_SIZE);
    } catch (IOException e) {
        // Preserve the cause; also fixed the missing space in the message.
        throw new RuntimeException("cannot write block " + blk, e);
    }
}
Writes the contents of a byte buffer into a disk block .
19,420
/**
 * Appends the contents of the byte buffer to the end of the specified file.
 *
 * @param fileName the name of the file
 * @param buffer   the source buffer (rewound before writing)
 * @return the id of the newly appended block
 * @throws RuntimeException if the append fails
 */
BlockId append(String fileName, IoBuffer buffer) {
    try {
        IoChannel fileChannel = getFileChannel(fileName);
        buffer.rewind();
        long newSize = fileChannel.append(buffer);
        return new BlockId(fileName, newSize / BLOCK_SIZE - 1);
    } catch (IOException e) {
        // Fail loudly instead of swallowing the IOException and returning
        // null (which merely deferred an NPE to the caller); this matches
        // how read/write/size report I/O failures.
        throw new RuntimeException("cannot append block to " + fileName, e);
    }
}
Appends the contents of a byte buffer to the end of the specified file .
19,421
/**
 * Returns the number of blocks in the specified file.
 *
 * @param fileName the name of the file
 * @return the number of blocks
 * @throws RuntimeException if the file cannot be accessed
 */
public long size(String fileName) {
    try {
        IoChannel fileChannel = getFileChannel(fileName);
        return fileChannel.size() / BLOCK_SIZE;
    } catch (IOException e) {
        // Keep the original IOException as the cause for diagnosis.
        throw new RuntimeException("cannot access " + fileName, e);
    }
}
Returns the number of blocks in the specified file .
19,422
/**
 * Returns the file channel for the specified filename, opening and caching
 * it on first use. Access is serialized per filename via the anchor object.
 *
 * @param fileName the name of the file
 * @return the (possibly newly opened) channel for the file
 * @throws IOException if the file cannot be opened
 */
private IoChannel getFileChannel(String fileName) throws IOException {
    synchronized (prepareAnchor(fileName)) {
        IoChannel channel = openFiles.get(fileName);
        if (channel != null)
            return channel;
        // The log file lives in the log directory; everything else in the
        // database directory.
        File parent = fileName.equals(DEFAULT_LOG_FILE) ? logDirectory : dbDirectory;
        channel = IoAllocator.newIoChannel(new File(parent, fileName));
        openFiles.put(fileName, channel);
        return channel;
    }
}
Returns the file channel for the specified filename . The file channel is stored in a map keyed on the filename . If the file is not open then it is opened and the file channel is added to the map .
19,423
/**
 * Returns a histogram that, for each field, approximates the value
 * distribution of the product of the two input histograms: each bucket's
 * frequency is scaled by the other relation's record count.
 *
 * @param hist1 the histogram of the first relation
 * @param hist2 the histogram of the second relation
 * @return the approximated product histogram
 */
public static Histogram productHistogram(Histogram hist1, Histogram hist2) {
    // The product schema is the union of both field sets.
    Set<String> prodFlds = new HashSet<String>(hist1.fields());
    prodFlds.addAll(hist2.fields());
    Histogram prodHist = new Histogram(prodFlds);
    double numRec1 = hist1.recordsOutput();
    double numRec2 = hist2.recordsOutput();
    // If either side is (effectively) empty, the product is empty: return
    // the histogram with no buckets.
    if (Double.compare(numRec1, 1.0) < 0 || Double.compare(numRec2, 1.0) < 0)
        return prodHist;
    // Each bucket of side 1 is repeated once per record of side 2, so its
    // frequency scales by numRec2 (ranges and distinct counts unchanged).
    for (String fld : hist1.fields())
        for (Bucket bkt : hist1.buckets(fld))
            prodHist.addBucket(fld, new Bucket(bkt.valueRange(),
                    bkt.frequency() * numRec2, bkt.distinctValues(),
                    bkt.valuePercentiles()));
    // Symmetrically, side-2 buckets scale by numRec1.
    for (String fld : hist2.fields())
        for (Bucket bkt : hist2.buckets(fld))
            prodHist.addBucket(fld, new Bucket(bkt.valueRange(),
                    bkt.frequency() * numRec1, bkt.distinctValues(),
                    bkt.valuePercentiles()));
    return prodHist;
}
Returns a histogram that for each field approximates the value distribution of products from the specified histograms .
19,424
/**
 * Creates a product scan for this query by opening both underlying plans.
 *
 * @return the product scan
 */
public Scan open() {
    return new ProductScan(p1.open(), p2.open());
}
Creates a product scan for this query .
19,425
/**
 * Moves the index to its next matching record; if one exists, positions the
 * table scan on the corresponding data record.
 *
 * @return false if there are no more matching index records
 */
public boolean next() {
    if (!idx.next())
        return false;
    // Follow the index entry to the underlying data record.
    ts.moveToRecordId(idx.getDataRecordId());
    return true;
}
Moves to the next record which in this case means moving the index to the next record satisfying the selection constant and returning false if there are no more such index records . If there is a next record the method moves the tablescan to the corresponding data record .
19,426
/**
 * Positions the scan before the first record in sorted order by moving each
 * underlying run to its first record. currentScan is reset to null,
 * meaning no current scan has been chosen yet.
 */
public void beforeFirst() {
    currentScan = null;
    s1.beforeFirst();
    hasMore1 = s1.next();
    // A second run only exists when the sort produced two temp tables.
    if (s2 != null) {
        s2.beforeFirst();
        hasMore2 = s2.next();
    }
}
Positions the scan before the first record in sorted order . Internally it moves to the first record of each underlying scan . The variable currentScan is set to null indicating that there is no current scan .
19,427
/**
 * Moves to the next record in sorted order: first advances whichever run
 * produced the current record, then selects the run with the lowest next
 * record as the new current scan.
 *
 * @return false if both runs are exhausted
 */
public boolean next() {
    // Advance only the run we last consumed from (no-op before the first
    // call, when currentScan is still null).
    if (currentScan == s1)
        hasMore1 = s1.next();
    else if (currentScan == s2)
        hasMore2 = s2.next();
    if (!hasMore1 && !hasMore2)
        return false;
    if (hasMore1 && hasMore2)
        currentScan = (comp.compare(s1, s2) < 0) ? s1 : s2;
    else
        currentScan = hasMore1 ? s1 : s2;
    return true;
}
Moves to the next record in sorted order . First the current scan is moved to the next record . Then the lowest record of the two scans is found and that scan is chosen to be the new current scan .
19,428
/**
 * Saves the position of the current record of each run so it can be
 * restored later via restorePosition().
 */
public void savePosition() {
    RecordId first = s1.getRecordId();
    // The second run may not exist; record null in its place.
    RecordId second = (s2 == null) ? null : s2.getRecordId();
    savedPosition = Arrays.asList(first, second);
}
Saves the position of the current record so that it can be restored at a later time .
19,429
/**
 * Moves the scan back to the position captured by savePosition().
 */
public void restorePosition() {
    s1.moveToRecordId(savedPosition.get(0));
    RecordId second = savedPosition.get(1);
    // null means there was no second run when the position was saved.
    if (second != null)
        s2.moveToRecordId(second);
}
Moves the scan to its previously - saved position .
19,430
/**
 * Creates a non-quiescent checkpoint record: snapshots the active
 * transaction list, flushes all buffers, writes the checkpoint log record,
 * and forces it to disk. The whole sequence runs under this monitor so the
 * active-transaction snapshot stays consistent with the checkpoint.
 *
 * @param checkpointTx the transaction performing the checkpoint
 */
public void createCheckpoint(Transaction checkpointTx) {
    synchronized (this) {
        List<Long> txNums = new LinkedList<Long>(activeTxs);
        checkpointTx.bufferMgr().flushAll();
        LogSeqNum lsn = checkpointTx.recoveryMgr().checkpoint(txNums);
        VanillaDb.logMgr().flush(lsn);
    }
}
Creates non - quiescent checkpoint record .
19,431
/**
 * Creates a basic query plan: the product of all tables and views, then a
 * selection on the predicate, optional grouping, a projection on the field
 * list, optional sorting, and optionally an explain wrapper.
 *
 * @param data the parsed query data
 * @param tx   the calling transaction
 * @return the query plan
 */
public Plan createPlan(QueryData data, Transaction tx) {
    // One plan per table; views are expanded through a fresh planner.
    List<Plan> plans = new ArrayList<Plan>();
    for (String tblname : data.tables()) {
        String viewdef = VanillaDb.catalogMgr().getViewDef(tblname, tx);
        plans.add(viewdef == null
                ? new TablePlan(tblname, tx)
                : VanillaDb.newPlanner().createQueryPlan(viewdef, tx));
    }
    // Fold the list into a left-deep chain of products.
    Plan p = plans.remove(0);
    for (Plan nextplan : plans)
        p = new ProductPlan(p, nextplan);
    p = new SelectPlan(p, data.pred());
    if (data.groupFields() != null)
        p = new GroupByPlan(p, data.groupFields(), data.aggregationFn(), tx);
    p = new ProjectPlan(p, data.projectFields());
    if (data.sortFields() != null)
        p = new SortPlan(p, data.sortFields(), data.sortDirections(), tx);
    if (data.isExplain())
        p = new ExplainPlan(p);
    return p;
}
Creates a query plan as follows . It first takes the product of all tables and views ; it then selects on the predicate ; and finally it projects on the field list .
19,432
/**
 * Creates a left-deep query plan using Selinger-style optimization: a
 * TablePlanner is built for every base table, getAccessPath() searches the
 * join orders for the cheapest left-deep plan, and the trunk is then
 * wrapped with grouping, projection, sorting and (optionally) explain.
 *
 * @param data the parsed query data
 * @param tx   the calling transaction
 * @return the optimized query plan
 */
public Plan createPlan(QueryData data, Transaction tx) {
    // ids number the base-table planners in encounter order; views are
    // expanded into full sub-plans and kept separately.
    int id = 0;
    for (String tbl : data.tables()) {
        String viewdef = VanillaDb.catalogMgr().getViewDef(tbl, tx);
        if (viewdef != null)
            views.add(VanillaDb.newPlanner().createQueryPlan(viewdef, tx));
        else {
            TablePlanner tp = new TablePlanner(tbl, data.pred(), tx, id);
            tablePlanners.add(tp);
            id += 1;
        }
    }
    // getAccessPath() performs the join-order search over tablePlanners.
    Plan trunk = getAccessPath();
    if (data.groupFields() != null)
        trunk = new GroupByPlan(trunk, data.groupFields(), data.aggregationFn(), tx);
    trunk = new ProjectPlan(trunk, data.projectFields());
    if (data.sortFields() != null)
        trunk = new SortPlan(trunk, data.sortFields(), data.sortDirections(), tx);
    if (data.isExplain())
        trunk = new ExplainPlan(trunk);
    return trunk;
}
Creates a left - deep query plan using the Selinger optimization . Main idea is to find all permutation of table join order and to choose the cheapest plan . However all permutation can be too much for us to go through all . So we use DP and left - deep only to optimize this process .
19,433
/**
 * Formats a B-tree page: writes a default integer header, the flag values,
 * and then initializes as many index-record slots as fit with default
 * values.
 *
 * @param buf the buffer holding the page to format
 */
public void format(Buffer buf) {
    // Header: one default INTEGER, followed by the flags as BIGINTs.
    int pos = 0;
    setVal(buf, pos, Constant.defaultInstance(INTEGER));
    pos += Page.maxSize(INTEGER);
    int flagSize = Page.maxSize(BIGINT);
    for (long flag : flags) {
        setVal(buf, pos, new BigIntConstant(flag));
        pos += flagSize;
    }
    // Fill the remainder of the page with default records.
    int slotSize = BTreePage.slotSize(sch);
    for (int slotPos = pos; slotPos + slotSize <= Buffer.BUFFER_SIZE; slotPos += slotSize)
        makeDefaultRecord(buf, slotPos);
}
Formats the page by initializing as many index - record slots as possible to have default values .
19,434
/**
 * Rolls back the current transaction by notifying every registered
 * lifecycle listener (which undo changes, release locks, unpin blocks,
 * etc.), then logs the rollback at FINE level.
 */
public void rollback() {
    for (TransactionLifecycleListener listener : lifecycleListeners)
        listener.onTxRollback(this);
    if (logger.isLoggable(Level.FINE))
        logger.fine("transaction " + txNum + " rolled back");
}
Rolls back the current transaction . Undoes any modified values flushes those blocks writes and flushes a rollback record to the log releases all locks and unpins any pinned blocks .
19,435
/**
 * Sets the value of the specified field, delegating to the underlying
 * record file.
 *
 * @param fldName the name of the field
 * @param val     the new value as a Constant
 */
public void setVal(String fldName, Constant val) {
    rf.setVal(fldName, val);
}
Sets the value of the specified field as a Constant .
19,436
/**
 * Opens a merge-join scan: each underlying plan is opened as a sort scan
 * (sorted on its join field) and the two sorted scans are merged.
 *
 * @return the merge-join scan
 */
public Scan open() {
    SortScan left = (SortScan) sp1.open();
    SortScan right = (SortScan) sp2.open();
    return new MergeJoinScan(left, right, fldName1, fldName2);
}
The method first sorts its two underlying scans on their join field . It then returns a mergejoin scan of the two sorted table scans .
19,437
/**
 * Formats the page by allocating as many record slots as fit given the
 * record size. Every slot is flagged EMPTY and filled with default field
 * values.
 *
 * @param buf the buffer holding the page to format
 */
public void format(Buffer buf) {
    int slotSize = RecordPage.slotSize(ti.schema());
    Constant emptyFlag = new IntegerConstant(EMPTY);
    int pos = 0;
    while (pos + slotSize <= Buffer.BUFFER_SIZE) {
        // Slot header flag followed by default-valued fields.
        setVal(buf, pos, emptyFlag);
        makeDefaultRecord(buf, pos);
        pos += slotSize;
    }
}
Formats the page by allocating as many record slots as possible given the record size . Each record slot is assigned a flag of EMPTY . Each numeric field is given a value of 0 and each string field is given a value of .
19,438
/**
 * Ensures the log records up to and including the specified LSN are on
 * disk. If the LSN has already been flushed this is a no-op.
 *
 * @param lsn the log sequence number that must be durable
 */
public void flush(LogSeqNum lsn) {
    logMgrLock.lock();
    try {
        // Only flush when the requested LSN is not yet covered.
        if (lsn.compareTo(lastFlushedLsn) >= 0)
            flush();
    } finally {
        logMgrLock.unlock();
    }
}
Ensures that the log records corresponding to the specified LSN has been written to disk . All earlier log records will also be written to disk .
19,439
/**
 * Returns an iterator over the log records in reverse order, starting with
 * the most recent. The log is flushed first so the iterator sees every
 * appended record.
 *
 * @return a reversible iterator positioned at the current block
 */
public ReversibleIterator<BasicLogRecord> iterator() {
    logMgrLock.lock();
    try {
        flush();
        return new LogIterator(currentBlk);
    } finally {
        logMgrLock.unlock();
    }
}
Returns an iterator for the log records which will be returned in reverse order starting with the most recent .
19,440
/**
 * Appends a log record (an arbitrary array of values) to the log file and
 * returns its LSN. Each record is followed by pointer integers that let
 * records be read in reverse order.
 *
 * @param rec the values making up the record
 * @return the LSN assigned to the appended record
 */
public LogSeqNum append(Constant[] rec) {
    logMgrLock.lock();
    try {
        // Record size = payload plus the two chain pointers appended by
        // finalizeRecord().
        int recsize = pointerSize * 2;
        for (Constant c : rec)
            recsize += Page.size(c);
        // If the record does not fit, flush the full page first and start
        // a new block; the LSN must be taken AFTER this so it refers to
        // the block the record actually lands in.
        if (currentPos + recsize >= BLOCK_SIZE) {
            flush();
            appendNewBlock();
        }
        LogSeqNum lsn = currentLSN();
        for (Constant c : rec)
            appendVal(c);
        // Wire up the reverse-traversal pointers for this record.
        finalizeRecord();
        lastLsn = lsn;
        return lsn;
    } finally {
        logMgrLock.unlock();
    }
}
Appends a log record to the file . The record contains an arbitrary array of values . The method also writes an integer to the end of each log record whose value is the offset of the corresponding integer for the previous log record . These integers allow log records to be read in reverse order .
19,441
/**
 * Removes the old log file and starts a fresh one, resetting both LSN
 * markers to their defaults.
 */
public void removeAndCreateNewLog() {
    logMgrLock.lock();
    try {
        VanillaDb.fileMgr().delete(logFile);
        // Reset LSN bookkeeping before allocating the first block.
        lastLsn = LogSeqNum.DEFAULT_VALUE;
        lastFlushedLsn = LogSeqNum.DEFAULT_VALUE;
        appendNewBlock();
    } finally {
        logMgrLock.unlock();
    }
}
Remove the old log file and create a new one .
19,442
/**
 * Writes the value into the page at currentPos, then advances currentPos
 * past it.
 *
 * @param val the value to append
 */
private void appendVal(Constant val) {
    myPage.setVal(currentPos, val);
    currentPos += Page.size(val);
}
Adds the specified value to the page at the position denoted by currentPos . Then increments currentPos by the size of the value .
19,443
/**
 * Completes the just-appended record by maintaining the chain of record
 * pointers in the page: each record ends with an integer holding the
 * offset of the previous record, enabling reverse traversal.
 */
private void finalizeRecord() {
    // Write the back-pointer (offset of the previous record) at the end of
    // this record.
    myPage.setVal(currentPos, new IntegerConstant(getLastRecordPosition()));
    // The previous record's forward pointer now points past this record's
    // back-pointer.
    setPreviousNextRecordPosition(currentPos + pointerSize);
    // This record becomes the page's last record.
    setLastRecordPosition(currentPos);
    currentPos += pointerSize;
    // Reserve/record the forward pointer slot for the next record.
    setNextRecordPosition(currentPos);
    currentPos += pointerSize;
}
Sets up a circular chain of pointers to the records in the page . There is an integer added to the end of each log record whose value is the offset of the previous log record . The first four bytes of the page contain an integer whose value is the offset of the integer for the last log record in the page .
19,444
/**
 * Materializes the underlying query: copies every output record into a
 * temporary table, then returns a scan over that table positioned before
 * its first record.
 *
 * @return a scan over the materialized records
 */
public Scan open() {
    Schema sch = p.schema();
    TempTable temp = new TempTable(sch, tx);
    Scan src = p.open();
    UpdateScan dest = temp.open();
    src.beforeFirst();
    // Field-by-field copy of every source record into the temp table.
    while (src.next()) {
        dest.insert();
        for (String fldname : sch.fields())
            dest.setVal(fldname, src.getVal(fldname));
    }
    src.close();
    dest.beforeFirst();
    return dest;
}
This method loops through the underlying query copying its output records into a temporary table . It then returns a table scan for that table .
19,445
/**
 * Builds the schema of the index records: one field per key component,
 * plus the data record's block number and slot id.
 *
 * @param keyType the type of the search key
 * @return the index-record schema
 */
private static Schema schema(SearchKeyType keyType) {
    Schema sch = new Schema();
    for (int idx = 0; idx < keyType.length(); idx++)
        sch.addField(keyFieldName(idx), keyType.get(idx));
    // The pointer back to the data record: block number + slot id.
    sch.addField(SCHEMA_RID_BLOCK, BIGINT);
    sch.addField(SCHEMA_RID_ID, INTEGER);
    return sch;
}
Returns the schema of the index records .
19,446
/**
 * Reads the data record id out of the current index record.
 *
 * @return the record id stored in the RID fields of the index record
 */
public RecordId getDataRecordId() {
    long blkNum = (Long) rf.getVal(SCHEMA_RID_BLOCK).asJavaVal();
    int slotId = (Integer) rf.getVal(SCHEMA_RID_ID).asJavaVal();
    BlockId blk = new BlockId(dataFileName, blkNum);
    return new RecordId(blk, slotId);
}
Retrieves the data record ID from the current index record .
19,447
/**
 * Inserts a new index record mapping the given search key to the given
 * data record id, optionally bracketing the operation with logical log
 * records for recovery.
 *
 * @param key              the key of the new index record
 * @param dataRecordId     the data record the key points to
 * @param doLogicalLogging whether to emit logical start/end log records
 */
public void insert(SearchKey key, RecordId dataRecordId, boolean doLogicalLogging) {
    beforeFirst(new SearchRange(key));
    if (doLogicalLogging)
        tx.recoveryMgr().logLogicalStart();
    rf.insert();
    // Key components first, then the RID fields pointing at the data record.
    for (int idx = 0; idx < keyType.length(); idx++)
        rf.setVal(keyFieldName(idx), key.get(idx));
    rf.setVal(SCHEMA_RID_BLOCK, new BigIntConstant(dataRecordId.block().number()));
    rf.setVal(SCHEMA_RID_ID, new IntegerConstant(dataRecordId.id()));
    if (doLogicalLogging)
        tx.recoveryMgr().logIndexInsertionEnd(ii.indexName(), key,
                dataRecordId.block().number(), dataRecordId.id());
}
Inserts a new index record into this index .
19,448
/**
 * Deletes the index record matching the given search key and data record
 * id, optionally bracketing the operation with logical log records.
 *
 * @param key              the key of the index record to delete
 * @param dataRecordId     the data record id the entry must point to
 * @param doLogicalLogging whether to emit logical start/end log records
 */
public void delete(SearchKey key, RecordId dataRecordId, boolean doLogicalLogging) {
    beforeFirst(new SearchRange(key));
    if (doLogicalLogging)
        tx.recoveryMgr().logLogicalStart();
    while (next())
        if (getDataRecordId().equals(dataRecordId)) {
            rf.delete();
            // Fixed: the original returned here, skipping the logical-end
            // record below and leaving an unbalanced logical start in the
            // log whenever the deletion actually succeeded.
            break;
        }
    if (doLogicalLogging)
        tx.recoveryMgr().logIndexDeletionEnd(ii.indexName(), key,
                dataRecordId.block().number(), dataRecordId.id());
}
Deletes the specified index record .
19,449
/**
 * Moves to the next record of the current chunk's product scan, switching
 * to the next chunk whenever the current one is exhausted.
 *
 * @return false when no chunks remain
 */
public boolean next() {
    if (prodScan == null)
        return false;
    // Keep advancing; whenever the current product scan runs dry, load the
    // next chunk and retry until a record is found or chunks run out.
    while (!prodScan.next()) {
        if (!useNextChunk())
            return false;
    }
    return true;
}
Moves to the next record in the current scan . If there are no more records in the current chunk then move to the next LHS record and the beginning of that chunk . If there are no more LHS records then move to the next chunk and begin again .
19,450
/**
 * Builds the schema of B-tree directory records: one field per key
 * component plus the child block number.
 *
 * @param keyType the type of the search key
 * @return the directory-record schema
 */
static Schema schema(SearchKeyType keyType) {
    Schema sch = new Schema();
    for (int idx = 0; idx < keyType.length(); idx++)
        sch.addField(keyFieldName(idx), keyType.get(idx));
    // Each directory entry points at one child block.
    sch.addField(SCH_CHILD, BIGINT);
    return sch;
}
Returns the schema of the B - tree directory records .
19,451
/**
 * Returns the block number of the B-tree leaf block containing the search
 * key, traversing the directory with the strategy matching the purpose.
 *
 * @param searchKey    the key to locate
 * @param leafFileName the name of the leaf file
 * @param purpose      READ, INSERT or DELETE
 * @return the id of the leaf block
 * @throws UnsupportedOperationException for an unknown purpose
 */
public BlockId search(SearchKey searchKey, String leafFileName, SearchPurpose purpose) {
    switch (purpose) {
    case READ:
        return searchForRead(searchKey, leafFileName);
    case INSERT:
        return searchForInsert(searchKey, leafFileName);
    case DELETE:
        return searchForDelete(searchKey, leafFileName);
    default:
        throw new UnsupportedOperationException();
    }
}
Returns the block number of the B - tree leaf block that contains the specified search key .
19,452
/**
 * Binary-searches the current page for the slot right before the first
 * record whose key is >= the search key.
 *
 * @param searchKey the key to position before
 * @return the slot index of the last record with key < searchKey, or -1 if
 *         the page is empty or every key is >= searchKey
 */
private int findSlotBefore(SearchKey searchKey) {
    int startSlot = 0, endSlot = currentPage.getNumRecords() - 1;
    int middleSlot = (startSlot + endSlot) / 2;
    if (endSlot >= 0) {
        // Invariant: the answer lies in [startSlot-1, endSlot]; narrow
        // until start and middle coincide (a window of at most 2 slots).
        while (middleSlot != startSlot) {
            if (getKey(currentPage, middleSlot, keyType.length()).compareTo(searchKey) < 0)
                startSlot = middleSlot;
            else
                endSlot = middleSlot;
            middleSlot = (startSlot + endSlot) / 2;
        }
        // Resolve the final 1-2 candidate slots explicitly.
        if (getKey(currentPage, endSlot, keyType.length()).compareTo(searchKey) < 0)
            return endSlot;
        else if (getKey(currentPage, startSlot, keyType.length()).compareTo(searchKey) < 0)
            return startSlot;
        else
            return startSlot - 1; // every key >= searchKey
    } else
        return -1; // empty page
}
Calculates the slot right before the one having the specified search key .
19,453
/**
 * Returns the value stored at the specified offset of this page. If no
 * constant was stored at that offset the behavior is unpredictable.
 *
 * @param offset the byte offset within the page
 * @param type   the expected type of the stored value
 * @return the constant reconstructed from the stored bytes
 */
public synchronized Constant getVal(int offset, Type type) {
    int size;
    byte[] byteVal = null;
    if (type.isFixedSize()) {
        // Fixed-size types occupy exactly maxSize() bytes, no length prefix.
        size = type.maxSize();
    } else {
        // Variable-size types are stored as a 4-byte length prefix
        // followed by the payload.
        byteVal = new byte[ByteHelper.INT_SIZE];
        contents.get(offset, byteVal);
        size = ByteHelper.toInteger(byteVal);
        offset += ByteHelper.INT_SIZE;
    }
    byteVal = new byte[size];
    contents.get(offset, byteVal);
    return Constant.newInstance(type, byteVal);
}
Returns the value at a specified offset of this page . If a constant was not stored at that offset the behavior of the method is unpredictable .
19,454
/**
 * Writes a constant value at the specified offset of this page.
 * Variable-size values are stored as a 4-byte length prefix followed by
 * the payload bytes.
 *
 * @param offset the byte offset within the page
 * @param val    the value to store
 * @throws BufferOverflowException if a variable-size value would extend
 *         past the end of the block
 */
public synchronized void setVal(int offset, Constant val) {
    byte[] byteval = val.asBytes();
    if (!val.getType().isFixedSize()) {
        // NOTE(review): the overflow check guards only variable-size
        // values; fixed-size writes rely on callers staying in bounds.
        if (offset + ByteHelper.INT_SIZE + byteval.length > BLOCK_SIZE)
            throw new BufferOverflowException();
        byte[] sizeBytes = ByteHelper.toBytes(byteval.length);
        contents.put(offset, sizeBytes);
        offset += sizeBytes.length;
    }
    contents.put(offset, byteval);
}
Writes a constant value to the specified offset on the page .
19,455
/**
 * Constructs a constant of the specified type, decoding its value from the
 * given byte array.
 *
 * @param type the SQL type of the constant
 * @param val  the encoded value
 * @return the decoded constant
 * @throws UnsupportedOperationException for an unknown SQL type
 */
public static Constant newInstance(Type type, byte[] val) {
    int sqlType = type.getSqlType();
    if (sqlType == INTEGER)
        return new IntegerConstant(val);
    if (sqlType == BIGINT)
        return new BigIntConstant(val);
    if (sqlType == DOUBLE)
        return new DoubleConstant(val);
    if (sqlType == VARCHAR)
        return new VarcharConstant(val, type);
    throw new UnsupportedOperationException("Unspported SQL type: " + type.getSqlType());
}
Constructs a new instance of the specified type with value converted from the input byte array .
19,456
/**
 * Returns the default constant for the specified type: 0 for numeric
 * types, the empty string for varchar.
 *
 * @param type the SQL type
 * @return the shared default constant for that type
 * @throws UnsupportedOperationException for an unknown SQL type
 */
public static Constant defaultInstance(Type type) {
    int sqlType = type.getSqlType();
    if (sqlType == INTEGER)
        return defaultInteger;
    if (sqlType == BIGINT)
        return defaultBigInt;
    if (sqlType == DOUBLE)
        return defaultDouble;
    if (sqlType == VARCHAR)
        return defaultVarchar;
    throw new UnsupportedOperationException("Unspported SQL type: " + type.getSqlType());
}
Constructs a new instance of the specified type with default value . For all numeric constants the default value is 0 ; for string constants the default value is an empty string .
19,457
/**
 * Changes this connection's read-only mode. If the mode actually changes,
 * the current transaction is committed and a new transaction with the
 * requested mode is started; otherwise nothing happens.
 *
 * @param readOnly the desired read-only mode
 * @throws RemoteException if the new transaction cannot be created
 */
public void setReadOnly(boolean readOnly) throws RemoteException {
    if (this.readOnly == readOnly)
        return;
    tx.commit();
    this.readOnly = readOnly;
    try {
        tx = VanillaDb.txMgr().newTransaction(isolationLevel, readOnly);
    } catch (Exception e) {
        throw new RemoteException("error creating transaction ", e);
    }
}
Sets this connection's read-only mode. If the mode changes, this method commits the current transaction and starts a new transaction with the requested mode.
19,458
/**
 * Commits the current transaction and immediately begins a new one with
 * the same isolation level and read-only mode.
 *
 * @throws RemoteException if the replacement transaction cannot be created
 */
public void commit() throws RemoteException {
    tx.commit();
    try {
        tx = VanillaDb.txMgr().newTransaction(isolationLevel, readOnly);
    } catch (Exception e) {
        throw new RemoteException("error creating transaction ", e);
    }
}
Commits the current transaction and begins a new one .
19,459
/**
 * Inserts one record from the given scan into this temp record file
 * (used for sorting; fills at most one block).
 *
 * @param s the source scan, positioned on the record to copy
 * @return 0 if the block has no empty slot left; 1 if the record was
 *         copied and the source scan has more records; -1 if the record
 *         was copied and the source scan is exhausted
 */
public int insertFromScan(Scan s) {
    // No room in this block: signal the caller to move to the next one.
    if (!super.insertIntoNextEmptySlot()) {
        return 0;
    }
    for (String fldName : sch.fields()) {
        Constant val = s.getVal(fldName);
        this.setVal(fldName, val);
    }
    // Advance the source; the sign tells the caller whether more remain.
    if (s.next())
        return 1;
    else
        return -1;
}
Inserts records from the source scan into this TempRecordFile for sorting, filling at most one block.
19,460
/**
 * Copies the next record of this file into the given update scan.
 *
 * @param s the destination scan
 * @return false if this file has no more records
 */
public boolean copyToScan(UpdateScan s) {
    if (!this.next())
        return false;
    s.insert();
    for (String fldName : sch.fields())
        s.setVal(fldName, this.getVal(fldName));
    return true;
}
Copy sorted records to UpdateScan
19,461
/**
 * Builds the schema of B-tree leaf records: one field per key component
 * plus the data record's block number and slot id.
 *
 * @param keyType the type of the search key
 * @return the leaf-record schema
 */
static Schema schema(SearchKeyType keyType) {
    Schema sch = new Schema();
    for (int idx = 0; idx < keyType.length(); idx++)
        sch.addField(keyFieldName(idx), keyType.get(idx));
    // Pointer back to the data record: block number + slot id.
    sch.addField(SCH_RID_BLOCK, BIGINT);
    sch.addField(SCH_RID_ID, INTEGER);
    return sch;
}
Returns the schema of the B - tree leaf records .
19,462
/**
 * Moves to the next B-tree leaf record matching the search range,
 * following sibling links between leaf pages and detouring through
 * overflow chains for duplicated keys.
 *
 * @return false when no further matching record exists
 */
public boolean next() {
    while (true) {
        currentSlot++;
        if (!isOverflowing) {
            // Normal leaf traversal.
            if (currentSlot >= currentPage.getNumRecords()) {
                // Page exhausted: follow the sibling link if there is one.
                if (getSiblingFlag(currentPage) != -1) {
                    moveTo(getSiblingFlag(currentPage), -1);
                    continue;
                }
                return false;
            } else if (searchRange.match(getKey(currentPage, currentSlot, keyType.length()))) {
                // Matching key at slot 0 with an overflow chain: detour
                // into the overflow pages before resuming this page.
                if (currentSlot == 0 && getOverflowFlag(currentPage) != -1) {
                    isOverflowing = true;
                    overflowFrom = currentPage.currentBlk().number();
                    moveTo(getOverflowFlag(currentPage), 0);
                }
                return true;
            } else if (searchRange.betweenMinAndMax(getKey(currentPage, currentSlot, keyType.length()))) {
                // Inside the range bounds but not a match: keep scanning.
                continue;
            } else
                return false;
        } else {
            // Walking an overflow chain: every record matches by
            // construction; the chain is circular and ends when it loops
            // back to the page it started from.
            if (currentSlot >= currentPage.getNumRecords()) {
                moveTo(getOverflowFlag(currentPage), 0);
                if (currentPage.currentBlk().number() == overflowFrom) {
                    isOverflowing = false;
                    overflowFrom = -1;
                }
            }
            return true;
        }
    }
}
Moves to the next B - tree leaf record matching the search key .
19,463
public DirEntry insert ( RecordId dataRecordId ) { if ( ! searchRange . isSingleValue ( ) ) throw new IllegalStateException ( ) ; currentSlot ++ ; SearchKey searchKey = searchRange . asSearchKey ( ) ; insert ( currentSlot , searchKey , dataRecordId ) ; if ( currentSlot == 0 && getOverflowFlag ( currentPage ) != - 1 && ! getKey ( currentPage , 1 , keyType . length ( ) ) . equals ( searchKey ) ) { SearchKey splitKey = getKey ( currentPage , 1 , keyType . length ( ) ) ; long newBlkNum = currentPage . split ( 1 , new long [ ] { getOverflowFlag ( currentPage ) , getSiblingFlag ( currentPage ) } ) ; setOverflowFlag ( currentPage , - 1 ) ; setSiblingFlag ( currentPage , newBlkNum ) ; return new DirEntry ( splitKey , newBlkNum ) ; } if ( ! currentPage . isFull ( ) ) return null ; SearchKey firstKey = getKey ( currentPage , 0 , keyType . length ( ) ) ; SearchKey lastKey = getKey ( currentPage , currentPage . getNumRecords ( ) - 1 , keyType . length ( ) ) ; if ( lastKey . equals ( firstKey ) ) { long overflowFlag = ( getOverflowFlag ( currentPage ) == - 1 ) ? currentPage . currentBlk ( ) . number ( ) : getOverflowFlag ( currentPage ) ; long newBlkNum = currentPage . split ( 1 , new long [ ] { overflowFlag , - 1 } ) ; setOverflowFlag ( currentPage , newBlkNum ) ; return null ; } else { int splitPos = currentPage . getNumRecords ( ) / 2 ; SearchKey splitKey = getKey ( currentPage , splitPos , keyType . length ( ) ) ; if ( splitKey . equals ( firstKey ) ) { while ( getKey ( currentPage , splitPos , keyType . length ( ) ) . equals ( splitKey ) ) splitPos ++ ; splitKey = getKey ( currentPage , splitPos , keyType . length ( ) ) ; } else { while ( getKey ( currentPage , splitPos - 1 , keyType . length ( ) ) . equals ( splitKey ) ) splitPos -- ; } long newBlkNum = currentPage . split ( splitPos , new long [ ] { - 1 , getSiblingFlag ( currentPage ) } ) ; setSiblingFlag ( currentPage , newBlkNum ) ; return new DirEntry ( splitKey , newBlkNum ) ; } }
Inserts a new B - tree leaf record having the specified data record ID and the previously - specified search key . This method can only be called once immediately after construction .
19,464
public void delete ( RecordId dataRecordId ) { if ( ! searchRange . isSingleValue ( ) ) throw new IllegalStateException ( ) ; while ( next ( ) ) if ( getDataRecordId ( ) . equals ( dataRecordId ) ) { delete ( currentSlot ) ; break ; } if ( ! isOverflowing ) { if ( getOverflowFlag ( currentPage ) != - 1 ) { BlockId blk = new BlockId ( currentPage . currentBlk ( ) . fileName ( ) , getOverflowFlag ( currentPage ) ) ; ccMgr . modifyLeafBlock ( blk ) ; BTreePage overflowPage = new BTreePage ( blk , NUM_FLAGS , schema , tx ) ; SearchKey firstKey = getKey ( currentPage , 0 , keyType . length ( ) ) ; if ( ( currentPage . getNumRecords ( ) == 0 || ( overflowPage . getNumRecords ( ) != 0 && getKey ( overflowPage , 0 , keyType . length ( ) ) != firstKey ) ) ) { overflowPage . transferRecords ( overflowPage . getNumRecords ( ) - 1 , currentPage , 0 , 1 ) ; if ( overflowPage . getNumRecords ( ) == 0 ) { long overflowFlag = ( getOverflowFlag ( overflowPage ) == currentPage . currentBlk ( ) . number ( ) ) ? - 1 : getOverflowFlag ( overflowPage ) ; setOverflowFlag ( currentPage , overflowFlag ) ; } overflowPage . close ( ) ; } } } else { if ( currentPage . getNumRecords ( ) == 0 ) { BlockId blk = new BlockId ( currentPage . currentBlk ( ) . fileName ( ) , moveFrom ) ; BTreePage prePage = new BTreePage ( blk , NUM_FLAGS , schema , tx ) ; long overflowFlag = ( getOverflowFlag ( currentPage ) == prePage . currentBlk ( ) . number ( ) ) ? - 1 : getOverflowFlag ( currentPage ) ; setOverflowFlag ( prePage , overflowFlag ) ; prePage . close ( ) ; } } }
Deletes the B - tree leaf record having the specified data record ID and the previously - specified search key . This method can only be called once immediately after construction .
19,465
private void moveSlotBefore ( ) { int startSlot = 0 , endSlot = currentPage . getNumRecords ( ) - 1 ; int middleSlot = ( startSlot + endSlot ) / 2 ; SearchKey searchMin = searchRange . getMin ( ) ; if ( endSlot >= 0 ) { while ( middleSlot != startSlot ) { if ( searchMin . compareTo ( getKey ( currentPage , middleSlot , keyType . length ( ) ) ) > 0 ) startSlot = middleSlot ; else endSlot = middleSlot ; middleSlot = ( startSlot + endSlot ) / 2 ; } if ( searchMin . compareTo ( getKey ( currentPage , endSlot , keyType . length ( ) ) ) > 0 ) currentSlot = endSlot ; else if ( searchMin . compareTo ( getKey ( currentPage , startSlot , keyType . length ( ) ) ) > 0 ) currentSlot = startSlot ; else currentSlot = startSlot - 1 ; } else currentSlot = - 1 ; }
Positions the current slot right before the first index record that matches the specified search range .
19,466
private void moveTo ( long blkNum , int slot ) { moveFrom = currentPage . currentBlk ( ) . number ( ) ; BlockId blk = new BlockId ( currentPage . currentBlk ( ) . fileName ( ) , blkNum ) ; ccMgr . readLeafBlock ( blk ) ; currentPage . close ( ) ; currentPage = new BTreePage ( blk , NUM_FLAGS , schema , tx ) ; currentSlot = slot ; }
Opens the page for the specified block and moves the current slot to the specified position .
19,467
public static Histogram syncHistogram ( Histogram hist ) { double maxRecs = 0.0 ; for ( String fld : hist . fields ( ) ) { double numRecs = 0.0 ; for ( Bucket bkt : hist . buckets ( fld ) ) numRecs += bkt . frequency ( ) ; if ( Double . compare ( numRecs , maxRecs ) > 0 ) maxRecs = numRecs ; } Histogram syncHist = new Histogram ( hist . fields ( ) ) ; for ( String fld : hist . fields ( ) ) { double numRecs = 0.0 ; for ( Bucket bkt : hist . buckets ( fld ) ) numRecs += bkt . frequency ( ) ; double extrapolation = maxRecs / numRecs ; for ( Bucket bkt : hist . buckets ( fld ) ) syncHist . addBucket ( fld , new Bucket ( bkt . valueRange ( ) , extrapolation * bkt . frequency ( ) , bkt . distinctValues ( ) , bkt . valuePercentiles ( ) ) ) ; } return syncHist ; }
Buckets of a field may be discarded during cost estimation if their frequency is less than 1. As a result, the total frequencies of buckets may differ across fields. This method synchronizes the total frequencies of the different fields in the specified histogram.
19,468
synchronized int reserveNextCorrelationId ( VersionedIoFuture future ) { Integer next = getNextCorrelationId ( ) ; while ( requests . containsKey ( next ) ) { next = getNextCorrelationId ( ) ; } requests . put ( next , future ) ; return next ; }
Reserves a correlation ID by taking the next value and ensuring it is stored in the Map .
19,469
public static Histogram predHistogram ( Histogram hist , Predicate pred ) { if ( Double . compare ( hist . recordsOutput ( ) , 1.0 ) < 0 ) return new Histogram ( hist . fields ( ) ) ; Map < String , ConstantRange > cRanges = new HashMap < String , ConstantRange > ( ) ; for ( String fld : hist . fields ( ) ) { ConstantRange cr = pred . constantRange ( fld ) ; if ( cr != null ) cRanges . put ( fld , cr ) ; } Histogram crHist = constantRangeHistogram ( hist , cRanges ) ; Histogram jfHist = crHist ; Deque < String > flds = new LinkedList < String > ( jfHist . fields ( ) ) ; while ( ! flds . isEmpty ( ) ) { String fld = flds . removeFirst ( ) ; Set < String > group = pred . joinFields ( fld ) ; if ( group != null ) { flds . removeAll ( group ) ; group . add ( fld ) ; jfHist = joinFieldsHistogram ( jfHist , group ) ; } } return jfHist ; }
Returns a histogram that for each field approximates the distribution of field values from the specified histogram satisfying the specified predicate .
19,470
public static Histogram constantRangeHistogram ( Histogram hist , Map < String , ConstantRange > cRanges ) { if ( Double . compare ( hist . recordsOutput ( ) , 1.0 ) < 0 ) return new Histogram ( hist . fields ( ) ) ; Histogram crHist = new Histogram ( hist ) ; for ( String fld : cRanges . keySet ( ) ) { Collection < Bucket > crBkts = new ArrayList < Bucket > ( crHist . buckets ( fld ) . size ( ) ) ; ConstantRange cr = cRanges . get ( fld ) ; double freqSum = 0.0 ; for ( Bucket bkt : crHist . buckets ( fld ) ) { Bucket crBkt = constantRangeBucket ( bkt , cr ) ; if ( crBkt != null ) { crBkts . add ( crBkt ) ; freqSum += crBkt . frequency ( ) ; } } if ( Double . compare ( freqSum , 1.0 ) < 0 ) return new Histogram ( hist . fields ( ) ) ; double crReduction = freqSum / crHist . recordsOutput ( ) ; if ( Double . compare ( crReduction , 1.0 ) == 0 ) continue ; crHist . setBuckets ( fld , crBkts ) ; for ( String restFld : crHist . fields ( ) ) { if ( restFld . equals ( fld ) ) continue ; Collection < Bucket > restBkts = new ArrayList < Bucket > ( crHist . buckets ( restFld ) . size ( ) ) ; for ( Bucket bkt : crHist . buckets ( restFld ) ) { double restFreq = bkt . frequency ( ) * crReduction ; if ( Double . compare ( restFreq , 1.0 ) < 0 ) continue ; double restDistVals = Math . min ( bkt . distinctValues ( ) , restFreq ) ; Bucket restBkt = new Bucket ( bkt . valueRange ( ) , restFreq , restDistVals , bkt . valuePercentiles ( ) ) ; restBkts . add ( restBkt ) ; } crHist . setBuckets ( restFld , restBkts ) ; } } return syncHistogram ( crHist ) ; }
Returns a histogram that for each field approximates the distribution of values from the specified histogram falling within the specified search range .
19,471
public static Bucket constantRangeBucket ( Bucket bkt , ConstantRange cRange ) { ConstantRange newRange = bkt . valueRange ( ) . intersect ( cRange ) ; if ( ! newRange . isValid ( ) ) return null ; double newDistVals = bkt . distinctValues ( newRange ) ; if ( Double . compare ( newDistVals , 1.0 ) < 0 ) return null ; double newFreq = bkt . frequency ( ) * newDistVals / bkt . distinctValues ( ) ; if ( bkt . valuePercentiles ( ) == null ) return new Bucket ( newRange , newFreq , newDistVals ) ; Percentiles newPcts = bkt . valuePercentiles ( ) . percentiles ( newRange ) ; return new Bucket ( newRange , newFreq , newDistVals , newPcts ) ; }
Creates a new bucket by keeping the statistics of records and values in the specified bucket falling within the specified search range .
19,472
public static Histogram joinFieldsHistogram ( Histogram hist , Set < String > group ) { if ( group . size ( ) < 2 ) return new Histogram ( hist ) ; List < String > flds = new ArrayList < String > ( group ) ; Collection < Bucket > jfBkts = hist . buckets ( flds . get ( 0 ) ) ; for ( int i = 1 ; i < flds . size ( ) ; i ++ ) { Collection < Bucket > temp = jfBkts ; jfBkts = new ArrayList < Bucket > ( 2 * jfBkts . size ( ) ) ; for ( Bucket bkt1 : temp ) { for ( Bucket bkt2 : hist . buckets ( flds . get ( i ) ) ) { Bucket jfBkt = joinFieldBucket ( bkt1 , bkt2 , hist . recordsOutput ( ) ) ; if ( jfBkt != null ) jfBkts . add ( jfBkt ) ; } } } double freqSum = 0.0 ; for ( Bucket bkt : jfBkts ) freqSum += bkt . frequency ( ) ; if ( Double . compare ( freqSum , 1.0 ) < 0 ) return new Histogram ( hist . fields ( ) ) ; double jfReduction = freqSum / hist . recordsOutput ( ) ; if ( Double . compare ( jfReduction , 1.0 ) == 0 ) return new Histogram ( hist ) ; Histogram jfHist = new Histogram ( hist . fields ( ) ) ; for ( String fld : hist . fields ( ) ) { if ( group . contains ( fld ) ) jfHist . setBuckets ( fld , jfBkts ) ; else { for ( Bucket bkt : hist . buckets ( fld ) ) { double restFreq = bkt . frequency ( ) * jfReduction ; if ( Double . compare ( restFreq , 1.0 ) < 0 ) continue ; double restDistVals = Math . min ( bkt . distinctValues ( ) , restFreq ) ; Bucket restBkt = new Bucket ( bkt . valueRange ( ) , restFreq , restDistVals , bkt . valuePercentiles ( ) ) ; jfHist . addBucket ( fld , restBkt ) ; } } } return syncHistogram ( jfHist ) ; }
Returns a histogram that for each field approximates the distribution of values from the specified histogram joining with other fields in the specified group .
19,473
public static Bucket joinFieldBucket ( Bucket bkt1 , Bucket bkt2 , double numRec ) { ConstantRange newRange = bkt1 . valueRange ( ) . intersect ( bkt2 . valueRange ( ) ) ; if ( ! newRange . isValid ( ) ) return null ; double rdv1 = bkt1 . distinctValues ( newRange ) ; double rdv2 = bkt2 . distinctValues ( newRange ) ; double newDistVals = Math . min ( rdv1 , rdv2 ) ; if ( Double . compare ( newDistVals , 1.0 ) < 0 ) return null ; double newFreq = Math . min ( bkt1 . frequency ( ) * ( bkt2 . frequency ( ) / numRec ) * ( newDistVals / bkt1 . distinctValues ( ) ) / rdv2 , bkt2 . frequency ( ) * ( bkt1 . frequency ( ) / numRec ) * ( newDistVals / bkt2 . distinctValues ( ) ) / rdv1 ) ; if ( Double . compare ( newFreq , 1.0 ) < 0 ) return null ; Bucket smaller = rdv1 < rdv2 ? bkt1 : bkt2 ; if ( smaller . valuePercentiles ( ) == null ) return new Bucket ( newRange , newFreq , newDistVals ) ; Percentiles newPcts = smaller . valuePercentiles ( ) . percentiles ( newRange ) ; return new Bucket ( newRange , newFreq , newDistVals , newPcts ) ; }
Creates a new bucket by keeping the statistics of joining records and values from the two specified buckets .
19,474
public Scan open ( ) { Scan s = p . open ( ) ; return new SelectScan ( s , pred ) ; }
Creates a select scan for this query .
19,475
public Retrofit . Builder create ( String baseUrl , ObjectMapper objectMapper ) { return new Retrofit . Builder ( ) . baseUrl ( baseUrl ) . client ( _okHttpClient ) . addConverterFactory ( JacksonConverterFactory . create ( objectMapper ) ) ; }
Creates a new builder instance with the provided base url . Initialized with a Jackson JSON converter using the provided object mapper .
19,476
public boolean isSatisfied ( Record rec ) { for ( Term t : terms ) if ( ! t . isSatisfied ( rec ) ) return false ; return true ; }
Returns true if this predicate evaluates to true with respect to the specified record .
19,477
public Predicate selectPredicate ( Schema sch ) { Predicate result = new Predicate ( ) ; for ( Term t : terms ) if ( t . isApplicableTo ( sch ) ) result . terms . add ( t ) ; if ( result . terms . size ( ) == 0 ) return null ; else return result ; }
Returns the sub - predicate that applies to the specified schema .
19,478
public Predicate joinPredicate ( Schema sch1 , Schema sch2 ) { Predicate result = new Predicate ( ) ; Schema newsch = new Schema ( ) ; newsch . addAll ( sch1 ) ; newsch . addAll ( sch2 ) ; for ( Term t : terms ) if ( ! t . isApplicableTo ( sch1 ) && ! t . isApplicableTo ( sch2 ) && t . isApplicableTo ( newsch ) ) result . terms . add ( t ) ; return result . terms . size ( ) == 0 ? null : result ; }
Returns the sub - predicate consisting of terms that applies to the union of the two specified schemas but not to either schema separately .
19,479
public ConstantRange constantRange ( String fldName ) { ConstantRange cr = null ; for ( Term t : terms ) { Constant c = t . oppositeConstant ( fldName ) ; if ( c != null ) { Operator op = t . operator ( fldName ) ; if ( op == OP_GT ) cr = cr == null ? ConstantRange . newInstance ( c , false , null , false ) : cr . applyLow ( c , false ) ; else if ( op == OP_GTE ) cr = cr == null ? ConstantRange . newInstance ( c , true , null , false ) : cr . applyLow ( c , true ) ; else if ( op == OP_EQ ) cr = cr == null ? ConstantRange . newInstance ( c ) : cr . applyConstant ( c ) ; else if ( op == OP_LTE ) cr = cr == null ? ConstantRange . newInstance ( null , false , c , true ) : cr . applyHigh ( c , true ) ; else if ( op == OP_LT ) cr = cr == null ? ConstantRange . newInstance ( null , false , c , false ) : cr . applyHigh ( c , false ) ; } } if ( cr != null && cr . isValid ( ) && ( cr . hasLowerBound ( ) || cr . hasUpperBound ( ) ) ) return cr ; return null ; }
Determines if the specified field is constrained by a constant range in this predicate . If so the method returns that range . If not the method returns null .
19,480
public Constant getVal ( int offset , Type type ) { internalLock . readLock ( ) . lock ( ) ; try { return contents . getVal ( DATA_START_OFFSET + offset , type ) ; } finally { internalLock . readLock ( ) . unlock ( ) ; } }
Returns the value at the specified offset of this buffer's page. If a value of the specified type was not stored at that location, the behavior of the method is unpredictable.
19,481
public void setVal ( int offset , Constant val , long txNum , LogSeqNum lsn ) { internalLock . writeLock ( ) . lock ( ) ; try { modifiedBy . add ( txNum ) ; if ( lsn != null && lsn . compareTo ( lastLsn ) > 0 ) lastLsn = lsn ; lastLsn . writeToPage ( contents , LAST_LSN_OFFSET ) ; contents . setVal ( DATA_START_OFFSET + offset , val ) ; } finally { internalLock . writeLock ( ) . unlock ( ) ; } }
Writes a value to the specified offset of this buffer's page. This method assumes that the transaction has already written an appropriate log record. The buffer saves the id of the transaction and the LSN of the log record. A null lsn value indicates that a log record was not necessary.
19,482
void flush ( ) { internalLock . writeLock ( ) . lock ( ) ; flushLock . lock ( ) ; try { if ( isNew || modifiedBy . size ( ) > 0 ) { VanillaDb . logMgr ( ) . flush ( lastLsn ) ; contents . write ( blk ) ; modifiedBy . clear ( ) ; isNew = false ; } } finally { flushLock . unlock ( ) ; internalLock . writeLock ( ) . unlock ( ) ; } }
Writes the page to its disk block if the page is dirty . The method ensures that the corresponding log record has been written to disk prior to writing the page to disk .
19,483
boolean isModifiedBy ( long txNum ) { internalLock . writeLock ( ) . lock ( ) ; try { return modifiedBy . contains ( txNum ) ; } finally { internalLock . writeLock ( ) . unlock ( ) ; } }
Returns true if the buffer is dirty due to a modification by the specified transaction .
19,484
void assignToBlock ( BlockId blk ) { internalLock . writeLock ( ) . lock ( ) ; try { flush ( ) ; this . blk = blk ; contents . read ( blk ) ; pins = 0 ; lastLsn = LogSeqNum . readFromPage ( contents , LAST_LSN_OFFSET ) ; } finally { internalLock . writeLock ( ) . unlock ( ) ; } }
Reads the contents of the specified block into the buffer s page . If the buffer was dirty then the contents of the previous page are first written to disk .
19,485
void assignToNew ( String fileName , PageFormatter fmtr ) { internalLock . writeLock ( ) . lock ( ) ; try { flush ( ) ; fmtr . format ( this ) ; blk = contents . append ( fileName ) ; pins = 0 ; isNew = true ; lastLsn = LogSeqNum . DEFAULT_VALUE ; } finally { internalLock . writeLock ( ) . unlock ( ) ; } }
Initializes the buffer s page according to the specified formatter and appends the page to the specified file . If the buffer was dirty then the contents of the previous page are first written to disk .
19,486
protected IOException toIoException ( Exception e ) { if ( e instanceof IOException ) { return ( IOException ) e ; } else { return new IOException ( "Unexpected failure" , e ) ; } }
This Exception conversion needs to return the IOException instead of throwing it; this is so that the compiler can detect that, for the final Exception check, something is actually thrown.
19,487
public void createCheckpoint ( ) { if ( logger . isLoggable ( Level . INFO ) ) logger . info ( "Start creating checkpoint" ) ; if ( MY_METHOD == METHOD_MONITOR ) { if ( VanillaDb . txMgr ( ) . getNextTxNum ( ) - lastTxNum > TX_COUNT_TO_CHECKPOINT ) { Transaction tx = VanillaDb . txMgr ( ) . newTransaction ( Connection . TRANSACTION_SERIALIZABLE , false ) ; VanillaDb . txMgr ( ) . createCheckpoint ( tx ) ; tx . commit ( ) ; lastTxNum = VanillaDb . txMgr ( ) . getNextTxNum ( ) ; } } else if ( MY_METHOD == METHOD_PERIODIC ) { Transaction tx = VanillaDb . txMgr ( ) . newTransaction ( Connection . TRANSACTION_SERIALIZABLE , false ) ; VanillaDb . txMgr ( ) . createCheckpoint ( tx ) ; tx . commit ( ) ; } if ( logger . isLoggable ( Level . INFO ) ) logger . info ( "A checkpoint created" ) ; }
Creates a non-quiescent checkpoint.
19,488
public boolean matchKeyword ( String keyword ) { return tok . ttype == StreamTokenizer . TT_WORD && tok . sval . equals ( keyword ) && keywords . contains ( tok . sval ) ; }
Returns true if the current token is the specified keyword .
19,489
public String eatStringConstant ( ) { if ( ! matchStringConstant ( ) ) throw new BadSyntaxException ( ) ; String s = tok . sval ; nextToken ( ) ; return s ; }
Throws an exception if the current token is not a string . Otherwise returns that string and moves to the next token .
19,490
public String eatId ( ) { if ( ! matchId ( ) ) throw new BadSyntaxException ( ) ; String s = tok . sval ; nextToken ( ) ; return s ; }
Throws an exception if the current token is not an identifier . Otherwise returns the identifier string and moves to the next token .
19,491
public void sample ( Record rec ) { totalRecs ++ ; if ( samples . size ( ) < MAX_SAMPLES ) { samples . add ( new Sample ( rec , schema ) ) ; updateNewValueInterval ( rec ) ; } else { double flip = random . nextDouble ( ) ; if ( flip < ( double ) MAX_SAMPLES / totalRecs ) { samples . set ( random . nextInt ( MAX_SAMPLES ) , new Sample ( rec , schema ) ) ; updateNewValueInterval ( rec ) ; } } }
Keep a record as a sample with certain probability . This method is designed to uniformly sample all records of a table under the situation where the total number of records is unknown in advance . A client should call this method when iterating through each record of a table .
19,492
public Scan open ( ) { TempTable tt = copyRecordsFrom ( rhs ) ; TableInfo ti = tt . getTableInfo ( ) ; Scan leftscan = lhs . open ( ) ; return new MultiBufferProductScan ( leftscan , ti , tx ) ; }
A scan for this query is created and returned as follows . First the method materializes its RHS query . It then determines the optimal chunk size based on the size of the materialized file and the number of available buffers . It creates a chunk plan for each chunk saving them in a list . Finally it creates a multiscan for this list of plans and returns that scan .
19,493
public boolean next ( ) throws RemoteException { try { return s . next ( ) ; } catch ( RuntimeException e ) { rconn . rollback ( ) ; throw e ; } }
Moves to the next record in the result set by moving to the next record in the saved scan .
19,494
public int getInt ( String fldName ) throws RemoteException { try { fldName = fldName . toLowerCase ( ) ; return ( Integer ) s . getVal ( fldName ) . castTo ( INTEGER ) . asJavaVal ( ) ; } catch ( RuntimeException e ) { rconn . rollback ( ) ; throw e ; } }
Returns the integer value of the specified field by returning the corresponding value on the saved scan .
19,495
public long getLong ( String fldName ) throws RemoteException { try { fldName = fldName . toLowerCase ( ) ; return ( Long ) s . getVal ( fldName ) . castTo ( BIGINT ) . asJavaVal ( ) ; } catch ( RuntimeException e ) { rconn . rollback ( ) ; throw e ; } }
Returns the long value of the specified field by returning the corresponding value on the saved scan .
19,496
public double getDouble ( String fldName ) throws RemoteException { try { fldName = fldName . toLowerCase ( ) ; return ( Double ) s . getVal ( fldName ) . castTo ( DOUBLE ) . asJavaVal ( ) ; } catch ( RuntimeException e ) { rconn . rollback ( ) ; throw e ; } }
Returns the double value of the specified field by returning the corresponding value on the saved scan .
19,497
public String getString ( String fldName ) throws RemoteException { try { fldName = fldName . toLowerCase ( ) ; return ( String ) s . getVal ( fldName ) . castTo ( VARCHAR ) . asJavaVal ( ) ; } catch ( RuntimeException e ) { rconn . rollback ( ) ; throw e ; } }
Returns the string value of the specified field by returning the corresponding value on the saved scan .
19,498
public void close ( ) throws RemoteException { s . close ( ) ; if ( rconn . getAutoCommit ( ) ) rconn . commit ( ) ; else rconn . endStatement ( ) ; }
Closes the result set by closing its scan .
19,499
public boolean next ( ) { while ( true ) { if ( rp . next ( ) ) return true ; if ( current == endBlkNum ) return false ; moveToBlock ( current + 1 ) ; } }
Moves to the next record in the current block of the chunk. If there are no more records in the block, the next block becomes the current one. If there are no more blocks in the chunk, returns false.