idx
int64
0
165k
question
stringlengths
73
5.81k
target
stringlengths
5
918
154,800
public static byte [ ] readTxnBytes ( InputArchive ia ) throws IOException { try { byte [ ] bytes = ia . readBuffer ( "txtEntry" ) ; if ( bytes . length == 0 ) return bytes ; if ( ia . readByte ( "EOF" ) != 'B' ) { LOG . error ( "Last transaction was partial." ) ; return null ; } return bytes ; } catch ( EOFException e...
Reads a transaction entry from the input archive .
154,801
public static byte [ ] marshallTxnEntry ( TxnHeader hdr , Record txn ) throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream ( ) ; OutputArchive boa = BinaryOutputArchive . getArchive ( baos ) ; hdr . serialize ( boa , "hdr" ) ; if ( txn != null ) { txn . serialize ( boa , "txn" ) ; } return baos ...
Serializes transaction header and transaction data into a byte buffer .
154,802
public static void writeTxnBytes ( OutputArchive oa , byte [ ] bytes ) throws IOException { oa . writeBuffer ( bytes , "txnEntry" ) ; oa . writeByte ( ( byte ) 0x42 , "EOR" ) ; }
Write the serialized transaction record to the output archive .
154,803
public static List < File > sortDataDir ( File [ ] files , String prefix , boolean ascending ) { if ( files == null ) return new ArrayList < File > ( 0 ) ; List < File > filelist = Arrays . asList ( files ) ; Collections . sort ( filelist , new DataDirFileComparator ( prefix , ascending ) ) ; return filelist ; }
Sort the list of files . Recency as determined by the version component of the file name .
154,804
private List < String > diagnoseIndexMismatch ( Index theIndex , Index otherIndex ) { List < String > mismatchedAttrs = new ArrayList < > ( ) ; if ( theIndex . getType ( ) != otherIndex . getType ( ) ) { mismatchedAttrs . add ( "index type (hash vs tree)" ) ; } if ( theIndex . getUnique ( ) != otherIndex . getUnique ( ...
Give two strings return a list of attributes that do not match
154,805
private void validateTableCompatibility ( String theName , String otherName , Table theTable , Table otherTable , FailureMessage failureMessage ) { if ( theTable . getIsdred ( ) != otherTable . getIsdred ( ) ) { failureMessage . addReason ( "To swap table " + theName + " with table " + otherName + " both tables must be...
Flag any issues of incompatibility between the two table operands of a swap by appending error details to a feedback buffer . These details and possibly others should get attached to a PlannerErrorException s message by the caller .
154,806
private void validateColumnCompatibility ( String theName , String otherName , Table theTable , Table otherTable , FailureMessage failureMessage ) { CatalogMap < Column > theColumns = theTable . getColumns ( ) ; int theColCount = theColumns . size ( ) ; CatalogMap < Column > otherColumns = otherTable . getColumns ( ) ;...
Flag any issues of incompatibility between the columns of the two table operands of a swap by appending error details to a feedback buffer . These details and possibly others should get attached to a PlannerErrorException s message by the caller .
154,807
private final void growIfNeeded ( int minimumDesired ) { if ( buffer . b ( ) . remaining ( ) < minimumDesired ) { int newCapacity = buffer . b ( ) . capacity ( ) ; int newRemaining = newCapacity - buffer . b ( ) . position ( ) ; while ( newRemaining < minimumDesired ) { newRemaining += newCapacity ; newCapacity *= 2 ; ...
Resizes the internal byte buffer with a simple doubling policy if needed .
154,808
public static byte [ ] serialize ( FastSerializable object ) throws IOException { FastSerializer out = new FastSerializer ( ) ; object . writeExternal ( out ) ; return out . getBBContainer ( ) . b ( ) . array ( ) ; }
Get the byte version of object . This is a shortcut utility method when you only need to serialize a single object .
154,809
public byte [ ] getBytes ( ) { byte [ ] retval = new byte [ buffer . b ( ) . position ( ) ] ; int position = buffer . b ( ) . position ( ) ; buffer . b ( ) . rewind ( ) ; buffer . b ( ) . get ( retval ) ; assert position == buffer . b ( ) . position ( ) ; return retval ; }
This method is slow and horrible . It entails an extra copy . Don t use it! Ever! Not even for test! Just say no to test only code . It will also leak the BBContainer if this FS is being used with a pool .
154,810
public ByteBuffer getBuffer ( ) { assert ( isDirect == false ) ; assert ( buffer . b ( ) . hasArray ( ) ) ; assert ( ! buffer . b ( ) . isDirect ( ) ) ; buffer . b ( ) . flip ( ) ; return buffer . b ( ) . asReadOnlyBuffer ( ) ; }
Return a readOnly slice of this buffer . Flips the internal buffer . May not be usefully invoked multiple times on the same internal state .
154,811
public String getHexEncodedBytes ( ) { buffer . b ( ) . flip ( ) ; byte bytes [ ] = new byte [ buffer . b ( ) . remaining ( ) ] ; buffer . b ( ) . get ( bytes ) ; String hex = Encoder . hexEncode ( bytes ) ; buffer . discard ( ) ; return hex ; }
Get a ascii - string - safe version of the binary value using a hex encoding .
154,812
public static void writeString ( String string , ByteBuffer buffer ) throws IOException { if ( string == null ) { buffer . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = string . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; buffer . putInt ( len ) ; buffer . put ( str...
Write a string in the standard VoltDB way without wrapping the byte buffer .
154,813
public void writeString ( String string ) throws IOException { if ( string == null ) { writeInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = string . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; writeInt ( len ) ; write ( strbytes ) ; }
Write a string in the standard VoltDB way . That is two bytes of length info followed by the bytes of characters encoded in UTF - 8 .
154,814
public void writeVarbinary ( byte [ ] bin ) throws IOException { if ( bin == null ) { writeInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } if ( bin . length > VoltType . MAX_VALUE_LENGTH ) { throw new IOException ( "Varbinary exceeds maximum length of " + VoltType . MAX_VALUE_LENGTH + " bytes." ) ; } writeInt ( bin ...
Write a varbinary in the standard VoltDB way . That is four bytes of length info followed by the bytes .
154,815
public void writeTable ( VoltTable table ) throws IOException { int len = table . getSerializedSize ( ) ; growIfNeeded ( len ) ; table . flattenToBuffer ( buffer . b ( ) ) ; }
Write a table using it s ByteBuffer serialization code .
154,816
public void writeInvocation ( StoredProcedureInvocation invocation ) throws IOException { int len = invocation . getSerializedSize ( ) ; growIfNeeded ( len ) ; invocation . flattenToBuffer ( buffer . b ( ) ) ; }
Write an SPI using it s ByteBuffer serialization code .
154,817
public void writeParameterSet ( ParameterSet params ) throws IOException { int len = params . getSerializedSize ( ) ; growIfNeeded ( len ) ; params . flattenToBuffer ( buffer . b ( ) ) ; }
Write a ParameterSet using it s ByteBuffer serialization code .
154,818
public void start ( InputHandler ih , Set < Long > verbotenThreads ) { m_ih = ih ; m_verbotenThreads = verbotenThreads ; startSetup ( ) ; m_thread . start ( ) ; }
Start this VoltNetwork s thread . populate the verbotenThreads set with the id of the thread that is created
154,819
public boolean execute ( String sql ) throws SQLException { checkClosed ( ) ; VoltSQL query = VoltSQL . parseSQL ( sql ) ; return this . execute ( query ) ; }
Executes the given SQL statement which may return multiple results .
154,820
public boolean execute ( String sql , String [ ] columnNames ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Executes the given SQL statement which may return multiple results and signals the driver that the auto - generated keys indicated in the given array should be made available for retrieval .
154,821
public int [ ] executeBatch ( ) throws SQLException { checkClosed ( ) ; closeCurrentResult ( ) ; if ( batch == null || batch . size ( ) == 0 ) { return new int [ 0 ] ; } int [ ] updateCounts = new int [ batch . size ( ) ] ; int runningUpdateCount = 0 ; int i = 0 ; try { for ( ; i < batch . size ( ) ; i ++ ) { setCurren...
Submits a batch of commands to the database for execution and if all commands execute successfully returns an array of update counts .
154,822
public ResultSet executeQuery ( String sql ) throws SQLException { checkClosed ( ) ; VoltSQL query = VoltSQL . parseSQL ( sql ) ; if ( ! query . isOfType ( VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , sql ) ; } return this . executeQuery ( query ) ; }
Executes the given SQL statement which returns a single ResultSet object .
154,823
public void setFetchDirection ( int direction ) throws SQLException { checkClosed ( ) ; if ( ( direction != ResultSet . FETCH_FORWARD ) && ( direction != ResultSet . FETCH_REVERSE ) && ( direction != ResultSet . FETCH_UNKNOWN ) ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , direction ) ; } this . fetchDirecti...
Gives the driver a hint as to the direction in which rows will be processed in ResultSet objects created using this Statement object .
154,824
public void setFetchSize ( int rows ) throws SQLException { checkClosed ( ) ; if ( rows < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , rows ) ; } this . fetchSize = rows ; }
Gives the JDBC driver a hint as to the number of rows that should be fetched from the database when more rows are needed for ResultSet objects genrated by this Statement .
154,825
public void setMaxFieldSize ( int max ) throws SQLException { checkClosed ( ) ; if ( max < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , max ) ; } throw SQLError . noSupport ( ) ; }
Sets the limit for the maximum number of bytes that can be returned for character and binary column values in a ResultSet object produced by this Statement object .
154,826
public void setMaxRows ( int max ) throws SQLException { checkClosed ( ) ; if ( max < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , max ) ; } this . maxRows = max ; }
Sets the limit for the maximum number of rows that any ResultSet object generated by this Statement object can contain to the given number .
154,827
public void setQueryTimeout ( int seconds ) throws SQLException { checkClosed ( ) ; if ( seconds < 0 ) { throw SQLError . get ( SQLError . ILLEGAL_ARGUMENT , seconds ) ; } if ( seconds == 0 ) { this . m_timeout = Integer . MAX_VALUE ; } else { this . m_timeout = seconds ; } }
0 is infinite in our case its Integer . MAX_VALUE
154,828
public void updateReplicasForJoin ( TransactionState snapshotTransactionState ) { long [ ] replicasAdded = new long [ 0 ] ; if ( m_term != null ) { replicasAdded = ( ( SpTerm ) m_term ) . updateReplicas ( snapshotTransactionState ) ; } m_scheduler . forwardPendingTaskToRejoinNode ( replicasAdded , snapshotTransactionSt...
This will be called from Snapshot in elastic joining or rejoining cases .
154,829
public Person newPerson ( ) { Person p = new Person ( ) ; p . firstname = firstnames [ rand . nextInt ( firstnames . length ) ] ; p . lastname = lastnames [ rand . nextInt ( lastnames . length ) ] ; p . sex = sexes [ rand . nextInt ( 2 ) ] ; p . dob = randomDOB ( ) ; int i = rand . nextInt ( areaCodes . length ) ; p . ...
generate a random person
154,830
public void loadFromJSONPlan ( JSONObject jobj , Database db ) throws JSONException { if ( jobj . has ( Members . PLAN_NODES_LISTS ) ) { JSONArray jplanNodesArray = jobj . getJSONArray ( Members . PLAN_NODES_LISTS ) ; for ( int i = 0 ; i < jplanNodesArray . length ( ) ; ++ i ) { JSONObject jplanNodesObj = jplanNodesArr...
Load json plan . The plan must have either PLAN_NODE array in case of a statement without subqueries or PLAN_NODES_LISTS array of PLAN_NODE arrays for each sub statement .
154,831
private void loadPlanNodesFromJSONArrays ( int stmtId , JSONArray jArray , Database db ) { List < AbstractPlanNode > planNodes = new ArrayList < > ( ) ; int size = jArray . length ( ) ; try { for ( int i = 0 ; i < size ; i ++ ) { JSONObject jobj = jArray . getJSONObject ( i ) ; String nodeTypeStr = jobj . getString ( "...
Load plan nodes from the PLAN_NODE array . All the nodes are from a substatement with the id = stmtId
154,832
public final void configure ( Properties props , FormatterBuilder formatterBuilder ) { Map < URI , ImporterConfig > configs = m_factory . createImporterConfigurations ( props , formatterBuilder ) ; m_configs = new ImmutableMap . Builder < URI , ImporterConfig > ( ) . putAll ( configs ) . putAll ( Maps . filterKeys ( m_...
This will be called for every importer configuration section for this importer type .
154,833
public final void stop ( ) { m_stopping = true ; ImmutableMap < URI , AbstractImporter > oldReference ; boolean success = false ; do { oldReference = m_importers . get ( ) ; success = m_importers . compareAndSet ( oldReference , ImmutableMap . < URI , AbstractImporter > of ( ) ) ; } while ( ! success ) ; if ( ! m_start...
This is called by the importer framework to stop importers . All resources for this importer will be unregistered from the resource distributer .
154,834
public int [ ] get ( ) { int includedHashes = Math . min ( m_hashCount , MAX_HASHES_COUNT ) ; int [ ] retval = new int [ includedHashes + HEADER_OFFSET ] ; System . arraycopy ( m_hashes , 0 , retval , HEADER_OFFSET , includedHashes ) ; m_inputCRC . update ( m_hashCount ) ; m_inputCRC . update ( m_catalogVersion ) ; ret...
Serialize the running hashes to an array and complete the overall hash for the first int value in the array .
154,835
public void offerStatement ( int stmtHash , int offset , ByteBuffer psetBuffer ) { m_inputCRC . update ( stmtHash ) ; m_inputCRC . updateFromPosition ( offset , psetBuffer ) ; if ( m_hashCount < MAX_HASHES_COUNT ) { m_hashes [ m_hashCount ] = stmtHash ; m_hashes [ m_hashCount + 1 ] = ( int ) m_inputCRC . getValue ( ) ;...
Update the overall hash . Add a pair of ints to the array if the size isn t too large .
154,836
public static int compareHashes ( int [ ] leftHashes , int [ ] rightHashes ) { assert ( leftHashes != null ) ; assert ( rightHashes != null ) ; assert ( leftHashes . length >= 3 ) ; assert ( rightHashes . length >= 3 ) ; if ( leftHashes [ 0 ] == rightHashes [ 0 ] ) { return - 1 ; } int includedHashLeft = Math . min ( l...
Compare two hash arrays return true if the same .
154,837
public static String description ( int [ ] hashes , int m_hashMismatchPos ) { assert ( hashes != null ) ; assert ( hashes . length >= 3 ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "Full Hash " ) . append ( hashes [ 0 ] ) ; sb . append ( ", Catalog Version " ) . append ( hashes [ 1 ] ) ; sb . append ( "...
Log the contents of the hash array
154,838
private boolean renameOverwrite ( String oldname , String newname ) { boolean deleted = delete ( newname ) ; if ( exists ( oldname ) ) { File file = new File ( oldname ) ; return file . renameTo ( new File ( newname ) ) ; } return deleted ; }
Rename the file with oldname to newname . If a file with newname already exists it is deleted before the renaming operation proceeds .
154,839
public String canonicalOrAbsolutePath ( String path ) { try { return canonicalPath ( path ) ; } catch ( Exception e ) { return absolutePath ( path ) ; } }
Retrieves the canonical path for the given path or the absolute path if attemting to retrieve the canonical path fails .
154,840
private boolean isCoordinatorStatsUsable ( boolean incremental ) { if ( m_coordinatorTask == null ) { return false ; } if ( incremental ) { return m_coordinatorTask . m_timedInvocations - m_coordinatorTask . m_lastTimedInvocations > 0 ; } return m_coordinatorTask . m_timedInvocations > 0 ; }
if any coordinator task is executed at all .
154,841
public int getIncrementalMinResultSizeAndReset ( ) { int retval = m_workerTask . m_incrMinResultSize ; m_workerTask . m_incrMinResultSize = Integer . MAX_VALUE ; if ( isCoordinatorStatsUsable ( true ) ) { m_coordinatorTask . m_incrMinResultSize = Integer . MAX_VALUE ; } return retval ; }
The result size should be taken from the final output coming from the coordinator task .
154,842
private Statement compileAlterTableDropTTL ( Table t ) { if ( t . getTTL ( ) == null ) { throw Error . error ( ErrorCode . X_42501 ) ; } if ( ! StringUtil . isEmpty ( t . getTTL ( ) . migrationTarget ) ) { throw unexpectedToken ( "May not drop migration target" ) ; } Object [ ] args = new Object [ ] { t . getName ( ) ,...
VoltDB extension drop TTL
154,843
StatementDMQL compileTriggerSetStatement ( Table table , RangeVariable [ ] rangeVars ) { read ( ) ; Expression [ ] updateExpressions ; int [ ] columnMap ; OrderedHashSet colNames = new OrderedHashSet ( ) ; HsqlArrayList exprList = new HsqlArrayList ( ) ; RangeVariable [ ] targetRangeVars = new RangeVariable [ ] { range...
Creates SET Statement for a trigger row from this parse context .
154,844
ColumnSchema readColumnDefinitionOrNull ( Table table , HsqlName hsqlName , HsqlArrayList constraintList ) { boolean isIdentity = false ; boolean isPKIdentity = false ; boolean identityAlways = false ; Expression generateExpr = null ; boolean isNullable = true ; Expression defaultExpr = null ; Type typeObject ; NumberS...
Responsible for handling the creation of table columns during the process of executing CREATE TABLE or ADD COLUMN etc . statements .
154,845
void readCheckConstraintCondition ( Constraint c ) { readThis ( Tokens . OPENBRACKET ) ; startRecording ( ) ; isCheckOrTriggerCondition = true ; Expression condition = XreadBooleanValueExpression ( ) ; isCheckOrTriggerCondition = false ; Token [ ] tokens = getRecordedStatement ( ) ; readThis ( Tokens . CLOSEBRACKET ) ;...
Responsible for handling check constraints section of CREATE TABLE ...
154,846
private int [ ] readColumnList ( Table table , boolean ascOrDesc ) { OrderedHashSet set = readColumnNames ( ascOrDesc ) ; return table . getColumnIndexes ( set ) ; }
Process a bracketed column list as used in the declaration of SQL CONSTRAINTS and return an array containing the indexes of the columns within the table .
154,847
void processAlterTableRename ( Table table ) { HsqlName name = readNewSchemaObjectName ( SchemaObject . TABLE ) ; name . setSchemaIfNull ( table . getSchemaName ( ) ) ; if ( table . getSchemaName ( ) != name . schema ) { throw Error . error ( ErrorCode . X_42505 ) ; } database . schemaManager . renameSchemaObject ( tab...
Responsible for handling tail of ALTER TABLE ... RENAME ...
154,848
void processAlterTableDropColumn ( Table table , String colName , boolean cascade ) { int colindex = table . getColumnIndex ( colName ) ; if ( table . getColumnCount ( ) == 1 ) { throw Error . error ( ErrorCode . X_42591 ) ; } session . commit ( false ) ; TableWorks tableWorks = new TableWorks ( session , table ) ; tab...
Responsible for handling tail of ALTER TABLE ... DROP COLUMN ...
154,849
void processAlterTableDropConstraint ( Table table , String name , boolean cascade ) { session . commit ( false ) ; TableWorks tableWorks = new TableWorks ( session , table ) ; tableWorks . dropConstraint ( name , cascade ) ; return ; }
Responsible for handling tail of ALTER TABLE ... DROP CONSTRAINT ...
154,850
private void processAlterColumnType ( Table table , ColumnSchema oldCol , boolean fullDefinition ) { ColumnSchema newCol ; if ( oldCol . isGenerated ( ) ) { throw Error . error ( ErrorCode . X_42561 ) ; } if ( fullDefinition ) { HsqlArrayList list = new HsqlArrayList ( ) ; Constraint c = table . getPrimaryConstraint ( ...
Allows changes to type of column or addition of an IDENTITY generator . IDENTITY is not removed if it does not appear in new column definition Constraint definitions are not allowed
154,851
private void processAlterColumnRename ( Table table , ColumnSchema column ) { checkIsSimpleName ( ) ; if ( table . findColumn ( token . tokenString ) > - 1 ) { throw Error . error ( ErrorCode . X_42504 , token . tokenString ) ; } database . schemaManager . checkColumnIsReferenced ( table . getName ( ) , column . getNam...
Responsible for handling tail of ALTER COLUMN ... RENAME ...
154,852
void readLimitConstraintCondition ( Constraint c ) { readThis ( Tokens . PARTITION ) ; readThis ( Tokens . ROWS ) ; int rowsLimit = readInteger ( ) ; c . rowsLimit = rowsLimit ; if ( readIfThis ( Tokens . EXECUTE ) ) { readThis ( Tokens . OPENBRACKET ) ; startRecording ( ) ; int numOpenBrackets = 1 ; while ( numOpenBra...
Responsible for handling Volt limit constraints section of CREATE TABLE ...
154,853
private java . util . List < Expression > XreadExpressions ( java . util . List < Boolean > ascDesc ) { return XreadExpressions ( ascDesc , false ) ; }
Default disallow empty parenthesis
154,854
static boolean isValid ( int type ) { switch ( type ) { case OpCode . notification : return false ; case OpCode . create : case OpCode . delete : case OpCode . createSession : case OpCode . exists : case OpCode . getData : case OpCode . setData : case OpCode . sync : case OpCode . getACL : case OpCode . setACL : case O...
is the packet type a valid packet in zookeeper
154,855
public void startSeekingFor ( final Set < Long > hsids , final Map < Long , Boolean > inTrouble ) { if ( ! m_hsids . equals ( hsids ) ) { if ( ! m_hsids . isEmpty ( ) ) clear ( ) ; m_hsids = ImmutableSortedSet . copyOf ( hsids ) ; } m_survivors = m_strategy . accept ( survivorPicker , Pair . of ( m_hsids , inTrouble ) ...
Start accumulate site links graphing information
154,856
static protected void removeValues ( TreeMultimap < Long , Long > mm , Set < Long > values ) { Iterator < Map . Entry < Long , Long > > itr = mm . entries ( ) . iterator ( ) ; while ( itr . hasNext ( ) ) { Map . Entry < Long , Long > e = itr . next ( ) ; if ( values . contains ( e . getValue ( ) ) ) { itr . remove ( ) ...
Convenience method that remove all instances of the given values from the given map
154,857
public static Predicate < Map . Entry < Long , Boolean > > amongDeadHsids ( final Set < Long > hsids ) { return new Predicate < Map . Entry < Long , Boolean > > ( ) { public boolean apply ( Entry < Long , Boolean > e ) { return hsids . contains ( e . getKey ( ) ) && e . getValue ( ) ; } } ; }
returns a map entry predicate that tests whether or not the given map entry describes a dead site
154,858
private void removeValue ( TreeMultimap < Long , Long > mm , long value ) { Iterator < Map . Entry < Long , Long > > itr = mm . entries ( ) . iterator ( ) ; while ( itr . hasNext ( ) ) { Map . Entry < Long , Long > e = itr . next ( ) ; if ( e . getValue ( ) . equals ( value ) ) { itr . remove ( ) ; } } }
Convenience method that remove all instances of the given value from the given map
154,859
void add ( long reportingHsid , final Map < Long , Boolean > failed ) { if ( ! m_hsids . contains ( reportingHsid ) ) return ; Boolean harakiri = failed . get ( reportingHsid ) ; if ( harakiri != null && harakiri . booleanValue ( ) ) return ; Set < Long > dead = Sets . newHashSet ( ) ; for ( Map . Entry < Long , Boolea...
Adds alive and dead graph information
154,860
public void add ( long reportingHsid , SiteFailureMessage sfm ) { if ( ! m_hsids . contains ( reportingHsid ) || ! sfm . m_survivors . contains ( reportingHsid ) ) return ; Set < Long > survivors = sfm . m_survivors ; if ( Sets . filter ( sfm . getObservedFailedSites ( ) , in ( m_hsids ) ) . isEmpty ( ) ) { survivors =...
Adds alive and dead graph information from a reporting site survivor set
154,861
protected boolean seenByInterconnectedPeers ( Set < Long > destinations , Set < Long > origins ) { Set < Long > seers = Multimaps . filterValues ( m_alive , in ( origins ) ) . keySet ( ) ; int before = origins . size ( ) ; origins . addAll ( seers ) ; if ( origins . containsAll ( destinations ) ) { return true ; } else...
Walk the alive graph to see if there is a connected path between origins and destinations
154,862
public Set < Long > forWhomSiteIsDead ( long hsid ) { ImmutableSet . Builder < Long > isb = ImmutableSet . builder ( ) ; Set < Long > deadBy = m_dead . get ( hsid ) ; if ( ! deadBy . isEmpty ( ) && m_survivors . contains ( hsid ) && m_strategy == ArbitrationStrategy . MATCHING_CARDINALITY ) { isb . addAll ( Sets . filt...
Is the given hsid considered dead by anyone in my survivor set?
154,863
protected static InMemoryJarfile addDDLToCatalog ( Catalog oldCatalog , InMemoryJarfile jarfile , String [ ] adhocDDLStmts , boolean isXDCR ) throws IOException , VoltCompilerException { StringBuilder sb = new StringBuilder ( ) ; compilerLog . info ( "Applying the following DDL to cluster:" ) ; for ( String stmt : adho...
Append the supplied adhoc DDL to the current catalog s DDL and recompile the jarfile
154,864
static protected CompletableFuture < ClientResponse > makeQuickResponse ( byte statusCode , String msg ) { ClientResponseImpl cri = new ClientResponseImpl ( statusCode , new VoltTable [ 0 ] , msg ) ; CompletableFuture < ClientResponse > f = new CompletableFuture < > ( ) ; f . complete ( cri ) ; return f ; }
Error generating shortcut method
154,865
protected String verifyAndWriteCatalogJar ( CatalogChangeResult ccr ) { String procedureName = "@VerifyCatalogAndWriteJar" ; CompletableFuture < Map < Integer , ClientResponse > > cf = callNTProcedureOnAllHosts ( procedureName , ccr . catalogBytes , ccr . encodedDiffCommands , ccr . catalogHash , ccr . deploymentBytes ...
Run the catalog jar NT procedure to check and write the catalog file . Check the results map from every host and return error message if needed .
154,866
public static long getNextGenerationId ( ) { try { return UniqueIdGenerator . makeIdFromComponents ( System . currentTimeMillis ( ) , m_generationId . incrementAndGet ( ) , MpInitiator . MP_INIT_PID ) ; } catch ( Throwable t ) { m_generationId . set ( 0L ) ; return UniqueIdGenerator . makeIdFromComponents ( System . cu...
Get a unique id for the next generation for export .
154,867
public User getUser ( String name , String password ) { if ( name == null ) { name = "" ; } if ( password == null ) { password = "" ; } User user = get ( name ) ; user . checkPassword ( password ) ; return user ; }
Returns the User object with the specified name and password from this object s set .
154,868
public User get ( String name ) { User user = ( User ) userList . get ( name ) ; if ( user == null ) { throw Error . error ( ErrorCode . X_28501 , name ) ; } return user ; }
Returns the User object identified by the name argument .
154,869
void reset ( int hashTableSize , int capacity ) { if ( linkTable != null ) { voltDBresetCapacity = linkTable . length ; } ++ voltDBresetCount ; voltDBlastResetEvent = voltDBhistoryDepth ; voltDBhistoryCapacity = Math . min ( voltDBhistoryMaxCapacity , Math . max ( voltDBhistoryMinCapacity , voltDBhistoryDepth ) ) ; vol...
Reset the structure with a new size as empty .
154,870
void clear ( ) { if ( linkTable != null ) { voltDBclearCapacity = linkTable . length ; } ++ voltDBclearCount ; voltDBlastClearEvent = voltDBhistoryDepth ; int to = linkTable . length ; int [ ] intArray = linkTable ; while ( -- to >= 0 ) { intArray [ to ] = 0 ; } resetTables ( ) ; }
Reset the index as empty .
154,871
void unlinkNode ( int index , int lastLookup , int lookup ) { voltDBhistory [ voltDBhistoryDepth ++ % voltDBhistoryCapacity ] = - index - 1 ; if ( lastLookup == - 1 ) { hashTable [ index ] = linkTable [ lookup ] ; } else { linkTable [ lastLookup ] = linkTable [ lookup ] ; } linkTable [ lookup ] = reclaimedNodePointer ;...
Unlink a node from a linked list and link into the reclaimed list .
154,872
boolean removeEmptyNode ( int lookup ) { voltDBhistory [ voltDBhistoryDepth ++ % voltDBhistoryCapacity ] = 1000000 + lookup ; boolean found = false ; int lastLookup = - 1 ; for ( int i = reclaimedNodePointer ; i >= 0 ; lastLookup = i , i = linkTable [ i ] ) { if ( i == lookup ) { if ( lastLookup == - 1 ) { reclaimedNod...
Remove a node that has already been unlinked . This is not required for index operations . It is used only when the row needs to be removed from the data structures that store the actual indexed data and the nodes need to be contiguous .
154,873
private static String translateSep ( String sep , boolean isProperty ) { if ( sep == null ) { return null ; } int next = sep . indexOf ( BACKSLASH_CHAR ) ; if ( next != - 1 ) { int start = 0 ; char [ ] sepArray = sep . toCharArray ( ) ; char ch = 0 ; int len = sep . length ( ) ; StringBuffer sb = new StringBuffer ( len...
Translates the escaped characters in a separator string and returns the non - escaped string .
154,874
public void open ( boolean readonly ) { fileFreePosition = 0 ; try { dataFile = ScaledRAFile . newScaledRAFile ( database , fileName , readonly , ScaledRAFile . DATA_FILE_RAF , null , null ) ; fileFreePosition = dataFile . length ( ) ; if ( fileFreePosition > Integer . MAX_VALUE ) { throw new HsqlException ( "" , "" , ...
Opens a data source file .
154,875
public synchronized void close ( boolean write ) { if ( dataFile == null ) { return ; } try { cache . saveAll ( ) ; boolean empty = ( dataFile . length ( ) <= NL . length ( ) ) ; dataFile . close ( ) ; dataFile = null ; if ( empty && ! cacheReadonly ) { FileUtil . getDefaultInstance ( ) . delete ( fileName ) ; } } catc...
Writes newly created rows to disk . In the current implentation such rows have already been saved so this method just removes a source file that has no rows .
154,876
void purge ( ) { uncommittedCache . clear ( ) ; try { if ( cacheReadonly ) { close ( false ) ; } else { if ( dataFile != null ) { dataFile . close ( ) ; dataFile = null ; } FileUtil . getDefaultInstance ( ) . delete ( fileName ) ; } } catch ( Exception e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode ....
Closes the source file and deletes it if it is not read - only .
154,877
int findNextUsedLinePos ( int pos ) { try { int firstPos = pos ; int currentPos = pos ; boolean wasCR = false ; dataFile . seek ( pos ) ; while ( true ) { int c = dataFile . read ( ) ; currentPos ++ ; switch ( c ) { case CR_CHAR : wasCR = true ; break ; case LF_CHAR : wasCR = false ; ( ( RowInputText ) rowIn ) . skippe...
Searches from file pointer pos and finds the beginning of the first line that contains any non - space character . Increments the row counter when a blank line is skipped .
154,878
protected synchronized void saveRows ( CachedObject [ ] rows , int offset , int count ) { if ( count == 0 ) { return ; } for ( int i = offset ; i < offset + count ; i ++ ) { CachedObject r = rows [ i ] ; uncommittedCache . put ( r . getPos ( ) , r ) ; rows [ i ] = null ; } }
This is called internally when old rows need to be removed from the cache . Text table rows that have not been saved are those that have not been committed yet . So we don t save them but add them to the uncommitted cache until such time that they are committed or rolled back - fredt
154,879
public DoubleHistogram copy ( ) { final DoubleHistogram targetHistogram = new DoubleHistogram ( configuredHighestToLowestValueRatio , getNumberOfSignificantValueDigits ( ) ) ; targetHistogram . setTrackableValueRange ( currentLowestValueInAutoRange , currentHighestValueLimitInAutoRange ) ; integerValuesHistogram . copy...
Create a copy of this histogram complete with data and everything .
154,880
public void add ( final DoubleHistogram fromHistogram ) throws ArrayIndexOutOfBoundsException { int arrayLength = fromHistogram . integerValuesHistogram . countsArrayLength ; AbstractHistogram fromIntegerHistogram = fromHistogram . integerValuesHistogram ; for ( int i = 0 ; i < arrayLength ; i ++ ) { long count = fromI...
Add the contents of another histogram to this one .
154,881
public void subtract ( final DoubleHistogram otherHistogram ) { int arrayLength = otherHistogram . integerValuesHistogram . countsArrayLength ; AbstractHistogram otherIntegerHistogram = otherHistogram . integerValuesHistogram ; for ( int i = 0 ; i < arrayLength ; i ++ ) { long otherCount = otherIntegerHistogram . getCo...
Subtract the contents of another histogram from this one .
154,882
public double highestEquivalentValue ( final double value ) { double nextNonEquivalentValue = nextNonEquivalentValue ( value ) ; double highestEquivalentValue = nextNonEquivalentValue - ( 2 * Math . ulp ( nextNonEquivalentValue ) ) ; while ( highestEquivalentValue + Math . ulp ( highestEquivalentValue ) < nextNonEquiva...
Get the highest value that is equivalent to the given value within the histogram's resolution. Here, "equivalent" means that value samples recorded for any two equivalent values are counted in a common total count.
154,883
/**
 * Construct a new DoubleHistogram by decoding it from a compressed form in a
 * ByteBuffer.
 *
 * @param buffer the ByteBuffer containing the compressed histogram
 * @param minBarForHighestToLowestValueRatio force the returned histogram to
 *        support a highest-to-lowest value ratio at least as large as this
 * @return the decoded DoubleHistogram
 * @throws DataFormatException on errors in decoding the buffer's compressed contents
 */
public static DoubleHistogram decodeFromCompressedByteBuffer(
        final ByteBuffer buffer,
        final long minBarForHighestToLowestValueRatio) throws DataFormatException {
    // Delegates to the class-parameterized overload, selecting the default
    // Histogram class for the internal integer-values histogram.
    return decodeFromCompressedByteBuffer(buffer, Histogram.class,
            minBarForHighestToLowestValueRatio);
}
Construct a new DoubleHistogram by decoding it from a compressed form in a ByteBuffer .
154,884
/**
 * Get the object at the specified cell location.
 *
 * @param row zero-based row index
 * @param col zero-based column index
 * @return the cell value, or null when the index is past the end of the
 *         rows or past the end of that row's columns
 */
public Object getValueAt(int row, int col) {

    // Row index beyond the stored rows: nothing to return.
    if (row >= rows.size()) {
        return null;
    }

    Object[] rowData = (Object[]) rows.elementAt(row);

    // Column index beyond this row's width: nothing to return.
    return (col >= rowData.length) ? null : rowData[col];
}
Get the object at the specified cell location .
154,885
/**
 * Set the names of the column headings.
 *
 * Stores a defensive copy so later modifications to the caller's array do
 * not affect the table headers.
 *
 * @param h the heading values, one per column; must not be null
 */
public void setHead(Object[] h) {
    // clone() replaces the original hand-rolled element-by-element copy loop;
    // same shallow-copy semantics, same NullPointerException on null input.
    headers = h.clone();
}
Set the names of the column headings.
154,886
/**
 * Append a tuple to the end of the table.
 *
 * A shallow defensive copy of the tuple is stored, so later modifications
 * to the caller's array do not affect the table.
 *
 * @param r the row values to append; must not be null
 */
public void addRow(Object[] r) {
    // clone() replaces the manual copy loop. The original loop also
    // contained an empty "if (row[i] == null) { }" block, which was dead
    // code and has been removed.
    rows.addElement(r.clone());
}
Append a tuple to the end of the table .
154,887
/**
 * Returns the current row object. Type of object is implementation defined.
 *
 * @return the row at the current position, or null when the position is
 *         before the first row or at/past the total row count
 */
public Object[] getCurrent() {

    // Position outside the navigator's bounds.
    if (currentPos < 0 || currentPos >= size) {
        return null;
    }

    // We have advanced exactly one row past the locally buffered block:
    // fetch the next block (getBlock replaces table and updates currentOffset).
    if (currentPos == currentOffset + table.length) {
        getBlock(currentOffset + table.length);
    }

    // Index into the local buffer relative to its starting offset.
    return table[currentPos - currentOffset];
}
Returns the current row object. The type of the returned object is implementation-defined.
154,888
/**
 * Fetches a block of rows from the session starting at the given offset
 * and installs it as the local buffer. baseBlockSize remains unchanged.
 *
 * @param offset absolute row offset of the block to fetch
 */
void getBlock(int offset) {

    try {
        RowSetNavigatorClient source = session.getRows(id, offset, baseBlockSize);

        table = source.table;
        currentOffset = source.currentOffset;
    } catch (HsqlException e) {
        // NOTE(review): failures are silently swallowed, leaving the previous
        // block and offset in place — presumably deliberate best-effort
        // behavior for a remote fetch; confirm before changing.
    }
}
Fetches a block of rows from the session starting at the given offset; baseBlockSize remains unchanged.
154,889
/**
 * Retrieves a description of a table's optimal set of columns that
 * uniquely identifies a row.
 *
 * @throws SQLException always: either the connection is closed, or this
 *         operation is not supported by this driver
 */
public ResultSet getBestRowIdentifier(String catalog, String schema,
        String table, int scope, boolean nullable) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves a description of a table's optimal set of columns that uniquely identifies a row.
154,890
/**
 * Retrieves the catalog names available in this database.
 *
 * @return a result set with a single TABLE_CAT column containing the one
 *         catalog name
 * @throws SQLException if the connection is closed
 */
public ResultSet getCatalogs() throws SQLException {
    checkClosed();

    // Single-column, single-row table holding the catalog name.
    VoltTable catalogs =
            new VoltTable(new VoltTable.ColumnInfo("TABLE_CAT", VoltType.STRING));

    catalogs.addRow(new Object[] { catalogString });

    return new JDBC4ResultSet(null, catalogs);
}
Retrieves the catalog names available in this database .
154,891
/**
 * Retrieves the major version number of the underlying database.
 *
 * @return the major version, parsed from the first dot-separated component
 *         of the server version string
 * @throws SQLException if the connection is closed
 */
public int getDatabaseMajorVersion() throws SQLException {
    checkClosed();
    // Removed leftover debug System.out.println of the version string;
    // parseInt also avoids the needless Integer boxing of valueOf.
    return Integer.parseInt(versionString.split("\\.")[0]);
}
Retrieves the major version number of the underlying database .
154,892
/**
 * Retrieves a description of the system and user functions available in
 * the given catalog.
 *
 * @throws SQLException always: either the connection is closed, or this
 *         operation is not supported by this driver
 */
public ResultSet getFunctions(String catalog, String schemaPattern,
        String functionNamePattern) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
Retrieves a description of the system and user functions available in the given catalog .
154,893
public ResultSet getPrimaryKeys ( String catalog , String schema , String table ) throws SQLException { assert ( table != null && ! table . isEmpty ( ) ) ; checkClosed ( ) ; this . sysCatalog . setString ( 1 , "PRIMARYKEYS" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vta...
Retrieves a description of the given table's primary key columns.
154,894
/**
 * Retrieves the schema names available in this database.
 *
 * @return a two-column (TABLE_SCHEM, TABLE_CATALOG) result set with no rows
 * @throws SQLException if the connection is closed
 */
public ResultSet getSchemas() throws SQLException {
    checkClosed();

    // NOTE(review): no rows are added — presumably this database exposes no
    // named schemas; confirm this is intentional.
    ColumnInfo schemaColumn = new ColumnInfo("TABLE_SCHEM", VoltType.STRING);
    ColumnInfo catalogColumn = new ColumnInfo("TABLE_CATALOG", VoltType.STRING);

    return new JDBC4ResultSet(this.sysCatalog, new VoltTable(schemaColumn, catalogColumn));
}
Retrieves the schema names available in this database .
154,895
public ResultSet getTablePrivileges ( String catalog , String schemaPattern , String tableNamePattern ) throws SQLException { checkClosed ( ) ; VoltTable vtable = new VoltTable ( new ColumnInfo ( "TABLE_CAT" , VoltType . STRING ) , new ColumnInfo ( "TABLE_SCHEM" , VoltType . STRING ) , new ColumnInfo ( "TABLE_NAME" , V...
Retrieves a description of the access rights for each table available in a catalog .
154,896
public static Pattern computeJavaPattern ( String sqlPattern ) { StringBuffer pattern_buff = new StringBuffer ( ) ; for ( int i = 0 ; i < sqlPattern . length ( ) ; i ++ ) { char c = sqlPattern . charAt ( i ) ; if ( c == '_' ) { pattern_buff . append ( '.' ) ; } else if ( c == '%' ) { pattern_buff . append ( ".*" ) ; } ...
Convert the user's VoltDB SQL pattern into a Java regex pattern.
154,897
public ResultSet getTables ( String catalog , String schemaPattern , String tableNamePattern , String [ ] types ) throws SQLException { checkClosed ( ) ; this . sysCatalog . setString ( 1 , "TABLES" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vtable = res . getVoltTable ...
Retrieves a description of the tables available in the given catalog .
154,898
/**
 * Retrieves the table types available in this database.
 *
 * @return a single-column (TABLE_TYPE) result set, one row per supported type
 * @throws SQLException if the connection is closed
 */
public ResultSet getTableTypes() throws SQLException {
    checkClosed();

    VoltTable typeTable = new VoltTable(new ColumnInfo("TABLE_TYPE", VoltType.STRING));

    // One row per supported table type.
    for (String tableType : tableTypes) {
        typeTable.addRow(tableType);
    }

    return new JDBC4ResultSet(this.sysCatalog, typeTable);
}
Retrieves the table types available in this database .
154,899
/**
 * Retrieves a description of all the data types supported by this database.
 *
 * @return the result of querying the system catalog with the TYPEINFO selector
 * @throws SQLException if the connection is closed or the query fails
 */
public ResultSet getTypeInfo() throws SQLException {
    checkClosed();

    // Delegate to the prepared system-catalog query.
    sysCatalog.setString(1, "TYPEINFO");

    return sysCatalog.executeQuery();
}
Retrieves a description of all the data types supported by this database .