code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Executes a MERGE statement: rows produced by the join of the source and
 * target range variables are either updated (matched) or collected for
 * insertion (non-matched), then both operations are applied as one batch.
 *
 * @param session current session
 * @return update-count Result, chained with generated keys when requested
 */
Result executeMergeStatement(Session session) {

    Result          resultOut          = null;
    RowSetNavigator generatedNavigator = null;
    PersistentStore store = session.sessionData.getRowStore(baseTable);

    // When generated columns are requested, chain a navigator that will
    // collect the generated values of every inserted row.
    if (generatedIndexes != null) {
        resultOut = Result.newUpdateCountResult(generatedResultMetaData, 0);
        generatedNavigator = resultOut.getChainedResult().getNavigator();
    }

    int count = 0;

    // data generated for non-matching rows
    RowSetNavigatorClient newData = new RowSetNavigatorClient(8);

    // rowset for update operation
    HashMappedList updateRowSet = new HashMappedList();
    RangeVariable[] joinRangeIterators = targetRangeVariables;

    // populate insert and update lists
    RangeIterator[] rangeIterators = new RangeIterator[joinRangeIterators.length];

    for (int i = 0; i < joinRangeIterators.length; i++) {
        rangeIterators[i] = joinRangeIterators[i].getIterator(session);
    }

    // Depth-first walk over the join iterators: currentIndex moves right on
    // a match and left on exhaustion; the loop ends when the first iterator
    // is exhausted (currentIndex becomes -1).
    for (int currentIndex = 0; 0 <= currentIndex; ) {
        RangeIterator it          = rangeIterators[currentIndex];
        boolean       beforeFirst = it.isBeforeFirst();

        if (it.next()) {
            if (currentIndex < joinRangeIterators.length - 1) {
                currentIndex++;

                continue;
            }
        } else {
            // Iterator exhausted without producing any row: if this is the
            // second (source-matching) iterator and it never advanced, the
            // outer row had no match, so it is a candidate for INSERT.
            if (currentIndex == 1 && beforeFirst) {
                Object[] data = getMergeInsertData(session);

                if (data != null) {
                    newData.add(data);
                }
            }

            it.reset();

            currentIndex--;

            continue;
        }

        // row matches!
        if (updateExpressions != null) {
            Row row = it.getCurrentRow();    // this is always the second iterator
            Object[] data = getUpdatedData(session, baseTable,
                                           updateColumnMap, updateExpressions,
                                           baseTable.getColumnTypes(),
                                           row.getData());

            updateRowSet.add(row, data);
        }
    }

    // run the transaction as a whole, updating and inserting where needed
    // update any matched rows
    if (updateRowSet.size() > 0) {
        count = update(session, baseTable, updateRowSet);
    }

    // insert any non-matched rows
    newData.beforeFirst();

    while (newData.hasNext()) {
        Object[] data = newData.getNext();

        baseTable.insertRow(session, store, data);

        if (generatedNavigator != null) {
            Object[] generatedValues = getGeneratedColumns(data);

            generatedNavigator.add(generatedValues);
        }
    }

    baseTable.fireAfterTriggers(session, Trigger.INSERT_AFTER, newData);

    count += newData.getSize();

    if (resultOut == null) {
        return Result.getUpdateCountResult(count);
    } else {
        resultOut.setUpdateCount(count);

        return resultOut;
    }
}
java
/**
 * Executes a DELETE statement: collects every row matched by the target
 * range iterators, then deletes them as a single set so constraint checks
 * and triggers see the whole batch.
 *
 * @param session current session
 * @return update-count Result with the number of deleted rows
 */
Result executeDeleteStatement(Session session) {

    RowSetNavigatorLinkedList matchedRows = new RowSetNavigatorLinkedList();
    RangeIterator it = RangeVariable.getIterator(session, targetRangeVariables);

    // Gather all matching rows first; deletion happens as one operation.
    while (it.next()) {
        matchedRows.add(it.getCurrentRow());
    }

    int count = delete(session, baseTable, matchedRows);

    // TRUNCATE ... RESTART IDENTITY resets the identity sequence afterwards.
    if (restartIdentity && targetTable.identitySequence != null) {
        targetTable.identitySequence.reset();
    }

    return Result.getUpdateCountResult(count);
}
java
/**
 * Deletes the given set of rows from the table, honouring foreign-key
 * constraints and firing AFTER DELETE triggers.
 *
 * The FK path works in three ordered passes over the row set:
 * check-only, cascade-collect, then physical delete — so a violation is
 * detected before any row is touched.
 *
 * @param session current session
 * @param table   target table
 * @param oldRows rows to delete
 * @return number of rows in {@code oldRows}
 */
int delete(Session session, Table table, RowSetNavigator oldRows) {

    // Fast path: no FK constraints reference this table, so delete directly
    // and fire triggers without any cascade bookkeeping.
    if (table.fkMainConstraints.length == 0) {
        deleteRows(session, table, oldRows);
        oldRows.beforeFirst();

        if (table.hasTrigger(Trigger.DELETE_AFTER)) {
            table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
        }

        return oldRows.getSize();
    }

    // Session-scoped scratch structures reused across statements.
    HashSet path = session.sessionContext.getConstraintPath();
    HashMappedList tableUpdateList = session.sessionContext.getTableUpdateList();

    // Pass 1: dry-run cascade check (delete flag = false) — throws on a
    // referential-integrity violation before anything is modified.
    if (session.database.isReferentialIntegrity()) {
        oldRows.beforeFirst();

        while (oldRows.hasNext()) {
            oldRows.next();

            Row row = oldRows.getCurrentRow();

            path.clear();
            checkCascadeDelete(session, table, tableUpdateList, row, false, path);
        }
    }

    // Pass 2: perform the cascade (delete flag = true), accumulating
    // dependent-table updates into tableUpdateList.
    if (session.database.isReferentialIntegrity()) {
        oldRows.beforeFirst();

        while (oldRows.hasNext()) {
            oldRows.next();

            Row row = oldRows.getCurrentRow();

            path.clear();
            checkCascadeDelete(session, table, tableUpdateList, row, true, path);
        }
    }

    // Pass 3: physically delete rows not already removed by the cascade.
    oldRows.beforeFirst();

    while (oldRows.hasNext()) {
        oldRows.next();

        Row row = oldRows.getCurrentRow();

        if (!row.isDeleted(session)) {
            table.deleteNoRefCheck(session, row);
        }
    }

    // Apply accumulated SET NULL / SET DEFAULT style updates per table.
    for (int i = 0; i < tableUpdateList.size(); i++) {
        Table targetTable = (Table) tableUpdateList.getKey(i);
        HashMappedList updateList = (HashMappedList) tableUpdateList.get(i);

        if (updateList.size() > 0) {
            targetTable.updateRowSet(session, updateList, null, true);
            updateList.clear();
        }
    }

    oldRows.beforeFirst();

    if (table.hasTrigger(Trigger.DELETE_AFTER)) {
        table.fireAfterTriggers(session, Trigger.DELETE_AFTER, oldRows);
    }

    path.clear();

    return oldRows.getSize();
}
java
/**
 * Folds the changed columns of {@code newData} into any pending update
 * already registered for {@code row}; registers the row with its new data
 * when no update is pending yet.
 *
 * @param rowSet  pending row -> data map
 * @param row     row being updated
 * @param newData full replacement row data
 * @param cols    indexes of the columns that changed
 */
static void mergeUpdate(HashMappedList rowSet, Row row, Object[] newData, int[] cols) {

    Object[] pending = (Object[]) rowSet.get(row);

    if (pending == null) {
        rowSet.add(row, newData);
        return;
    }

    // Row already queued: overwrite just the changed columns in place.
    for (int col : cols) {
        pending[col] = newData[col];
    }
}
java
/**
 * Merges an update for a row that may already have a pending update,
 * keeping the previously queued column values.
 *
 * Returns false when the row's key columns were changed by BOTH the
 * original row->newData transition and relative to the pending data —
 * an ambiguous double-update the caller must reject.
 *
 * @return true if the update was merged or added, false on conflict
 */
static boolean mergeKeepUpdate(Session session, HashMappedList rowSet,
                               int[] cols, Type[] colTypes, Row row,
                               Object[] newData) {

    Object[] data = (Object[]) rowSet.get(row);

    if (data != null) {
        // Conflict: key columns differ from the original row AND from the
        // already-pending data — two contradictory updates.
        if (IndexAVL.compareRows(row.getData(), newData, cols, colTypes) != 0
                && IndexAVL.compareRows(newData, data, cols, colTypes) != 0) {
            return false;
        }

        // Keep the previously queued values for the key columns.
        for (int j = 0; j < cols.length; j++) {
            newData[cols[j]] = data[cols[j]];
        }

        rowSet.put(row, newData);
    } else {
        rowSet.add(row, newData);
    }

    return true;
}
java
/**
 * Decodes a serialized export row using the legacy schema and repackages
 * the fields callers need.
 *
 * @param rowData serialized row bytes
 * @return values plus partition info extracted from the decoded row
 * @throws IOException on decode failure
 */
protected ExportRowData decodeRow(byte[] rowData) throws IOException {
    final ExportRow decoded =
            ExportRow.decodeRow(m_legacyRow, getPartition(), m_startTS, rowData);
    return new ExportRowData(decoded.values, decoded.partitionValue, decoded.partitionId);
}
java
/**
 * Writes one table row to the CSV writer, formatting each column by its
 * VoltType.
 *
 * @param row            column values, aligned with m_tableSchema
 * @param writer         destination CSV writer
 * @param skipinternal   when true, internal leading columns are skipped
 * @param binaryEncoding HEX or BASE64 for VARBINARY columns; null = toString
 * @param dateFormatter  formatter for TIMESTAMP columns; null = toString
 * @return true on success; false if any exception occurred (stack trace printed)
 */
public boolean writeRow(Object row[], CSVWriter writer, boolean skipinternal,
                        BinaryEncoding binaryEncoding, SimpleDateFormat dateFormatter) {

    // First exported field; internal columns before it are dropped.
    int firstfield = getFirstField(skipinternal);

    try {
        String[] fields = new String[m_tableSchema.size() - firstfield];

        for (int i = firstfield; i < m_tableSchema.size(); i++) {
            if (row[i] == null) {
                fields[i - firstfield] = "NULL";
            } else if (m_tableSchema.get(i) == VoltType.VARBINARY
                    && binaryEncoding != null) {
                if (binaryEncoding == BinaryEncoding.HEX) {
                    fields[i - firstfield] = Encoder.hexEncode((byte[]) row[i]);
                } else {
                    // Only other supported encoding is BASE64.
                    fields[i - firstfield] = Encoder.base64Encode((byte[]) row[i]);
                }
            } else if (m_tableSchema.get(i) == VoltType.STRING) {
                fields[i - firstfield] = (String) row[i];
            } else if (m_tableSchema.get(i) == VoltType.TIMESTAMP
                    && dateFormatter != null) {
                TimestampType timestamp = (TimestampType) row[i];
                // Approximate conversion: sub-millisecond precision is lost.
                fields[i - firstfield] =
                        dateFormatter.format(timestamp.asApproximateJavaDate());
            } else {
                fields[i - firstfield] = row[i].toString();
            }
        }

        writer.writeNext(fields);
    } catch (Exception x) {
        // Best-effort export: report and signal failure rather than throw.
        x.printStackTrace();
        return false;
    }
    return true;
}
java
/**
 * Resolves the configured partition column name to its column index.
 *
 * Falls back to {@code PARTITION_ID_INDEX} (and logs an error) when the
 * name is blank or does not match any column (case-insensitive).
 *
 * @param partitionColumnName configured column name, may be null/blank
 * @return resolved column index, or PARTITION_ID_INDEX as the default
 */
public final int setPartitionColumnName(String partitionColumnName) {
    if (partitionColumnName == null || partitionColumnName.trim().isEmpty()) {
        return PARTITION_ID_INDEX;
    }
    // Single indexed scan. The original matched with equalsIgnoreCase and
    // then re-scanned the list via indexOf(name) (an equals() search) —
    // redundant O(n) work on every lookup.
    int idx = -1;
    for (int i = 0; i < m_source.columnNames.size(); i++) {
        if (m_source.columnNames.get(i).equalsIgnoreCase(partitionColumnName)) {
            idx = i;
            break;
        }
    }
    if (idx == -1) {
        m_partitionColumnIndex = PARTITION_ID_INDEX;
        m_logger.error("Export configuration error: specified " + m_source.tableName
                + "." + partitionColumnName
                + " does not exist. A default partition or routing key will be used.");
    } else {
        m_partitionColumnIndex = idx;
    }
    return m_partitionColumnIndex;
}
java
/**
 * Registers a shutdown-hook action with the singleton hook manager.
 *
 * @param priority   ordering priority among registered hooks
 * @param runOnCrash whether the action should also run on crash
 * @param action     the action to execute at shutdown
 */
public static void registerShutdownHook(int priority, boolean runOnCrash, Runnable action) {
    m_instance.addHook(priority, runOnCrash, action);
    // Any registered hook enables printing the crash message.
    ShutdownHooks.m_crashMessage = true;
}
java
/**
 * Returns the remote address of the send thread's socket, or null when
 * any link in the chain (key, channel, socket) is not yet established.
 */
SocketAddress getRemoteSocketAddress() {
    // a lot could go wrong here, so rather than put in a bunch of code
    // to check for nulls all down the chain let's do it the simple
    // yet bulletproof way: catch the NPE instead of null-checking
    // sendThread / sockKey / channel() individually.
    try {
        return ((SocketChannel) sendThread.sockKey.channel()).socket()
                .getRemoteSocketAddress();
    } catch (NullPointerException e) {
        return null;
    }
}
java
/**
 * Returns the local address of the send thread's socket, or null when
 * any link in the chain (key, channel, socket) is not yet established.
 */
SocketAddress getLocalSocketAddress() {
    // a lot could go wrong here, so rather than put in a bunch of code
    // to check for nulls all down the chain let's do it the simple
    // yet bulletproof way: catch the NPE instead of null-checking
    // each link individually.
    try {
        return ((SocketChannel) sendThread.sockKey.channel()).socket()
                .getLocalSocketAddress();
    } catch (NullPointerException e) {
        return null;
    }
}
java
/**
 * Builds a thread name from the current thread's name (with any
 * "-EventThread" tag removed) plus the given suffix.
 *
 * @param suffix appended verbatim to the cleaned base name
 * @return the composed thread name
 */
private static String makeThreadName(String suffix) {
    final String base =
            Thread.currentThread().getName().replaceAll("-EventThread", "");
    return base + suffix;
}
java
public void commit(Xid xid, boolean onePhase) throws XAException { // Comment out following debug statement before public release: System.err.println("Performing a " + (onePhase ? "1-phase" : "2-phase") + " commit on " + xid); JDBCXAResource resource = xaDataSource.getResource(xid); if (resource == null) { throw new XAException("The XADataSource has no such Xid: " + xid); } resource.commitThis(onePhase); }
java
/**
 * Reports whether the given XAResource is managed by the same resource
 * manager as this one — i.e. both are JDBCXAResources backed by the
 * identical XADataSource instance.
 *
 * @param xares resource to compare against
 * @return true when both resources share the same XADataSource
 */
public boolean isSameRM(XAResource xares) throws XAException {
    return (xares instanceof JDBCXAResource)
            && ((JDBCXAResource) xares).getXADataSource() == xaDataSource;
}
java
/**
 * Phase one of two-phase commit: marks this resource as prepared.
 *
 * NOTE(review): no real prepare work is done yet — the method only
 * validates state and flips it to PREPARED, always voting XA_OK.
 *
 * @param xid transaction branch (validated, otherwise unused here)
 * @return XA_OK unconditionally
 * @throws XAException if this resource is not in the ENDED state
 */
public int prepare(Xid xid) throws XAException {

    validateXid(xid);

    /**
     * @todo: This is where the real 2-phase work should be done to
     * determine if a commit done here would succeed or not.
     */

    /**
     * @todo: May improve performance to return XA_RDONLY whenever
     * possible, but I don't know.
     * Could determine this by checking if DB instance is in RO mode,
     * or perhaps (with much difficulty) to determine if there have
     * been any modifications performed.
     */
    if (state != XA_STATE_ENDED) {
        throw new XAException("Invalid XAResource state");
    }

    // throw new XAException(
    //    "Sorry. HSQLDB has not implemented 2-phase commits yet");
    state = XA_STATE_PREPARED;

    return XA_OK;    // As noted above, should check non-committed work.
}
java
/**
 * Rolls back the transaction branch identified by {@code xid}.
 *
 * @param xid transaction branch to roll back
 * @throws XAException when no prepared resource exists for the Xid
 */
public void rollback(Xid xid) throws XAException {
    JDBCXAResource resource = xaDataSource.getResource(xid);

    if (resource != null) {
        resource.rollbackThis();
        return;
    }
    throw new XAException(
        "The XADataSource has no such Xid in prepared state: " + xid);
}
java
private void processValue(String value) { // this Option has a separator character if (hasValueSeparator()) { // get the separator character char sep = getValueSeparator(); // store the index for the value separator int index = value.indexOf(sep); // while there are more value separators while (index != -1) { // next value to be added if (values.size() == numberOfArgs - 1) { break; } // store add(value.substring(0, index)); // parse value = value.substring(index + 1); // get new index index = value.indexOf(sep); } } // store the actual value or the last value that has been parsed add(value); }
java
/**
 * Enters this monitor and waits uninterruptibly until the guard is
 * satisfied or the timeout elapses. Interrupts that occur while locking
 * or waiting are recorded and re-asserted on exit.
 *
 * @param guard guard to wait for; must belong to this monitor
 * @param time  maximum time to wait
 * @param unit  unit of {@code time}
 * @return true if the guard was satisfied within the timeout
 * @throws IllegalMonitorStateException if the guard belongs to another monitor
 */
public boolean enterWhenUninterruptibly(Guard guard, long time, TimeUnit unit) {
    final long timeoutNanos = toSafeNanos(time, unit);
    if (guard.monitor != this) {
        throw new IllegalMonitorStateException();
    }
    final ReentrantLock lock = this.lock;
    long startTime = 0L;
    // Reentrant entry: a signal may already be pending for this thread.
    boolean signalBeforeWaiting = lock.isHeldByCurrentThread();
    // Clear and remember interrupt status so waiting is uninterruptible.
    boolean interrupted = Thread.interrupted();
    try {
        if (fair || !lock.tryLock()) {
            // Timed, retried lock acquisition: interrupts shrink the
            // remaining budget instead of aborting.
            startTime = initNanoTime(timeoutNanos);
            for (long remainingNanos = timeoutNanos; ; ) {
                try {
                    if (lock.tryLock(remainingNanos, TimeUnit.NANOSECONDS)) {
                        break;
                    } else {
                        return false;
                    }
                } catch (InterruptedException interrupt) {
                    interrupted = true;
                    remainingNanos = remainingNanos(startTime, timeoutNanos);
                }
            }
        }
        boolean satisfied = false;
        try {
            while (true) {
                try {
                    if (guard.isSatisfied()) {
                        satisfied = true;
                    } else {
                        final long remainingNanos;
                        if (startTime == 0L) {
                            // Lock was acquired without timing; start the
                            // clock now so only the wait is budgeted.
                            startTime = initNanoTime(timeoutNanos);
                            remainingNanos = timeoutNanos;
                        } else {
                            remainingNanos = remainingNanos(startTime, timeoutNanos);
                        }
                        satisfied = awaitNanos(guard, remainingNanos, signalBeforeWaiting);
                    }
                    return satisfied;
                } catch (InterruptedException interrupt) {
                    interrupted = true;
                    // Any pending signal was consumed by the interrupt path.
                    signalBeforeWaiting = false;
                }
            }
        } finally {
            if (!satisfied) {
                lock.unlock();    // No need to signal if timed out
            }
        }
    } finally {
        // Re-assert interrupt status swallowed during the wait.
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}
java
/**
 * Attempts to enter this monitor within the given time, only succeeding
 * (and keeping the lock) if the guard is satisfied at entry. Does not wait
 * for the guard itself.
 *
 * @param guard guard to test; must belong to this monitor
 * @param time  maximum time to wait for the lock
 * @param unit  unit of {@code time}
 * @return true if the lock was acquired and the guard was satisfied
 * @throws InterruptedException        if interrupted while locking
 * @throws IllegalMonitorStateException if the guard belongs to another monitor
 */
public boolean enterIfInterruptibly(Guard guard, long time, TimeUnit unit)
        throws InterruptedException {
    if (guard.monitor != this) {
        throw new IllegalMonitorStateException();
    }
    final ReentrantLock lock = this.lock;
    if (!lock.tryLock(time, unit)) {
        return false;
    }
    boolean satisfied = false;
    try {
        return satisfied = guard.isSatisfied();
    } finally {
        // Release the lock unless the caller is entering (guard held).
        if (!satisfied) {
            lock.unlock();
        }
    }
}
java
/**
 * Waits (interruptibly) for the guard to become satisfied, for at most the
 * given time. The caller must already hold this monitor.
 *
 * @param guard guard to wait for; must belong to this monitor
 * @param time  maximum time to wait
 * @param unit  unit of {@code time}
 * @return true if the guard was satisfied within the timeout
 * @throws InterruptedException         if interrupted (or already interrupted)
 * @throws IllegalMonitorStateException if the guard belongs to another monitor
 *                                      or the lock is not held by this thread
 */
public boolean waitFor(Guard guard, long time, TimeUnit unit)
        throws InterruptedException {
    final long timeoutNanos = toSafeNanos(time, unit);
    // Non-short-circuit & is deliberate: both checks are cheap and
    // evaluating both avoids a branch.
    if (!((guard.monitor == this) & lock.isHeldByCurrentThread())) {
        throw new IllegalMonitorStateException();
    }
    // Fast path: already satisfied, no wait needed.
    if (guard.isSatisfied()) {
        return true;
    }
    // Honor a pending interrupt before blocking.
    if (Thread.interrupted()) {
        throw new InterruptedException();
    }
    return awaitNanos(guard, timeoutNanos, true);
}
java
/**
 * Queues a rejoin-data acknowledgement for the given source HSId.
 *
 * @param hsId       HSId of the site being acked
 * @param isEOS      true when this ack marks end-of-stream
 * @param targetId   rejoin target identifier
 * @param blockIndex index of the acknowledged block
 */
public void ack(long hsId, boolean isEOS, long targetId, int blockIndex) {
    // Guarded: the original concatenated the debug string on every call
    // even when debug logging was disabled.
    if (rejoinLog.isDebugEnabled()) {
        rejoinLog.debug("Queue ack for hsId:" + hsId +
                        " isEOS: " + isEOS +
                        " targetId:" + targetId +
                        " blockIndex: " + blockIndex);
    }
    m_blockIndices.offer(Pair.of(hsId, new RejoinDataAckMessage(isEOS, targetId, blockIndex)));
}
java
/**
 * Moves the cursor to the given absolute row (JDBC semantics: positive
 * counts from the front, negative from the end, 0 means before-first).
 *
 * @param row 1-based target row; negative counts back from the last row
 * @return true if the cursor lands on a valid row (or the defined
 *         special cases below)
 */
@Override
public boolean absolute(int row) throws SQLException {
    checkClosed();
    // Empty result set: only row == 0 (before-first) "succeeds".
    if (rowCount == 0) {
        if (row == 0) {
            return true;
        }
        return false;
    }
    if (row == 0) {
        beforeFirst();
        return true;
    }
    // Negative offset larger than the row count: park before first.
    if (rowCount + row < 0) {
        beforeFirst();
        return false;
    }
    // Past the end: park after last. NOTE(review): returning true for
    // row == rowCount+1 looks intentional here — confirm against callers.
    if (row > rowCount) {
        cursorPosition = Position.afterLast;
        if(row == rowCount+1) {
            return true;
        }
        else {
            return false;
        }
    }
    try {
        // for negative row numbers or row numbers lesser then activeRowIndex, resetRowPosition
        // method is called and the cursor advances to the desired row from top of the table
        if(row < 0) {
            // Translate a negative (from-end) index into a from-front index.
            row += rowCount;
            row++;
        }
        if(table.getActiveRowIndex() > row || cursorPosition != Position.middle) {
            // Underlying table only advances forward: rewind, then walk down.
            table.resetRowPosition();
            table.advanceToRow(0);
        }
        cursorPosition = Position.middle;
        return table.advanceToRow(row-1);
    }
    catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Maps a column label to its 1-based JDBC column index.
 *
 * @param columnLabel column name to look up
 * @return 1-based column index
 * @throws SQLException with COLUMN_NOT_FOUND when no such column exists
 */
@Override
public int findColumn(String columnLabel) throws SQLException {
    checkClosed();
    try {
        // VoltTable indices are 0-based; JDBC indices are 1-based.
        final int zeroBased = table.getColumnIndex(columnLabel);
        return zeroBased + 1;
    } catch (IllegalArgumentException iax) {
        throw SQLError.get(iax, SQLError.COLUMN_NOT_FOUND, columnLabel);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the value of the given column as a BigDecimal, converting from
 * any exact-numeric or FLOAT column type.
 *
 * @param columnIndex 1-based column index
 * @return the value as BigDecimal, or null if the column value was SQL NULL
 * @throws SQLException on type mismatch or any access failure
 */
@Override
public BigDecimal getBigDecimal(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final VoltType type = table.getColumnType(columnIndex - 1);
        BigDecimal decimalValue = null;
        switch(type) {
            // All integral types share one conversion (the original
            // duplicated the identical statement four times).
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                decimalValue = new BigDecimal(table.getLong(columnIndex - 1));
                break;
            case FLOAT:
                // NOTE: new BigDecimal(double) keeps the exact binary value;
                // kept deliberately — BigDecimal.valueOf would change results.
                decimalValue = new BigDecimal(table.getDouble(columnIndex - 1));
                break;
            case DECIMAL:
                decimalValue = table.getDecimalAsBigDecimal(columnIndex - 1);
                break;
            default:
                throw new IllegalArgumentException("Cannot get BigDecimal value for column type '" + type + "'");
        }
        // A NULL column value surfaces as null, not a zero decimal.
        return table.wasNull() ? null : decimalValue;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as a binary stream over its raw string bytes.
 *
 * @param columnIndex 1-based column index
 * @return stream over the column's bytes
 * @throws SQLException on any access failure
 */
@Override
public InputStream getBinaryStream(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final byte[] raw = table.getStringAsBytes(columnIndex - 1);
        return new ByteArrayInputStream(raw);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value wrapped in a SerialBlob.
 *
 * @param columnIndex 1-based column index
 * @return Blob over the column's raw bytes
 * @throws SQLException on any access failure
 */
@Override
public Blob getBlob(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final byte[] raw = table.getStringAsBytes(columnIndex - 1);
        return new SerialBlob(raw);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
@Override public boolean getBoolean(int columnIndex) throws SQLException { checkColumnBounds(columnIndex); // TODO: Tempting to apply a != 0 operation on numbers and // .equals("true") on strings, but... hacky try { return (new Long(table.getLong(columnIndex - 1))).intValue() == 1; } catch (Exception x) { throw SQLError.get(x); } }
java
/**
 * Returns the column value as a byte, range-checking first.
 *
 * @param columnIndex 1-based column index
 * @return the value narrowed to byte
 * @throws SQLException if the value is outside the byte range, or on any
 *                      access failure
 */
@Override
public byte getByte(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final long v = getPrivateInteger(columnIndex);
        if (v < Byte.MIN_VALUE || v > Byte.MAX_VALUE) {
            throw new SQLException("Value out of byte range");
        }
        return (byte) v;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as a byte array; supported for STRING and
 * VARBINARY columns only.
 *
 * @param columnIndex 1-based column index
 * @return the raw bytes of the column value
 * @throws SQLException with CONVERSION_NOT_FOUND for other column types,
 *                      or on any access failure
 */
@Override
public byte[] getBytes(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final VoltType type = table.getColumnType(columnIndex - 1);
        switch (type) {
            case STRING:
                return table.getStringAsBytes(columnIndex - 1);
            case VARBINARY:
                return table.getVarbinary(columnIndex - 1);
            default:
                throw SQLError.get(SQLError.CONVERSION_NOT_FOUND, type, "byte[]");
        }
    } catch (SQLException x) {
        // Pass conversion errors through unwrapped.
        throw x;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column's string value wrapped in a SerialClob.
 *
 * @param columnIndex 1-based column index
 * @return Clob over the column's characters
 * @throws SQLException on any access failure (including a NULL value,
 *                      which surfaces as a wrapped NPE)
 */
@Override
public Clob getClob(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final char[] chars = table.getString(columnIndex - 1).toCharArray();
        return new SerialClob(chars);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as a float, converting from any numeric column
 * type and range-checking against Float.MAX_VALUE.
 *
 * @param columnIndex 1-based column index
 * @return the value as float; 0 when the column value was SQL NULL
 * @throws SQLException if the magnitude exceeds the float range, or on
 *                      any access failure
 */
@Override
public float getFloat(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final VoltType type = table.getColumnType(columnIndex - 1);
        double doubleValue;
        switch(type) {
            // All integral types share one conversion (the original
            // duplicated the identical statement four times, each via the
            // deprecated Double(double) constructor).
            case TINYINT:
            case SMALLINT:
            case INTEGER:
            case BIGINT:
                doubleValue = table.getLong(columnIndex - 1);
                break;
            case FLOAT:
                doubleValue = table.getDouble(columnIndex - 1);
                break;
            case DECIMAL:
                doubleValue = table.getDecimalAsBigDecimal(columnIndex - 1).doubleValue();
                break;
            default:
                throw new IllegalArgumentException("Cannot get float value for column type '" + type + "'");
        }
        if (table.wasNull()) {
            // SQL NULL maps to 0, matching JDBC primitive-getter convention.
            doubleValue = 0;
        } else if (Math.abs(doubleValue) > Float.MAX_VALUE) {
            throw new SQLException("Value out of float range");
        }
        return (float) doubleValue;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as an int, range-checking first.
 *
 * @param columnIndex 1-based column index
 * @return the value narrowed to int
 * @throws SQLException if the value is outside the int range, or on any
 *                      access failure
 */
@Override
public int getInt(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final long v = getPrivateInteger(columnIndex);
        if (v < Integer.MIN_VALUE || v > Integer.MAX_VALUE) {
            throw new SQLException("Value out of int range");
        }
        return (int) v;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as a long.
 *
 * @param columnIndex 1-based column index
 * @return the column value as long
 * @throws SQLException on any access failure
 */
@Override
public long getLong(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        return getPrivateInteger(columnIndex);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns a character Reader over the column's string value, or null when
 * the value was SQL NULL.
 *
 * @param columnIndex 1-based column index
 * @return Reader over the string value, or null for SQL NULL
 * @throws SQLException on any access failure
 */
@Override
public Reader getNCharacterStream(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final String value = table.getString(columnIndex - 1);
        return wasNull() ? null : new StringReader(value);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column's string value wrapped in a JDBC4NClob.
 *
 * @param columnIndex 1-based column index
 * @return NClob over the column's characters
 * @throws SQLException on any access failure
 */
@Override
public NClob getNClob(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final char[] chars = table.getString(columnIndex - 1).toCharArray();
        return new JDBC4NClob(chars);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as an Object of the natural Java type for its
 * VoltType; TIMESTAMP columns are routed through getTimestamp so callers
 * receive java.sql.Timestamp.
 *
 * @param columnIndex 1-based column index
 * @return the column value as an Object
 * @throws SQLException on any access failure
 */
@Override
public Object getObject(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final VoltType type = table.getColumnType(columnIndex - 1);
        return type == VoltType.TIMESTAMP
                ? getTimestamp(columnIndex)
                : table.get(columnIndex - 1, type);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Returns the column value as a short, range-checking first.
 *
 * @param columnIndex 1-based column index
 * @return the value narrowed to short
 * @throws SQLException if the value is outside the short range, or on any
 *                      access failure
 */
@Override
public short getShort(int columnIndex) throws SQLException {
    checkColumnBounds(columnIndex);
    try {
        final long v = getPrivateInteger(columnIndex);
        if (v < Short.MIN_VALUE || v > Short.MAX_VALUE) {
            throw new SQLException("Value out of short range");
        }
        return (short) v;
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Not supported by this driver.
 *
 * @throws SQLException always (no-support error)
 * @deprecated per the JDBC API; use getCharacterStream instead
 */
@Override
@Deprecated
public InputStream getUnicodeStream(int columnIndex) throws SQLException {
    // Bounds are still validated so callers get the more specific error.
    checkColumnBounds(columnIndex);
    throw SQLError.noSupport();
}
java
/**
 * Not supported by this driver; delegates to the index-based overload
 * (which always throws) after resolving the label.
 *
 * @deprecated per the JDBC API; use getCharacterStream instead
 */
@Override
@Deprecated
public InputStream getUnicodeStream(String columnLabel) throws SQLException {
    return getUnicodeStream(findColumn(columnLabel));
}
java
/**
 * Moves the cursor to the last row.
 *
 * @return true if positioned on the last row; false for an empty result set
 */
@Override
public boolean last() throws SQLException {
    checkClosed();
    if (rowCount == 0) {
        return false;
    }
    try {
        // The underlying table only advances forward; rewind first unless
        // the cursor is already somewhere in the middle.
        if (cursorPosition != Position.middle) {
            cursorPosition = Position.middle;
            table.resetRowPosition();
            table.advanceToRow(0);
        }
        return table.advanceToRow(rowCount - 1);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Advances the cursor one row forward.
 *
 * @return true if the cursor lands on a row; false once past the last row
 */
@Override
public boolean next() throws SQLException {
    checkClosed();
    // Already past the end, or currently on the final row: park after-last.
    if (cursorPosition == Position.afterLast ||
            table.getActiveRowIndex() == rowCount - 1) {
        cursorPosition = Position.afterLast;
        return false;
    }
    if (cursorPosition == Position.beforeFirst) {
        cursorPosition = Position.middle;
    }
    try {
        return table.advanceRow();
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Moves the cursor one row backward.
 *
 * @return true if the cursor lands on a row; false once before the first row
 */
@Override
public boolean previous() throws SQLException {
    checkClosed();
    // From after-last, "previous" means the last row.
    if (cursorPosition == Position.afterLast) {
        return last();
    }
    // Already before first, or on the first row: park before-first.
    if (cursorPosition == Position.beforeFirst ||
            table.getActiveRowIndex() <= 0) {
        beforeFirst();
        return false;
    }
    try {
        // The underlying table only advances forward: remember the current
        // index, rewind, and walk down to index-1.
        int tempRowIndex = table.getActiveRowIndex();
        table.resetRowPosition();
        table.advanceToRow(0);
        return table.advanceToRow(tempRowIndex - 1);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Moves the cursor a relative number of rows, forward (positive) or
 * backward (negative), from its current position.
 *
 * @param rows offset from the current position
 * @return true if the cursor lands on a valid row
 */
@Override
public boolean relative(int rows) throws SQLException {
    checkClosed();
    if (rowCount == 0) {
        return false;
    }
    // Moving forward from after-last, or backward/zero from before-first,
    // cannot reach a row.
    if (cursorPosition == Position.afterLast && rows > 0) {
        return false;
    }
    if (cursorPosition == Position.beforeFirst && rows <= 0) {
        return false;
    }
    // Target at or past the end: park after-last. NOTE(review): returning
    // true exactly at rowCount mirrors absolute()'s rowCount+1 case —
    // confirm this boundary against callers.
    if (table.getActiveRowIndex() + rows >= rowCount) {
        cursorPosition = Position.afterLast;
        if (table.getActiveRowIndex() + rows == rowCount) {
            return true;
        }
        return false;
    }
    try {
        // for negative row numbers, resetRowPosition method is called
        // and the cursor advances to the desired row from top of the table
        int rowsToMove = table.getActiveRowIndex() + rows;
        if (cursorPosition == Position.beforeFirst || rows < 0) {
            // Translate the offset into an absolute 0-based index depending
            // on which boundary (if any) the cursor currently sits at.
            if(cursorPosition == Position.afterLast) {
                rowsToMove = rowCount + rows;
            } else if(cursorPosition == Position.beforeFirst) {
                rowsToMove = rows - 1;
            } else {
                rowsToMove = table.getActiveRowIndex() + rows;
            }
            if(rowsToMove < 0){
                beforeFirst();
                return false;
            }
            // Underlying table only advances forward: rewind, then walk down.
            table.resetRowPosition();
            table.advanceToRow(0);
        }
        cursorPosition = Position.middle;
        return table.advanceToRow(rowsToMove);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
/**
 * Sets the fetch-direction hint; only the three JDBC constants are valid.
 *
 * @param direction FETCH_FORWARD, FETCH_REVERSE, or FETCH_UNKNOWN
 * @throws SQLException for any other value
 */
@Override
public void setFetchDirection(int direction) throws SQLException {
    switch (direction) {
        case FETCH_FORWARD:
        case FETCH_REVERSE:
        case FETCH_UNKNOWN:
            this.fetchDirection = direction;
            break;
        default:
            throw SQLError.get(SQLError.ILLEGAL_STATEMENT, direction);
    }
}
java
/**
 * Not supported: this result set is read-only.
 *
 * @throws SQLException always (no-support error)
 */
@Override
public void updateAsciiStream(String columnLabel, InputStream x, long length) throws SQLException {
    throw SQLError.noSupport();
}
java
/**
 * Not supported: this result set is read-only.
 *
 * @throws SQLException always (no-support error)
 */
@Override
public void updateBlob(String columnLabel, InputStream inputStream, long length) throws SQLException {
    throw SQLError.noSupport();
}
java
/**
 * Not supported: this result set is read-only.
 *
 * @throws SQLException always (no-support error)
 */
@Override
public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException {
    throw SQLError.noSupport();
}
java
/**
 * Not supported: this result set is read-only.
 *
 * @throws SQLException always (no-support error)
 */
@Override
public void updateObject(String columnLabel, Object x, int scaleOrLength) throws SQLException {
    throw SQLError.noSupport();
}
java
/**
 * Reports whether the last column read was SQL NULL.
 *
 * @return true when the previously read value was NULL
 * @throws SQLException on any access failure
 */
@Override
public boolean wasNull() throws SQLException {
    checkClosed();
    final boolean nullSeen;
    try {
        nullSeen = table.wasNull();
    } catch (Exception x) {
        throw SQLError.get(x);
    }
    return nullSeen;
}
java
/**
 * Materializes the current row as an Object array, one element per column.
 *
 * @return the current row's values in column order
 * @throws SQLException if any column access fails
 */
public Object[] getRowData() throws SQLException {
    final Object[] row = new Object[columnCount];
    // getObject takes 1-based JDBC indices; the array is 0-based.
    for (int col = 0; col < columnCount; col++) {
        row[col] = getObject(col + 1);
    }
    return row;
}
java
/**
 * Queues the event for delayed delivery, randomly (5% per call)
 * duplicating it first to simulate at-least-once delivery.
 *
 * @param event                   event to queue
 * @param systemCurrentTimeMillis current time used as the delay base
 */
void transformAndQueue(T event, long systemCurrentTimeMillis) {
    // if you're super unlucky, this blows up the stack — duplication is
    // implemented by recursing, so one event can be queued 2+ times with
    // geometrically decreasing probability.
    if (rand.nextDouble() < 0.05) {
        // duplicate this message (note recursion means maybe more than duped)
        transformAndQueue(event, systemCurrentTimeMillis);
    }
    // Zipf-distributed artificial delay before the event becomes ready.
    long delayms = nextZipfDelay();
    delayed.add(systemCurrentTimeMillis + delayms, event);
}
java
/**
 * Pulls events from the source into the delay queue (capped at 10k
 * buffered) and returns the next event whose delay has elapsed, if any.
 *
 * @param systemCurrentTimeMillis current time
 * @return the next ready event, or null when none is ready
 */
@Override
public T next(long systemCurrentTimeMillis) {
    // drain all the waiting messages from the source (up to 10k)
    while (delayed.size() < 10000) {
        T event = source.next(systemCurrentTimeMillis);
        if (event == null) {
            break;
        }
        transformAndQueue(event, systemCurrentTimeMillis);
    }
    return delayed.nextReady(systemCurrentTimeMillis);
}
java
/**
 * Orders two schema columns by table name then column name, preferring
 * aliases when both sides carry them and falling back to base names
 * otherwise (column falls back to aliases when either name is missing).
 *
 * @param that column to compare against
 * @return negative/zero/positive per the usual compare contract
 */
public int compareNames(SchemaColumn that) {
    final boolean bothTblAliased = m_tableAlias != null && that.m_tableAlias != null;
    final int tblCmp = nullSafeStringCompareTo(
            bothTblAliased ? m_tableAlias : m_tableName,
            bothTblAliased ? that.m_tableAlias : that.m_tableName);
    if (tblCmp != 0) {
        return tblCmp;
    }
    final boolean bothColNamed = m_columnName != null && that.m_columnName != null;
    return nullSafeStringCompareTo(
            bothColNamed ? m_columnName : m_columnAlias,
            bothColNamed ? that.m_columnName : that.m_columnAlias);
}
java
/**
 * Returns a copy of this column whose expression is a TupleValueExpression
 * bound to the given output column index — cloning an existing TVE, or
 * synthesizing a fresh one from this column's naming when the expression
 * is of another kind.
 *
 * @param colIndex output column index for the new TVE
 * @return a new SchemaColumn carrying the TVE
 */
public SchemaColumn copyAndReplaceWithTVE(int colIndex) {
    TupleValueExpression newTve;
    if (m_expression instanceof TupleValueExpression) {
        // Preserve the existing TVE's metadata; only the index changes.
        newTve = (TupleValueExpression) m_expression.clone();
        newTve.setColumnIndex(colIndex);
    } else {
        newTve = new TupleValueExpression(m_tableName, m_tableAlias,
                m_columnName, m_columnAlias,
                m_expression, colIndex);
    }
    return new SchemaColumn(m_tableName, m_tableAlias,
            m_columnName, m_columnAlias,
            newTve, m_differentiator);
}
java
/**
 * Appends a transaction task to the backlog and kicks the queue so the
 * task may be scheduled. Synchronized: backlog mutation and the
 * scheduling poke must be atomic with respect to other queue operations.
 *
 * @param task task to enqueue
 */
@Override
synchronized void offer(TransactionTask task) {
    Iv2Trace.logTransactionTaskQueueOffer(task);
    m_backlog.addLast(task);
    taskQueueOffer();
}
java
/**
 * Attempts to acquire (or confirm) an exclusive lock over the first
 * MIN_LOCK_REGION bytes of the database file.
 *
 * Reuses a still-valid existing lock; otherwise releases the stale one
 * and tries again. All failures are deliberately swallowed and reported
 * as {@code false}.
 *
 * @return true when a valid lock is held on return
 */
private boolean aquireFileLock() {

    // PRE:
    //
    // raf is never null and is never closed upon entry.
    //
    // Rhetorical question to self: How does one tell if a RandomAccessFile
    // is closed, short of invoking an operation and getting an IOException
    // the says its closed (assuming you can control the Locale of the error
    // message)?
    //
    final RandomAccessFile lraf = super.raf;

    // In an ideal world, we would use a lock region back off approach,
    // starting with region MAX_LOCK_REGION, then MAX_NFS_LOCK_REGION,
    // then MIN_LOCK_REGION.
    //
    // In practice, however, it is just generally unwise to mount network
    // file system database instances. Be warned.
    //
    // In general, it is probably also unwise to mount removable media
    // database instances that are not read-only.
    boolean success = false;

    try {
        if (this.fileLock != null) {

            // API says never throws exception, but I suspect
            // it's quite possible some research / FOSS JVMs might
            // still throw unsupported operation exceptions on certain
            // NIO classes...better to be safe than sorry.
            if (this.fileLock.isValid()) {
                return true;
            } else {

                // It's not valid, so releasing it is a no-op.
                //
                // However, we should still clean up the referenceand hope
                // no previous complications exist (a hung FileLock in a
                // flaky JVM) or that gc kicks in and saves the day...
                // (unlikely, though).
                this.releaseFileLock();
            }
        }

        // POSIX mandatory locking needs setgid-without-group-execute on
        // the file; best-effort, failures ignored.
        if (isPosixManditoryFileLock()) {
            try {
                Runtime.getRuntime().exec(new String[] {
                    "chmod", "g+s,g-x", file.getPath()
                });
            } catch (Exception ex) {

                //ex.printStackTrace();
            }
        }

        // Note: from FileChannel.tryLock(...) JavaDoc:
        //
        // @return A lock object representing the newly-acquired lock,
        // or <tt>null</tt> if the lock could not be acquired
        // because another program holds an overlapping lock
        this.fileLock = lraf.getChannel().tryLock(0, MIN_LOCK_REGION, false);

        // According to the API, if it's non-null, it must be valid.
        // This may not actually yet be the full truth of the matter under
        // all commonly available JVM implementations.
        // fileLock.isValid() API says it never throws, though, so
        // with fingers crossed...
        success = (this.fileLock != null && this.fileLock.isValid());
    } catch (Exception e) {}

    // Leave no half-acquired lock behind on failure.
    if (!success) {
        this.releaseFileLock();
    }

    return success;
}
java
/**
 * Releases the held file lock, if any, always clearing the reference.
 *
 * @return true when no lock was held or the release succeeded
 */
private boolean releaseFileLock() {

    // Note: Closing the super class RandomAccessFile has the
    // side-effect of closing the file lock's FileChannel,
    // so we do not deal with this here.
    boolean success = false;

    if (this.fileLock == null) {
        success = true;
    } else {
        try {
            this.fileLock.release();

            success = true;
        } catch (Exception e) {}
        finally {
            // Drop the reference even when release() failed.
            this.fileLock = null;
        }
    }

    return success;
}
java
/**
 * Core put/remove for the int-keyed hash table: looks up the bucket chain
 * for {@code intKey}, then removes, replaces, or inserts.
 *
 * The key 0 is tracked via hasZeroKey/zeroKeyIndex because 0 is also the
 * "empty slot" value in intKeyTable.
 *
 * @param intKey      key
 * @param objectValue value to store (ignored for remove)
 * @param remove      true to remove the entry instead of inserting
 * @return the previous value for the key, or null
 */
protected Object addOrRemove(int intKey, Object objectValue, boolean remove) {

    int    hash        = intKey;
    int    index       = hashIndex.getHashIndex(hash);
    int    lookup      = hashIndex.hashTable[index];
    int    lastLookup  = -1;
    Object returnValue = null;

    // Walk the bucket's linked chain looking for the key.
    for (; lookup >= 0;
            lastLookup = lookup, lookup = hashIndex.getNextLookup(lookup)) {
        if (intKey == intKeyTable[lookup]) {
            break;
        }
    }

    if (lookup >= 0) {
        if (remove) {
            if (intKey == 0) {
                hasZeroKey   = false;
                zeroKeyIndex = -1;
            }

            intKeyTable[lookup]      = 0;
            returnValue              = objectValueTable[lookup];
            objectValueTable[lookup] = null;

            hashIndex.unlinkNode(index, lastLookup, lookup);

            if (accessTable != null) {
                accessTable[lookup] = 0;
            }

            return returnValue;
        }

        // Key present: replace the value (when this map stores values)
        // and refresh its access stamp.
        if (isObjectValue) {
            returnValue              = objectValueTable[lookup];
            objectValueTable[lookup] = objectValue;
        }

        if (accessTable != null) {
            accessTable[lookup] = accessCount++;
        }

        return returnValue;
    }

    // not found
    if (remove) {
        return returnValue;
    }

    // Grow-and-retry when the load threshold is reached.
    if (hashIndex.elementCount >= threshold) {
        if (reset()) {
            return addOrRemove(intKey, objectValue, remove);
        } else {
            return null;
        }
    }

    lookup              = hashIndex.linkNode(index, lastLookup);
    intKeyTable[lookup] = intKey;

    if (intKey == 0) {
        hasZeroKey   = true;
        zeroKeyIndex = lookup;
    }

    objectValueTable[lookup] = objectValue;

    if (accessTable != null) {
        accessTable[lookup] = accessCount++;
    }

    return returnValue;
}
java
/**
 * Removes the entry for {@code objectKey} from the object-keyed hash
 * table, optionally removing the associated row too.
 *
 * @param objectKey key to remove; null is a no-op
 * @param removeRow when true, also remove the backing row at the slot
 * @return the removed value (when this map stores values), else null
 */
protected Object removeObject(Object objectKey, boolean removeRow) {

    if (objectKey == null) {
        return null;
    }

    int    hash        = objectKey.hashCode();
    int    index       = hashIndex.getHashIndex(hash);
    int    lookup      = hashIndex.hashTable[index];
    int    lastLookup  = -1;
    Object returnValue = null;

    // Walk the bucket chain until the equal key is found.
    for (; lookup >= 0;
            lastLookup = lookup, lookup = hashIndex.getNextLookup(lookup)) {
        if (objectKeyTable[lookup].equals(objectKey)) {
            objectKeyTable[lookup] = null;

            hashIndex.unlinkNode(index, lastLookup, lookup);

            if (isObjectValue) {
                returnValue              = objectValueTable[lookup];
                objectValueTable[lookup] = null;
            }

            if (removeRow) {
                removeRow(lookup);
            }

            return returnValue;
        }
    }

    // not found
    return returnValue;
}
java
/**
 * Empties the map: resets access bookkeeping, zero-key tracking and the
 * element arrays. Skipped entirely when nothing was ever modified.
 * Optionally shrinks back to the initial capacity.
 */
public void clear() {

    // Nothing to do if the index was never modified.
    if (hashIndex.modified) {
        accessCount  = 0;
        accessMin    = accessCount;
        hasZeroKey   = false;
        zeroKeyIndex = -1;

        clearElementArrays(0, hashIndex.linkTable.length);
        hashIndex.clear();

        if (minimizeOnEmpty) {
            rehash(initialCapacity);
        }
    }
}
java
/**
 * Returns an access-count threshold below which roughly {@code count}
 * entries fall, computed by ranking the access table.
 *
 * @param count  approximate number of entries to select
 * @param margin tolerance allowed around {@code count}
 * @return the access-count ceiling
 */
public int getAccessCountCeiling(int count, int margin) {
    return ArrayCounter.rank(accessTable, hashIndex.newNodePointer, count,
                             accessMin + 1, accessCount, margin);
}
java
/**
 * Evicts approximately {@code count} least-recently-accessed entries:
 * every entry whose access stamp is below the computed ceiling is removed
 * (rows are kept).
 *
 * @param count  approximate number of entries to evict
 * @param margin tolerance for the ceiling computation (minimum 64)
 */
protected void clear(int count, int margin) {

    if (margin < 64) {
        margin = 64;
    }

    int maxlookup  = hashIndex.newNodePointer;
    int accessBase = getAccessCountCeiling(count, margin);

    for (int lookup = 0; lookup < maxlookup; lookup++) {
        Object o = objectKeyTable[lookup];

        // Evict entries last touched before the ceiling.
        if (o != null && accessTable[lookup] < accessBase) {
            removeObject(o, false);
        }
    }

    // Survivors all have stamps >= accessBase.
    accessMin = accessBase;
}
java
/**
 * Materializes this subquery into its backing row store for the session:
 * either inserts literal VALUES rows (table constructor) or evaluates the
 * query expression and inserts its result.
 *
 * @param session current session
 */
public void materialise(Session session) {

    PersistentStore store;

    // table constructors
    if (isDataExpression) {
        store = session.sessionData.getSubqueryRowStore(table);

        dataExpression.insertValuesIntoSubqueryTable(session, store);

        return;
    }

    // EXISTS only needs one row; 0 means no limit.
    Result result = queryExpression.getResult(session,
                                              isExistsPredicate ? 1 : 0);
    RowSetNavigatorData navigator =
        ((RowSetNavigatorData) result.getNavigator());

    if (uniqueRows) {
        navigator.removeDuplicates();
    }

    store = session.sessionData.getSubqueryRowStore(table);

    table.insertResult(store, result);
    result.getNavigator().close();
}
java
/**
 * Writes a BigDecimal in VoltDB wire format: a scale byte, a length byte
 * (always 16), then the 16 serialized value bytes.
 *
 * @param fs    destination serializer
 * @param value decimal to encode
 * @throws IOException on write failure
 */
static public void encodeDecimal(final FastSerializer fs, BigDecimal value) throws IOException {
    final byte scale = (byte) VoltDecimalHelper.kDefaultScale;
    final byte byteCount = (byte) 16;
    fs.write(scale);
    fs.write(byteCount);
    fs.write(VoltDecimalHelper.serializeBigDecimal(value));
}
java
/**
 * Serializes a geography point as its fixed-length native-byte-order
 * flattened form.
 *
 * @param fs    destination serializer
 * @param value point to encode
 * @throws IOException on write failure
 */
static public void encodeGeographyPoint(final FastSerializer fs, GeographyPointValue value) throws IOException {
    final int expectedLength = GeographyPointValue.getLengthInBytes();
    final ByteBuffer buf = ByteBuffer.allocate(expectedLength).order(ByteOrder.nativeOrder());
    value.flattenToBuffer(buf);
    final byte[] flattened = buf.array();
    // the flattened form is fixed-width; anything else is a coding error
    assert(flattened.length == expectedLength);
    fs.write(flattened);
}
java
/**
 * Serializes a (variable-length) geography value: a 4-byte length prefix
 * followed by the native-byte-order flattened payload.
 *
 * @param fs    destination serializer
 * @param value geography to encode
 * @throws IOException on write failure
 */
static public void encodeGeography(final FastSerializer fs, GeographyValue value) throws IOException {
    final ByteBuffer buf = ByteBuffer.allocate(value.getLengthInBytes()).order(ByteOrder.nativeOrder());
    value.flattenToBuffer(buf);
    final byte[] flattened = buf.array();
    fs.writeInt(flattened.length);
    fs.write(flattened);
}
java
/**
 * Builds the runner-generator map for all non-transactional (NT) system
 * procedures listed in {@code SystemProcedureCatalog}.
 *
 * Transactional sysprocs are skipped (they are owned by LoadedProcedureSet),
 * as are catalog entries with no backing class. Commercial-only sysprocs
 * whose class is absent from this build are silently skipped; a missing
 * class for any other sysproc crashes the node.
 *
 * @param startup when true, additionally instantiates each class once to
 *                verify it is a {@code VoltNTSystemProcedure} — a fail-fast
 *                check done only at node startup
 * @return immutable map from procedure name to its runner generator
 */
@SuppressWarnings("unchecked")
private ImmutableMap<String, ProcedureRunnerNTGenerator> loadSystemProcedures(boolean startup) {
    ImmutableMap.Builder<String, ProcedureRunnerNTGenerator> builder =
            ImmutableMap.<String, ProcedureRunnerNTGenerator>builder();

    Set<Entry<String,Config>> entrySet = SystemProcedureCatalog.listing.entrySet();
    for (Entry<String, Config> entry : entrySet) {
        String procName = entry.getKey();
        Config sysProc = entry.getValue();

        // transactional sysprocs handled by LoadedProcedureSet
        if (sysProc.transactional) {
            continue;
        }

        final String className = sysProc.getClassname();
        Class<? extends VoltNonTransactionalProcedure> procClass = null;

        // this check is for sysprocs that don't have a procedure class
        if (className != null) {
            try {
                procClass = (Class<? extends VoltNonTransactionalProcedure>) Class.forName(className);
            } catch (final ClassNotFoundException e) {
                if (sysProc.commercial) {
                    // expected in community builds; just omit the proc
                    continue;
                }
                // NOTE(review): crashLocalVoltDB is assumed to never return;
                // if it did, procClass would be null below — TODO confirm.
                VoltDB.crashLocalVoltDB("Missing Java class for NT System Procedure: " + procName);
            }

            if (startup) {
                // This is a startup-time check to make sure we can instantiate
                try {
                    if ((procClass.newInstance() instanceof VoltNTSystemProcedure) == false) {
                        VoltDB.crashLocalVoltDB("NT System Procedure is incorrect class type: " + procName);
                    }
                } catch (InstantiationException | IllegalAccessException e) {
                    VoltDB.crashLocalVoltDB("Unable to instantiate NT System Procedure: " + procName);
                }
            }

            ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(procClass);
            builder.put(procName, prntg);
        }
    }
    return builder.build();
}
java
/**
 * Rebuilds the NT-procedure runner map from a new catalog context, reloads
 * the NT sysprocs, and then un-pauses the service, draining any invocations
 * that queued up while the update was in progress.
 *
 * Synchronized against {@code callProcedureNT} so no invocation can observe
 * a half-built map. A procedure class that cannot be loaded crashes the
 * node (with a clearer message for org.voltdb-prefixed names).
 *
 * @param catalogContext the freshly updated catalog
 */
@SuppressWarnings("unchecked")
synchronized void update(CatalogContext catalogContext) {
    CatalogMap<Procedure> procedures = catalogContext.database.getProcedures();

    Map<String, ProcedureRunnerNTGenerator> runnerGeneratorMap = new TreeMap<>();

    for (Procedure procedure : procedures) {
        // only non-transactional procedures live here
        if (procedure.getTransactional()) {
            continue;
        }

        // this code is mostly lifted from transactional procedures
        String className = procedure.getClassname();
        Class<? extends VoltNonTransactionalProcedure> clz = null;
        try {
            clz = (Class<? extends VoltNonTransactionalProcedure>) catalogContext.classForProcedureOrUDF(className);
        } catch (ClassNotFoundException e) {
            if (className.startsWith("org.voltdb.")) {
                String msg = String.format(LoadedProcedureSet.ORGVOLTDB_PROCNAME_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            } else {
                String msg = String.format(LoadedProcedureSet.UNABLETOLOAD_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            }
        }

        // The ProcedureRunnerNTGenerator has all of the dangerous and slow
        // stuff in it. Like classfinding, instantiation, and reflection.
        ProcedureRunnerNTGenerator prntg = new ProcedureRunnerNTGenerator(clz);
        runnerGeneratorMap.put(procedure.getTypeName(), prntg);
    }

    m_procs = ImmutableMap.<String, ProcedureRunnerNTGenerator>builder().putAll(runnerGeneratorMap).build();

    // reload all sysprocs
    loadSystemProcedures(false);

    // Set the system to start accepting work again now that everything is updated.
    // We had to stop because stats would be wonky if we called a proc while updating
    // this stuff.
    m_paused = false;

    // release all of the pending invocations into the real queue
    m_pendingInvocations
        .forEach(pi -> callProcedureNT(pi.ciHandle, pi.user, pi.ccxn, pi.isAdmin, pi.ntPriority, pi.task));
    m_pendingInvocations.clear();
}
java
synchronized void callProcedureNT(final long ciHandle, final AuthUser user, final Connection ccxn, final boolean isAdmin, final boolean ntPriority, final StoredProcedureInvocation task) { // If paused, stuff a record of the invocation into a queue that gets // drained when un-paused. We're counting on regular upstream backpressure // to prevent this from getting too out of hand. if (m_paused) { PendingInvocation pi = new PendingInvocation(ciHandle, user, ccxn, isAdmin, ntPriority, task); m_pendingInvocations.add(pi); return; } String procName = task.getProcName(); final ProcedureRunnerNTGenerator prntg; if (procName.startsWith("@")) { prntg = m_sysProcs.get(procName); } else { prntg = m_procs.get(procName); } final ProcedureRunnerNT runner; try { runner = prntg.generateProcedureRunnerNT(user, ccxn, isAdmin, ciHandle, task.getClientHandle(), task.getBatchTimeout()); } catch (InstantiationException | IllegalAccessException e1) { // I don't expect to hit this, but it's here... // must be done as IRM to CI mailbox for backpressure accounting ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE, new VoltTable[0], "Could not create running context for " + procName + ".", task.getClientHandle()); InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle, ccxn.connectionId(), response); m_mailbox.deliver(irm); return; } m_outstanding.put(runner.m_id, runner); Runnable invocationRunnable = new Runnable() { @Override public void run() { try { runner.call(task.getParams().toArray()); } catch (Throwable ex) { ex.printStackTrace(); throw ex; } } }; try { // pick the executor service based on priority // - new (from user) txns get regular one // - sub tasks and sub procs generated by nt procs get // immediate exec service (priority) if (ntPriority) { m_priorityExecutorService.submit(invocationRunnable); } else { m_primaryExecutorService.submit(invocationRunnable); } } catch (RejectedExecutionException e) { 
handleNTProcEnd(runner); // I really don't expect this to happen... but it's here. // must be done as IRM to CI mailbox for backpressure accounting ClientResponseImpl response = new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE, new VoltTable[0], "Could not submit NT procedure " + procName + " to exec service for .", task.getClientHandle()); InitiateResponseMessage irm = InitiateResponseMessage.messageForNTProcResponse(ciHandle, ccxn.connectionId(), response); m_mailbox.deliver(irm); return; } }
java
/**
 * Notifies every outstanding NT procedure runner that the given hosts have
 * failed, so callbacks waiting on responses from those hosts can complete.
 *
 * @param failedHosts host ids that just failed
 */
void handleCallbacksForFailedHosts(final Set<Integer> failedHosts) {
    m_outstanding.values()
            .forEach(runner -> runner.processAnyCallbacksFromFailedHosts(failedHosts));
}
java
/**
 * Reports whether the given name is already taken by any kind of function:
 * a VoltDB built-in, a SQL standard function, a custom (JDBC-style)
 * function, or a user-defined function registered in the schema.
 *
 * @param functionName candidate function name
 * @return true if the name is defined anywhere
 */
private boolean isDefinedFunctionName(String functionName) {
    if (FunctionForVoltDB.isFunctionNameDefined(functionName)) {
        return true;
    }
    if (FunctionSQL.isFunction(functionName)) {
        return true;
    }
    if (FunctionCustom.getFunctionId(functionName) != ID_NOT_DEFINED) {
        return true;
    }
    return m_schema.findChild("ud_function", functionName) != null;
}
java
/**
 * Returns the upper-case form of a character value under this type's
 * collation.
 *
 * For CLOBs the whole content is read, upper-cased, and written into a new
 * session-created CLOB; the original is not modified. For plain strings the
 * collation's upper-casing is applied directly.
 *
 * NOTE(review): the CLOB read/write positions use 0 as the start offset —
 * this assumes the ClobData API is internally 0-based; confirm against the
 * ClobData contract.
 *
 * @param session current session (used for CLOB access and creation)
 * @param data    a String or ClobData instance, or null
 * @return upper-cased value of the same kind as the input, or null for null
 */
public Object upper(Session session, Object data) {

    if (data == null) {
        return null;
    }

    if (typeCode == Types.SQL_CLOB) {
        String result = ((ClobData) data).getSubString(session, 0, (int) ((ClobData) data).length(session));

        result = collation.toUpperCase(result);

        ClobData clob = session.createClob(result.length());

        clob.setString(session, 0, result);

        return clob;
    }

    return collation.toUpperCase((String) data);
}
java
/**
 * Writes a start-time header line to the log, showing the time both as
 * fractional seconds since the epoch and as a human-readable date.
 *
 * @param startTimeMsec start time in milliseconds since the epoch
 */
public void outputStartTime(final long startTimeMsec) {
    final double epochSeconds = startTimeMsec / 1000.0;
    final Date humanReadable = new Date(startTimeMsec);
    log.format(Locale.US, "#[StartTime: %.3f (seconds since epoch), %s]\n",
            epochSeconds, humanReadable.toString());
}
java
/**
 * Renders the latency histogram's percentile distribution (scaled to
 * milliseconds) into a String.
 *
 * The report is written through a non-auto-flushing UTF-8 PrintStream into
 * an in-memory buffer. The UnsupportedEncodingException branch cannot fire
 * for UTF-8 on a conforming JVM; it is propagated defensively.
 *
 * @return the formatted percentile-distribution report
 */
public String latencyHistoReport() {
    ByteArrayOutputStream baos= new ByteArrayOutputStream();
    PrintStream pw = null;
    try {
        pw = new PrintStream(baos, false, Charsets.UTF_8.name());
    } catch (UnsupportedEncodingException e) {
        // unreachable in practice: UTF-8 is always supported
        Throwables.propagate(e);
    }
    //Get a latency report in milliseconds
    m_latencyHistogram.outputPercentileDistributionVolt(pw, 1, 1000.0);
    return new String(baos.toByteArray(), Charsets.UTF_8);
}
java
/**
 * Returns the next available snapshot chunk, blocking until one is produced
 * or the reader reports completion.
 *
 * Lazily starts the background ChunkReader thread on first call. While the
 * reader promises more chunks, this method waits on the monitor (the reader
 * is expected to notify when it enqueues); once no more chunks are coming,
 * whatever is already queued is drained without blocking. A permit is
 * released on {@code m_chunkReads} for each chunk handed out, throttling
 * the reader.
 *
 * NOTE(review): an InterruptedException is wrapped in IOException without
 * re-asserting the thread's interrupt flag — confirm callers don't rely on
 * interrupt status.
 *
 * @return the next chunk container, or null when the stream is exhausted
 * @throws IOException if the reader failed, or the wait was interrupted
 */
public synchronized BBContainer getNextChunk() throws IOException {
    // surface any failure the background reader recorded
    if (m_chunkReaderException != null) {
        throw m_chunkReaderException;
    }

    // reader finished: drain whatever is left without blocking
    if (!m_hasMoreChunks.get()) {
        final Container c = m_availableChunks.poll();
        return c;
    }

    // start the producer thread on first use
    if (m_chunkReader == null) {
        m_chunkReader = new ChunkReader();
        m_chunkReaderThread = new Thread(m_chunkReader, "ChunkReader");
        m_chunkReaderThread.start();
    }

    Container c = null;
    while (c == null && (m_hasMoreChunks.get() || !m_availableChunks.isEmpty())) {
        c = m_availableChunks.poll();
        if (c == null) {
            try {
                // producer notifies this monitor when it enqueues a chunk
                wait();
            } catch (InterruptedException e) {
                throw new IOException(e);
            }
        }
    }

    if (c != null) {
        // let the reader produce one more chunk
        m_chunkReads.release();
    } else {
        // woke up empty: re-check for a late reader failure
        if (m_chunkReaderException != null) {
            throw m_chunkReaderException;
        }
    }
    return c;
}
java
/**
 * Verifies that the supplied credentials exactly match (null-sensitively)
 * the "user" and "password" entries configured in the connection
 * properties.
 *
 * @param user     user name to check; may be null
 * @param password password to check; may be null
 * @throws SQLException when either value differs from its configured
 *                      counterpart (including null vs. non-null)
 */
protected void validateSpecifiedUserAndPassword(String user, String password) throws SQLException {
    String configuredUser = connProperties.getProperty("user");
    String configuredPassword = connProperties.getProperty("password");

    // null-safe equality: both null, or both non-null and equal
    boolean userMatches = (user == null)
            ? (configuredUser == null)
            : user.equals(configuredUser);
    boolean passwordMatches = (password == null)
            ? (configuredPassword == null)
            : password.equals(configuredPassword);

    if (!userMatches || !passwordMatches) {
        throw new SQLException("Given user name or password does not "
                + "match those configured for this object");
    }
}
java
/**
 * Sets a single connection property.
 *
 * @param name  property key
 * @param value new property value
 * @return the previous value for the key, or null if none was set
 */
public Object setConnectionProperty(String name, String value) {
    return connProperties.setProperty(name, value);
}
java
/**
 * Kicks off processing by submitting the root ParentEvent to the executor.
 *
 * @param block when true, waits for the submitted task to finish before
 *              returning
 * @throws InterruptedException if the wait is interrupted
 * @throws ExecutionException   if the task threw
 */
@Override
public void start(boolean block) throws InterruptedException, ExecutionException {
    final Future<?> rootTask = m_es.submit(new ParentEvent(null));
    if (block) {
        rootTask.get();
    }
}
java
/**
 * Returns the final value of an aggregate from its accumulator.
 *
 * When no rows were accumulated (null accumulator), COUNT-like aggregates
 * return 0 and all others return SQL NULL. The VoltDB extension treats
 * APPROX_COUNT_DISTINCT the same as COUNT here; the original two-line HSQLDB
 * form is preserved below in the disabled-code marker per the project's
 * patching convention.
 *
 * @param session   current session (unused in this path)
 * @param currValue the SetFunction accumulator, or null if no rows were seen
 * @return the aggregate result
 */
public Object getAggregatedValue(Session session, Object currValue) {

    if (currValue == null) {
        // A VoltDB extension APPROX_COUNT_DISTINCT
        return opType == OpTypes.COUNT || opType == OpTypes.APPROX_COUNT_DISTINCT ? ValuePool.INTEGER_0: null;
        /* disable 2 lines...
        return opType == OpTypes.COUNT ? ValuePool.INTEGER_0
                                       : null;
        ...disabled 2 lines */
        // End of VoltDB extension
    }

    return ((SetFunction) currValue).getValue();
}
java
/**
 * Reports whether any table in the list is a read-only materialized view,
 * i.e. has a materializer whose source is not a stream.
 *
 * @param tableList tables referenced by the statement
 * @return true if a read-only view is present
 */
private boolean tableListIncludesReadOnlyView(List<Table> tableList) {
    return tableList.stream().anyMatch(table ->
            table.getMaterializer() != null
            && !TableType.isStream(table.getMaterializer().getTabletype()));
}
java
private boolean tableListIncludesExportOnly(List<Table> tableList) { // list of all export tables (assume uppercase) NavigableSet<String> exportTables = CatalogUtil.getExportTableNames(m_catalogDb); // this loop is O(number-of-joins * number-of-export-tables) // which seems acceptable if not great. Probably faster than // re-hashing the export only tables for faster lookup. for (Table table : tableList) { if (exportTables.contains(table.getTypeName()) && TableType.isStream(table.getTabletype())) { return true; } } return false; }
java
/**
 * Plans every ephemeral (derived/common) table scan of the statement and
 * accumulates determinism metadata across them.
 *
 * Plan ids are threaded through the per-scan planners and the final value
 * is written back to the plan selector so ids stay unique across the whole
 * SQL statement.
 *
 * @param scans the subquery and common-table scans to plan
 * @return the accumulated determinism info, or null if any scan could not
 *         be planned (caller gives up on the statement)
 * @throws PlanningErrorException on an unrecognized scan subtype
 */
private ParsedResultAccumulator getBestCostPlanForEphemeralScans(List<StmtEphemeralTableScan> scans) {
    int nextPlanId = m_planSelector.m_planId;
    boolean orderIsDeterministic = true;
    boolean hasSignificantOffsetOrLimit = false;
    String contentNonDeterminismMessage = null;

    for (StmtEphemeralTableScan scan : scans) {
        if (scan instanceof StmtSubqueryScan) {
            nextPlanId = planForParsedSubquery((StmtSubqueryScan)scan, nextPlanId);
            // If we can't plan this, then give up.
            if (((StmtSubqueryScan) scan).getBestCostPlan() == null) {
                return null;
            }
        } else if (scan instanceof StmtCommonTableScan) {
            nextPlanId = planForCommonTableQuery((StmtCommonTableScan)scan, nextPlanId);
            if (((StmtCommonTableScan) scan).getBestCostBasePlan() == null) {
                return null;
            }
        } else {
            throw new PlanningErrorException("Unknown scan plan type.");
        }
        // fold this scan's determinism facts into the running accumulators
        orderIsDeterministic = scan.isOrderDeterministic(orderIsDeterministic);
        contentNonDeterminismMessage = scan.contentNonDeterminismMessage(contentNonDeterminismMessage);
        hasSignificantOffsetOrLimit = scan.hasSignificantOffsetOrLimit(hasSignificantOffsetOrLimit);
    }

    // need to reset plan id for the entire SQL
    m_planSelector.m_planId = nextPlanId;

    return new ParsedResultAccumulator(orderIsDeterministic, hasSignificantOffsetOrLimit, contentNonDeterminismMessage);
}
java
/**
 * Plans every IN/EXISTS/scalar subquery expression of the statement and
 * attaches the winning plan to its expression node.
 *
 * Because an expression subquery executes repeatedly inside the parent
 * statement, a plan containing a SEND (i.e. a distributed Receive/Send
 * pair) is rejected and the whole statement fails with the standard
 * IN/EXISTS/scalar error message.
 *
 * @param subqueryExprs the subquery expressions to plan
 * @return true on success; false when any subquery cannot be planned or
 *         would require cross-partition execution
 */
private boolean getBestCostPlanForExpressionSubQueries(Set<AbstractExpression> subqueryExprs) {
    int nextPlanId = m_planSelector.m_planId;

    for (AbstractExpression expr : subqueryExprs) {
        assert(expr instanceof SelectSubqueryExpression);
        if (!(expr instanceof SelectSubqueryExpression)) {
            continue; // DEAD CODE? (kept as a belt-and-braces guard for the assert above)
        }
        SelectSubqueryExpression subqueryExpr = (SelectSubqueryExpression) expr;
        StmtSubqueryScan subqueryScan = subqueryExpr.getSubqueryScan();
        nextPlanId = planForParsedSubquery(subqueryScan, nextPlanId);
        CompiledPlan bestPlan = subqueryScan.getBestCostPlan();
        if (bestPlan == null) {
            return false;
        }

        subqueryExpr.setSubqueryNode(bestPlan.rootPlanGraph);
        // The subquery plan must not contain Receive/Send nodes because it will be executed
        // multiple times during the parent statement execution.
        if (bestPlan.rootPlanGraph.hasAnyNodeOfType(PlanNodeType.SEND)) {
            // fail the whole plan
            m_recentErrorMsg = IN_EXISTS_SCALAR_ERROR_MESSAGE;
            return false;
        }
    }
    // need to reset plan id for the entire SQL
    m_planSelector.m_planId = nextPlanId;
    return true;
}
java
/**
 * Produces the next candidate plan for whichever statement kind this
 * assembler was set up with (exactly one of the m_parsed* fields is
 * non-null).
 *
 * @return the next candidate plan with its parameters attached, or null
 *         when no (further) plan can be generated
 * @throws RuntimeException if no parsed statement field is set — indicates
 *         a setup bug, not user error
 */
private CompiledPlan getNextPlan() {
    CompiledPlan retval;
    AbstractParsedStmt nextStmt = null;
    if (m_parsedSelect != null) {
        nextStmt = m_parsedSelect;
        retval = getNextSelectPlan();
    } else if (m_parsedInsert != null) {
        nextStmt = m_parsedInsert;
        retval = getNextInsertPlan();
    } else if (m_parsedDelete != null) {
        nextStmt = m_parsedDelete;
        retval = getNextDeletePlan();
        // note that for replicated tables, multi-fragment plans
        // need to divide the result by the number of partitions
    } else if (m_parsedUpdate != null) {
        nextStmt = m_parsedUpdate;
        retval = getNextUpdatePlan();
    } else if (m_parsedUnion != null) {
        nextStmt = m_parsedUnion;
        retval = getNextUnionPlan();
    } else if (m_parsedSwap != null) {
        nextStmt = m_parsedSwap;
        retval = getNextSwapPlan();
    } else if (m_parsedMigrate != null) {
        nextStmt = m_parsedMigrate;
        retval = getNextMigratePlan();
    } else {
        throw new RuntimeException(
                "setupForNewPlans encountered unsupported statement type.");
    }

    // a plan without a root graph is no plan at all
    if (retval == null || retval.rootPlanGraph == null) {
        return null;
    }

    assert (nextStmt != null);
    retval.setParameters(nextStmt.getParameters());
    return retval;
}
java
/**
 * Recursively splices the best-cost plans of ephemeral-table scans into the
 * parent plan graph.
 *
 * For a scan over a subquery, the subquery's root plan is re-parented as
 * the scan's only child. For a scan over a common table (CTE), the base
 * plan is attached to the SeqScan node, and — when the recursive plan is
 * already available — the recursive plan is hung off the base plan's
 * CommonTablePlanNode. Non-scan nodes just recurse into their children.
 *
 * @param parentPlan root of the (sub)graph to process; mutated in place
 */
private void connectChildrenBestPlans(AbstractPlanNode parentPlan) {
    if (parentPlan instanceof AbstractScanPlanNode) {
        AbstractScanPlanNode scanNode = (AbstractScanPlanNode) parentPlan;
        StmtTableScan tableScan = scanNode.getTableScan();
        if (tableScan instanceof StmtSubqueryScan) {
            CompiledPlan bestCostPlan = ((StmtSubqueryScan)tableScan).getBestCostPlan();
            assert (bestCostPlan != null);
            AbstractPlanNode subQueryRoot = bestCostPlan.rootPlanGraph;
            // detach the subquery plan from wherever it was and make it
            // this scan's sole child
            subQueryRoot.disconnectParents();
            scanNode.clearChildren();
            scanNode.addAndLinkChild(subQueryRoot);
        } else if (tableScan instanceof StmtCommonTableScan) {
            assert(parentPlan instanceof SeqScanPlanNode);
            SeqScanPlanNode scanPlanNode = (SeqScanPlanNode)parentPlan;
            StmtCommonTableScan cteScan = (StmtCommonTableScan)tableScan;
            CompiledPlan bestCostBasePlan = cteScan.getBestCostBasePlan();
            CompiledPlan bestCostRecursivePlan = cteScan.getBestCostRecursivePlan();
            assert(bestCostBasePlan != null);
            AbstractPlanNode basePlanRoot = bestCostBasePlan.rootPlanGraph;
            scanPlanNode.setCTEBaseNode(basePlanRoot);
            if (bestCostRecursivePlan != null) {
                // Either the CTE is not recursive, or this is a recursive CTE but we
                // got here during the planning of the recurse query when the recurse
                // query plan is still being worked on.
                AbstractPlanNode recursePlanRoot = bestCostRecursivePlan.rootPlanGraph;
                assert(basePlanRoot instanceof CommonTablePlanNode);
                CommonTablePlanNode ctePlanNode = (CommonTablePlanNode)basePlanRoot;
                ctePlanNode.setRecursiveNode(recursePlanRoot);
            }
        }
    } else {
        for (int i = 0; i < parentPlan.getChildCount(); ++i) {
            connectChildrenBestPlans(parentPlan.getChild(i));
        }
    }
}
java
private boolean needProjectionNode (AbstractPlanNode root) { if (!root.planNodeClassNeedsProjectionNode()) { return false; } // If there is a complexGroupby at his point, it means that // display columns contain all the order by columns and // does not require another projection node on top of sort node. // If there is a complex aggregation case, the projection plan node is already added // right above the group by plan node. In future, we may inline that projection node. if (m_parsedSelect.hasComplexGroupby() || m_parsedSelect.hasComplexAgg()) { return false; } if (root instanceof AbstractReceivePlanNode && m_parsedSelect.hasPartitionColumnInGroupby()) { // Top aggregate has been removed, its schema is exactly the same to // its local aggregate node. return false; } return true; }
java
static private boolean deleteIsTruncate(ParsedDeleteStmt stmt, AbstractPlanNode plan) { if (!(plan instanceof SeqScanPlanNode)) { return false; } // Assume all index scans have filters in this context, so only consider seq scans. SeqScanPlanNode seqScanNode = (SeqScanPlanNode)plan; if (seqScanNode.getPredicate() != null) { return false; } if (stmt.hasLimitOrOffset()) { return false; } return true; }
java
/**
 * Wraps a distributed DML fragment with its coordinator-side plan.
 *
 * A Send/Receive pair is placed over {@code dmlRoot}; on top of that goes
 * either a LIMIT 1 (replicated tables — every partition reports the same
 * modified-tuple count, so any one answer suffices) or a SUM aggregate
 * (partitioned tables — per-partition counts must be added). Finally a
 * SendPlanNode caps the coordinator fragment.
 *
 * @param dmlRoot      root of the distributed DML fragment
 * @param isReplicated true when the target table is replicated
 * @return the new coordinator-fragment root (a SendPlanNode)
 */
private static AbstractPlanNode addCoordinatorToDMLNode(
        AbstractPlanNode dmlRoot, boolean isReplicated) {
    dmlRoot = SubPlanAssembler.addSendReceivePair(dmlRoot);

    AbstractPlanNode sumOrLimitNode;
    if (isReplicated) {
        // Replicated table DML result doesn't need to be summed. All partitions should
        // modify the same number of tuples in replicated table, so just pick the result from
        // any partition.
        LimitPlanNode limitNode = new LimitPlanNode();
        sumOrLimitNode = limitNode;
        limitNode.setLimit(1);
    } else {
        // create the nodes being pushed on top of dmlRoot.
        AggregatePlanNode countNode = new AggregatePlanNode();
        sumOrLimitNode = countNode;

        // configure the count aggregate (sum) node to produce a single
        // output column containing the result of the sum.
        // Create a TVE that should match the tuple count input column.
        // This TVE is magic — the "modified_tuples" column name is a
        // hard-wired contract with the DML fragment's output schema.
        TupleValueExpression count_tve = new TupleValueExpression(
                AbstractParsedStmt.TEMP_TABLE_NAME,
                AbstractParsedStmt.TEMP_TABLE_NAME,
                "modified_tuples",
                "modified_tuples",
                0);
        count_tve.setValueType(VoltType.BIGINT);
        count_tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
        countNode.addAggregate(ExpressionType.AGGREGATE_SUM, false, 0, count_tve);

        // The output column. Not really based on a TVE (it is really the
        // count expression represented by the count configured above). But
        // this is sufficient for now. This looks identical to the above
        // TVE but it's logically different so we'll create a fresh one.
        TupleValueExpression tve = new TupleValueExpression(
                AbstractParsedStmt.TEMP_TABLE_NAME,
                AbstractParsedStmt.TEMP_TABLE_NAME,
                "modified_tuples",
                "modified_tuples",
                0);
        tve.setValueType(VoltType.BIGINT);
        tve.setValueSize(VoltType.BIGINT.getLengthInBytesForFixedTypes());
        NodeSchema count_schema = new NodeSchema();
        count_schema.addColumn(
                AbstractParsedStmt.TEMP_TABLE_NAME,
                AbstractParsedStmt.TEMP_TABLE_NAME,
                "modified_tuples",
                "modified_tuples",
                tve);
        countNode.setOutputSchema(count_schema);
    }

    // connect the nodes to build the graph
    sumOrLimitNode.addAndLinkChild(dmlRoot);
    SendPlanNode sendNode = new SendPlanNode();
    sendNode.addAndLinkChild(sumOrLimitNode);

    return sendNode;
}
java
/**
 * Builds an OrderByPlanNode whose sort expressions mirror the given ORDER
 * BY column list, preserving order and per-column direction.
 *
 * @param cols parsed ORDER BY columns
 * @return a fresh, unlinked OrderByPlanNode
 */
private static OrderByPlanNode buildOrderByPlanNode(List<ParsedColInfo> cols) {
    final OrderByPlanNode orderByNode = new OrderByPlanNode();
    for (ParsedColInfo column : cols) {
        final SortDirectionType direction =
                column.m_ascending ? SortDirectionType.ASC : SortDirectionType.DESC;
        orderByNode.addSortExpression(column.m_expression, direction);
    }
    return orderByNode;
}
java
/**
 * Decides whether an explicit ORDER BY plan node is needed, or whether an
 * index scan (possibly via a window function) already guarantees the
 * requested ordering.
 *
 * The method walks the outer-child spine from {@code root} down to the
 * first scan/join node, counting window-function, receive, and
 * hash/partial-aggregate nodes along the way, then reasons as follows:
 *
 *  - No ORDER BY clause: never needed.
 *  - No scan/join found, node not index-sortable, index provides no sort
 *    order, or a hash/partial aggregate intervenes (these invalidate any
 *    index ordering — serial aggregation does not): needed.
 *  - Zero window functions: needed unless the index serves the statement-
 *    level ORDER BY; even then, an MP plan (receive nodes present) still
 *    needs the node so the RECEIVE→MERGERECEIVE micro-optimization has an
 *    inline ORDER BY to merge with.
 *  - Exactly one window function: not needed only when both the window
 *    function and the statement-level ORDER BY can use the index (the
 *    RECEIVE→MERGERECEIVE conversion then happens under the window
 *    function, not the ORDER BY).
 *  - More than one window function: always needed, since the second
 *    invalidates the first's ordering. (If the statement-level order were
 *    compatible with the last window function the one-window case would
 *    apply, but multiple window functions are not currently supported
 *    anyway.)
 *
 * @param parsedStmt the parsed statement (supplies the ORDER BY columns)
 * @param root       root of the candidate plan graph
 * @return true when an ORDER BY node must be added
 */
private static boolean isOrderByNodeRequired(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
    // Only sort when the statement has an ORDER BY.
    if ( ! parsedStmt.hasOrderByColumns()) {
        return false;
    }

    // Note that even tree index scans that produce values in their own
    // "key order" only report a sort direction != SortDirectionType.INVALID
    // when they enforce an ordering equivalent to the one requested in the
    // ORDER BY or window function clause.
    int numberWindowFunctions = 0;
    int numberReceiveNodes = 0;
    int numberHashAggregates = 0;

    // EE keeps the insertion ORDER so that ORDER BY could apply before DISTINCT.
    // However, this probably is not optimal if there are low cardinality results.
    // Again, we have to replace the TVEs for ORDER BY clause for these cases in planning.
    //
    // Walk down to the scan or join node, counting interesting nodes on the way.
    AbstractPlanNode probe;
    for (probe = root;
            ! ((probe instanceof AbstractJoinPlanNode) || (probe instanceof AbstractScanPlanNode)) && (probe != null);
            probe = (probe.getChildCount() > 0) ? probe.getChild(0) : null) {
        // Window functions between root and the scan/join. If the
        // statement-level order by (SLOB) can use the index, the scan or
        // join node has recorded that.
        if (probe.getPlanNodeType() == PlanNodeType.WINDOWFUNCTION) {
            numberWindowFunctions += 1;
        }

        // Receive nodes: an MP plan needs the ORDER BY node so the
        // RECEIVE->MERGERECEIVE micro-optimization has one to inline.
        if (probe.getPlanNodeType() == PlanNodeType.RECEIVE) {
            numberReceiveNodes += 1;
        }

        // Hash or partial aggregation invalidates the ordering; serial
        // aggregation does not.
        if ((probe.getPlanNodeType() == PlanNodeType.HASHAGGREGATE)
                || (probe.getPlanNodeType() == PlanNodeType.PARTIALAGGREGATE)) {
            numberHashAggregates += 1;
        }
    }

    if (probe == null) {
        // No idea what happened here. We can't find a
        // scan or join node at all. This seems unlikely
        // to be right. Maybe this should be an assert?
        return true;
    }

    if ( ! (probe instanceof IndexSortablePlanNode)) {
        return true;
    }

    IndexUseForOrderBy indexUse = ((IndexSortablePlanNode)probe).indexUse();

    if (indexUse.getSortOrderFromIndexScan() == SortDirectionType.INVALID) {
        return true;
    }

    // Hash aggregates and partial aggregates invalidate the index ordering.
    if (numberHashAggregates > 0) {
        return true;
    }

    if ( numberWindowFunctions == 0 ) {
        if ( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.NO_INDEX_USE ) {
            return true;
        }
        assert( indexUse.getWindowFunctionUsesIndex() == SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX );
        // Return true for MP (numberReceiveNodes > 0) and
        // false for SP (numberReceiveNodes == 0);
        return numberReceiveNodes > 0;
    }

    if (numberWindowFunctions == 1) {
        // If the WF uses the index then getWindowFunctionUsesIndex()
        // will return 0.
        if ( ( indexUse.getWindowFunctionUsesIndex() != 0 )
                || ( ! indexUse.isWindowFunctionCompatibleWithOrderBy() ) ) {
            return true;
        }
        // Both the WF and the SLOB can use the index. Since the
        // window function will have the order by node, the SLOB
        // does not need one.
        return false;
    }

    // This can actually never happen now,
    // because we only support one window function.
    return true;
}
java
/**
 * Adds an ORDER BY node over {@code root} when one is required for the
 * statement; otherwise returns {@code root} unchanged.
 *
 * @param parsedStmt a SELECT, UNION, or DELETE statement (asserted)
 * @param root       current plan root
 * @return the plan root, possibly topped by a new OrderByPlanNode
 */
private static AbstractPlanNode handleOrderBy(AbstractParsedStmt parsedStmt, AbstractPlanNode root) {
    assert (parsedStmt instanceof ParsedSelectStmt
            || parsedStmt instanceof ParsedUnionStmt
            || parsedStmt instanceof ParsedDeleteStmt);

    if (isOrderByNodeRequired(parsedStmt, root)) {
        final OrderByPlanNode sortNode = buildOrderByPlanNode(parsedStmt.orderByColumns());
        sortNode.addAndLinkChild(root);
        return sortNode;
    }
    return root;
}
java
/**
 * Applies the SELECT statement's LIMIT/OFFSET to the plan, pushing a
 * combined limit down into the distributed fragment when that is legal.
 *
 * Push-down is disallowed with DISTINCT + GROUP BY, when no viable send
 * node is found, when the parsed statement forbids it, or when the
 * materialized-view fix is in play. When pushed down, the distributed limit
 * (limit+offset combined) is re-attached under the send node — inlined if
 * possible, preceded by an ORDER BY when the partition-local data must be
 * ordered first. The coordinator keeps the original top limit, which is
 * attached (inline where possible) by {@code inlineLimitOperator}.
 *
 * TODO: allow push down limit with distinct (select distinct C from T limit 5),
 * DISTINCT in aggregates and DISTINCT PUSH DOWN with partition column included.
 *
 * @param root coordinator-side plan root
 * @return the plan root with the top limit applied
 */
private AbstractPlanNode handleSelectLimitOperator(AbstractPlanNode root) {
    // The coordinator's top limit graph fragment for a MP plan.
    // If planning "order by ... limit", getNextSelectPlan()
    // will have already added an order by to the coordinator frag.
    // This is the only limit node in a SP plan
    LimitPlanNode topLimit = m_parsedSelect.getLimitNodeTop();
    assert(topLimit != null);

    AbstractPlanNode sendNode = null;
    // Whether or not we can push the limit node down
    boolean canPushDown = ! m_parsedSelect.hasDistinctWithGroupBy();
    if (canPushDown) {
        sendNode = checkLimitPushDownViability(root);
        if (sendNode == null) {
            canPushDown = false;
        } else {
            canPushDown = m_parsedSelect.getCanPushdownLimit();
        }
    }

    if (m_parsedSelect.m_mvFixInfo.needed()) {
        // Do not push down limit for mv based distributed query.
        canPushDown = false;
    }

    /*
     * Push down the limit plan node when possible even if offset is set. If
     * the plan is for a partitioned table, do the push down. Otherwise,
     * there is no need to do the push down work, the limit plan node will
     * be run in the partition.
     */
    if (canPushDown) {
        /*
         * For partitioned table, the pushed-down limit plan node has a limit based
         * on the combined limit and offset, which may require an expression if either of these
         * was not a hard-coded constant and didn't get parameterized.
         * The top level limit plan node remains the same, with the original limit and offset values.
         */
        LimitPlanNode distLimit = m_parsedSelect.getLimitNodeDist();

        // Disconnect the distributed parts of the plan below the SEND node
        AbstractPlanNode distributedPlan = sendNode.getChild(0);
        distributedPlan.clearParents();
        sendNode.clearChildren();

        // If the distributed limit must be performed on ordered input,
        // ensure the order of the data on each partition.
        if (m_parsedSelect.hasOrderByColumns()) {
            distributedPlan = handleOrderBy(m_parsedSelect, distributedPlan);
        }

        if (isInlineLimitPlanNodePossible(distributedPlan)) {
            // Inline the distributed limit.
            distributedPlan.addInlinePlanNode(distLimit);
            sendNode.addAndLinkChild(distributedPlan);
        } else {
            distLimit.addAndLinkChild(distributedPlan);
            // Add the distributed work back to the plan
            sendNode.addAndLinkChild(distLimit);
        }
    }

    // In future, inline LIMIT for join, Receive
    // Then we do not need to distinguish the order by node.
    return inlineLimitOperator(root, topLimit);
}
java
/**
 * Applies the UNION statement's top LIMIT/OFFSET node to the plan.
 *
 * Unlike the SELECT case there is no push-down attempt here; the single
 * top limit is attached (inline where possible) by
 * {@code inlineLimitOperator}. If planning "order by ... limit",
 * getNextUnionPlan() will have already added an order by to the
 * coordinator fragment; this is the only limit node in a SP plan.
 *
 * @param root current plan root
 * @return the plan root with the limit applied
 */
private AbstractPlanNode handleUnionLimitOperator(AbstractPlanNode root) {
    // The coordinator's top limit graph fragment for a MP plan.
    LimitPlanNode topLimit = m_parsedUnion.getLimitNodeTop();
    assert(topLimit != null);
    return inlineLimitOperator(root, topLimit);
}
java
private AbstractPlanNode inlineLimitOperator(AbstractPlanNode root, LimitPlanNode topLimit) { if (isInlineLimitPlanNodePossible(root)) { root.addInlinePlanNode(topLimit); } else if (root instanceof ProjectionPlanNode && isInlineLimitPlanNodePossible(root.getChild(0)) ) { // In future, inlined this projection node for OrderBy and Aggregate // Then we could delete this ELSE IF block. root.getChild(0).addInlinePlanNode(topLimit); } else { topLimit.addAndLinkChild(root); root = topLimit; } return root; }
java
/**
 * Reports whether a LIMIT node may be inlined into the given plan node:
 * only ORDER BY nodes and serial AGGREGATE nodes accept an inline limit.
 *
 * @param pn candidate host node
 * @return true if an inline limit is supported
 */
static private boolean isInlineLimitPlanNodePossible(AbstractPlanNode pn) {
    return pn instanceof OrderByPlanNode
            || pn.getPlanNodeType() == PlanNodeType.AGGREGATE;
}
java
/**
 * Attempts to serve a GROUP BY from an index scan instead of a sequential
 * scan, recording the chosen access path in {@code gbInfo}.
 *
 * Return value semantics are subtle: the method returns true only when the
 * caller must REPLACE its candidate with {@code gbInfo.m_indexAccess}
 * (candidate was already a suitable index scan, or a root-level seq scan
 * was switched). When a nested seq scan is swapped in place under its
 * parent, the graph is already fixed up here and false is returned.
 *
 * @param candidate current candidate access-plan root
 * @param gbInfo    out-parameter collecting the index GROUP BY decision
 * @return true if the caller should use {@code gbInfo.m_indexAccess} as its
 *         new root; false otherwise
 */
private boolean switchToIndexScanForGroupBy(AbstractPlanNode candidate, IndexGroupByInfo gbInfo) {
    if (! m_parsedSelect.isGrouped()) {
        return false;
    }

    if (candidate instanceof IndexScanPlanNode) {
        calculateIndexGroupByInfo((IndexScanPlanNode) candidate, gbInfo);
        if (gbInfo.m_coveredGroupByColumns != null && !gbInfo.m_coveredGroupByColumns.isEmpty()) {
            // The candidate index does cover all or some
            // of the GROUP BY columns and can be serialized
            gbInfo.m_indexAccess = candidate;
            return true;
        }
        return false;
    }

    AbstractPlanNode sourceSeqScan = findSeqScanCandidateForGroupBy(candidate);
    if (sourceSeqScan == null) {
        return false;
    }
    assert(sourceSeqScan instanceof SeqScanPlanNode);

    AbstractPlanNode parent = null;
    if (sourceSeqScan.getParentCount() > 0) {
        parent = sourceSeqScan.getParent(0);
    }

    AbstractPlanNode indexAccess = indexAccessForGroupByExprs(
            (SeqScanPlanNode)sourceSeqScan, gbInfo);

    if (indexAccess.getPlanNodeType() != PlanNodeType.INDEXSCAN) {
        // does not find proper index to replace sequential scan
        return false;
    }

    gbInfo.m_indexAccess = indexAccess;
    if (parent != null) {
        // have a parent and would like to replace
        // the sequential scan with an index scan
        indexAccess.clearParents();
        // For two children join node, index 0 is its outer side
        parent.replaceChild(0, indexAccess);

        // graph already patched in place; caller keeps its current root
        return false;
    }

    // parent is null and switched to index scan from sequential scan
    return true;
}
java
/**
 * Wrap the plan rooted at {@code root} with a WindowFunctionPlanNode for
 * the statement's (single) window function, inserting an ORDER BY node to
 * feed the window function correctly-ordered input unless a suitable index
 * already provides that order on a single-fragment plan.
 *
 * @param root current plan root (the window function's input)
 * @return the new plan root: the WindowFunctionPlanNode
 */
private AbstractPlanNode handleWindowedOperators(AbstractPlanNode root) {
    // Get the windowed expression. We need to set its output
    // schema from the display list. Only the first (and only
    // supported) window function expression is handled here.
    WindowFunctionExpression winExpr = m_parsedSelect.getWindowFunctionExpressions().get(0);
    assert(winExpr != null);
    // This will set the output schema to contain the
    // windowed schema column only. In generateOutputSchema
    // we will add the input columns.
    WindowFunctionPlanNode pnode = new WindowFunctionPlanNode();
    pnode.setWindowFunctionExpression(winExpr);
    // We always need an order by plan node, even if the sort
    // is optimized away by an index. This may be turned
    // into an inline order by in a MergeReceivePlanNode.
    IndexUseForOrderBy scanNode = findScanNodeForWindowFunction(root);
    AbstractPlanNode cnode = null;
    // Which index-use category applies; NO_INDEX_USE when no scan was found.
    int winfunc = (scanNode == null)
            ? SubPlanAssembler.NO_INDEX_USE
            : scanNode.getWindowFunctionUsesIndex();
    // If we have an index which is compatible with the statement
    // level order by, and we have a window function which can't
    // use the index, we have to ignore the statement level order by
    // index use. We will need to order the input according to the
    // window function first, and that will in general invalidate the
    // statement level order by ordering.
    if ((SubPlanAssembler.STATEMENT_LEVEL_ORDER_BY_INDEX == winfunc)
            || (SubPlanAssembler.NO_INDEX_USE == winfunc)) {
        // No usable index. Calculate the expression order here and stuff it
        // into the order by node. Note that if we supported more than one
        // window function this would be the case when
        // scanNode.getWindowFunctionUsesIndex() returns a window function
        // number different from the number of winExpr.
        List<AbstractExpression> partitionByExpressions = winExpr.getPartitionByExpressions();
        // If the order by expression list contains a partition by expression
        // then we won't have to sort by it twice. We sort by the partition by
        // expressions first, and we don't care what order we sort by them.
        // So, find the sort direction in the order by list and use that in
        // the partition by list, and then mark that entry as handled in the
        // order by list.
        //
        // We choose to make this dontsort rather than dosort because the
        // Java default value for boolean is false, and we want to sort by
        // default.
        boolean dontsort[] = new boolean[winExpr.getOrderbySize()];
        List<AbstractExpression> orderByExpressions = winExpr.getOrderByExpressions();
        List<SortDirectionType> orderByDirections = winExpr.getOrderByDirections();
        OrderByPlanNode onode = new OrderByPlanNode();
        // First: sort by every PARTITION BY expression, borrowing the
        // direction from a matching ORDER BY entry when one exists.
        for (int idx = 0; idx < winExpr.getPartitionbySize(); ++idx) {
            SortDirectionType pdir = SortDirectionType.ASC;
            AbstractExpression partitionByExpression = partitionByExpressions.get(idx);
            int sidx = winExpr.getSortIndexOfOrderByExpression(partitionByExpression);
            if (0 <= sidx) {
                pdir = orderByDirections.get(sidx);
                dontsort[sidx] = true;
            }
            onode.addSortExpression(partitionByExpression, pdir);
        }
        // Then: the remaining ORDER BY expressions, in declared order.
        for (int idx = 0; idx < winExpr.getOrderbySize(); ++idx) {
            if (!dontsort[idx]) {
                AbstractExpression orderByExpr = orderByExpressions.get(idx);
                SortDirectionType orderByDir = orderByDirections.get(idx);
                onode.addSortExpression(orderByExpr, orderByDir);
            }
        }
        onode.addAndLinkChild(root);
        cnode = onode;
    } else {
        assert(scanNode != null);
        // This means the index is good for this window function.
        // If this is an MP statement we still need to generate the
        // order by node, because we may need to turn it into an
        // inline order by node of a MergeReceive node.
        assert( 0 == scanNode.getWindowFunctionUsesIndex() );
        if (m_partitioning.requiresTwoFragments()) {
            OrderByPlanNode onode = new OrderByPlanNode();
            SortDirectionType dir = scanNode.getSortOrderFromIndexScan();
            assert(dir != SortDirectionType.INVALID);
            // This was created when the index was determined.
            // We cached it in the scan node.
            List<AbstractExpression> orderExprs = scanNode.getFinalExpressionOrderFromIndexScan();
            assert(orderExprs != null);
            for (AbstractExpression ae : orderExprs) {
                onode.addSortExpression(ae, dir);
            }
            // Link in the OrderByNode.
            onode.addAndLinkChild(root);
            cnode = onode;
        } else {
            // Single fragment and the index provides the order:
            // don't create and link in the order by node.
            cnode = root;
        }
    }
    pnode.addAndLinkChild(cnode);
    return pnode;
}
/**
 * Mark an index scan over a partial index appropriately when the scan
 * carries no predicate of its own: flag it as partial-index-only, and —
 * when the scan is used purely for sort order — adopt the partial index's
 * predicate as the scan predicate.
 *
 * @param scan index scan node to adjust in place
 */
private static void updatePartialIndex(IndexScanPlanNode scan) {
    // Only applies when the scan has no predicate of its own.
    if (scan.getPredicate() != null) {
        return;
    }
    AbstractExpression partialPredicate = scan.getPartialIndexPredicate();
    if (partialPredicate == null) {
        // Not a partial index: nothing to do.
        return;
    }
    if (scan.isForSortOrderOnly()) {
        scan.setPredicate(Collections.singletonList(partialPredicate));
    }
    scan.setForPartialIndexOnly();
}
/**
 * Record, in {@code gbInfo}, how many of the statement's GROUP BY columns
 * the given index scan's index covers, and whether it covers all of them
 * (full serialization possible). Leaves {@code gbInfo} untouched when the
 * index is not scannable.
 *
 * @param root   index scan whose catalog index is evaluated
 * @param gbInfo out-parameter receiving coverage results
 */
private void calculateIndexGroupByInfo(IndexScanPlanNode root, IndexGroupByInfo gbInfo) {
    String tableAlias = root.getTargetTableAlias();
    assert(tableAlias != null);
    Index catalogIndex = root.getCatalogIndex();
    if ( ! IndexType.isScannable(catalogIndex.getType())) {
        // A non-scannable index cannot provide grouping order.
        return;
    }
    // Scratch list populated by the coverage calculation; not used here.
    ArrayList<AbstractExpression> scratchBindings = new ArrayList<>();
    gbInfo.m_coveredGroupByColumns =
            calculateGroupbyColumnsCovered(catalogIndex, tableAlias, scratchBindings);
    // Fully serialized grouping is possible only when every GROUP BY
    // column is covered by the index.
    gbInfo.m_canBeFullySerialized =
            gbInfo.m_coveredGroupByColumns.size() == m_parsedSelect.groupByColumns().size();
}
/**
 * Search the scan target's indexes for the one covering the most GROUP BY
 * columns, and if found, return an IndexScanPlanNode (for grouping only)
 * replacing the sequential scan. Returns {@code root} unchanged when no
 * usable index covers any GROUP BY column.
 *
 * Side effects on success: fills gbInfo.m_coveredGroupByColumns and
 * gbInfo.m_canBeFullySerialized.
 *
 * @param root   sequential scan to (maybe) replace
 * @param gbInfo out-parameter receiving coverage results for the winner
 * @return a new grouping-only IndexScanPlanNode, or {@code root} as-is
 */
private AbstractPlanNode indexAccessForGroupByExprs(SeqScanPlanNode root, IndexGroupByInfo gbInfo) {
    if (! root.isPersistentTableScan()) {
        // subquery and common tables are not handled
        return root;
    }
    String fromTableAlias = root.getTargetTableAlias();
    assert(fromTableAlias != null);
    List<ParsedColInfo> groupBys = m_parsedSelect.groupByColumns();
    Table targetTable = m_catalogDb.getTables().get(root.getTargetTableName());
    assert(targetTable != null);
    CatalogMap<Index> allIndexes = targetTable.getIndexes();
    // Track the best candidate seen so far: the index covering the most
    // GROUP BY columns, its bindings, and whether it covers all of them.
    List<Integer> maxCoveredGroupByColumns = new ArrayList<>();
    ArrayList<AbstractExpression> maxCoveredBindings = null;
    Index pickedUpIndex = null;
    boolean foundAllGroupByCoveredIndex = false;
    for (Index index : allIndexes) {
        if ( ! IndexType.isScannable(index.getType())) {
            continue;
        }
        if ( ! index.getPredicatejson().isEmpty()) {
            // do not try to look at Partial/Sparse index
            continue;
        }
        ArrayList<AbstractExpression> bindings = new ArrayList<>();
        List<Integer> coveredGroupByColumns = calculateGroupbyColumnsCovered(
                index, fromTableAlias, bindings);
        // Strictly-greater comparison: on a tie, the first index wins.
        if (coveredGroupByColumns.size() > maxCoveredGroupByColumns.size()) {
            maxCoveredGroupByColumns = coveredGroupByColumns;
            pickedUpIndex = index;
            maxCoveredBindings = bindings;
            if (maxCoveredGroupByColumns.size() == groupBys.size()) {
                // Full coverage: no better index is possible, stop looking.
                foundAllGroupByCoveredIndex = true;
                break;
            }
        }
    }
    if (pickedUpIndex == null) {
        // No index covered even one GROUP BY column; keep the sequential scan.
        return root;
    }
    IndexScanPlanNode indexScanNode = new IndexScanPlanNode(
            root, null, pickedUpIndex, SortDirectionType.INVALID);
    indexScanNode.setForGroupingOnly();
    indexScanNode.setBindings(maxCoveredBindings);
    gbInfo.m_coveredGroupByColumns = maxCoveredGroupByColumns;
    gbInfo.m_canBeFullySerialized = foundAllGroupByCoveredIndex;
    return indexScanNode;
}