code
stringlengths
73
34.1k
label
stringclasses
1 value
private void respondWithDummy() { final FragmentResponseMessage response = new FragmentResponseMessage(m_fragmentMsg, m_initiator.getHSId()); response.m_sourceHSId = m_initiator.getHSId(); response.setRecovering(true); response.setStatus(FragmentResponseMessage.SUCCESS, null); // Set the dependencies even if this is a dummy response. This site could be the master // on elastic join, so the fragment response message is actually going to the MPI. for (int frag = 0; frag < m_fragmentMsg.getFragmentCount(); frag++) { final int outputDepId = m_fragmentMsg.getOutputDepId(frag); response.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, m_rawDummyResponse, 0, m_rawDummyResponse.length)); } response.setRespBufferable(m_respBufferable); m_initiator.deliver(response); }
java
/**
 * Runs every system-procedure plan fragment carried by m_fragmentMsg against the
 * given site and folds the resulting dependencies into a single response message.
 * On the first failing fragment the response status is switched to an error code
 * and the loop exits early; a (dummy) dependency is still attached so the
 * receiver's bookkeeping stays consistent.
 *
 * @param siteConnection the execution site the fragments run on
 * @return the response carrying either all collected dependencies (SUCCESS) or
 *         an error status from the first failed fragment
 */
public FragmentResponseMessage processFragmentTask(SiteProcedureConnection siteConnection) {
    final FragmentResponseMessage currentFragResponse = new FragmentResponseMessage(m_fragmentMsg, m_initiator.getHSId());
    currentFragResponse.setStatus(FragmentResponseMessage.SUCCESS, null);
    for (int frag = 0; frag < m_fragmentMsg.getFragmentCount(); frag++) {
        final long fragmentId = VoltSystemProcedure.hashToFragId(m_fragmentMsg.getPlanHash(frag));
        // equivalent to dep.depId:
        // final int outputDepId = m_fragmentMsg.getOutputDepId(frag);
        final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPSITE);
        if (traceLog != null) {
            traceLog.add(() -> VoltTrace.beginDuration("runfragmenttask",
                    "txnId", TxnEgo.txnIdToString(getTxnId()),
                    "partition", Integer.toString(siteConnection.getCorrespondingPartitionId()),
                    "fragmentId", String.valueOf(fragmentId)));
        }
        ParameterSet params = m_fragmentMsg.getParameterSetForFragment(frag);
        try {
            // run the overloaded sysproc planfragment. pass an empty dependency
            // set since remote (non-aggregator) fragments don't receive dependencies.
            final DependencyPair dep = siteConnection.executeSysProcPlanFragment(m_txnState, m_inputDeps, fragmentId, params);
            // @Shutdown returns null, handle it here
            if (dep != null) {
                currentFragResponse.addDependency(dep);
            }
        } catch (final EEException | SQLException | ReplicatedTableException e) {
            // Engine-level failures: log the offending plan (hex) and report UNEXPECTED_ERROR.
            hostLog.l7dlog(Level.TRACE, LogKeys.host_ExecutionSite_ExceptionExecutingPF.name(),
                    new Object[] { Encoder.hexEncode(m_fragmentMsg.getFragmentPlan(frag)) }, e);
            currentFragResponse.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, e);
            addDependencyToFragment(currentFragResponse);
            break;
        } catch (final SerializableException e) {
            // Note that with SerializableException, the error code here might get changed before
            // the client/user sees it. It really just needs to indicate failure.
            //
            // Key point here vs the next catch block for VAE is to not wrap the subclass of
            // SerializableException here to preserve it during the serialization.
            //
            currentFragResponse.setStatus( FragmentResponseMessage.USER_ERROR, e);
            addDependencyToFragment(currentFragResponse);
            break;
        } catch (final VoltAbortException e) {
            // VoltAbortException is not serializable itself; wrap its text instead.
            currentFragResponse.setStatus( FragmentResponseMessage.USER_ERROR,
                    new SerializableException(CoreUtils.throwableToString(e)));
            addDependencyToFragment(currentFragResponse);
            break;
        }
        if (traceLog != null) {
            traceLog.add(VoltTrace::endDuration);
        }
    }
    // we should never rollback DR buffer for MP sysprocs because we don't report the DR buffer size and therefore don't know if it is empty or not.
    currentFragResponse.setDrBufferSize(1);
    return currentFragResponse;
}
java
@Override
public void run() {
    // Fan the age-based delete out to every partition, then purge expired bids
    // in one extra call; callbacks are fire-and-forget.
    try {
        final VoltTable keys =
                m_client.callProcedure("@GetPartitionKeys", "INTEGER").getResults()[0];
        while (keys.advanceRow()) {
            m_client.callProcedure(new NullCallback(), "DeleteOldAdRequests",
                    keys.getLong("PARTITION_KEY"), m_expiredAgeInSeconds);
        }
        m_client.callProcedure(new NullCallback(), "DeleteExpiredBids");
    } catch (IOException | ProcCallException ex) {
        ex.printStackTrace();
    }
}
java
/**
 * Applies (commit == true) or rolls back (commit == false) the LOB bookkeeping
 * gathered during the current transaction: the list of newly created LOB ids and
 * the per-LOB usage-count deltas. No-op when no LOB operation happened.
 */
public void updateLobUsage(boolean commit) {
    if (!hasLobOps) {
        return;
    }
    hasLobOps = false;
    if (commit) {
        // Settle LOBs created in this transaction. Note the index is walked
        // with explicit i-- after each removal, so removal order matters here.
        for (int i = 0; i < createdLobs.size(); i++) {
            long lobID = createdLobs.get(i);
            int delta = lobUsageCount.get(lobID, 0);
            if (delta == 1) {
                // Net usage unchanged; just drop the bookkeeping entry.
                lobUsageCount.remove(lobID);
                createdLobs.remove(i);
                i--;
            } else if (!session.isBatch) {
                // Push the accumulated delta (minus the creation itself) to the manager.
                database.lobManager.adjustUsageCount(lobID, delta - 1);
                lobUsageCount.remove(lobID);
                createdLobs.remove(i);
                i--;
            }
            // NOTE(review): in batch mode entries with delta != 1 are deliberately
            // left in place — presumably settled later; confirm against callers.
        }
        // Flush remaining deltas for LOBs that were not created in this transaction.
        if (!lobUsageCount.isEmpty()) {
            Iterator it = lobUsageCount.keySet().iterator();
            while (it.hasNext()) {
                long lobID = it.nextLong();
                int delta = lobUsageCount.get(lobID);
                database.lobManager.adjustUsageCount(lobID, delta - 1);
            }
            lobUsageCount.clear();
        }
        return;
    } else {
        // Rollback: physically delete every LOB created in this transaction.
        for (int i = 0; i < createdLobs.size(); i++) {
            long lobID = createdLobs.get(i);
            database.lobManager.deleteLob(lobID);
        }
        createdLobs.clear();
        lobUsageCount.clear();
        return;
    }
}
java
/**
 * Materializes a BLOB or CLOB for a create-LOB request carried by a ResultLob.
 * When inputStream is null the lob id from the result is reused and the data is
 * taken from the result itself; otherwise a fresh lob is allocated in the session
 * and the remote-id -> local-id mapping is recorded in resultLobs.
 */
public void allocateLobForResult(ResultLob result, InputStream inputStream) {
    long resultLobId = result.getLobID();
    CountdownInputStream countStream;
    switch (result.getSubType()) {
        case ResultLob.LobResultTypes.REQUEST_CREATE_BYTES : {
            long blobId;
            long blobLength = result.getBlockLength();
            if (inputStream == null) {
                // Data rides along with the result; keep the caller's lob id.
                blobId = resultLobId;
                inputStream = result.getInputStream();
            } else {
                BlobData blob = session.createBlob(blobLength);
                blobId = blob.getId();
                resultLobs.put(resultLobId, blobId);
            }
            // Bound the read to exactly blobLength bytes.
            countStream = new CountdownInputStream(inputStream);
            countStream.setCount(blobLength);
            database.lobManager.setBytesForNewBlob(
                blobId, countStream, result.getBlockLength());
            break;
        }
        case ResultLob.LobResultTypes.REQUEST_CREATE_CHARS : {
            long clobId;
            long clobLength = result.getBlockLength();
            if (inputStream == null) {
                clobId = resultLobId;
                if (result.getReader() != null) {
                    inputStream = new ReaderInputStream(result.getReader());
                } else {
                    inputStream = result.getInputStream();
                }
            } else {
                ClobData clob = session.createClob(clobLength);
                clobId = clob.getId();
                resultLobs.put(resultLobId, clobId);
            }
            countStream = new CountdownInputStream(inputStream);
            // count = clobLength * 2 — presumably a byte count for 2-byte chars
            // (UTF-16) while clobLength counts characters; TODO confirm.
            countStream.setCount(clobLength * 2);
            database.lobManager.setCharsForNewClob(
                clobId, countStream, result.getBlockLength());
            break;
        }
    }
}
java
/**
 * Resolves the column indexes of this join node's predicates and output schema
 * against the concrete output schemas of its two children, then orders the
 * pre-inline-aggregation output schema as [outer columns][inner columns] with
 * each side sorted by TVE index.
 */
@Override
public void resolveColumnIndexes() {
    // First, assert that our topology is sane and then
    // recursively resolve all child/inline column indexes
    IndexScanPlanNode index_scan = (IndexScanPlanNode) getInlinePlanNode(PlanNodeType.INDEXSCAN);
    assert(m_children.size() == 2 && index_scan == null);
    for (AbstractPlanNode child : m_children) {
        child.resolveColumnIndexes();
    }
    final NodeSchema outer_schema = m_children.get(0).getOutputSchema();
    final NodeSchema inner_schema = m_children.get(1).getOutputSchema();
    final int outerSize = outer_schema.size();
    // NOTE(review): innerSize is computed but not used below.
    final int innerSize = inner_schema.size();
    // resolve predicates
    resolvePredicate(m_preJoinPredicate, outer_schema, inner_schema);
    resolvePredicate(m_joinPredicate, outer_schema, inner_schema);
    resolvePredicate(m_wherePredicate, outer_schema, inner_schema);
    // Resolve subquery expression indexes
    resolveSubqueryColumnIndexes();
    // Resolve TVE indexes for each schema column.
    for (int i = 0; i < m_outputSchemaPreInlineAgg.size(); ++i) {
        SchemaColumn col = m_outputSchemaPreInlineAgg.getColumn(i);
        // These will all be TVEs.
        assert(col.getExpression() instanceof TupleValueExpression);
        TupleValueExpression tve = (TupleValueExpression)col.getExpression();
        int index;
        if (i < outerSize) {
            // Output column i comes from the outer (left) child.
            index = tve.setColumnIndexUsingSchema(outer_schema);
        } else {
            // Inner-side columns are offset past the outer schema.
            index = tve.setColumnIndexUsingSchema(inner_schema);
            index += outerSize;
        }
        if (index == -1) {
            throw new RuntimeException("Unable to find index for column: " + col.toString());
        }
        tve.setColumnIndex(index);
        tve.setDifferentiator(index);
    }
    // We want the output columns to be ordered like [outer table columns][inner table columns],
    // and further ordered by TVE index within the left- and righthand sides.
    // generateOutputSchema already places outer columns on the left and inner on the right,
    // so we just need to order the left- and righthand sides by TVE index separately.
    m_outputSchemaPreInlineAgg.sortByTveIndex(0, outer_schema.size());
    m_outputSchemaPreInlineAgg.sortByTveIndex(outer_schema.size(), m_outputSchemaPreInlineAgg.size());
    m_hasSignificantOutputSchema = true;
    resolveRealOutputSchema();
}
java
public void resolveSortDirection() { AbstractPlanNode outerTable = m_children.get(0); if (m_joinType == JoinType.FULL) { // Disable the usual optimizations for ordering join output by // outer table only. In case of FULL join, the unmatched inner table tuples // get appended to the end of the join's output table thus invalidating // the outer table join order. m_sortDirection = SortDirectionType.INVALID; return; } if (outerTable instanceof IndexSortablePlanNode) { m_sortDirection = ((IndexSortablePlanNode)outerTable).indexUse().getSortOrderFromIndexScan(); } }
java
// Discount estimated processed tuple count for the outer child based on the number of
// filter expressions this child has with a rapidly diminishing effect
// that ranges from a discount of 0.09 (COMPARE_EQUAL)
// or 0.045 (all other expression types) for one post filter to a max discount approaching
// 0.888... (=8/9) for many EQUALITY filters.
// The discount value is less than the partial index discount (0.1) to make sure
// the index wins
protected long discountEstimatedProcessedTupleCount(AbstractPlanNode childNode) {
    // Locate the predicate that applies to this child, depending on its node type.
    AbstractExpression predicate = null;
    if (childNode instanceof AbstractScanPlanNode) {
        predicate = ((AbstractScanPlanNode) childNode).getPredicate();
    } else if (childNode instanceof NestLoopPlanNode) {
        predicate = ((NestLoopPlanNode) childNode).getWherePredicate();
    } else if (childNode instanceof NestLoopIndexPlanNode) {
        AbstractPlanNode inlineIndexScan = ((NestLoopIndexPlanNode) childNode).getInlinePlanNode(PlanNodeType.INDEXSCAN);
        assert(inlineIndexScan != null);
        predicate = ((AbstractScanPlanNode) inlineIndexScan).getPredicate();
    } else {
        // No applicable predicate source: no discount.
        return childNode.getEstimatedProcessedTupleCount();
    }
    if (predicate == null) {
        return childNode.getEstimatedProcessedTupleCount();
    }
    List<AbstractExpression> predicateExprs = ExpressionUtil.uncombinePredicate(predicate);
    // Counters to count the number of equality and all other expressions
    int eqCount = 0;
    int otherCount = 0;
    final double MAX_EQ_POST_FILTER_DISCOUNT = 0.09;
    final double MAX_OTHER_POST_FILTER_DISCOUNT = 0.045;
    double discountCountFactor = 1.0;
    // Discount tuple count. Each additional filter of the same kind contributes
    // an exponentially smaller discount (base^k), so the factor stays positive.
    for (AbstractExpression predicateExpr: predicateExprs) {
        if (ExpressionType.COMPARE_EQUAL == predicateExpr.getExpressionType()) {
            discountCountFactor -= Math.pow(MAX_EQ_POST_FILTER_DISCOUNT, ++eqCount);
        } else {
            discountCountFactor -= Math.pow(MAX_OTHER_POST_FILTER_DISCOUNT, ++otherCount);
        }
    }
    return (long) (childNode.getEstimatedProcessedTupleCount() * discountCountFactor);
}
java
/**
 * Deserializes the stored byte image back into an object, converting any
 * failure into an SQL-state error (X_22521) carrying the cause's text.
 */
public Serializable getObject() {
    try {
        return InOutUtil.deserialize(data);
    } catch (Exception e) {
        throw Error.error(ErrorCode.X_22521, e.toString());
    }
}
java
@VisibleForTesting static char[][] createReplacementArray(Map<Character, String> map) { checkNotNull(map); // GWT specific check (do not optimize) if (map.isEmpty()) { return EMPTY_REPLACEMENT_ARRAY; } char max = Collections.max(map.keySet()); char[][] replacements = new char[max + 1][]; for (char c : map.keySet()) { replacements[c] = map.get(c).toCharArray(); } return replacements; }
java
/**
 * Returns the precision of the given 1-based parameter: the display size for
 * date/time types, otherwise the declared precision (0 if it exceeds int range).
 */
public int getPrecision(int param) throws SQLException {
    checkRange(param);
    final Type type = rmd.columnTypes[param - 1];
    if (type.isDateTimeType()) {
        return type.displaySize();
    }
    final long precision = type.precision;
    return precision > Integer.MAX_VALUE ? 0 : (int) precision;
}
java
/** Returns the type name of the given 1-based parameter. */
public String getParameterTypeName(int param) throws SQLException {
    checkRange(param);
    final Type type = rmd.columnTypes[param - 1];
    return type.getNameString();
}
java
protected static TaskLog initializeTaskLog(String voltroot, int pid) { // Construct task log and start logging task messages File overflowDir = new File(voltroot, "join_overflow"); return ProClass.newInstanceOf("org.voltdb.rejoin.TaskLogImpl", "Join", ProClass.HANDLER_LOG, pid, overflowDir); }
java
// Restores one block of rejoin data on this site. The watchdog is kicked first
// so the potentially long restore is not mistaken for a hang.
protected void restoreBlock(RestoreWork rejoinWork, SiteProcedureConnection siteConnection) { kickWatchdog(true); rejoinWork.restore(siteConnection); }
java
protected void returnToTaskQueue(boolean sourcesReady) { if (sourcesReady) { // If we've done something meaningful, go ahead and return ourselves to the queue immediately m_taskQueue.offer(this); } else { // Otherwise, avoid spinning too aggressively, so wait a millisecond before requeueing VoltDB.instance().scheduleWork(new ReturnToTaskQueueAction(), 1, -1, TimeUnit.MILLISECONDS); } }
java
/**
 * Writes {@code value} zig-zag encoded as a variable-length integer:
 * 7 payload bits per byte, high bit set on every byte except the last,
 * at most 9 bytes total (the 9th byte carries a full 8 payload bits).
 */
static void putLong(ByteBuffer buffer, long value) {
    // zig-zag: interleave positives and negatives so small magnitudes stay short
    value = (value << 1) ^ (value >> 63);
    int continuations = 0;
    // Emit low 7 bits with the continuation flag while more significant bits
    // remain; cap at 8 continuation bytes so the 9th byte holds bits 56-63 raw.
    while ((value & ~0x7FL) != 0 && continuations < 8) {
        buffer.put((byte) ((value & 0x7F) | 0x80));
        value >>>= 7;
        continuations++;
    }
    buffer.put((byte) value);
}
java
/**
 * Writes {@code value} zig-zag encoded as a variable-length integer:
 * 7 payload bits per byte, high bit set on every byte except the last,
 * at most 5 bytes total.
 */
static void putInt(ByteBuffer buffer, int value) {
    // zig-zag: interleave positives and negatives so small magnitudes stay short
    value = (value << 1) ^ (value >> 31);
    // After four 7-bit groups only 4 bits can remain, so no cap is needed.
    while ((value & ~0x7F) != 0) {
        buffer.put((byte) ((value & 0x7F) | 0x80));
        value >>>= 7;
    }
    buffer.put((byte) value);
}
java
/**
 * Reads a zig-zag encoded variable-length long written by putLong:
 * 7 payload bits per byte while the high bit is set, with a possible
 * 9th byte that carries a full 8 payload bits (bits 56-63).
 */
static long getLong(ByteBuffer buffer) {
    long value = 0;
    for (int shift = 0; ; shift += 7) {
        final long b = buffer.get();
        if (shift == 56) {
            // Ninth (final) byte: all 8 bits are payload, no continuation flag.
            value |= b << 56;
            break;
        }
        value |= (b & 0x7F) << shift;
        if ((b & 0x80) == 0) {
            break;
        }
    }
    // undo zig-zag
    return (value >>> 1) ^ (-(value & 1));
}
java
/**
 * Reads a zig-zag encoded variable-length int written by putInt:
 * 7 payload bits per byte while the high bit is set, at most 5 bytes
 * (decoding always stops after the byte at shift 28).
 */
static int getInt (ByteBuffer buffer) {
    int value = 0;
    for (int shift = 0; ; shift += 7) {
        final int b = buffer.get();
        value |= (b & 0x7F) << shift;
        if ((b & 0x80) == 0 || shift == 28) {
            break;
        }
    }
    // undo zig-zag
    return (value >>> 1) ^ (-(value & 1));
}
java
/**
 * Command-line entry point: mode "t" lists, "x" extracts a tar archive.
 * Argument layout visible here: MODE [--directory=DIR] TARFILE [PATTERN...],
 * where --directory must be the second argument if present.
 */
static public void main(String[] sa) throws IOException, TarMalformatException {
    if (sa.length < 1) {
        // No arguments at all: print usage and exit successfully.
        System.out.println(RB.singleton.getString(RB.TARREADER_SYNTAX, TarReader.class.getName()));
        System.out.println(RB.singleton.getString(RB.LISTING_FORMAT));
        System.exit(0);
    }
    File exDir = (sa.length > 1 && sa[1].startsWith("--directory="))
            ? (new File(sa[1].substring("--directory=".length())))
            : null;
    // Index of the first filename pattern: mode + tarfile (2) plus one more
    // when the --directory option is present.
    int firstPatInd = (exDir == null) ? 2 : 3;
    if (sa.length < firstPatInd || ((!sa[0].equals("t")) && !sa[0].equals("x"))) {
        throw new IllegalArgumentException(
            RB.singleton.getString(
                RB.TARREADER_SYNTAXERR, TarReader.class.getName()));
    }
    // Remaining arguments, if any, are filename patterns.
    String[] patternStrings = null;
    if (sa.length > firstPatInd) {
        patternStrings = new String[sa.length - firstPatInd];
        for (int i = firstPatInd; i < sa.length; i++) {
            patternStrings[i - firstPatInd] = sa[i];
        }
    }
    // An extraction directory makes no sense in list mode.
    if (sa[0].equals("t") && exDir != null) {
        throw new IllegalArgumentException(
            RB.singleton.getString(RB.DIR_X_CONFLICT));
    }
    // Position of the tar file argument mirrors firstPatInd - 1.
    int dirIndex = (exDir == null) ? 1 : 2;
    int tarReaderMode = sa[0].equals("t") ? LIST_MODE : EXTRACT_MODE;
    new TarReader(new File(sa[dirIndex]), tarReaderMode, patternStrings, null, exDir).read();
}
java
/**
 * Extracts the timestamp embedded in a unique id by stripping the counter and
 * partition-id fields, then shifting from the Volt epoch to the Unix epoch.
 * (The resulting value is fed to java.util.Date, i.e. treated as milliseconds.)
 */
public static Date getDateFromUniqueId(long uniqueId) {
    final long sinceVoltEpoch = uniqueId >> (COUNTER_BITS + PARTITIONID_BITS);
    return new Date(sinceVoltEpoch + VOLT_EPOCH);
}
java
/**
 * Instantiates the named class via its no-argument constructor.
 *
 * @param classname fully-qualified class name
 * @return a new instance of the class
 * @throws ParseException if the class cannot be found or instantiated
 */
public static Object createObject(String classname) throws ParseException {
    Class<?> cl;
    try {
        cl = Class.forName(classname);
    } catch (ClassNotFoundException cnfe) {
        throw new ParseException("Unable to find the class: " + classname);
    }
    try {
        // Class.newInstance() is deprecated (it propagates checked exceptions
        // undeclared); use the no-arg Constructor instead. Any failure
        // (no such ctor, inaccessible, ctor threw) lands in the catch below.
        return cl.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
        throw new ParseException(e.getClass().getName() + "; Unable to create an instance of: " + classname);
    }
}
java
/**
 * Parses {@code str} as a Long, or as a Double when it contains a decimal point
 * or an exponent marker ('e'/'E', e.g. "1e5" — previously rejected because only
 * '.' selected the Double path and Long.valueOf cannot parse exponents).
 *
 * @throws ParseException if the string is not a valid number
 */
public static Number createNumber(String str) throws ParseException {
    try {
        if (str.indexOf('.') != -1 || str.indexOf('e') != -1 || str.indexOf('E') != -1) {
            return Double.valueOf(str);
        }
        return Long.valueOf(str);
    } catch (NumberFormatException e) {
        throw new ParseException(e.getMessage());
    }
}
java
/**
 * Validates and normalizes an ACL list in place: expands the special "auth"
 * scheme into one entry per authenticated id of the requestor, and verifies all
 * other ids against their registered AuthenticationProvider.
 *
 * @param authInfo the authenticated ids of the requestor
 * @param acl      the ACL list to fix up (mutated: "auth" entries removed,
 *                 expanded entries appended)
 * @return true if the resulting ACL is valid and non-empty (always true when
 *         skipACL is set), false otherwise
 */
private boolean fixupACL(List<Id> authInfo, List<ACL> acl) {
    if (skipACL) {
        return true;
    }
    if (acl == null || acl.size() == 0) {
        return false;
    }
    Iterator<ACL> it = acl.iterator();
    // Expanded "auth" entries are collected here and appended after iteration
    // so the list is not modified while the iterator is live.
    LinkedList<ACL> toAdd = null;
    while (it.hasNext()) {
        ACL a = it.next();
        Id id = a.getId();
        if (id.getScheme().equals("world") && id.getId().equals("anyone")) {
            // wide open
        } else if (id.getScheme().equals("auth")) {
            // This is the "auth" id, so we have to expand it to the
            // authenticated ids of the requestor
            it.remove();
            if (toAdd == null) {
                toAdd = new LinkedList<ACL>();
            }
            boolean authIdValid = false;
            for (Id cid : authInfo) {
                AuthenticationProvider ap = ProviderRegistry
                        .getProvider(cid.getScheme());
                if (ap == null) {
                    LOG.error("Missing AuthenticationProvider for "
                            + cid.getScheme());
                } else if (ap.isAuthenticated()) {
                    // Grant the original entry's permissions to this id.
                    authIdValid = true;
                    toAdd.add(new ACL(a.getPerms(), cid));
                }
            }
            // "auth" with no usable authenticated id invalidates the whole ACL.
            if (!authIdValid) {
                return false;
            }
        } else {
            // Ordinary scheme: the provider must exist and accept the id.
            AuthenticationProvider ap = ProviderRegistry.getProvider(id
                    .getScheme());
            if (ap == null) {
                return false;
            }
            if (!ap.isValid(id.getId())) {
                return false;
            }
        }
    }
    if (toAdd != null) {
        for (ACL a : toAdd) {
            acl.add(a);
        }
    }
    return acl.size() > 0;
}
java
/**
 * Runs the authentication attempt exactly once; a second call on the same
 * request object throws. Any exception from the implementation is captured in
 * m_authenticationFailure and reported as a failed (false) authentication.
 */
public boolean authenticate(ClientAuthScheme scheme, String fromAddress) {
    if (m_done) {
        throw new IllegalStateException("this authentication request has a result");
    }
    try {
        return authenticateImpl(scheme, fromAddress);
    } catch (Exception ex) {
        // Remember why it failed for later inspection.
        m_authenticationFailure = ex;
        return false;
    } finally {
        m_done = true;
    }
}
java
public long run( String symbol, TimestampType time, long seq_number, String exchange, int bidPrice, int bidSize, int askPrice, int askSize) throws VoltAbortException { // convert bid and ask 0 values to null Integer bidPriceSafe = askPrice > 0 ? askPrice : null; Integer askPriceSafe = askPrice > 0 ? askPrice : null; voltQueueSQL(insertTick, symbol, time, seq_number, exchange, bidPriceSafe, bidSize, askPriceSafe, askSize); voltQueueSQL(upsertLastTick, symbol, time, seq_number, exchange, bidPrice, bidSize, askPrice, askSize); // Queue best bid and ask selects voltQueueSQL(selectMaxBid, symbol); voltQueueSQL(selectMinAsk, symbol); // Execute queued statements VoltTable results0[] = voltExecuteSQL(); // Read the best bid results VoltTable tb = results0[2]; tb.advanceRow(); String bex = tb.getString(0); long bid = tb.getLong(1); long bsize = tb.getLong(2); // Read the best ask results VoltTable ta = results0[3]; ta.advanceRow(); String aex = ta.getString(0); long ask = ta.getLong(1); long asize = ta.getLong(2); // check if the tick is part of the nbbo if (bex.equals(exchange) || aex.equals(exchange)) { // this new quote was the best bid or ask // insert a new NBBO record // use this quote's symbol, time and sequence number voltQueueSQL(insertNBBO, symbol, time, seq_number, bid, bsize, bex, ask, asize, aex); voltExecuteSQL(true); } return ClientResponse.SUCCESS; }
java
/** Returns a new, empty builder whose keys use their natural ordering. */
public static <K extends Comparable<?>, V> Builder<K, V> builder() {
    return new Builder<>();
}
java
void setLeaderState(boolean isLeader) { m_isLeader = isLeader; // The leader doesn't truncate its own SP log; if promoted, // wipe out the SP portion of the existing log. This promotion // action always happens after repair is completed. if (m_isLeader) { if (!m_logSP.isEmpty()) { truncate(m_logSP.getLast().getHandle(), IS_SP); } } }
java
/**
 * Feeds a message into the repair log. SP initiate tasks are logged only on
 * non-leaders; MP fragments are logged once per transaction; complete-transaction
 * messages advance the MP handle; dump/dummy/truncation messages update
 * bookkeeping only. Read-only work is never logged — it cannot be repaired.
 */
public void deliver(VoltMessage msg) {
    if (!m_isLeader && msg instanceof Iv2InitiateTaskMessage) {
        final Iv2InitiateTaskMessage m = (Iv2InitiateTaskMessage) msg;
        // We can't repair read only SP transactions. Just don't log them to the repair log.
        if (m.isReadOnly()) {
            return;
        }
        m_lastSpHandle = m.getSpHandle();
        truncate(m.getTruncationHandle(), IS_SP);
        // Can't repair MigratePartitionLeader
        if ("@MigratePartitionLeader".equalsIgnoreCase(m.getStoredProcedureName())) {
            return;
        }
        m_logSP.add(new Item(IS_SP, m, m.getSpHandle(), m.getTxnId()));
    } else if (msg instanceof FragmentTaskMessage) {
        boolean newMp = false;
        final FragmentTaskMessage m = (FragmentTaskMessage) msg;
        // A higher txnId (or wrapped MAX_VALUE sentinel) marks the first
        // fragment of a new MP transaction.
        if (m.getTxnId() > m_lastMpHandle || m_lastMpHandle == Long.MAX_VALUE) {
            m_lastMpHandle = m.getTxnId();
            newMp = true;
        }
        // We can't repair read only MP transactions. Just don't log them to the repair log.
        if (m.isReadOnly()) {
            return;
        }
        truncate(m.getTruncationHandle(), IS_MP);
        // only log the first fragment of a procedure (and handle 1st case)
        if (newMp) {
            m_logMP.add(new Item(IS_MP, m, m.getSpHandle(), m.getTxnId()));
            m_lastSpHandle = m.getSpHandle();
        }
    } else if (msg instanceof CompleteTransactionMessage) {
        // a CompleteTransactionMessage which indicates restart is not the end of the
        // transaction. We don't want to log it in the repair log.
        CompleteTransactionMessage ctm = (CompleteTransactionMessage)msg;
        //Restore will send a complete transaction message with a lower mp transaction id because
        //the restore transaction precedes the loading of the right mp transaction id from the snapshot
        //Hence Math.max
        m_lastMpHandle = Math.max(m_lastMpHandle, ctm.getTxnId());
        // We can't repair read only MP transactions. Just don't log them to the repair log.
        // Restart transaction do not need to be repaired here, don't log them as well.
        if (ctm.isReadOnly() || ctm.isRestart() || ctm.isAbortDuringRepair()) {
            return;
        }
        truncate(ctm.getTruncationHandle(), IS_MP);
        m_logMP.add(new Item(IS_MP, ctm, ctm.getSpHandle(), ctm.getTxnId()));
        m_lastSpHandle = ctm.getSpHandle();
    } else if (msg instanceof DumpMessage) {
        // Diagnostic dump of the current repair log state.
        String who = CoreUtils.hsIdToString(m_HSId);
        repairLogger.warn("Repair log dump for site: " + who + ", isLeader: " + m_isLeader
                + ", " + who + ": lastSpHandle: " + m_lastSpHandle + ", lastMpHandle: " + m_lastMpHandle);
        for (Iv2RepairLogResponseMessage il : contents(0l, false)) {
            repairLogger.warn("[Repair log contents]" + who + ": msg: " + il);
        }
    } else if (msg instanceof DummyTransactionTaskMessage) {
        m_lastSpHandle = Math.max(m_lastSpHandle, ((DummyTransactionTaskMessage) msg).getSpHandle());
    } else if (msg instanceof RepairLogTruncationMessage) {
        final RepairLogTruncationMessage truncateMsg = (RepairLogTruncationMessage) msg;
        truncate(truncateMsg.getHandle(), IS_SP);
    }
}
java
private void truncate(long handle, boolean isSP) { // MIN value means no work to do, is a startup condition if (handle == Long.MIN_VALUE) { return; } Deque<RepairLog.Item> deq = null; if (isSP) { deq = m_logSP; if (m_truncationHandle < handle) { m_truncationHandle = handle; notifyTxnCommitInterests(handle); } } else { deq = m_logMP; } RepairLog.Item item = null; while ((item = deq.peek()) != null) { if (item.canTruncate(handle)) { deq.poll(); } else { break; } } }
java
public List<Iv2RepairLogResponseMessage> contents(long requestId, boolean forMPI) { List<Item> items = new LinkedList<Item>(); // All cases include the log of MP transactions items.addAll(m_logMP); // SP repair requests also want the SP transactions if (!forMPI) { items.addAll(m_logSP); } // Contents need to be sorted in increasing spHandle order Collections.sort(items, m_handleComparator); int ofTotal = items.size() + 1; if (repairLogger.isDebugEnabled()) { repairLogger.debug("Responding with " + ofTotal + " repair log parts."); } List<Iv2RepairLogResponseMessage> responses = new LinkedList<Iv2RepairLogResponseMessage>(); // this constructor sets its sequence no to 0 as ack // messages are first in the sequence Iv2RepairLogResponseMessage hheader = new Iv2RepairLogResponseMessage( requestId, ofTotal, m_lastSpHandle, m_lastMpHandle, TheHashinator.getCurrentVersionedConfigCooked()); responses.add(hheader); int seq = responses.size(); // = 1, as the first sequence Iterator<Item> itemator = items.iterator(); while (itemator.hasNext()) { Item item = itemator.next(); Iv2RepairLogResponseMessage response = new Iv2RepairLogResponseMessage( requestId, seq++, ofTotal, item.getHandle(), item.getTxnId(), item.getMessage()); responses.add(response); } return responses; }
java
/**
 * Records the end of a procedure invocation: always bumps the abort/failure/
 * invocation counters, and — only for sampled invocations — updates timing,
 * result-size and parameter-size statistics, then forwards per-statement stats
 * to endFragment.
 */
public final synchronized void endProcedure(boolean aborted, boolean failed, SingleCallStatsToken statsToken) {
    if (aborted) {
        m_procStatsData.m_abortCount++;
    }
    if (failed) {
        m_procStatsData.m_failureCount++;
    }
    m_procStatsData.m_invocations++;
    // this means additional stats were not recorded
    if (!statsToken.samplingProcedure()) {
        return;
    }
    // This is a sampled invocation.
    // Update timings and size statistics.
    final long endTime = System.nanoTime();
    final long duration = endTime - statsToken.startTimeNanos;
    if (duration < 0) {
        // Clock anomaly: skip the sample, but log if it is grossly negative.
        if (Math.abs(duration) > 1000000000) {
            log.info("Procedure: " + m_procName + " recorded a negative execution time larger than one second: " + duration);
        }
        return;
    }
    m_procStatsData.m_timedInvocations++;
    // sampled timings (both lifetime and incremental min/max are tracked)
    m_procStatsData.m_totalTimedExecutionTime += duration;
    m_procStatsData.m_minExecutionTime = Math.min(duration, m_procStatsData.m_minExecutionTime);
    m_procStatsData.m_maxExecutionTime = Math.max(duration, m_procStatsData.m_maxExecutionTime);
    m_procStatsData.m_incrMinExecutionTime = Math.min(duration, m_procStatsData.m_incrMinExecutionTime);
    m_procStatsData.m_incrMaxExecutionTime = Math.max(duration, m_procStatsData.m_incrMaxExecutionTime);
    // sampled result sizes
    m_procStatsData.m_totalResultSize += statsToken.resultSize;
    m_procStatsData.m_minResultSize = Math.min(statsToken.resultSize, m_procStatsData.m_minResultSize);
    m_procStatsData.m_maxResultSize = Math.max(statsToken.resultSize, m_procStatsData.m_maxResultSize);
    m_procStatsData.m_incrMinResultSize = Math.min(statsToken.resultSize, m_procStatsData.m_incrMinResultSize);
    m_procStatsData.m_incrMaxResultSize = Math.max(statsToken.resultSize, m_procStatsData.m_incrMaxResultSize);
    // sampled parameter-set sizes
    m_procStatsData.m_totalParameterSetSize += statsToken.parameterSetSize;
    m_procStatsData.m_minParameterSetSize = Math.min(statsToken.parameterSetSize, m_procStatsData.m_minParameterSetSize);
    m_procStatsData.m_maxParameterSetSize = Math.max(statsToken.parameterSetSize, m_procStatsData.m_maxParameterSetSize);
    m_procStatsData.m_incrMinParameterSetSize = Math.min(statsToken.parameterSetSize, m_procStatsData.m_incrMinParameterSetSize);
    m_procStatsData.m_incrMaxParameterSetSize = Math.max(statsToken.parameterSetSize, m_procStatsData.m_incrMaxParameterSetSize);
    // stop here if no statements
    if (statsToken.stmtStats == null) {
        return;
    }
    // Forward each statement's measurements (zeros when not measured).
    for (SingleCallStatsToken.PerStmtStats pss : statsToken.stmtStats) {
        long stmtDuration = 0;
        int stmtResultSize = 0;
        int stmtParameterSetSize = 0;
        if (pss.measurements != null) {
            stmtDuration = pss.measurements.stmtDuration;
            stmtResultSize = pss.measurements.stmtResultSize;
            stmtParameterSetSize = pss.measurements.stmtParameterSetSize;
        }
        endFragment(pss.stmtName, pss.isCoordinatorTask, pss.stmtFailed, pss.measurements != null, stmtDuration, stmtResultSize, stmtParameterSetSize);
    }
}
java
/**
 * Records the end of a single statement (fragment) execution under its
 * procedure's per-statement stats: failure/invocation counters always, timing
 * and size statistics only when the statement was sampled. Unknown statement
 * names are ignored.
 */
public final synchronized void endFragment(String stmtName, boolean isCoordinatorTask, boolean failed, boolean sampledStmt, long duration, int resultSize, int parameterSetSize) {
    if (stmtName == null) {
        return;
    }
    StatementStats stmtStats = m_stmtStatsMap.get(stmtName);
    if (stmtStats == null) {
        return;
    }
    // Coordinator and worker executions are tracked separately.
    StatsData dataToUpdate = isCoordinatorTask ? stmtStats.m_coordinatorTask : stmtStats.m_workerTask;
    // m_failureCount and m_invocations need to be updated even if the current invocation is not sampled.
    if (failed) {
        dataToUpdate.m_failureCount++;
    }
    dataToUpdate.m_invocations++;
    // If the current invocation is not sampled, we can stop now.
    // Notice that this function can be called by a FragmentTask from a multi-partition procedure.
    // Cannot use the isRecording() value here because SP sites can have values different from the MP Site.
    if (!sampledStmt) {
        return;
    }
    // This is a sampled invocation.
    // Update timings and size statistics below.
    if (duration < 0) {
        // Clock anomaly: skip the sample, but log if it is grossly negative.
        if (Math.abs(duration) > 1000000000) {
            log.info("Statement: " + stmtStats.m_stmtName + " in procedure: " + m_procName + " recorded a negative execution time larger than one second: " + duration);
        }
        return;
    }
    dataToUpdate.m_timedInvocations++;
    // sampled timings (lifetime and incremental min/max)
    dataToUpdate.m_totalTimedExecutionTime += duration;
    dataToUpdate.m_minExecutionTime = Math.min(duration, dataToUpdate.m_minExecutionTime);
    dataToUpdate.m_maxExecutionTime = Math.max(duration, dataToUpdate.m_maxExecutionTime);
    dataToUpdate.m_incrMinExecutionTime = Math.min(duration, dataToUpdate.m_incrMinExecutionTime);
    dataToUpdate.m_incrMaxExecutionTime = Math.max(duration, dataToUpdate.m_incrMaxExecutionTime);
    // sampled size statistics
    dataToUpdate.m_totalResultSize += resultSize;
    dataToUpdate.m_minResultSize = Math.min(resultSize, dataToUpdate.m_minResultSize);
    dataToUpdate.m_maxResultSize = Math.max(resultSize, dataToUpdate.m_maxResultSize);
    dataToUpdate.m_incrMinResultSize = Math.min(resultSize, dataToUpdate.m_incrMinResultSize);
    dataToUpdate.m_incrMaxResultSize = Math.max(resultSize, dataToUpdate.m_incrMaxResultSize);
    dataToUpdate.m_totalParameterSetSize += parameterSetSize;
    dataToUpdate.m_minParameterSetSize = Math.min(parameterSetSize, dataToUpdate.m_minParameterSetSize);
    dataToUpdate.m_maxParameterSetSize = Math.max(parameterSetSize, dataToUpdate.m_maxParameterSetSize);
    dataToUpdate.m_incrMinParameterSetSize = Math.min(parameterSetSize, dataToUpdate.m_incrMinParameterSetSize);
    dataToUpdate.m_incrMaxParameterSetSize = Math.max(parameterSetSize, dataToUpdate.m_incrMaxParameterSetSize);
}
java
/**
 * Creates, registers and returns a new session with the next session id.
 * For log-replay sessions (forLog) the two !forLog constructor flags are
 * cleared and the session is marked as processing the log.
 */
public synchronized Session newSession(Database db, User user, boolean readonly, boolean forLog, int timeZoneSeconds) {
    final Session session =
            new Session(db, user, !forLog, !forLog, readonly, sessionIdCount, timeZoneSeconds);
    session.isProcessingLog = forLog;
    sessionMap.put(sessionIdCount, session);
    sessionIdCount++;
    return session;
}
java
/**
 * Creates a SYS-user session (id 0) for script processing. Note it is not
 * registered in the session map.
 */
public Session getSysSessionForScript(Database db) {
    final Session sysSession =
            new Session(db, db.getUserManager().getSysUser(), false, false, false, 0, 0);
    sysSession.isProcessingScript = true;
    return sysSession;
}
java
public synchronized void closeAllSessions() { // don't disconnect system user; need it to save database Session[] sessions = getAllSessions(); for (int i = 0; i < sessions.length; i++) { sessions[i].close(); } }
java
/**
 * Returns an instance carrying the given default port, or this instance
 * unchanged when a port is already present (or equals the default).
 */
public HostAndPort withDefaultPort(int defaultPort) {
    checkArgument(isValidPort(defaultPort));
    if (!hasPort() && port != defaultPort) {
        return new HostAndPort(host, defaultPort, hasBracketlessColons);
    }
    return this;
}
java
/**
 * Writes (or finalizes) the catalog jar at path/name according to the
 * write mode.
 *
 * @return a Runnable performing the deferred write for the modes that
 *         defer it, or null when the work is already done
 * @throws IOException on jar-write failure
 */
public Runnable writeCatalogJarToFile(String path, String name, CatalogJarWriteMode mode)
        throws IOException {
    File catalogFile = new VoltFile(path, name);
    File catalogTmpFile = new VoltFile(path, name + ".tmp");

    if (mode == CatalogJarWriteMode.CATALOG_UPDATE) {
        // This means a @UpdateCore case, the asynchronous writing of
        // jar file has finished, rename the jar file
        catalogFile.delete();
        catalogTmpFile.renameTo(catalogFile);
        return null;
    }

    if (mode == CatalogJarWriteMode.START_OR_RESTART) {
        // This happens at cluster start or restart — presumably when the
        // catalog jar does not yet exist (original comment was truncated;
        // confirm). The contents written might be a default one and could
        // be overwritten later by @UAC, @UpdateClasses, etc.
        return m_catalogInfo.m_jarfile.writeToFile(catalogFile);
    }

    if (mode == CatalogJarWriteMode.RECOVER) {
        // we must overwrite the file (the file may have been changed)
        catalogFile.delete();
        if (catalogTmpFile.exists()) {
            // If somehow the catalog temp jar is not cleaned up, then delete it
            catalogTmpFile.delete();
        }
        return m_catalogInfo.m_jarfile.writeToFile(catalogFile);
    }

    VoltDB.crashLocalVoltDB("Unsupported mode to write catalog jar", true, null);
    return null;
}
java
/**
 * Loads the class for a procedure or user-defined function by name using
 * the class loader of the current catalog jar.
 *
 * @throws ClassNotFoundException if the class is not present in the catalog jar
 */
public Class<?> classForProcedureOrUDF(String procedureClassName)
        throws LinkageError, ExceptionInInitializerError, ClassNotFoundException {
    return classForProcedureOrUDF(procedureClassName, m_catalogInfo.m_jarfile.getLoader());
}
java
public DeploymentType getDeployment() { if (m_memoizedDeployment == null) { m_memoizedDeployment = CatalogUtil.getDeployment( new ByteArrayInputStream(m_catalogInfo.m_deploymentBytes)); // This should NEVER happen if (m_memoizedDeployment == null) { VoltDB.crashLocalVoltDB("The internal deployment bytes are invalid. This should never occur; please contact VoltDB support with your logfiles."); } } return m_memoizedDeployment; }
java
/**
 * Unlinks the node following {@code node} from the list.
 *
 * @return true if a node was removed, false when node is null or the tail
 */
public boolean removeAfter(Node node) {
    if (node == null) {
        return false;
    }
    Node victim = node.next;
    if (victim == null) {
        return false; // node is already the tail
    }
    if (victim == last) {
        last = node; // removing the tail: back the tail pointer up
    }
    node.next = victim.next;
    return true;
}
java
/**
 * Parses the optional clauses of a CREATE PROCEDURE statement.
 * ALLOW clauses append deduplicated, lower-cased role names to the
 * descriptor; at most one PARTITION clause supplies partitioning data.
 *
 * @param descriptor descriptor whose m_authGroups list is appended to
 * @param clauses    raw clause text; may be null or empty
 * @return partition data from the PARTITION clause, or null if none
 * @throws VoltCompilerException if more than one PARTITION clause appears
 */
protected ProcedurePartitionData parseCreateProcedureClauses(
        ProcedureDescriptor descriptor, String clauses) throws VoltCompilerException {
    // Nothing to do if there were no clauses.
    // Null means there's no partition data to return.
    // There's also no roles to add.
    if (clauses == null || clauses.isEmpty()) {
        return null;
    }
    ProcedurePartitionData data = null;
    Matcher matcher = SQLParser.matchAnyCreateProcedureStatementClause(clauses);
    int start = 0;
    while (matcher.find(start)) {
        start = matcher.end();

        if (matcher.group(1) != null) {
            // Add roles if it's an ALLOW clause. More that one ALLOW clause is okay.
            for (String roleName : StringUtils.split(matcher.group(1), ',')) {
                // Don't put the same role in the list more than once.
                String roleNameFixed = roleName.trim().toLowerCase();
                if (!descriptor.m_authGroups.contains(roleNameFixed)) {
                    descriptor.m_authGroups.add(roleNameFixed);
                }
            }
        }
        else {
            // Add partition info if it's a PARTITION clause. Only one is allowed.
            if (data != null) {
                throw m_compiler.new VoltCompilerException(
                        "Only one PARTITION clause is allowed for CREATE PROCEDURE.");
            }
            // Groups 2-7 carry the PARTITION clause fields as defined by
            // SQLParser's pattern — confirm against its group numbering.
            data = new ProcedurePartitionData(matcher.group(2), matcher.group(3),
                    matcher.group(4), matcher.group(5), matcher.group(6), matcher.group(7));
        }
    }
    return data;
}
java
/**
 * Runs the interactive sqlcmd read-eval loop on stdin/stdout.
 * Maintains persistent command history in ~/.sqlcmd_history and binds
 * Ctrl-D to "exit on empty line, else delete next character".
 *
 * @throws Exception propagated from the query loop (including the
 *         SQLCmdEarlyExitException used to signal a clean exit)
 */
public static void interactWithTheUser() throws Exception {
    final SQLConsoleReader interactiveReader =
            new SQLConsoleReader(new FileInputStream(FileDescriptor.in), System.out);
    interactiveReader.setBellEnabled(false);
    FileHistory historyFile = null;
    try {
        // Maintain persistent history in ~/.sqlcmd_history.
        historyFile = new FileHistory(new File(System.getProperty("user.home"), ".sqlcmd_history"));
        interactiveReader.setHistory(historyFile);

        // Make Ctrl-D (EOF) exit if on an empty line, otherwise delete the next character.
        KeyMap keyMap = interactiveReader.getKeys();
        keyMap.bind(new Character(KeyMap.CTRL_D).toString(), new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent e) {
                CursorBuffer cursorBuffer = interactiveReader.getCursorBuffer();
                if (cursorBuffer.length() == 0) {
                    // tells caller to stop (basically a goto)
                    throw new SQLCmdEarlyExitException();
                }
                else {
                    try {
                        interactiveReader.delete();
                    }
                    catch (IOException e1) {
                        // Best effort: a failed single-character delete is ignored.
                    }
                }
            }
        });
        getInteractiveQueries(interactiveReader);
    }
    finally {
        // Flush input history to a file.
        if (historyFile != null) {
            try {
                historyFile.flush();
            }
            catch (IOException e) {
                System.err.printf("* Unable to write history to \"%s\" *\n",
                        historyFile.getFile().getPath());
                if (m_debug) {
                    e.printStackTrace();
                }
            }
        }
        // Clean up jline2 resources.
        if (interactiveReader != null) {
            interactiveReader.shutdown();
        }
    }
}
java
/**
 * Executes the script files named on a FILE command.
 * INLINEBATCH reuses the parent reader (here-document); BATCH accumulates
 * the contents of all files and executes them as one string after the
 * last file; otherwise each file is executed independently.
 *
 * @param filesInfo        files (with options) to execute, in order
 * @param parentLineReader reader of the enclosing input, used for inline batches
 * @param callback         DDL parser callback; non-null suppresses console echo
 * @throws IOException on read failure
 */
static void executeScriptFiles(List<FileInfo> filesInfo, SQLCommandLineReader parentLineReader,
        DDLParserCallback callback) throws IOException {
    LineReaderAdapter adapter = null;
    SQLCommandLineReader reader = null;
    StringBuilder statements = new StringBuilder();

    if ( ! m_interactive && callback == null) {
        // We have to check for the callback to avoid spewing to System.out in the "init --classes" filtering codepath.
        // Better logging/output handling in general would be nice to have here -- output on System.out will be consumed
        // by the test generators (build_eemakefield) and cause build failures.
        System.out.println();
        StringBuilder commandString = new StringBuilder();
        commandString.append(filesInfo.get(0).toString());
        for (int ii = 1; ii < filesInfo.size(); ii++) {
            commandString.append(" " + filesInfo.get(ii).getFile().toString());
        }
        System.out.println(commandString.toString());
    }

    for (int ii = 0; ii < filesInfo.size(); ii++) {
        FileInfo fileInfo = filesInfo.get(ii);
        adapter = null;
        reader = null;
        if (fileInfo.getOption() == FileOption.INLINEBATCH) {
            // File command is a "here document" so pass in the current
            // input stream.
            reader = parentLineReader;
        }
        else {
            try {
                reader = adapter = new LineReaderAdapter(new FileReader(fileInfo.getFile()));
            }
            catch (FileNotFoundException e) {
                System.err.println("Script file '" + fileInfo.getFile() + "' could not be found.");
                stopOrContinue(e);
                return; // continue to the next line after the FILE command
            }
            // if it is a batch option, get all contents from all the files and send it as a string
            if (fileInfo.getOption() == FileOption.BATCH) {
                String line;
                // use the current reader we obtained to read from the file
                // and append to existing statements
                while ((line = reader.readBatchLine()) != null) {
                    statements.append(line).append("\n");
                }
                // set reader to null since we finish reading from the file
                reader = null;
                // if it is the last file, create a reader to read from the string of all files contents
                if ( ii == filesInfo.size() - 1 ) {
                    String allStatements = statements.toString();
                    byte[] bytes = allStatements.getBytes("UTF-8");
                    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
                    // reader LineReaderAdapter needs an input stream reader
                    reader = adapter = new LineReaderAdapter(new InputStreamReader( bais ) );
                }
                // NOTE - fileInfo has the last file info for batch with multiple files
            }
        }
        try {
            // reader is null for all but the last file of a multi-file batch,
            // so intermediate batch files execute nothing here.
            executeScriptFromReader(fileInfo, reader, callback);
        }
        catch (SQLCmdEarlyExitException e) {
            throw e;
        }
        catch (Exception x) {
            stopOrContinue(x);
        }
        finally {
            if (adapter != null) {
                adapter.close();
            }
        }
    }
}
java
/**
 * Prints a specific error message followed by the general usage text and
 * marks the process exit code as failed.
 */
private static void printUsage(String msg) {
    m_exitCode = -1;          // mark failure for the eventual exit status
    System.out.print(msg);    // the specific complaint
    System.out.println("\n"); // blank separator before the generic usage
    printUsage();
}
java
static void printHelp(OutputStream prtStr) { try { InputStream is = SQLCommand.class.getResourceAsStream(m_readme); while (is.available() > 0) { byte[] bytes = new byte[is.available()]; // Fix for ENG-3440 is.read(bytes, 0, bytes.length); prtStr.write(bytes); // For JUnit test } } catch (Exception x) { System.err.println(x.getMessage()); m_exitCode = -1; return; } }
java
/**
 * Process entry point: disables VoltDB logging before anything can
 * initialize a logger, then exits with the real main's return code.
 */
public static void main(String args[]) {
    System.setProperty("voltdb_no_logging", "true");
    System.exit(mainWithReturnCode(args));
}
java
/**
 * Checks whether the oldest outstanding snapshot write has exceeded the
 * timeout; if so, records a StreamSnapshotTimeoutException as the first
 * write failure (later failures do not overwrite it).
 */
private synchronized void checkTimeout(final long timeoutMs) {
    final Entry<Integer, SendWork> oldest = m_outstandingWork.firstEntry();
    if (oldest == null) {
        return; // nothing outstanding, nothing can time out
    }
    final SendWork work = oldest.getValue();
    final long ageMs = System.currentTimeMillis() - work.m_ts;
    if (ageMs <= timeoutMs) {
        return;
    }
    final StreamSnapshotTimeoutException exception =
            new StreamSnapshotTimeoutException(String.format(
                    "A snapshot write task failed after a timeout (currently %d seconds outstanding). "
                            + "Node rejoin may need to be retried",
                    ageMs / 1000));
    rejoinLog.error(exception.getMessage());
    // compareAndSet keeps the first failure as the reported cause.
    m_writeFailed.compareAndSet(null, exception);
}
java
/**
 * Discards all outstanding snapshot send work and resets the counters.
 * No-op when both the map and the counter are already empty.
 */
synchronized void clearOutstanding() {
    final boolean nothingPending =
            m_outstandingWork.isEmpty() && m_outstandingWorkCount.get() == 0;
    if (nothingPending) {
        return;
    }
    rejoinLog.trace("Clearing outstanding work.");
    for (SendWork work : m_outstandingWork.values()) {
        work.discard();
    }
    m_outstandingWork.clear();
    m_outstandingWorkCount.set(0);
}
java
/**
 * Handles an ack for a previously sent snapshot block. Blocks may expect
 * multiple acks (multi-destination); the block is only removed and its
 * buffers released once the final ack arrives.
 *
 * @param blockIndex index of the acknowledged block
 */
@Override
public synchronized void receiveAck(int blockIndex) {
    SendWork work = m_outstandingWork.get(blockIndex);

    // releases the BBContainers and cleans up
    if (work == null || work.m_ackCounter == null) {
        // Stray ack: either the block was already fully acked and removed,
        // or its ack counter was never initialized. Log and ignore.
        rejoinLog.warn("Received invalid blockIndex ack for targetId " + m_targetId
                + " for index " + String.valueOf(blockIndex)
                + ((work == null) ? " already removed the block."
                                  : " ack counter haven't been initialized."));
        return;
    }
    if (work.receiveAck()) {
        // Final expected ack: remove the block and release its buffers.
        rejoinLog.trace("Received ack for targetId " + m_targetId
                + " removes block for index " + String.valueOf(blockIndex));
        m_outstandingWorkCount.decrementAndGet();
        m_outstandingWork.remove(blockIndex);
        work.discard();
    }
    else {
        rejoinLog.trace("Received ack for targetId " + m_targetId
                + " decrements counter for block index " + String.valueOf(blockIndex));
    }
}
java
/**
 * Queues one snapshot block for sending and tracks it as outstanding
 * until acked. Replicated-table blocks also list the extra destination
 * hosts.
 *
 * @return a future completed when the send finishes
 */
synchronized ListenableFuture<Boolean> send(StreamSnapshotMessageType type, int blockIndex,
        BBContainer chunk, boolean replicatedTable) {
    final SettableFuture<Boolean> sendFuture = SettableFuture.create();
    final String tableKind = replicatedTable ? "REPLICATED " : "PARTITIONED ";
    final String extraDests = replicatedTable
            ? ", " + CoreUtils.hsIdCollectionToString(m_otherDestHostHSIds)
            : "";
    rejoinLog.trace("Sending block " + blockIndex + " of type " + tableKind + type.name()
            + " from targetId " + m_targetId + " to " + CoreUtils.hsIdToString(m_destHSId)
            + extraDests);
    final SendWork sendWork = new SendWork(type, m_targetId, m_destHSId,
            replicatedTable ? m_otherDestHostHSIds : null, chunk, sendFuture);
    m_outstandingWork.put(blockIndex, sendWork);
    m_outstandingWorkCount.incrementAndGet();
    m_sender.offer(sendWork);
    return sendFuture;
}
java
/**
 * Returns the schema text with the first occurrence of each inline-batch
 * marker (comments, start, end) removed, in that fixed order.
 */
public static String toSchemaWithoutInlineBatches(String schema) {
    final StringBuilder sb = new StringBuilder(schema);
    for (String marker : new String[] { batchSpecificComments, startBatch, endBatch }) {
        final int at = sb.indexOf(marker);
        if (at != -1) {
            sb.delete(at, at + marker.length());
        }
    }
    return sb.toString();
}
java
/**
 * Shuts down the background machinery in dependency order: the proc-call
 * timeout reaper, the executor, the network, then the cipher service.
 *
 * @throws InterruptedException if interrupted while awaiting executor termination
 */
final void shutdown() throws InterruptedException {
    // stop the old proc call reaper
    m_timeoutReaperHandle.cancel(false);
    m_ex.shutdown();
    if (CoreUtils.isJunitTest()) {
        // Keep tests from hanging on a stuck executor.
        m_ex.awaitTermination(1, TimeUnit.SECONDS);
    }
    else {
        // Effectively "wait forever" in production.
        m_ex.awaitTermination(365, TimeUnit.DAYS);
    }
    m_network.shutdown();
    if (m_cipherService != null) {
        m_cipherService.shutdown();
        m_cipherService = null;
    }
}
java
/**
 * Maps a parameter value to its partition via the current hashinator.
 *
 * @return the partition id, or -1 when no hashinator is available yet
 */
public long getPartitionForParameter(byte typeValue, Object value) {
    return (m_hashinator == null)
            ? -1
            : m_hashinator.getHashedPartitionForParameter(typeValue, value);
}
java
/**
 * Fetches fresh partition keys from the cluster via @GetPartitionKeys.
 * For a topology update the call is fire-and-forget; otherwise it blocks
 * on a latch until the callback fires.
 *
 * @param topologyUpdate true when triggered by a topology change
 */
private void refreshPartitionKeys(boolean topologyUpdate) {
    // Rate-limit refreshes unless client affinity is enabled.
    long interval = System.currentTimeMillis() - m_lastPartitionKeyFetched.get();
    if (!m_useClientAffinity && interval < PARTITION_KEYS_INFO_REFRESH_FREQUENCY) {
        return;
    }
    try {
        // System calls draw from a dedicated, decrementing handle space.
        ProcedureInvocation invocation = new ProcedureInvocation(m_sysHandle.getAndDecrement(),
                "@GetPartitionKeys", "INTEGER");
        CountDownLatch latch = null;
        if (!topologyUpdate) {
            latch = new CountDownLatch(1);
        }
        PartitionUpdateCallback cb = new PartitionUpdateCallback(latch);
        if (!queue(invocation, cb, true, System.nanoTime(), USE_DEFAULT_CLIENT_TIMEOUT)) {
            m_partitionUpdateStatus.set(new ClientResponseImpl(ClientResponseImpl.SERVER_UNAVAILABLE,
                    new VoltTable[0], "Fails to queue the partition update query, please try later."));
        }
        if (!topologyUpdate) {
            // Synchronous path: wait for the callback to count down.
            latch.await();
        }
        m_lastPartitionKeyFetched.set(System.currentTimeMillis());
    }
    catch (InterruptedException | IOException e) {
        m_partitionUpdateStatus.set(new ClientResponseImpl(ClientResponseImpl.SERVER_UNAVAILABLE,
                new VoltTable[0], "Fails to fetch partition keys from server:" + e.getMessage()));
    }
}
java
/**
 * Adds each (expression, direction) pair in order. The two lists must be
 * the same length.
 */
public void addSortExpressions(List<AbstractExpression> sortExprs,
        List<SortDirectionType> sortDirs) {
    final int count = sortExprs.size();
    assert(count == sortDirs.size());
    for (int idx = 0; idx < count; idx++) {
        addSortExpression(sortExprs.get(idx), sortDirs.get(idx));
    }
}
java
public void addSortExpression(AbstractExpression sortExpr, SortDirectionType sortDir) { assert(sortExpr != null); // PlanNodes all need private deep copies of expressions // so that the resolveColumnIndexes results // don't get bashed by other nodes or subsequent planner runs m_sortExpressions.add(sortExpr.clone()); m_sortDirections.add(sortDir); }
java
/**
 * Maps the project's log Level to the corresponding java.util.logging
 * level. Returns null for unmapped levels.
 */
static java.util.logging.Level getPriorityForLevel(Level level) {
    switch (level) {
        case TRACE:
            return java.util.logging.Level.FINER;
        case DEBUG:
            return java.util.logging.Level.FINEST;
        case INFO:
            return java.util.logging.Level.INFO;
        case WARN:
            return java.util.logging.Level.WARNING;
        case ERROR:
        case FATAL:
            // Both map to SEVERE — JUL has no separate "fatal".
            return java.util.logging.Level.SEVERE;
        default:
            return null;
    }
}
java
/**
 * Validates that the given column can be added to the table, throwing the
 * appropriate error otherwise. Check order determines which error wins.
 */
void checkAddColumn(ColumnSchema col) {
    // TEXT tables must be empty before columns can be added.
    if (table.isText() && !table.isEmpty(session)) {
        throw Error.error(ErrorCode.X_S0521);
    }
    // Duplicate column name.
    if (table.findColumn(col.getName().name) != -1) {
        throw Error.error(ErrorCode.X_42504);
    }
    // Only one primary key per table.
    if (col.isPrimaryKey() && table.hasPrimaryKey()) {
        throw Error.error(ErrorCode.X_42530);
    }
    // Only one identity column per table.
    if (col.isIdentity() && table.hasIdentityColumn()) {
        throw Error.error(ErrorCode.X_42525);
    }
    // A non-nullable (or PK) non-identity column without a default cannot be
    // added to a non-empty table: existing rows would have no value for it.
    if (!table.isEmpty(session) && !col.hasDefault()
            && (!col.isNullable() || col.isPrimaryKey()) && !col.isIdentity()) {
        throw Error.error(ErrorCode.X_42531);
    }
}
java
/**
 * Rebuilds the table definition without the dropped constraints/indexes
 * and migrates the data across. When the index list did not actually
 * change, the speculative new store is released and nothing moves.
 */
void makeNewTable(OrderedHashSet dropConstraintSet, OrderedHashSet dropIndexSet) {
    Table tn = table.moveDefinition(session, table.tableType, null, null, null, -1, 0,
            dropConstraintSet, dropIndexSet);

    if (tn.indexList.length == table.indexList.length) {
        // Nothing was dropped: discard the new table's store.
        database.persistentStoreCollection.releaseStore(tn);
        return;
    }
    tn.moveData(session, table, -1, 0);
    database.persistentStoreCollection.releaseStore(table);
    table = tn;
}
java
/**
 * Adds an index on the given columns. For an empty table or one with
 * mutable indexing the index is created in place; otherwise the table
 * definition is rebuilt around the new index and the data moved.
 *
 * @return the newly created index
 */
Index addIndex(int[] col, HsqlName name, boolean unique, boolean migrating) {
    Index newindex;

    if (table.isEmpty(session) || table.isIndexingMutable()) {
        PersistentStore store = session.sessionData.getRowStore(table);

        newindex = table.createIndex(store, name, col, null, null, unique, migrating, false, false);
    }
    else {
        newindex = table.createIndexStructure(name, col, null, null, unique, migrating,
                false, false);

        Table tn = table.moveDefinition(session, table.tableType, null, null, newindex,
                -1, 0, emptySet, emptySet);

        // for all sessions move the data
        tn.moveData(session, table, -1, 0);
        database.persistentStoreCollection.releaseStore(table);

        table = tn;

        setNewTableInSchema(table);
        updateConstraints(table, emptySet);
    }
    database.schemaManager.addSchemaObject(newindex);
    database.schemaManager.recompileDependentObjects(table);
    return newindex;
}
java
/**
 * Drops the named index. Mutable-index tables drop in place; otherwise
 * the table definition is rebuilt without the index and the data moved.
 * Non-constraint indexes are also removed from the schema manager.
 */
void dropIndex(String indexName) {
    Index index;

    index = table.getIndex(indexName);
    if (table.isIndexingMutable()) {
        table.dropIndex(session, indexName);
    }
    else {
        OrderedHashSet indexSet = new OrderedHashSet();

        indexSet.add(table.getIndex(indexName).getName());

        Table tn = table.moveDefinition(session, table.tableType, null, null, null,
                -1, 0, emptySet, indexSet);

        tn.moveData(session, table, -1, 0);
        updateConstraints(tn, emptySet);
        setNewTableInSchema(tn);
        database.persistentStoreCollection.releaseStore(table);

        table = tn;
    }
    if (!index.isConstraint()) {
        // Constraint-backing indexes are owned by their constraint and stay
        // registered; only standalone indexes are removed here.
        database.schemaManager.removeSchemaObject(index.getName());
    }
    database.schemaManager.recompileDependentObjects(table);
}
java
/**
 * Changes the type (and related attributes) of an existing column.
 * Applies a cheap metadata-only update when the change doesn't require
 * converting stored data; otherwise falls through to the converting
 * retype path after reference checks.
 */
void retypeColumn(ColumnSchema oldCol, ColumnSchema newCol) {
    boolean allowed = true;
    int oldType = oldCol.getDataType().typeCode;
    int newType = newCol.getDataType().typeCode;

    if (!table.isEmpty(session) && oldType != newType) {
        allowed = newCol.getDataType().canConvertFrom(oldCol.getDataType());

        // LOB and opaque object types are never converted in place.
        switch (oldType) {
            case Types.SQL_BLOB :
            case Types.SQL_CLOB :
            case Types.OTHER :
            case Types.JAVA_OBJECT :
                allowed = false;
                break;
        }
    }
    if (!allowed) {
        throw Error.error(ErrorCode.X_42561);
    }

    int colIndex = table.getColumnIndex(oldCol.getName().name);

    // if there is a multi-column PK, do not change the PK attributes
    if (newCol.isIdentity() && table.hasIdentityColumn()
            && table.identityColumn != colIndex) {
        throw Error.error(ErrorCode.X_42525);
    }
    if (table.getPrimaryKey().length > 1) {
        // Multi-column PK: carry over the old column's PK flag unchanged.
        newCol.setPrimaryKey(oldCol.isPrimaryKey());
        if (ArrayUtil.find(table.getPrimaryKey(), colIndex) != -1) {
            // Intentionally empty — column is part of the multi-column PK;
            // no further PK adjustment is performed here.
        }
    }
    else if (table.hasPrimaryKey()) {
        if (oldCol.isPrimaryKey()) {
            newCol.setPrimaryKey(true);
        }
        else if (newCol.isPrimaryKey()) {
            throw Error.error(ErrorCode.X_42532);
        }
    }
    else if (newCol.isPrimaryKey()) {
        throw Error.error(ErrorCode.X_42530);
    }

    // apply and return if only metadata change is required
    boolean meta = newType == oldType;

    meta &= oldCol.isNullable() == newCol.isNullable();
    meta &= oldCol.getDataType().scale == newCol.getDataType().scale;
    meta &= (oldCol.isIdentity() == newCol.isIdentity());
    // Precision may grow in place only for variable-size types.
    meta &= (oldCol.getDataType().precision == newCol.getDataType().precision
             || (oldCol.getDataType().precision < newCol.getDataType().precision
                 && (oldType == Types.SQL_VARCHAR || oldType == Types.SQL_VARBINARY)));

    if (meta) {
        // size of some types may be increased with this command
        // default expressions can change
        oldCol.setType(newCol);
        oldCol.setDefaultExpression(newCol.getDefaultExpression());
        if (newCol.isIdentity()) {
            oldCol.setIdentity(newCol.getIdentitySequence());
        }
        table.setColumnTypeVars(colIndex);
        table.resetDefaultsFlag();
        return;
    }

    // Full retype: verify the column is not referenced elsewhere before
    // converting the stored data.
    database.schemaManager.checkColumnIsReferenced(table.getName(),
            table.getColumn(colIndex).getName());
    table.checkColumnInCheckConstraint(colIndex);
    table.checkColumnInFKConstraint(colIndex);
    checkConvertColDataType(oldCol, newCol);
    retypeColumn(newCol, colIndex);
}
java
/**
 * Sets or clears NOT NULL on a column. Clearing requires the column not
 * to be a PK and not referenced by an FK SET NULL action; setting adds an
 * auto-named NOT NULL check constraint.
 */
void setColNullability(ColumnSchema column, boolean nullable) {
    Constraint c = null;
    int colIndex = table.getColumnIndex(column.getName().name);

    if (column.isNullable() == nullable) {
        return; // no change requested
    }
    if (nullable) {
        if (column.isPrimaryKey()) {
            throw Error.error(ErrorCode.X_42526);
        }
        table.checkColumnInFKConstraint(colIndex, Constraint.SET_NULL);
        removeColumnNotNullConstraints(colIndex);
    }
    else {
        // NOT NULL is implemented as an auto-named check constraint.
        HsqlName constName = database.nameManager.newAutoName("CT", table.getSchemaName(),
                table.getName(), SchemaObject.CONSTRAINT);

        c = new Constraint(constName, true, null, Constraint.CHECK);
        c.check = new ExpressionLogical(column);
        c.prepareCheckConstraint(session, table, true);
        column.setNullable(false);
        table.addConstraint(c);
        table.setColumnTypeVars(colIndex);
        database.schemaManager.addSchemaObject(c);
    }
}
java
/**
 * Sets (or, when def is null, drops) the column's DEFAULT expression.
 * Dropping first verifies no FK uses SET DEFAULT on this column.
 */
void setColDefaultExpression(int colIndex, Expression def) {
    if (def == null) {
        table.checkColumnInFKConstraint(colIndex, Constraint.SET_DEFAULT);
    }
    table.setDefaultExpression(colIndex, def);
}
java
/**
 * Converts the table between CACHED and MEMORY storage types.
 *
 * @return true on success; false when the type is unchanged, unsupported,
 *         or the data move fails
 */
public boolean setTableType(Session session, int newType) {
    final int currentType = table.getTableType();
    if (currentType == newType) {
        return false;
    }
    // Only CACHED and MEMORY targets are supported.
    if (newType != TableBase.CACHED_TABLE && newType != TableBase.MEMORY_TABLE) {
        return false;
    }

    final Table converted;
    try {
        converted = table.moveDefinition(session, newType, null, null, null, -1, 0,
                emptySet, emptySet);
        converted.moveData(session, table, -1, 0);
        updateConstraints(converted, emptySet);
    }
    catch (HsqlException e) {
        // Conversion failed: leave the original table untouched.
        return false;
    }
    setNewTableInSchema(converted);
    database.persistentStoreCollection.releaseStore(table);
    table = converted;
    database.schemaManager.recompileDependentObjects(table);
    return true;
}
java
/**
 * Adds an expression (function-based) index, optionally partial via the
 * predicate. Mirrors addIndex(): created in place when the table is empty
 * or has mutable indexing, otherwise the table definition is rebuilt.
 *
 * @return the new index (with predicate attached when supplied)
 */
Index addExprIndex(int[] col, Expression[] indexExprs, HsqlName name, boolean unique,
                   boolean migrating, Expression predicate) {
    Index newindex;

    if (table.isEmpty(session) || table.isIndexingMutable()) {
        newindex = table.createAndAddExprIndexStructure(name, col, indexExprs, unique,
                migrating, false);
    }
    else {
        newindex = table.createIndexStructure(name, col, null, null, unique, migrating,
                false, false).withExpressions(indexExprs);

        Table tn = table.moveDefinition(session, table.tableType, null, null, newindex,
                -1, 0, emptySet, emptySet);

        // for all sessions move the data
        tn.moveData(session, table, -1, 0);
        database.persistentStoreCollection.releaseStore(table);

        table = tn;

        setNewTableInSchema(table);
        updateConstraints(table, emptySet);
    }
    database.schemaManager.addSchemaObject(newindex);
    database.schemaManager.recompileDependentObjects(table);
    if (predicate != null) {
        // Attach the partial-index predicate after registration.
        newindex = newindex.withPredicate(predicate);
    }
    return newindex;
}
java
/**
 * Adds a (possibly partial) index: builds the plain index first, then
 * attaches the predicate.
 */
Index addIndex(int[] col, HsqlName name, boolean unique, boolean migrating,
        Expression predicate) {
    final Index plainIndex = addIndex(col, name, unique, migrating);
    return plainIndex.withPredicate(predicate);
}
java
static public ParsedColInfo fromOrderByXml(AbstractParsedStmt parsedStmt, VoltXMLElement orderByXml) { // A generic adjuster that just calls finalizeValueTypes ExpressionAdjuster adjuster = new ExpressionAdjuster() { @Override public AbstractExpression adjust(AbstractExpression expr) { ExpressionUtil.finalizeValueTypes(expr); return expr; } }; return fromOrderByXml(parsedStmt, orderByXml, adjuster); }
java
/**
 * Builds a ParsedColInfo for one ORDER BY element from its VoltXML node.
 *
 * @param parsedStmt statement providing expression-parsing context
 * @param orderByXml the orderby element; must contain one child expression
 * @param adjuster   hook applied to the parsed expression before storing it
 * @return the populated ORDER BY column info
 */
static public ParsedColInfo fromOrderByXml(AbstractParsedStmt parsedStmt,
        VoltXMLElement orderByXml, ExpressionAdjuster adjuster) {
    // make sure everything is kosher
    assert(orderByXml.name.equalsIgnoreCase("orderby"));

    // get desc/asc
    String desc = orderByXml.attributes.get("desc");
    boolean descending = (desc != null) && (desc.equalsIgnoreCase("true"));

    // get the columnref or other expression inside the orderby node
    VoltXMLElement child = orderByXml.children.get(0);
    assert(child != null);

    // create the orderby column
    ParsedColInfo orderCol = new ParsedColInfo();
    orderCol.m_orderBy = true;
    orderCol.m_ascending = !descending;
    AbstractExpression orderExpr = parsedStmt.parseExpressionTree(child);
    assert(orderExpr != null);
    orderCol.m_expression = adjuster.adjust(orderExpr);

    // Cases:
    // child could be columnref, in which case it's either a normal column
    // or an expression.
    // The latter could be a case if this column came from a subquery that
    // was optimized out.
    // Just make a ParsedColInfo object for it and the planner will do the
    // right thing later.
    if (orderExpr instanceof TupleValueExpression) {
        TupleValueExpression tve = (TupleValueExpression) orderExpr;
        orderCol.m_columnName = tve.getColumnName();
        orderCol.m_tableName = tve.getTableName();
        orderCol.m_tableAlias = tve.getTableAlias();
        if (orderCol.m_tableAlias == null) {
            // No alias: fall back to the table name itself.
            orderCol.m_tableAlias = orderCol.m_tableName;
        }
        orderCol.m_alias = tve.getColumnAlias();
    }
    else {
        // Non-TVE expression: attribute it to the temp-table placeholder.
        String alias = child.attributes.get("alias");
        orderCol.m_alias = alias;
        orderCol.m_tableName = AbstractParsedStmt.TEMP_TABLE_NAME;
        orderCol.m_tableAlias = AbstractParsedStmt.TEMP_TABLE_NAME;
        orderCol.m_columnName = "";
        // Replace its expression to TVE after we build the ExpressionIndexMap
        if ((child.name.equals("operation") == false)
                && (child.name.equals("aggregation") == false)
                && (child.name.equals("win_aggregation") == false)
                && (child.name.equals("function") == false)
                && (child.name.equals("rank") == false)
                && (child.name.equals("value") == false)
                && (child.name.equals("columnref") == false)) {
            throw new RuntimeException(
                    "ORDER BY parsed with strange child node type: " + child.name);
        }
    }
    return orderCol;
}
java
/**
 * Converts this parsed column into a SchemaColumn, falling back to the
 * raw column name when no alias was provided.
 */
public SchemaColumn asSchemaColumn() {
    final String columnAlias;
    if (m_alias != null) {
        columnAlias = m_alias;
    }
    else {
        columnAlias = m_columnName;
    }
    return new SchemaColumn(m_tableName, m_tableAlias, m_columnName, columnAlias,
            m_expression, m_differentiator);
}
java
/**
 * Logs a fatal EE error (with optional stack traces) and crashes the
 * local VoltDB node with the same message.
 */
public static void crashVoltDB(String reason, String traces[], String filename, int lineno) {
    final VoltLogger hostLog = new VoltLogger("HOST");
    final String fileLabel = (filename != null) ? filename : "unknown";
    final String reasonText = (reason != null) ? reason : "Fatal EE error.";
    final String message = reasonText + " In " + fileLabel + ":" + lineno;
    hostLog.fatal(message);
    if (traces != null) {
        for (String trace : traces) {
            hostLog.fatal(trace);
        }
    }
    VoltDB.crashLocalVoltDB(message, true, null);
}
java
/**
 * Returns the next dependency table for the given id as a raw byte copy,
 * or null when there is no further dependency. The table's backing
 * buffer position is left untouched.
 */
public byte[] nextDependencyAsBytes(final int dependencyId) {
    final VoltTable vt = m_dependencyTracker.nextDependency(dependencyId);
    if (vt == null) {
        return null;
    }
    final ByteBuffer tableData = PrivateVoltTableFactory.getTableDataReference(vt);
    final int savedPosition = tableData.position();
    final byte[] bytes = new byte[tableData.limit() - savedPosition];
    tableData.get(bytes);
    // Restore the position so other readers of the shared buffer see it unchanged.
    tableData.position(savedPosition);
    return bytes;
}
java
/**
 * Loads a serialized catalog into the EE, marking the fragment context as
 * CATALOG_LOAD for the duration and always restoring it afterwards.
 */
public void loadCatalog(long timestamp, String serializedCatalog) {
    try {
        setupProcedure(null);
        m_fragmentContext = FragmentContext.CATALOG_LOAD;
        final byte[] catalogBytes = getStringBytes(serializedCatalog);
        coreLoadCatalog(timestamp, catalogBytes);
    }
    finally {
        // Always restore the neutral context, even on failure.
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
java
/**
 * Applies catalog diff commands to the EE, marking the fragment context
 * as CATALOG_UPDATE for the duration and always restoring it afterwards.
 *
 * @throws EEException on failure inside the EE
 */
public final void updateCatalog(final long timestamp, final boolean isStreamUpdate,
        final String diffCommands) throws EEException {
    try {
        setupProcedure(null);
        m_fragmentContext = FragmentContext.CATALOG_UPDATE;
        coreUpdateCatalog(timestamp, isStreamUpdate, diffCommands);
    }
    finally {
        // Always restore the neutral context, even on failure.
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
java
/**
 * Executes a batch of plan fragments in the EE. Sets the fragment context
 * (read-only vs read-write batch) for the duration, emits optional trace
 * events, and updates planner cache statistics on success.
 *
 * @return the EE results wrapped in a FastDeserializer
 * @throws EEException on execution failure inside the EE
 */
public FastDeserializer executePlanFragments(
        int numFragmentIds,
        long[] planFragmentIds,
        long[] inputDepIds,
        Object[] parameterSets,
        DeterminismHash determinismHash,
        String[] sqlTexts,
        boolean[] isWriteFrags,
        int[] sqlCRCs,
        long txnId,
        long spHandle,
        long lastCommittedSpHandle,
        long uniqueId,
        long undoQuantumToken,
        boolean traceOn) throws EEException {
    try {
        // For now, re-transform undoQuantumToken to readOnly. Redundancy work in site.executePlanFragments()
        m_fragmentContext = (undoQuantumToken == Long.MAX_VALUE)
                ? FragmentContext.RO_BATCH : FragmentContext.RW_BATCH;

        // reset context for progress updates
        m_sqlTexts = sqlTexts;

        if (traceOn) {
            final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPSITE);
            if (traceLog != null) {
                traceLog.add(() -> VoltTrace.beginDuration("execplanfragment",
                        "txnId", TxnEgo.txnIdToString(txnId),
                        "partition", Integer.toString(m_partitionId)));
            }
        }

        FastDeserializer results = coreExecutePlanFragments(m_currentBatchIndex,
                numFragmentIds, planFragmentIds, inputDepIds, parameterSets,
                determinismHash, isWriteFrags, sqlCRCs, txnId, spHandle,
                lastCommittedSpHandle, uniqueId, undoQuantumToken, traceOn);

        if (traceOn) {
            final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPSITE);
            if (traceLog != null) {
                traceLog.add(VoltTrace::endDuration);
            }
        }

        m_plannerStats.updateEECacheStats(m_eeCacheSize, numFragmentIds - m_cacheMisses,
                m_cacheMisses, m_partitionId);
        return results;
    }
    finally {
        // don't count any cache misses when there's an exception. This is a lie and they
        // will still be used to estimate the cache size, but it's hard to count cache hits
        // during an exception, so we don't count cache misses either to get the right ratio.
        m_cacheMisses = 0;
        m_sqlTexts = null;
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
java
/**
 * (Re)schedules the periodic loader-buffer flush. Any previously
 * scheduled task is cancelled first; a non-positive period disables
 * periodic flushing entirely.
 *
 * @param delay   initial delay before the first flush, in seconds
 * @param seconds flush period in seconds; <= 0 disables flushing
 */
public synchronized void setFlushInterval(long delay, long seconds) {
    if (m_flush != null) {
        m_flush.cancel(false);
        m_flush = null;
    }
    if (seconds <= 0) {
        return;
    }
    m_flush = m_ses.scheduleAtFixedRate(() -> {
        try {
            flush();
        }
        catch (Exception e) {
            loaderLog.error("Failed to flush loader buffer, some tuples may not be inserted.", e);
        }
    }, delay, seconds, TimeUnit.SECONDS);
}
java
/**
 * Closes this session: rolls back any open transaction, logs the
 * disconnect (best effort), releases session-held resources, unregisters
 * from the database, and drops references so the session can be GC'd.
 * Idempotent — subsequent calls are no-ops.
 */
@Override
public synchronized void close() {
    if (isClosed) {
        return;
    }
    rollback(false);
    try {
        // Best effort; a failed log write must not prevent the close.
        database.logger.writeToLog(this, Tokens.T_DISCONNECT);
    }
    catch (HsqlException e) {}
    sessionData.closeAllNavigators();
    sessionData.persistentStoreCollection.clearAllTables();
    sessionData.closeResultCache();
    database.compiledStatementManager.removeSession(sessionId);
    database.sessionManager.removeSession(this);
    database.closeIfLast();

    // Drop references; isClosed is set last.
    database = null;
    user = null;
    rowActionList = null;
    sessionContext.savepoints = null;
    intConnection = null;
    sessionContext = null;
    lastIdentity = null;
    isClosed = true;
}
java
/**
 * Sets the transaction isolation level. Not allowed mid-transaction.
 * Non-default levels are written to the log.
 */
public void setIsolation(int level) {
    if (isInMidTransaction()) {
        throw Error.error(ErrorCode.X_25001);
    }
    // READ UNCOMMITTED is implemented here by forcing read-only access.
    if (level == SessionInterface.TX_READ_UNCOMMITTED) {
        isReadOnly = true;
    }
    isolationMode = level;

    final boolean differsFromDefault = isolationMode != isolationModeDefault;
    if (differsFromDefault) {
        database.logger.writeToLog(this, getTransactionIsolationSQL());
    }
}
java
/**
 * Verifies that DDL may be executed: the session must be writable, and
 * unless we are replaying a script or the log, the database files must
 * not be read-only.
 */
void checkDDLWrite() {
    checkReadWrite();

    final boolean replaying = isProcessingScript || isProcessingLog;
    if (!replaying && database.isFilesReadOnly()) {
        throw Error.error(ErrorCode.DATABASE_IS_READONLY);
    }
}
java
/**
 * Registers a row-delete action with the transaction manager.
 */
void addDeleteAction(Table table, Row row) {
    // tempActionHistory.add("add delete action " + actionTimestamp);
    if (abortTransaction) {
        // NOTE(review): deliberately empty — the abort used to throw here
        // (see commented line); confirm intent before re-enabling.
        // throw Error.error(ErrorCode.X_40001);
    }
    database.txManager.addDeleteAction(this, table, row);
}
java
/**
 * Switches auto-commit mode. Changing the mode implicitly commits any
 * open transaction first. No-op on a closed session or when the mode is
 * unchanged.
 */
@Override
public synchronized void setAutoCommit(boolean autocommit) {
    if (isClosed) {
        return;
    }
    if (autocommit == isAutoCommit) {
        return; // nothing to change
    }
    commit(false);
    isAutoCommit = autocommit;
}
java
/**
 * Commits the current transaction. With no open transaction, only the
 * read-only and isolation modes are reset to their session defaults.
 * On commit failure the transaction is rolled back and a serialization
 * failure (40001) is raised.
 *
 * @param chain chained-transaction flag; not consulted directly in this body
 */
@Override
public synchronized void commit(boolean chain) {
    // tempActionHistory.add("commit " + actionTimestamp);
    if (isClosed) {
        return;
    }
    if (!isTransaction) {
        // No open transaction: restore per-transaction session defaults.
        isReadOnly = isReadOnlyDefault;
        isolationMode = isolationModeDefault;
        return;
    }
    if (!database.txManager.commitTransaction(this)) {
        // tempActionHistory.add("commit aborts " + actionTimestamp);
        rollback(false);
        throw Error.error(ErrorCode.X_40001);
    }
    endTransaction(true);
}
java
/**
 * Rolls back the current transaction. With no open transaction, only the
 * read-only and isolation modes are reset to their session defaults.
 *
 * @param chain chained-transaction flag; not consulted directly in this body
 */
@Override
public synchronized void rollback(boolean chain) {
    // tempActionHistory.add("rollback " + actionTimestamp);
    if (isClosed) {
        return;
    }
    if (!isTransaction) {
        isReadOnly = isReadOnlyDefault;
        isolationMode = isolationModeDefault;
        return;
    }
    try {
        // Best effort logging; failures are deliberately ignored.
        database.logger.writeToLog(this, Tokens.T_ROLLBACK);
    }
    catch (HsqlException e) {}
    database.txManager.rollback(this);
    endTransaction(false);
}
java
/**
 * Declares a named savepoint at the current row-action position.
 * Redeclaring an existing name replaces the previous savepoint.
 */
@Override
public synchronized void savepoint(String name) {
    int index = sessionContext.savepoints.getIndex(name);

    if (index != -1) {
        // Same name: drop the previous savepoint and its timestamp.
        sessionContext.savepoints.remove(name);
        sessionContext.savepointTimestamps.remove(index);
    }
    sessionContext.savepoints.add(name, ValuePool.getInt(rowActionList.size()));
    sessionContext.savepointTimestamps.addLast(actionTimestamp);
    try {
        // Best effort logging; failures are deliberately ignored.
        database.logger.writeToLog(this, getSavepointSQL(name));
    }
    catch (HsqlException e) {}
}
java
/**
 * Rolls back to the named savepoint. Throws 3B001 when the savepoint is
 * unknown; no-op on a closed session.
 */
@Override
public synchronized void rollbackToSavepoint(String name) {
    if (isClosed) {
        return;
    }
    final int savepointIndex = sessionContext.savepoints.getIndex(name);
    if (savepointIndex < 0) {
        throw Error.error(ErrorCode.X_3B001, name);
    }
    database.txManager.rollbackSavepoint(this, savepointIndex);
    // Best effort logging; the rollback itself has already happened.
    try {
        database.logger.writeToLog(this, getSavepointRollbackSQL(name));
    }
    catch (HsqlException ignored) {}
}
java
/**
 * Rolls back to the first (oldest) savepoint.
 * NOTE(review): assumes at least one savepoint exists — getKey(0) on an
 * empty savepoint list would fail; confirm callers guarantee this.
 */
public synchronized void rollbackToSavepoint() {
    if (isClosed) {
        return;
    }
    String name = (String) sessionContext.savepoints.getKey(0);
    database.txManager.rollbackSavepoint(this, 0);
    try {
        // Best effort logging; failures are deliberately ignored.
        database.logger.writeToLog(this, getSavepointRollbackSQL(name));
    }
    catch (HsqlException e) {}
}
java
@Override public synchronized void releaseSavepoint(String name) { // remove this and all later savepoints int index = sessionContext.savepoints.getIndex(name); if (index < 0) { throw Error.error(ErrorCode.X_3B001, name); } while (sessionContext.savepoints.size() > index) { sessionContext.savepoints.remove(sessionContext.savepoints.size() - 1); sessionContext.savepointTimestamps.removeLast(); } }
java
/**
 * Sets the session read-only flag. Switching to read-write is refused on
 * a read-only database, and no change is allowed mid-transaction.
 */
public void setReadOnly(boolean readonly) {
    final boolean wantsWrite = !readonly;
    if (wantsWrite && database.databaseReadOnly) {
        throw Error.error(ErrorCode.DATABASE_IS_READONLY);
    }
    if (isInMidTransaction()) {
        throw Error.error(ErrorCode.X_25001);
    }
    isReadOnly = readonly;
}
java
private Result executeResultUpdate(Result cmd) {

    long   resultId   = cmd.getResultId();
    int    actionType = cmd.getActionType();
    Result stored     = sessionData.getDataResult(resultId);

    // SQL state 24501: the referenced result set no longer exists.
    if (stored == null) {
        return Result.newErrorResult(Error.error(ErrorCode.X_24501));
    }

    Object[] parameterValues = cmd.getParameterData();
    Type[]   parameterTypes  = cmd.metaData.columnTypes;

    // Resolve the base table and column map of the query that produced
    // the stored result, then configure the shared row-update statement.
    StatementQuery  statement  = (StatementQuery) stored.getStatement();
    QueryExpression expression = statement.queryExpression;
    Table           baseTable  = expression.getBaseTable();
    int[]           columnMap  = expression.getBaseTableColumnMap();

    sessionContext.rowUpdateStatement.setRowActionProperties(actionType, baseTable, parameterTypes, columnMap);

    return executeCompiledStatement(sessionContext.rowUpdateStatement, parameterValues);
}
java
HsqlName getSchemaHsqlName(String name) {

    // A null name means the session's current schema.
    if (name == null) {
        return currentSchema;
    }

    return database.schemaManager.getSchemaHsqlName(name);
}
java
public String getSchemaName(String name) {

    // A null name means the session's current schema.
    if (name == null) {
        return currentSchema.name;
    }

    return database.schemaManager.getSchemaName(name);
}
java
public Table defineLocalTable(HsqlName tableName, HsqlName[] colNames, Type[] colTypes) { // I'm not sure the table type, here TableBase.CACHED_TABLE, matters // all that much. assert(localTables != null); Table newTable = TableUtil.newTable(database, TableBase.CACHED_TABLE, tableName); TableUtil.setColumnsInSchemaTable(newTable, colNames, colTypes); newTable.createPrimaryKey(new int[0]); localTables.put(tableName.name, newTable); return newTable; }
java
public void updateLocalTable(HsqlName queryName, Type[] finalTypes) {

    assert(localTables != null);

    // The table must already have been registered via defineLocalTable().
    Table table = getLocalTable(queryName.name);

    assert (table != null);

    TableUtil.updateColumnTypes(table, finalTypes);
}
java
void logSequences() {

    OrderedHashSet updated = sessionData.sequenceUpdateSet;

    if (updated == null || updated.isEmpty()) {
        return;
    }

    // Write one statement per sequence touched by this session, then reset.
    int count = updated.size();

    for (int i = 0; i < count; i++) {
        NumberSequence sequence = (NumberSequence) updated.get(i);

        database.logger.writeSequenceStatement(this, sequence);
    }

    updated.clear();
}
java
/**
 * Writes the given DDL text to a temporary .sql file and registers that
 * file as a schema via {@link #addSchema(String)}.
 *
 * @param ddlText schema DDL text to write out
 * @throws IOException if the temporary file cannot be created or written
 */
public void addLiteralSchema(String ddlText) throws IOException {
    // Fix: the suffix must include the dot -- createTempFile appends the
    // suffix verbatim, so "sql" produced names like "literalschema123sql"
    // with no file extension.
    File temp = File.createTempFile("literalschema", ".sql");
    temp.deleteOnExit();

    // Fix: try-with-resources closes the writer even if write() throws;
    // the original leaked the FileWriter on failure.
    try (FileWriter out = new FileWriter(temp)) {
        out.write(ddlText);
    }

    // addSchema() URL-decodes its argument, so the path is encoded here.
    addSchema(URLEncoder.encode(temp.getAbsolutePath(), "UTF-8"));
}
java
public void addSchema(String schemaURL) { try { schemaURL = URLDecoder.decode(schemaURL, "UTF-8"); } catch (final UnsupportedEncodingException e) { e.printStackTrace(); System.exit(-1); } assert(m_schemas.contains(schemaURL) == false); final File schemaFile = new File(schemaURL); assert(schemaFile != null); assert(schemaFile.isDirectory() == false); // this check below fails in some valid cases (like when the file is in a jar) //assert schemaFile.canRead() // : "can't read file: " + schemaPath; m_schemas.add(schemaURL); }
java
/**
 * Returns true when the given VoltXML expression node contains a parameter
 * placeholder anywhere in its subtree.
 *
 * Recurses through vector/row containers and binary/unary operations;
 * column references, functions and table subqueries are never treated as
 * parameterized.
 *
 * @param elm VoltXML expression element to inspect
 * @return true if the element or any descendant is a parameter marker
 */
private static boolean isParameterized(VoltXMLElement elm) {
    final String name = elm.name;
    if (name.equals("value")) {
        // Leaf value: parameterized iff explicitly flagged with "isparam".
        return elm.getBoolAttribute("isparam", false);
    } else if (name.equals("vector") || name.equals("row")) {
        // Containers are parameterized when any child is.
        return elm.children.stream().anyMatch(ExpressionUtil::isParameterized);
    } else if (name.equals("columnref") || name.equals("function") || name.equals("tablesubquery")) {
        return false;
    } else {
        // Only "operation" nodes remain; anything else is a programming error.
        assert name.equals("operation") : "unknown VoltXMLElement type: " + name;
        final ExpressionType op = mapOfVoltXMLOpType.get(elm.attributes.get("optype"));
        assert op != null;
        switch (op) {
            case CONJUNCTION_OR: // two operators
            case CONJUNCTION_AND:
            case COMPARE_GREATERTHAN:
            case COMPARE_LESSTHAN:
            case COMPARE_EQUAL:
            case COMPARE_NOTEQUAL:
            case COMPARE_GREATERTHANOREQUALTO:
            case COMPARE_LESSTHANOREQUALTO:
            case OPERATOR_PLUS:
            case OPERATOR_MINUS:
            case OPERATOR_MULTIPLY:
            case OPERATOR_DIVIDE:
            case OPERATOR_CONCAT:
            case OPERATOR_MOD:
            case COMPARE_IN:
                // Binary: parameterized if either operand is.
                return isParameterized(elm.children.get(0)) || isParameterized(elm.children.get(1));
            case OPERATOR_IS_NULL: // one operator
            case OPERATOR_EXISTS:
            case OPERATOR_NOT:
            case OPERATOR_UNARY_MINUS:
                // Unary: check the single operand.
                return isParameterized(elm.children.get(0));
            default:
                assert false;
                return false;
        }
    }
}
java
/**
 * Returns the value type name of a VoltXML expression node.
 *
 * Prefers the element's own "valuetype" attribute; for column references
 * without one, looks the column up in the catalog by table name and column
 * index and converts its catalog type to a VoltType name. Returns "" when
 * the type cannot be determined.
 *
 * @param db  catalog database used to resolve column references
 * @param elm VoltXML expression element to inspect
 * @return the type name, or "" when unknown
 */
private static String getType(Database db, VoltXMLElement elm) {
    final String type = elm.getStringAttribute("valuetype", "");
    if (! type.isEmpty()) {
        // The element carries its type explicitly -- use it as-is.
        return type;
    } else if (elm.name.equals("columnref")) {
        final String tblName = elm.getStringAttribute("table", "");
        final int colIndex = elm.getIntAttribute("index", 0);
        // Find the matching table, then the column at the given index,
        // and map its catalog type code to a VoltType name.
        return StreamSupport.stream(db.getTables().spliterator(), false)
                .filter(tbl -> tbl.getTypeName().equals(tblName))
                .findAny()
                .flatMap(tbl -> StreamSupport.stream(tbl.getColumns().spliterator(), false)
                        .filter(col -> col.getIndex() == colIndex)
                        .findAny())
                .map(Column::getType)
                .map(typ -> VoltType.get((byte) ((int)typ)).getName())
                .orElse("");
    } else {
        return "";
    }
}
java
/**
 * Guesses the type of the parameter inside a parameterized VoltXML
 * operation node.
 *
 * Logical operators imply "boolean"; for binary comparisons/arithmetic the
 * parameter's type is taken from the non-parameterized operand; unary
 * minus defaults to "integer". Returns "" when no guess can be made.
 *
 * @param db  catalog database used to resolve operand column types
 * @param elm VoltXML element whose parameter type is being guessed
 * @return the guessed type name, or "" when unknown
 */
private static String guessParameterType(Database db, VoltXMLElement elm) {
    if (! isParameterized(elm) || ! elm.name.equals("operation")) {
        // Nothing to guess for non-parameterized or non-operation nodes.
        return "";
    } else {
        final ExpressionType op = mapOfVoltXMLOpType.get(elm.attributes.get("optype"));
        assert op != null;
        switch (op) {
            case CONJUNCTION_OR:
            case CONJUNCTION_AND:
            case OPERATOR_NOT:
                // Logical operators take boolean operands.
                return "boolean";
            case COMPARE_GREATERTHAN: // For these 2 operator-ops, the type is what the non-parameterized part gets set to.
            case COMPARE_LESSTHAN:
            case COMPARE_EQUAL:
            case COMPARE_NOTEQUAL:
            case COMPARE_GREATERTHANOREQUALTO:
            case COMPARE_LESSTHANOREQUALTO:
            case OPERATOR_PLUS:
            case OPERATOR_MINUS:
            case OPERATOR_MULTIPLY:
            case OPERATOR_DIVIDE:
            case OPERATOR_CONCAT:
            case OPERATOR_MOD:
            case COMPARE_IN:
                // Use the type of whichever operand is NOT the parameter.
                final VoltXMLElement left = elm.children.get(0), right = elm.children.get(1);
                return isParameterized(left) ? getType(db, right) : getType(db, left);
            case OPERATOR_UNARY_MINUS:
                // NOTE(review): defaults to "integer" -- presumably a
                // pragmatic fallback; confirm against callers.
                return "integer";
            case OPERATOR_IS_NULL:
            case OPERATOR_EXISTS:
                // Type cannot be inferred for these.
                return "";
            default:
                assert false;
                return "";
        }
    }
}
java