code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Builds a {@link ThreadFactory} that creates daemon threads with the given
 * stack size, optionally numbered names, and an optional thread group.
 * If coreList is supplied, one entry is polled per created thread (actual
 * core pinning is currently disabled - see the comment in run()).
 *
 * @param groupName when non-null, created threads are placed in a new
 *                  ThreadGroup (child of the caller's group) with this name
 * @param name base name for created threads
 * @param stackSize stack size handed to the Thread constructor
 * @param incrementThreadNames when true, append " - <counter>" to each name
 * @param coreList optional queue of core identifiers, one consumed per thread
 */
public static ThreadFactory getThreadFactory(
        final String groupName,
        final String name,
        final int stackSize,
        final boolean incrementThreadNames,
        final Queue<String> coreList) {
    ThreadGroup group = null;
    if (groupName != null) {
        group = new ThreadGroup(Thread.currentThread().getThreadGroup(), groupName);
    }
    final ThreadGroup finalGroup = group;

    return new ThreadFactory() {
        // Monotonic counter used to suffix thread names when requested.
        private final AtomicLong m_createdThreadCount = new AtomicLong(0);
        private final ThreadGroup m_group = finalGroup;

        @Override
        public synchronized Thread newThread(final Runnable r) {
            final String threadName = name +
                    (incrementThreadNames ? " - " + m_createdThreadCount.getAndIncrement() : "");
            String coreTemp = null;
            if (coreList != null && !coreList.isEmpty()) {
                coreTemp = coreList.poll();
            }
            final String core = coreTemp;
            Runnable runnable = new Runnable() {
                @Override
                public void run() {
                    if (core != null) {
                        // Remove Affinity for now to make this dependency dissapear from the client.
                        // Goal is to remove client dependency on this class in the medium term.
                        //PosixJNAAffinity.INSTANCE.setAffinity(core);
                    }
                    try {
                        r.run();
                    } catch (Throwable t) {
                        // Last-ditch logging: an uncaught exception would
                        // otherwise vanish silently with the thread.
                        new VoltLogger("HOST").error("Exception thrown in thread " + threadName, t);
                    } finally {
                        // Per-thread cleanup of thread-local resources.
                        m_threadLocalDeallocator.run();
                    }
                }
            };
            Thread t = new Thread(m_group, runnable, threadName, stackSize);
            t.setDaemon(true);
            return t;
        }
    };
}
java
/**
 * Returns the cached hostname (or textual address) for the local address,
 * or an empty string when no local address is available.
 */
public static String getHostnameOrAddress() {
    final InetAddress address = m_localAddressSupplier.get();
    return (address == null) ? "" : ReverseDNSCache.hostnameOrAddress(address);
}
java
/**
 * Convenience overload: creates the result future, kicks off the retrying
 * execution via the full retryHelper overload, and returns the future.
 */
public static final <T> ListenableFuture<T> retryHelper(
        final ScheduledExecutorService ses,
        final ExecutorService es,
        final Callable<T> callable,
        final long maxAttempts,
        final long startInterval,
        final TimeUnit startUnit,
        final long maxInterval,
        final TimeUnit maxUnit) {
    final SettableFuture<T> result = SettableFuture.create();
    retryHelper(ses, es, callable, maxAttempts, startInterval, startUnit,
                maxInterval, maxUnit, result);
    return result;
}
java
/**
 * Returns the map's entries as a list sorted primarily by value; entries
 * whose values are equal (by equals) are ordered by key.
 *
 * @param map source map; it is not modified
 * @return a new, sorted list of the map's entries
 */
public static <K extends Comparable< ? super K>,V extends Comparable< ? super V>> List<Entry<K,V>> sortKeyValuePairByValue(Map<K,V> map) {
    final List<Map.Entry<K,V>> sorted = new ArrayList<>(map.entrySet());
    sorted.sort((lhs, rhs) -> {
        // Values decide the order unless they are equal, in which case
        // the (comparable) keys break the tie.
        if (lhs.getValue().equals(rhs.getValue())) {
            return lhs.getKey().compareTo(rhs.getKey());
        }
        return lhs.getValue().compareTo(rhs.getValue());
    });
    return sorted;
}
java
/**
 * Links n as the left or right child of x and fixes n's parent pointer.
 * Returns the (possibly replaced) node x.
 */
private static NodeAVL set(PersistentStore store, NodeAVL x, boolean isleft, NodeAVL n) {
    x = isleft ? x.setLeft(store, n) : x.setRight(store, n);
    if (n != null) {
        n.setParent(store, x);
    }
    return x;
}
java
/** Returns x's left child when isleft is true, otherwise its right child. */
private static NodeAVL child(PersistentStore store, NodeAVL x, boolean isleft) {
    if (isleft) {
        return x.getLeft(store);
    }
    return x.getRight(store);
}
java
/**
 * Compares two rows column by column over the given column list, using the
 * per-column type comparators; returns the first non-zero comparison, or 0
 * when all listed columns are equal.
 */
public static int compareRows(Object[] a, Object[] b, int[] cols, Type[] coltypes) {
    for (int idx = 0; idx < cols.length; idx++) {
        final int col = cols[idx];
        final int cmp = coltypes[col].compare(a[col], b[col]);
        if (cmp != 0) {
            return cmp;
        }
    }
    return 0;
}
java
/**
 * Counts the rows in this index by iterating every row under the read
 * lock.  O(n); intended for occasional use, not hot paths.
 */
@Override
public int size(PersistentStore store) {
    readLock.lock();
    try {
        int total = 0;
        for (RowIterator it = firstRow(null, store); it.hasNext(); total++) {
            it.getNextRow();
        }
        return total;
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Inserts a row into this AVL index.  Descends from the root comparing the
 * new row against each node; throws a constraint violation on an exact
 * duplicate, otherwise attaches the row's node as a new leaf and rebalances
 * the tree.  All work happens under the write lock.
 *
 * @throws org.hsqldb_voltpatches.HsqlException X_23505 on a duplicate key
 */
@Override
public void insert(Session session, PersistentStore store, Row row) {
    NodeAVL n;
    NodeAVL x;
    boolean isleft = true;
    int compare = -1;
    writeLock.lock();
    try {
        n = getAccessor(store);
        x = n;
        if (n == null) {
            // Empty tree: the new row's node becomes the root.
            store.setAccessor(this, ((RowAVL) row).getNode(position));
            return;
        }
        while (true) {
            Row currentRow = n.getRow(store);
            compare = compareRowForInsertOrDelete(session, row, currentRow);
            if (compare == 0) {
                // Exact duplicate: unique-constraint violation.
                throw Error.error(ErrorCode.X_23505);
            }
            isleft = compare < 0;
            x = n;
            n = child(store, x, isleft);
            if (n == null) {
                break;  // found the leaf position for the new node
            }
        }
        // Attach the new node under x and restore AVL balance upward.
        x = set(store, x, isleft, ((RowAVL) row).getNode(position));
        balance(store, x, isleft);
    } finally {
        writeLock.unlock();
    }
}
java
/**
 * Finds the first row matching rowdata on the default column map, comparing
 * only the first {@code match} columns, and returns an iterator positioned
 * there (empty iterator when nothing matches).
 */
@Override
public RowIterator findFirstRow(Session session, PersistentStore store,
                                Object[] rowdata, int match) {
    final NodeAVL found = findNode(session, store, rowdata, defaultColMap, match);
    return getIterator(session, store, found);
}
java
/**
 * Finds the first row matching rowdata on every indexed column and returns
 * an iterator positioned there (empty iterator when nothing matches).
 */
@Override
public RowIterator findFirstRow(Session session, PersistentStore store,
                                Object[] rowdata) {
    final NodeAVL found = findNode(session, store, rowdata, colIndex, colIndex.length);
    return getIterator(session, store, found);
}
java
/**
 * Finds the first row whose first indexed column satisfies {@code value
 * <op> column} for GREATER, GREATER_EQUAL, or EQUAL/IS_NULL comparisons.
 * SMALLER variants are redirected to findFirstRowNotNull.  Three phases:
 * binary descent to the boundary node, a forward walk to the exact first
 * qualifying row, then an MVCC walk skipping rows invisible to the session.
 */
@Override
public RowIterator findFirstRow(Session session, PersistentStore store,
                                Object value, int compare) {
    readLock.lock();
    try {
        if (compare == OpTypes.SMALLER || compare == OpTypes.SMALLER_EQUAL) {
            // "smaller than value" scans start from the first non-null row.
            return findFirstRowNotNull(session, store);
        }
        boolean isEqual = compare == OpTypes.EQUAL || compare == OpTypes.IS_NULL;
        NodeAVL x = getAccessor(store);
        // iTest == 1 keeps descending right while value > column;
        // iTest == 0 (GREATER) also skips equal columns.
        int iTest = 1;
        if (compare == OpTypes.GREATER) {
            iTest = 0;
        }
        if (value == null && !isEqual) {
            return emptyIterator;
        }
        // this method returns the correct node only with the following conditions
        boolean check = compare == OpTypes.GREATER
                        || compare == OpTypes.EQUAL
                        || compare == OpTypes.GREATER_EQUAL;
        if (!check) {
            Error.runtimeError(ErrorCode.U_S0500, "Index.findFirst");
        }
        // Phase 1: descend to a node adjacent to the boundary.
        while (x != null) {
            boolean t = colTypes[0].compare(
                value, x.getRow(store).getData()[colIndex[0]]) >= iTest;
            if (t) {
                NodeAVL r = x.getRight(store);
                if (r == null) {
                    break;
                }
                x = r;
            } else {
                NodeAVL l = x.getLeft(store);
                if (l == null) {
                    break;
                }
                x = l;
            }
        }
/*
        while (x != null
                && Column.compare(value, x.getData()[colIndex_0], colType_0) >= iTest) {
            x = next(x);
        }
*/
        // Phase 2: walk forward to the exact first qualifying row.
        while (x != null) {
            Object colvalue = x.getRow(store).getData()[colIndex[0]];
            int result = colTypes[0].compare(value, colvalue);
            if (result >= iTest) {
                x = next(store, x);
            } else {
                if (isEqual) {
                    if (result != 0) {
                        x = null;   // no exact match exists
                    }
                } else if (colvalue == null) {
                    // Nulls do not satisfy GREATER*: keep walking past them.
                    x = next(store, x);
                    continue;
                }
                break;
            }
        }
        // MVCC
        if (session == null || x == null) {
            return getIterator(session, store, x);
        }
        // Phase 3: skip rows this session cannot read; for EQUAL, stop as
        // soon as the key no longer matches.
        while (x != null) {
            Row row = x.getRow(store);
            if (compare == OpTypes.EQUAL
                    && colTypes[0].compare(value, row.getData()[colIndex[0]]) != 0) {
                x = null;
                break;
            }
            if (session.database.txManager.canRead(session, row)) {
                break;
            }
            x = next(store, x);
        }
        return getIterator(session, store, x);
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Returns an iterator positioned on the first row whose first indexed
 * column is not null (nulls sort first in this index), skipping rows the
 * session cannot read (MVCC).
 */
@Override
public RowIterator findFirstRowNotNull(Session session, PersistentStore store) {
    readLock.lock();
    try {
        NodeAVL x = getAccessor(store);
        // Descend toward the null/non-null boundary; comparing against
        // null with ">= 0" sends the walk right past leading nulls.
        while (x != null) {
            boolean t = colTypes[0].compare(
                null, x.getRow(store).getData()[colIndex[0]]) >= 0;
            if (t) {
                NodeAVL r = x.getRight(store);
                if (r == null) {
                    break;
                }
                x = r;
            } else {
                NodeAVL l = x.getLeft(store);
                if (l == null) {
                    break;
                }
                x = l;
            }
        }
        // Walk forward over any remaining nulls.
        while (x != null) {
            Object colvalue = x.getRow(store).getData()[colIndex[0]];
            if (colvalue == null) {
                x = next(store, x);
            } else {
                break;
            }
        }
        // MVCC: advance until a row visible to this session is found.
        while (session != null && x != null) {
            Row row = x.getRow(store);
            if (session.database.txManager.canRead(session, row)) {
                break;
            }
            x = next(store, x);
        }
        return getIterator(session, store, x);
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Returns an iterator positioned on the leftmost (smallest) visible row.
 * As a side effect, records the length of the left spine into the
 * {@code depth} field (written in the finally block, even on exceptions).
 */
@Override
public RowIterator firstRow(Session session, PersistentStore store) {
    int tempDepth = 0;
    readLock.lock();
    try {
        NodeAVL x = getAccessor(store);
        NodeAVL l = x;
        // Walk down the left spine to the minimum node, counting depth.
        while (l != null) {
            x = l;
            l = x.getLeft(store);
            tempDepth++;
        }
        // MVCC: skip forward over rows this session cannot read.
        while (session != null && x != null) {
            Row row = x.getRow(store);
            if (session.database.txManager.canRead(session, row)) {
                break;
            }
            x = next(store, x);
        }
        return getIterator(session, store, x);
    } finally {
        depth = tempDepth;
        readLock.unlock();
    }
}
java
/**
 * Returns the rightmost (largest) row visible to the session, or null when
 * the index is empty or no row is visible.
 */
@Override
public Row lastRow(Session session, PersistentStore store) {
    readLock.lock();
    try {
        // Walk down the right spine to the maximum node.
        NodeAVL x = null;
        for (NodeAVL n = getAccessor(store); n != null; n = x.getRight(store)) {
            x = n;
        }
        // MVCC: step backwards until the session can read the row.
        while (session != null && x != null) {
            Row candidate = x.getRow(store);
            if (session.database.txManager.canRead(session, candidate)) {
                break;
            }
            x = last(store, x);
        }
        return x == null ? null : x.getRow(store);
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Advances x to its in-order successor, skipping nodes whose rows the
 * session cannot read (a null session disables the MVCC filtering).
 * Returns null at the end of the index.
 */
private NodeAVL next(Session session, PersistentStore store, NodeAVL x) {
    if (x == null) {
        return null;
    }
    readLock.lock();
    try {
        do {
            x = next(store, x);
        } while (x != null
                 && session != null
                 && !session.database.txManager.canRead(session, x.getRow(store)));
        return x;
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Replaces node x with node n in the tree: either rewiring the parent's
 * child pointer, or - when x is the root - installing n as the new
 * accessor (clearing its parent link first).
 */
private void replace(PersistentStore store, NodeAVL x, NodeAVL n) {
    if (!x.isRoot()) {
        set(store, x.getParent(store), x.isFromLeft(store), n);
        return;
    }
    if (n != null) {
        n = n.setParent(store, null);
    }
    store.setAccessor(this, n);
}
java
/**
 * Compares a search key against a stored row over the first fieldcount
 * indexed columns.  Note the asymmetry: {@code a} is indexed positionally
 * (a search key), while {@code b} is a full row indexed through colIndex.
 */
@Override
public int compareRowNonUnique(Object[] a, Object[] b, int fieldcount) {
    for (int j = 0; j < fieldcount; j++) {
        final int cmp = colTypes[j].compare(a[j], b[colIndex[j]]);
        if (cmp != 0) {
            return cmp;
        }
    }
    return 0;
}
java
/**
 * Compares newRow against existingRow to position an insert or delete.
 * Order of precedence: the indexed columns (honoring DESC and NULLS LAST
 * modifiers), then - for non-unique or null-containing keys - the primary
 * key columns, then physical row position.  Returns 0 only when the rows
 * are the same for this index AND the existing row is visible to the
 * session; an invisible duplicate is disambiguated by row position so both
 * MVCC versions can coexist in the tree.
 */
private int compareRowForInsertOrDelete(Session session, Row newRow,
                                        Row existingRow) {
    Object[] a = newRow.getData();
    Object[] b = existingRow.getData();
    int j = 0;
    boolean hasNull = false;
    for (; j < colIndex.length; j++) {
        Object currentvalue = a[colIndex[j]];
        Object othervalue = b[colIndex[j]];
        int i = colTypes[j].compare(currentvalue, othervalue);
        boolean nulls = currentvalue == null || othervalue == null;
        if (i != 0) {
            // Flip the ordering for DESC columns (nulls excluded) and for
            // NULLS LAST when a null is involved.
            if (colDesc[j] && !nulls) {
                i = -i;
            }
            if (nullsLast[j] && nulls) {
                i = -i;
            }
            return i;
        }
        if (currentvalue == null) {
            hasNull = true;
        }
    }
    if (isUnique && !useRowId && !hasNull) {
        // Unique, fully non-null key: equal keys mean the same logical row
        // if the session can see the existing one.
        if (session == null
                || session.database.txManager.canRead(session, existingRow)) {
            //* debug 190
            // session.database.txManager.canRead(session, existingRow);
            return 0;
        } else {
            int difference = newRow.getPos() - existingRow.getPos();
            return difference;
        }
    }
    // Tie-break duplicate index keys with the primary key columns.
    for (j = 0; j < pkCols.length; j++) {
        Object currentvalue = a[pkCols[j]];
        int i = pkTypes[j].compare(currentvalue, b[pkCols[j]]);
        if (i != 0) {
            return i;
        }
    }
    if (useRowId) {
        // Last resort: order by (signum of) physical row position.
        int difference = newRow.getPos() - existingRow.getPos();
        if (difference < 0) {
            difference = -1;
        } else if (difference > 0) {
            difference = 1;
        }
        return difference;
    }
    if (session == null
            || session.database.txManager.canRead(session, existingRow)) {
        return 0;
    } else {
        int difference = newRow.getPos() - existingRow.getPos();
        if (difference < 0) {
            difference = -1;
        } else if (difference > 0) {
            difference = 1;
        }
        return difference;
    }
}
java
/**
 * Finds the leftmost node whose mapped columns equal rowdata, then (when a
 * session is supplied) walks forward until a row visible to the session is
 * found.  Returns null when no visible matching row exists.
 */
private NodeAVL findNode(Session session, PersistentStore store,
                         Object[] rowdata, int[] rowColMap, int fieldCount) {
    readLock.lock();
    try {
        NodeAVL x = getAccessor(store);
        NodeAVL n;
        NodeAVL result = null;
        while (x != null) {
            int i = this.compareRowNonUnique(rowdata, rowColMap,
                                             x.getRow(store).getData(),
                                             fieldCount);
            if (i == 0) {
                // Remember the match but keep descending left so the FIRST
                // of possibly several equal rows is returned.
                result = x;
                n = x.getLeft(store);
            } else if (i > 0) {
                n = x.getRight(store);
            } else {
                n = x.getLeft(store);
            }
            if (n == null) {
                break;
            }
            x = n;
        }
        // MVCC 190
        if (session == null) {
            return result;
        }
        // Skip rows this session cannot read; stop once the key no longer
        // matches (all duplicates exhausted).
        while (result != null) {
            Row row = result.getRow(store);
            if (compareRowNonUnique(rowdata, rowColMap, row.getData(),
                                    fieldCount) != 0) {
                result = null;
                break;
            }
            if (session.database.txManager.canRead(session, row)) {
                break;
            }
            result = next(store, result);
        }
        return result;
    } finally {
        readLock.unlock();
    }
}
java
/**
 * Restores AVL balance factors walking up from node x after an insertion
 * on its {@code isleft} side, performing a single or double rotation at
 * the first doubly-heavy ancestor.  {@code sign} folds the left/right
 * symmetry into arithmetic so one code path handles both directions.
 */
private void balance(PersistentStore store, NodeAVL x, boolean isleft) {
    while (true) {
        int sign = isleft ? 1 : -1;
        switch (x.getBalance() * sign) {
            case 1 :
                // Was heavy the other way: now balanced, height unchanged.
                x = x.setBalance(store, 0);
                return;
            case 0 :
                // Subtree grew taller: mark and propagate upward.
                x = x.setBalance(store, -sign);
                break;
            case -1 :
                // Doubly heavy: rotate.
                NodeAVL l = child(store, x, isleft);
                if (l.getBalance() == -sign) {
                    // Single rotation.
                    replace(store, x, l);
                    x = set(store, x, isleft, child(store, l, !isleft));
                    l = set(store, l, !isleft, x);
                    x = x.setBalance(store, 0);
                    l = l.setBalance(store, 0);
                } else {
                    // Double rotation through l's inner child.
                    NodeAVL r = child(store, l, !isleft);
                    replace(store, x, r);
                    l = set(store, l, !isleft, child(store, r, isleft));
                    r = set(store, r, isleft, l);
                    x = set(store, x, isleft, child(store, r, !isleft));
                    r = set(store, r, !isleft, x);
                    int rb = r.getBalance();
                    x = x.setBalance(store, (rb == -sign) ? sign : 0);
                    l = l.setBalance(store, (rb == sign) ? -sign : 0);
                    r = r.setBalance(store, 0);
                }
                return;
        }
        if (x.isRoot()) {
            return;
        }
        isleft = x.isFromLeft(store);
        x = x.getParent(store);
    }
}
java
/**
 * Resolves a tuple value expression, then resolves every leaf TVE found in
 * the resulting expression tree, and returns the resolved expression.
 */
public AbstractExpression resolveTVE(TupleValueExpression tve) {
    final AbstractExpression resolved = processTVE(tve, tve.getColumnName());
    for (TupleValueExpression leaf :
            ExpressionUtil.getTupleValueExpressions(resolved)) {
        resolveLeafTve(leaf);
    }
    return resolved;
}
java
void resolveColumnIndexesUsingSchema(NodeSchema inputSchema) { // get all the TVEs in the output columns int difftor = 0; for (SchemaColumn col : m_outputSchema) { col.setDifferentiator(difftor); ++difftor; Collection<TupleValueExpression> allTves = ExpressionUtil.getTupleValueExpressions(col.getExpression()); // and update their indexes against the table schema for (TupleValueExpression tve : allTves) { tve.setColumnIndexUsingSchema(inputSchema); assert (tve.getColumnIndex() >= 0 && tve.getColumnIndex() < inputSchema.size()); } } // DON'T RE-SORT HERE }
java
public boolean isIdentity(AbstractPlanNode childNode) throws PlanningErrorException { assert(childNode != null); // Find the output schema. // If the child node has an inline projection node, // then the output schema is the inline projection // node's output schema. Otherwise it's the output // schema of the childNode itself. NodeSchema childSchema = childNode.getTrueOutputSchema(false); assert(childSchema != null); NodeSchema outputSchema = getOutputSchema(); if (outputSchema.size() != childSchema.size()) { return false; } for (int idx = 0; idx < outputSchema.size(); idx += 1) { SchemaColumn col = outputSchema.getColumn(idx); SchemaColumn childCol = childSchema.getColumn(idx); if (col.getValueType() != childCol.getValueType()) { return false; } if ( ! (col.getExpression() instanceof TupleValueExpression)) { return false; } if ( ! (childCol.getExpression() instanceof TupleValueExpression)) { return false; } TupleValueExpression tve = (TupleValueExpression)col.getExpression(); if (tve.getColumnIndex() != idx) { return false; } } return true; }
java
/**
 * Overwrites the naming (table/alias/column/alias) of each column in the
 * child's true output schema with this node's naming, position by
 * position.  The column expressions themselves are left in place.
 */
public void replaceChildOutputSchemaNames(AbstractPlanNode child) {
    final NodeSchema childSchema = child.getTrueOutputSchema(false);
    final NodeSchema mySchema = getOutputSchema();
    assert(childSchema.size() == mySchema.size());
    for (int idx = 0; idx < childSchema.size(); ++idx) {
        final SchemaColumn childCol = childSchema.getColumn(idx);
        final SchemaColumn myCol = mySchema.getColumn(idx);
        assert(childCol.getValueType() == myCol.getValueType());
        assert(childCol.getExpression() instanceof TupleValueExpression);
        assert(myCol.getExpression() instanceof TupleValueExpression);
        childCol.reset(myCol.getTableName(),
                       myCol.getTableAlias(),
                       myCol.getColumnName(),
                       myCol.getColumnAlias());
    }
}
java
/**
 * Appends a message to the repair log.  Must be called from the task
 * thread (asserted); the repair log is only written from that thread.
 */
void deliverToRepairLog(VoltMessage msg) {
    assert(Thread.currentThread().getId() == m_taskThreadId);
    m_repairLog.deliver(msg);
}
java
/**
 * Stamps the message with this site's HSId as its source and hands it to
 * the messenger for delivery to destHSId.
 */
private void sendInternal(long destHSId, VoltMessage message) {
    message.m_sourceHSId = getHSId();
    m_messenger.send(destHSId, message);
}
java
/**
 * Computes the delta between two snapshots for the same partition.
 *
 * @throws IllegalArgumentException when the snapshots belong to
 *         different partitions
 */
public static ClientAffinityStats diff(ClientAffinityStats newer, ClientAffinityStats older) {
    if (newer.m_partitionId != older.m_partitionId) {
        throw new IllegalArgumentException("Can't diff these ClientAffinityStats instances.");
    }
    return new ClientAffinityStats(
            older.m_partitionId,
            newer.m_affinityWrites - older.m_affinityWrites,
            newer.m_rrWrites - older.m_rrWrites,
            newer.m_affinityReads - older.m_affinityReads,
            newer.m_rrReads - older.m_rrReads);
}
java
/**
 * Drains encrypted frames until one complete message has been assembled
 * into m_encryptedMessages.  Non-final frames are buffered in
 * m_partialMessages; when the final frame of a multi-chunk message
 * arrives, the buffered pieces are flushed ahead of it.
 *
 * @return the total byte delta of the assembled message, or -1 when no
 *         complete message could be assembled
 */
private int addFramesForCompleteMessage() {
    boolean added = false;
    EncryptFrame frame = null;
    int delta = 0;
    while (!added && (frame = m_encryptedFrames.poll()) != null) {
        if (!frame.isLast()) {
            //TODO: Review - I don't think this synchronized(m_partialMessages) is required.
            // This is the only method with synchronized(m_partialMessages) and
            // it doesn't look like this method will be called from multiple threads concurrently.
            // Take this out 8.0 release.
            synchronized(m_partialMessages) {
                m_partialMessages.add(frame);
                ++m_partialSize;
            }
            continue;
        }
        final int partialSize = m_partialSize;
        if (partialSize > 0) {
            // The final frame must account for every buffered piece.
            assert frame.chunks == partialSize + 1
                    : "partial frame buildup has wrong number of preceding pieces";
            //TODO: Review - I don't think this synchronized(m_partialMessages) is required.
            // See comment above.
            // Take this out 8.0 release.
            synchronized(m_partialMessages) {
                for (EncryptFrame frm: m_partialMessages) {
                    m_encryptedMessages.addComponent(true, frm.frame);
                    delta += frm.delta;
                }
                m_partialMessages.clear();
                m_partialSize = 0;
            }
        }
        m_encryptedMessages.addComponent(true, frame.frame);
        delta += frame.delta;
        m_numEncryptedMessages += frame.msgs;
        added = true;
    }
    return added ? delta : -1;
}
java
/**
 * Marks the gateway shut down, waits briefly for in-flight encryption work
 * to drain, stops the encrypt gateway, and releases every buffered frame
 * and partially assembled message.
 */
void shutdown() {
    m_isShutdown = true;
    try {
        // NOTE(review): waitFor = 1 - min(availablePermits, -4); with a
        // non-negative permit count this always yields 5 one-second retry
        // slots -- presumably an intentional bound, but worth confirming.
        int waitFor = 1 - Math.min(m_inFlight.availablePermits(), -4);
        for (int i = 0; i < waitFor; ++i) {
            try {
                if (m_inFlight.tryAcquire(1, TimeUnit.SECONDS)) {
                    m_inFlight.release();
                    break;
                }
            } catch (InterruptedException e) {
                break;
            }
        }
        m_ecryptgw.die();
        // Release every buffer still queued or partially assembled.
        EncryptFrame frame = null;
        while ((frame = m_encryptedFrames.poll()) != null) {
            frame.frame.release();
        }
        for (EncryptFrame ef: m_partialMessages) {
            ef.frame.release();
        }
        m_partialMessages.clear();
        if (m_encryptedMessages.refCnt() > 0) m_encryptedMessages.release();
    } finally {
        // Leave one permit so any waiter can proceed after shutdown.
        m_inFlight.drainPermits();
        m_inFlight.release();
    }
}
java
/**
 * Returns a task that, when run, unregisters the given mailbox from the
 * host messenger.
 */
private Runnable createCompletionTask(final Mailbox mb) {
    return () -> VoltDB.instance().getHostMessenger().removeMailbox(mb.getHSId());
}
java
/**
 * Folds a truncation snapshot into this plan: delegates setup to a fresh
 * NativeSnapshotWritePlan, eagerly adopts the delegate's per-HSId task
 * lists, and returns a deferred callable that runs the delegate's setup
 * and then adopts its snapshot targets.
 *
 * @return a callable producing the delegate setup's result
 */
private Callable<Boolean> coalesceTruncationSnapshotPlan(String file_path, String pathType,
        String file_nonce, long txnId, Map<Integer, Long> partitionTransactionIds,
        SystemProcedureExecutionContext context, VoltTable result,
        ExtensibleSnapshotDigestData extraSnapshotData, SiteTracker tracker,
        HashinatorSnapshotData hashinatorData, long timestamp, int newPartitionCount) {
    final NativeSnapshotWritePlan plan = new NativeSnapshotWritePlan();
    final Callable<Boolean> deferredTruncationSetup =
            plan.createSetupInternal(file_path, pathType, file_nonce, txnId,
                    partitionTransactionIds,
                    new SnapshotRequestConfig(newPartitionCount, context.getDatabase()),
                    context, result, extraSnapshotData, tracker, hashinatorData,
                    timestamp);
    // Task lists are merged immediately; targets only after setup runs.
    m_taskListsForHSIds.putAll(plan.m_taskListsForHSIds);
    return new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
            final Boolean retval = deferredTruncationSetup.call();
            m_targets.addAll(plan.m_targets);
            return retval;
        }
    };
}
java
/**
 * Forcibly tears down the socket (fault-injection / test support).
 * SO_LINGER with timeout 0 makes close() send an RST instead of a graceful
 * FIN; the sleeps and System.gc() give the stack time to act.  Any failure
 * is deliberately ignored.
 */
void killSocket() {
    try {
        m_closing = true;
        m_socket.setKeepAlive(false);
        m_socket.setSoLinger(false, 0);
        Thread.sleep(25);
        m_socket.close();
        Thread.sleep(25);
        System.gc();
        Thread.sleep(25);
    } catch (Exception e) {
        // don't REALLY care if this fails
        e.printStackTrace();
    }
}
java
/**
 * Enqueues a message for delivery to a set of destination HSIds on this
 * foreign host's connection, then piggybacks dead-host detection: logs a
 * rate-limited warning when no message has been received for a while and
 * reports the host failed once the dead-host timeout is exceeded.
 */
void send(final long destinations[], final VoltMessage message) {
    if (!m_isUp) {
        hostLog.warn("Failed to send VoltMessage because connection to host " +
                CoreUtils.getHostIdFromHSId(destinations[0])+ " is closed");
        return;
    }
    if (destinations.length == 0) {
        return;
    }
    // if this link is "gone silent" for partition tests, just drop the message on the floor
    if (!m_linkCutForTest.get()) {
        m_network.enqueue(
            new DeferredSerialization() {
                @Override
                public final void serialize(final ByteBuffer buf) throws IOException {
                    // Wire format: length prefix, source HSId, destination
                    // count, destination list, then the flattened message.
                    buf.putInt(buf.capacity() - 4);
                    buf.putLong(message.m_sourceHSId);
                    buf.putInt(destinations.length);
                    for (int ii = 0; ii < destinations.length; ii++) {
                        buf.putLong(destinations[ii]);
                    }
                    message.flattenToBuffer(buf);
                    buf.flip();
                }
                @Override
                public final void cancel() {
                    /*
                     * Can this be removed?
                     */
                }
                @Override
                public String toString() {
                    return message.getClass().getName();
                }
                @Override
                public int getSerializedSize() {
                    final int len = 4 /* length prefix */
                            + 8 /* source hsid */
                            + 4 /* destinationCount */
                            + 8 * destinations.length /* destination list */
                            + message.getSerializedSize();
                    return len;
                }
            });
    }
    long current_time = EstTime.currentTimeMillis();
    long current_delta = current_time - m_lastMessageMillis.get();
    /*
     * Try and give some warning when a connection is timing out.
     * Allows you to observe the liveness of the host receiving the heartbeats
     */
    if (isPrimary() && current_delta > m_logRate) {
        rateLimitedLogger.log(
                "Have not received a message from host " + hostnameAndIPAndPort() +
                " for " + (current_delta / 1000.0) + " seconds",
                current_time);
    }
    // NodeFailureFault no longer immediately trips FHInputHandler to
    // set m_isUp to false, so use both that and m_closing to
    // avoid repeat reports of a single node failure
    if ((!m_closing && m_isUp) && isPrimary() && current_delta > m_deadHostTimeout) {
        // Only the first detector logs the details; every detector reports.
        if (m_deadReportsCount.getAndIncrement() == 0) {
            hostLog.error("DEAD HOST DETECTED, hostname: " + hostnameAndIPAndPort());
            hostLog.info("\tcurrent time: " + current_time);
            hostLog.info("\tlast message: " + m_lastMessageMillis);
            hostLog.info("\tdelta (millis): " + current_delta);
            hostLog.info("\ttimeout value (millis): " + m_deadHostTimeout);
            VoltDB.dropStackTrace("Timed out foreign host " + hostnameAndIPAndPort() + " with delta " + current_delta);
        }
        m_hostMessenger.reportForeignHostFailed(m_hostId);
    }
}
java
/**
 * Delivers a received message to the local mailbox for destinationHSId.
 * Messages from known-failed hosts are dropped with a warning; messages to
 * unknown sites are dropped unless they were addressed to the wrong host
 * entirely, which crashes the node.
 */
private void deliverMessage(long destinationHSId, VoltMessage message) {
    if (!m_hostMessenger.validateForeignHostId(m_hostId)) {
        hostLog.warn(String.format("Message (%s) sent to site id: %s @ (%s) at %d from %s "
                + "which is a known failed host. The message will be dropped\n",
                message.getClass().getSimpleName(),
                CoreUtils.hsIdToString(destinationHSId),
                m_socket.getRemoteSocketAddress().toString(),
                m_hostMessenger.getHostId(),
                CoreUtils.hsIdToString(message.m_sourceHSId)));
        return;
    }
    Mailbox mailbox = m_hostMessenger.getMailbox(destinationHSId);
    /*
     * At this point we are OK with messages going to sites that don't exist
     * because we are saying that things can come and go
     */
    if (mailbox == null) {
        hostLog.info(String.format("Message (%s) sent to unknown site id: %s @ (%s) at %d from %s \n",
                message.getClass().getSimpleName(),
                CoreUtils.hsIdToString(destinationHSId),
                m_socket.getRemoteSocketAddress().toString(),
                m_hostMessenger.getHostId(),
                CoreUtils.hsIdToString(message.m_sourceHSId)));
        /*
         * If it is for the wrong host, that definitely isn't cool
         */
        if (m_hostMessenger.getHostId() != (int)destinationHSId) {
            VoltDB.crashLocalVoltDB("Received a message at wrong host", false, null);
        }
        return;
    }
    // deliver the message to the mailbox
    mailbox.deliver(message);
}
java
/**
 * Parses one inbound wire message.  The destination-count field doubles as
 * a control opcode: POISON_PILL carries a crash/stack-trace directive and
 * STOPNODE_NOTICE a stop-node notice; otherwise the destination HSId list
 * and payload follow, and the message is fanned out to local mailboxes.
 * Also sniffs SiteFailureMessages so this node joins failure resolution,
 * and refreshes the last-received timestamp used for dead-host detection.
 */
private void handleRead(ByteBuffer in, Connection c) throws IOException {
    // port is locked by VoltNetwork when in valid use.
    // assert(m_port.m_lock.tryLock() == true);
    long recvDests[] = null;
    final long sourceHSId = in.getLong();
    final int destCount = in.getInt();
    if (destCount == POISON_PILL) {//This is a poison pill
        //Ignore poison pill during shutdown, in tests we receive crash messages from
        //leader appointer during shutdown
        if (VoltDB.instance().getMode() == OperationMode.SHUTTINGDOWN) {
            return;
        }
        byte messageBytes[] = new byte[in.getInt()];
        in.get(messageBytes);
        String message = new String(messageBytes, "UTF-8");
        message = String.format("Fatal error from id,hostname(%d,%s): %s",
                m_hostId, hostnameAndIPAndPort(), message);
        //if poison pill with particular cause handle it.
        int cause = in.getInt();
        if (cause == ForeignHost.CRASH_ME) {
            int hid = VoltDB.instance().getHostMessenger().getHostId();
            hostLog.debug("Poison Pill with target me was sent.: " + hid);
            //Killing myself.
            VoltDB.instance().halt();
        } else if (cause == ForeignHost.CRASH_ALL || cause == ForeignHost.CRASH_SPECIFIED) {
            org.voltdb.VoltDB.crashLocalVoltDB(message, false, null);
        } else if (cause == ForeignHost.PRINT_STACKTRACE) {
            //collect thread dumps
            String dumpDir = new File(VoltDB.instance().getVoltDBRootPath(),
                    "thread_dumps").getAbsolutePath();
            String fileName = m_hostMessenger.getHostname() + "_host-" + m_hostId
                    + "_" + System.currentTimeMillis()+".jstack";
            VoltDB.dumpThreadTraceToFile(dumpDir, fileName );
        } else {
            //Should never come here.
            hostLog.error("Invalid Cause in poison pill: " + cause);
        }
        return;
    } else if (destCount == STOPNODE_NOTICE) {
        int targetHostId = in.getInt();
        hostLog.info("Receive StopNode notice for host " + targetHostId);
        m_hostMessenger.addStopNodeNotice(targetHostId);
        return;
    }
    recvDests = new long[destCount];
    for (int i = 0; i < destCount; i++) {
        recvDests[i] = in.getLong();
    }
    final VoltMessage message =
            m_hostMessenger.getMessageFactory().createMessageFromBuffer(in, sourceHSId);
    // ENG-1608. We sniff for SiteFailureMessage here so
    // that a node will participate in the failure resolution protocol
    // even if it hasn't directly witnessed a node fault.
    if ( message instanceof SiteFailureMessage
            && !(message instanceof SiteFailureForwardMessage)) {
        SiteFailureMessage sfm = (SiteFailureMessage)message;
        for (FaultMessage fm: sfm.asFaultMessages()) {
            m_hostMessenger.relayForeignHostFailed(fm);
        }
    }
    for (int i = 0; i < destCount; i++) {
        deliverMessage( recvDests[i], message);
    }
    //m_lastMessageMillis = System.currentTimeMillis();
    m_lastMessageMillis.lazySet(EstTime.currentTimeMillis());
}
java
/**
 * Returns the first node of the tree that falls inside this multiset's
 * range, or null when the tree is empty or no element is in range.
 */
@Nullable
private AvlNode<E> firstNode() {
    // Snapshot the root exactly once.  The original re-read
    // rootReference.get() below the null check; a concurrent update could
    // make the second read null (NPE) or a different tree version,
    // yielding an inconsistent result.
    AvlNode<E> root = rootReference.get();
    if (root == null) {
        return null;
    }
    AvlNode<E> node;
    if (range.hasLowerBound()) {
        E endpoint = range.getLowerEndpoint();
        node = root.ceiling(comparator(), endpoint);
        if (node == null) {
            return null;
        }
        // An OPEN lower bound excludes the endpoint itself.
        if (range.getLowerBoundType() == BoundType.OPEN
                && comparator().compare(endpoint, node.getElement()) == 0) {
            node = node.succ;
        }
    } else {
        node = header.succ;
    }
    return (node == header || !range.contains(node.getElement())) ? null : node;
}
java
/**
 * Creates a media type with the given type and subtype and no parameters.
 */
public static MediaType create(String type, String subtype) {
    return create(type, subtype, ImmutableListMultimap.<String, String>of());
}
java
private static int getSerializedParamSizeForApplyBinaryLog(int streamCount, int remotePartitionCount, int concatLogSize) { int serializedParamSize = 2 + 1 + 4 // placeholder byte[0] + 1 + 4 // producerClusterId Integer + 1 + 4 + 4 + (4 + 8 * remotePartitionCount) * streamCount // concatLogIds byte[] + 1 + 4 + 4 + (4 + 8 + 8 + 4 + 4 + 16) * streamCount // concatTrackerBufs (DRConsumerDrIdTracker) byte[] + 1 + 4 + 4 + 4 * streamCount + concatLogSize // concatLogs byte[] + 1 + 1 // extraOption Byte + 1 + 4; // extraParameters byte[0] return serializedParamSize; }
java
/**
 * Resets per-attempt state so this transaction task can be restarted
 * (e.g. after a topology change or a poison response).
 */
void restart() {
    // The poisoning path will, unfortunately, set this to true. Need to undo that.
    setNeedsRollback(false);
    // Also need to make sure that we get the original invocation in the first fragment
    // since some masters may not have seen it.
    m_haveDistributedInitTask = false;
    m_isRestart = true;
    m_haveSentfragment = false;
    m_drBufferChangedAgg = 0;
}
java
/**
 * Clears batch state so the procedure can resume and run its next batch
 * cleanly.  The dependencies argument is not used by this implementation.
 */
@Override
public void setupProcedureResume(int[] dependencies) {
    // Reset state so we can run this batch cleanly
    m_localWork = null;
    m_remoteWork = null;
    m_remoteDeps = null;
    m_remoteDepTables.clear();
}
java
/**
 * List-based convenience overload: converts the dependency ids to a
 * primitive array and delegates to the int[] overload.
 */
public void setupProcedureResume(List<Integer> deps) {
    final int[] depArray = com.google_voltpatches.common.primitives.Ints.toArray(deps);
    setupProcedureResume(depArray);
}
java
/**
 * Re-sends the outstanding remote fragment after a leadership change.
 * When the responding site is no longer the partition master, records the
 * reroute and refreshes the master maps so follow-up fragments also go to
 * the new leaders.
 */
public void restartFragment(FragmentResponseMessage message, List<Long> masters,
                            Map<Integer, Long> partitionMastersMap) {
    final int partionId = message.getPartitionId();
    Long restartHsid = partitionMastersMap.get(partionId);
    Long hsid = message.getExecutorSiteId();
    if (!hsid.equals(restartHsid)) {
        m_masterMapForFragmentRestart.clear();
        m_masterMapForFragmentRestart.put(restartHsid, hsid);
        //The very first fragment is to be rerouted to the new leader, then all the follow-up fragments are routed
        //to new leaders.
        updateMasters(masters, partitionMastersMap);
    }
    // No known new master for this partition: fall back to the responder.
    if (restartHsid == null) {
        restartHsid = hsid;
    }
    if (tmLog.isDebugEnabled()) {
        tmLog.debug("Rerouted fragment from " + CoreUtils.hsIdToString(hsid) +
                " to " + CoreUtils.hsIdToString(restartHsid) + "\n" + m_remoteWork);
    }
    m_fragmentRestarted = true;
    m_mbox.send(restartHsid, m_remoteWork);
}
java
/**
 * Returns true when some pre-existing index on the same table already
 * covers the new unique index (so adding it cannot introduce violations).
 */
private boolean checkNewUniqueIndex(Index newIndex) {
    final Table table = (Table) newIndex.getParent();
    final CatalogMap<Index> existingIndexes =
            m_originalIndexesByTable.get(table.getTypeName());
    for (Index candidate : existingIndexes) {
        if (indexCovers(newIndex, candidate)) {
            return true;
        }
    }
    return false;
}
java
/**
 * Builds the error message shown when a view definition cannot always be
 * applied to populated source tables.  singleTableName is non-null for
 * single-table views and null for multi-table views.
 */
private String createViewDisallowedMessage(String viewName, String singleTableName) {
    final boolean singleTable = (singleTableName != null);
    final String scope = singleTable ? "single table " : "multi-table ";
    final String tableClause =
            singleTable ? String.format("on table %s ", singleTableName) : "";
    final String condition = singleTable
            ? "the table already contains data"
            : "none of the source tables are empty";
    return String.format(
            "Unable to create %sview %s %sbecause the view definition uses operations that cannot always be applied if %s.",
            scope, viewName, tableClause, condition);
}
java
/**
 * When the view is not safe to create over non-empty sources, returns the
 * population requirements (all source tables must be empty) with the
 * standard error message; returns null when the view is safe.
 */
private TablePopulationRequirements getMVHandlerInfoMessage(MaterializedViewHandlerInfo mvh) {
    if (mvh.getIssafewithnonemptysources()) {
        return null;
    }
    final String viewName = mvh.getDesttable().getTypeName();
    final TablePopulationRequirements requirements =
            new TablePopulationRequirements(viewName);
    requirements.setErrorMessage(createViewDisallowedMessage(viewName, null));
    for (TableRef tref : mvh.getSourcetables()) {
        requirements.addTableName(tref.getTable().getTypeName());
    }
    return requirements;
}
java
/**
 * Records a single field modification in the catalog diff: validates it
 * against the modify whitelists (falling back to the empty-table rules),
 * notes whether the EE must see the change, serializes the command, and
 * tracks the change for descriptive reporting.
 */
private void writeModification(CatalogType newType, CatalogType prevType, String field) {
    // Don't write modifications if the field can be ignored
    if (checkModifyIgnoreList(newType, prevType, field)) {
        return;
    }
    // verify this is possible, write an error and mark return code false if so
    String errorMessage = checkModifyWhitelist(newType, prevType, field);
    // if it's not possible with non-empty tables, check for possible with empty tables
    if (errorMessage != null) {
        List<TablePopulationRequirements> responseList =
                checkModifyIfTableIsEmptyWhitelist(newType, prevType, field);
        // handle all the error messages and state from the modify check
        processModifyResponses(errorMessage, responseList);
    }
    // Latch true once any EE-relevant change is seen.
    if (! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE(newType)) {
        m_requiresCatalogDiffCmdsApplyToEE = true;
    }
    // write the commands to make it so
    // they will be ignored if the change is unsupported
    m_serializer.writeCommandForField(newType, field, true);
    // record the field change for later generation of descriptive text
    // though skip the schema field of database because it changes all the time
    // and the diff will be caught elsewhere
    // need a better way to generalize this
    if ((newType instanceof Database) && field.equals("schema")) {
        return;
    }
    CatalogChangeGroup cgrp = m_changes.get(DiffClass.get(newType));
    cgrp.processChange(newType, prevType, field);
}
java
protected static boolean checkCatalogDiffShouldApplyToEE(final CatalogType suspect) { // Warning: // This check list should be consistent with catalog items defined in EE // Once a new catalog type is added in EE, we should add it here. if (suspect instanceof Cluster || suspect instanceof Database) { return true; } // Information about user-defined functions need to be applied to EE. // Because the EE needs to know about the parameter types and the return type to do // many type casting operations. if (suspect instanceof Function) { return true; } if (suspect instanceof Table || suspect instanceof TableRef || suspect instanceof Column || suspect instanceof ColumnRef || suspect instanceof Index || suspect instanceof IndexRef || suspect instanceof Constraint || suspect instanceof ConstraintRef || suspect instanceof MaterializedViewInfo || suspect instanceof MaterializedViewHandlerInfo) { return true; } // Statement can be children of Table or MaterilizedViewInfo, which should apply to EE // But if they are under Procedure, we can skip them. if (suspect instanceof Statement && (suspect.getParent() instanceof Procedure == false)) { return true; } // PlanFragment is a similar case like Statement if (suspect instanceof PlanFragment && suspect.getParent() instanceof Statement && (suspect.getParent().getParent() instanceof Procedure == false)) { return true; } if (suspect instanceof Connector || suspect instanceof ConnectorProperty || suspect instanceof ConnectorTableInfo) { // export table related change, should not skip EE return true; } // The other changes in the catalog will not be applied to EE, // including User, Group, Procedures, etc return false; }
java
private void processModifyResponses(String errorMessage, List<TablePopulationRequirements> responseList) { assert(errorMessage != null); // if no requirements, then it's just not possible if (responseList == null) { m_supported = false; m_errors.append(errorMessage + "\n"); return; } // otherwise, it's possible if a specific table is empty // collect the error message(s) and decide if it can be done inside @UAC for (TablePopulationRequirements response : responseList) { String objectName = response.getObjectName(); String nonEmptyErrorMessage = response.getErrorMessage(); assert (nonEmptyErrorMessage != null); TablePopulationRequirements popreq = m_tablesThatMustBeEmpty.get(objectName); if (popreq == null) { popreq = response; m_tablesThatMustBeEmpty.put(objectName, popreq); } else { String newErrorMessage = popreq.getErrorMessage() + "\n " + response.getErrorMessage(); popreq.setErrorMessage(newErrorMessage); } } }
java
/**
 * Emits the catalog delete command for an item present in the previous
 * catalog but missing from the new one, after verifying that the drop is
 * supported (possibly only when specific tables are empty).
 *
 * @param prevType             the deleted catalog item
 * @param newlyChildlessParent the item's parent in the new catalog
 * @param mapName              the catalog map the item was removed from
 */
private void writeDeletion(CatalogType prevType, CatalogType newlyChildlessParent, String mapName) {
    // Don't write deletions if the field can be ignored
    if (checkDeleteIgnoreList(prevType, newlyChildlessParent, mapName, prevType.getTypeName())) {
        return;
    }
    // verify this is possible, write an error and mark return code false if so
    String errorMessage = checkAddDropWhitelist(prevType, ChangeType.DELETION);
    // if it's not possible with non-empty tables, check for possible with empty tables
    if (errorMessage != null) {
        TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist(prevType, ChangeType.DELETION);
        List<TablePopulationRequirements> responseList = null;
        if (response != null) {
            responseList = Collections.singletonList(response);
        }
        processModifyResponses(errorMessage, responseList);
    }
    // Track whether any diff command at all must be forwarded to the EE.
    if (! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE(prevType)) {
        m_requiresCatalogDiffCmdsApplyToEE = true;
    }
    // write the commands to make it so
    // they will be ignored if the change is unsupported
    m_serializer.writeDeleteDiffStatement(prevType, mapName);
    // add it to the set of deletions to later compute descriptive text
    CatalogChangeGroup cgrp = m_changes.get(DiffClass.get(prevType));
    cgrp.processDeletion(prevType, newlyChildlessParent);
}
java
/**
 * Emits the catalog add commands for an item present only in the new
 * catalog, after verifying that the addition is supported (possibly only
 * when specific tables are empty).
 *
 * @param newType the added catalog item
 */
private void writeAddition(CatalogType newType) {
    // Don't write additions if the field can be ignored
    if (checkAddIgnoreList(newType)) {
        return;
    }
    // verify this is possible, write an error and mark return code false if so
    String errorMessage = checkAddDropWhitelist(newType, ChangeType.ADDITION);
    // if it's not possible with non-empty tables, check for possible with empty tables
    if (errorMessage != null) {
        TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist(newType, ChangeType.ADDITION);
        // handle all the error messages and state from the modify check
        List<TablePopulationRequirements> responseList = null;
        if (response != null) {
            responseList = Collections.singletonList(response);
        }
        processModifyResponses(errorMessage, responseList);
    }
    // Track whether any diff command at all must be forwarded to the EE.
    if (! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE(newType)) {
        m_requiresCatalogDiffCmdsApplyToEE = true;
    }
    // write the commands to make it so
    // they will be ignored if the change is unsupported
    newType.accept(m_serializer);
    // add it to the set of additions to later compute descriptive text
    CatalogChangeGroup cgrp = m_changes.get(DiffClass.get(newType));
    cgrp.processAddition(newType);
}
java
private void getCommandsToDiff(String mapName, CatalogMap<? extends CatalogType> prevMap, CatalogMap<? extends CatalogType> newMap) { assert(prevMap != null); assert(newMap != null); // in previous, not in new for (CatalogType prevType : prevMap) { String name = prevType.getTypeName(); CatalogType newType = newMap.get(name); if (newType == null) { writeDeletion(prevType, newMap.m_parent, mapName); continue; } diffRecursively(prevType, newType); } // in new, not in previous for (CatalogType newType : newMap) { CatalogType prevType = prevMap.get(newType.getTypeName()); if (prevType != null) continue; writeAddition(newType); } }
java
/**
 * Returns the SQL text representation of this expression node.
 * VALUE nodes render their literal (or NULL); ROW nodes render a
 * parenthesized, comma-separated list of children; TABLE nodes render the
 * bare comma-separated list. Subquery rendering is currently disabled
 * (see the commented-out block); any other opType is an internal error.
 *
 * @return the SQL string for this node
 */
public String getSQL() {
    StringBuffer sb = new StringBuffer(64);
    switch (opType) {
        case OpTypes.VALUE :
            if (valueData == null) {
                return Tokens.T_NULL;
            }
            return dataType.convertToSQLString(valueData);
        case OpTypes.ROW :
            sb.append('(');
            for (int i = 0; i < nodes.length; i++) {
                sb.append(nodes[i].getSQL());
                if (i < nodes.length - 1) {
                    sb.append(',');
                }
            }
            sb.append(')');
            return sb.toString();
        //
        case OpTypes.TABLE :
            // Same as ROW but without the surrounding parentheses.
            for (int i = 0; i < nodes.length; i++) {
                sb.append(nodes[i].getSQL());
                if (i < nodes.length - 1) {
                    sb.append(',');
                }
            }
            return sb.toString();
    }
    switch (opType) {
        case OpTypes.ROW_SUBQUERY :
        case OpTypes.TABLE_SUBQUERY :
            // Subquery SQL generation is intentionally disabled; these
            // opTypes fall through and return the (empty) buffer.
            /* buf.append('('); buf.append(subSelect.getSQL()); buf.append(')'); */
            break;
        default :
            throw Error.runtimeError(ErrorCode.U_S0500, "Expression");
    }
    return sb.toString();
}
java
/**
 * Re-types this expression node. For VALUE nodes the stored literal is
 * first converted from the current dataType to the new type; all nodes
 * then adopt the new type.
 *
 * @param session current session (used for the value conversion)
 * @param type    the new data type for this node
 */
void setDataType(Session session, Type type) {
    final boolean isLiteral = (opType == OpTypes.VALUE);
    if (isLiteral) {
        valueData = type.convertToType(session, valueData, dataType);
    }
    dataType = type;
}
java
/**
 * Recursively substitutes select-list aliases referenced in an ORDER BY
 * expression, rewriting each non-null child in place.
 *
 * @param columns the select-list expressions providing the aliases
 * @param length  the number of usable entries in {@code columns}
 * @return this node (children possibly replaced)
 */
Expression replaceAliasInOrderBy(Expression[] columns, int length) {
    for (int i = 0; i < nodes.length; i++) {
        Expression child = nodes[i];
        if (child != null) {
            nodes[i] = child.replaceAliasInOrderBy(columns, length);
        }
    }
    return this;
}
java
/**
 * Convenience overload: resolves this expression's column references
 * against every range variable in the array, allowing outer references.
 *
 * @param rangeVarArray the visible range variables
 * @param unresolvedSet accumulator for still-unresolved references
 * @return the (possibly updated) set of unresolved references
 */
public HsqlList resolveColumnReferences(RangeVariable[] rangeVarArray,
                                        HsqlList unresolvedSet) {
    final int rangeCount = rangeVarArray.length;
    return resolveColumnReferences(rangeVarArray, rangeCount, unresolvedSet, true);
}
java
/**
 * Materializes this node's VALUES rows into the subquery's table store,
 * converting each column value to the declared common row type before
 * inserting and indexing the row.
 *
 * @param session current session
 * @param store   the persistent store backing the subquery table
 */
void insertValuesIntoSubqueryTable(Session session, PersistentStore store) {
    for (int i = 0; i < nodes.length; i++) {
        Object[] data = nodes[i].getRowValue(session);
        // Convert every column of the row to the common node data type.
        for (int j = 0; j < nodeDataTypes.length; j++) {
            data[j] = nodeDataTypes[j].convertToType(session, data[j], nodes[i].nodes[j].dataType);
        }
        Row row = (Row) store.getNewCachedObject(session, data);
        try {
            store.indexRow(session, row);
        }
        //XXX: what conditions are being casually ignored here?
        // NOTE(review): indexing failures (presumably duplicate rows or
        // constraint violations) are deliberately dropped — confirm before
        // tightening this handler.
        catch (HsqlException e) {}
    }
}
java
/**
 * Builds the SELECT used to validate a CHECK constraint against a table's
 * existing rows: SELECT TRUE FROM t WHERE NOT (condition), so any row
 * returned represents a constraint violation.
 *
 * @param session current session
 * @param t       the table the constraint applies to
 * @param e       the check condition expression
 * @return the compiled query specification
 * @throws HsqlException (X_42568) when the condition is not boolean
 */
static QuerySpecification getCheckSelect(Session session, Table t, Expression e) {
    CompileContext compileContext = new CompileContext(session);
    QuerySpecification s = new QuerySpecification(compileContext);
    s.exprColumns = new Expression[1];
    s.exprColumns[0] = EXPR_TRUE;
    RangeVariable range = new RangeVariable(t, null, null, null, compileContext);
    s.rangeVariables = new RangeVariable[]{ range };
    // Resolve the condition's columns against the target table only.
    HsqlList unresolved = e.resolveColumnReferences(s.rangeVariables, null);
    ExpressionColumn.checkColumnsResolved(unresolved);
    e.resolveTypes(session, null);
    // A check constraint must evaluate to a boolean.
    if (Type.SQL_BOOLEAN != e.getDataType()) {
        throw Error.error(ErrorCode.X_42568);
    }
    // Violating rows are those where the condition does NOT hold.
    Expression condition = new ExpressionLogical(OpTypes.NOT, e);
    s.queryCondition = condition;
    s.resolveReferences(session);
    s.resolveTypes(session);
    return s;
}
java
/**
 * Depth-first collection of every sub-expression whose opType appears in
 * typeSet, pruning subtrees rooted at opTypes listed in stopAtTypeSet and
 * descending into subquery expressions as well.
 *
 * @param set           accumulator for matching expressions
 * @param e             root of the expression tree (may be null)
 * @param typeSet       opTypes to collect
 * @param stopAtTypeSet opTypes at which traversal stops
 */
static void collectAllExpressions(HsqlList set, Expression e,
                                  OrderedIntHashSet typeSet,
                                  OrderedIntHashSet stopAtTypeSet) {
    if (e == null || stopAtTypeSet.contains(e.opType)) {
        return;
    }
    // Children first, then this node (post-order accumulation).
    for (Expression child : e.nodes) {
        collectAllExpressions(set, child, typeSet, stopAtTypeSet);
    }
    if (typeSet.contains(e.opType)) {
        set.add(e);
    }
    // Also walk into any attached subquery's expression tree.
    if (e.subQuery != null && e.subQuery.queryExpression != null) {
        e.subQuery.queryExpression.collectAllExpressions(set, typeSet, stopAtTypeSet);
    }
}
java
/**
 * Returns (and caches) a unique id string for this expression node,
 * obtained from the session's node id for the expression. Non-leaf nodes
 * are traversed first so their children contribute to the id assignment.
 *
 * @param session current session, used to obtain/assign the node id
 * @return the cached unique id string
 */
protected String getUniqueId(final Session session) {
    if (cached_id != null) {
        return cached_id;
    }

    //
    // Calculated an new Id
    //

    // this line ripped from the "describe" method
    // seems to help with some types like "equal"
    // NOTE(review): this looks like a dead store, but if traverse() below
    // can re-enter getUniqueId() on this same node, the non-null sentinel
    // short-circuits the recursion at the early return above — confirm
    // before removing.
    cached_id = new String();

    //
    // If object is a leaf node, then we'll use John's original code...
    // Otherwise we need to generate and Id based on what our children are
    //
    if (getType() != OpTypes.VALUE && getType() != OpTypes.COLUMN) {
        //
        // Horribly inefficient, but it works for now...
        //
        traverse(this, session);
    }
    long nodeId = session.getNodeIdForExpression(this);
    cached_id = Long.toString(nodeId);
    return cached_id;
}
java
/**
 * Rewrites the columnref children of a USING-join expression into a nested
 * COALESCE tree: COALESCE(c1, c2, ..., cn) is encoded as a chain of
 * IS_NULL / ALTERNATIVE VoltXML elements, i.e. CASE WHEN c1 IS NULL THEN
 * (coalesce of the rest) ELSE c1 END.
 *
 * @param session  current session, used to mint unique element ids
 * @param exp      the VoltXML element whose children are the columnrefs
 * @param dataType the common data type of the joined columns (non-null)
 * @return the root COALESCE VoltXML element
 * @throws org.hsqldb_voltpatches.HSQLInterface.HSQLParseException
 *         when a required expression prototype is unsupported
 */
private VoltXMLElement convertUsingColumnrefToCoaleseExpression(Session session, VoltXMLElement exp, Type dataType)
        throws org.hsqldb_voltpatches.HSQLInterface.HSQLParseException {
    // Hsql has check dataType can not be null.
    assert(dataType != null);
    exp.attributes.put("valuetype", dataType.getNameString());
    // Extract unique columnref: keep one reference per distinct table (or
    // table alias, when present).
    HashSet<String> tables = new HashSet<>();
    ArrayDeque<VoltXMLElement> uniqueColumnrefs = new ArrayDeque<>();
    for (VoltXMLElement columnref : exp.children) {
        String table = columnref.attributes.get("table");
        String tableAlias = columnref.attributes.get("tablealias");
        assert (table != null);
        String tableOrAlias = (tableAlias == null) ? table : tableAlias;
        if (tables.contains(tableOrAlias)) {
            continue;
        }
        tables.add(tableOrAlias);
        uniqueColumnrefs.add(columnref);
    }
    // Delete original children
    exp.children.clear();
    // There should be at least 2 columnref expressions
    assert(uniqueColumnrefs.size() > 1);
    VoltXMLElement lastAlternativeExpr = null;
    VoltXMLElement resultColaesceExpr = null;
    while (true) {
        VoltXMLElement next = uniqueColumnrefs.pop();
        if (uniqueColumnrefs.isEmpty()) {
            // Last columnref. Simply plug it in to the last THEN Expression
            assert(lastAlternativeExpr != null);
            // Add next as the first child
            lastAlternativeExpr.children.add(0, next);
            break;
        }
        // IS_NULL expression
        VoltXMLElement isnull_expr = prototypes.get(OpTypes.IS_NULL);
        if (isnull_expr == null) {
            throwForUnsupportedExpression(OpTypes.IS_NULL);
        }
        isnull_expr = isnull_expr.duplicate();
        isnull_expr.attributes.put("id", this.getUniqueId(session));
        isnull_expr.children.add(next);
        // Alternative expression
        VoltXMLElement alt_expr = prototypes.get(OpTypes.ALTERNATIVE);
        if (alt_expr == null) {
            throwForUnsupportedExpression(OpTypes.ALTERNATIVE);
        }
        alt_expr = alt_expr.duplicate();
        alt_expr.attributes.put("id", this.getUniqueId(session));
        alt_expr.attributes.put("valuetype", dataType.getNameString());
        // The next expression should be a second child
        // but for now we keep it as the first one
        alt_expr.children.add(next);
        // COALESCE expression
        VoltXMLElement coalesceExpr = exp.duplicate();
        coalesceExpr.attributes.put("alias", next.attributes.get("alias"));
        coalesceExpr.attributes.put("column", next.attributes.get("column"));
        // Add IS NULL and ALTERNATIVE expressions to the coalesceExpr
        coalesceExpr.children.add(isnull_expr);
        coalesceExpr.children.add(alt_expr);
        if (resultColaesceExpr == null) {
            // First iteration builds the outermost COALESCE.
            resultColaesceExpr = coalesceExpr;
        } else {
            assert(lastAlternativeExpr != null);
            // Add coalesceExpr as the first child to the last alternative expression
            lastAlternativeExpr.children.add(0, coalesceExpr);
        }
        lastAlternativeExpr = alt_expr;
    }
    assert(resultColaesceExpr != null);
    return resultColaesceExpr;
}
java
private void appendOptionGroup(StringBuffer buff, OptionGroup group) { if (!group.isRequired()) { buff.append("["); } List<Option> optList = new ArrayList<Option>(group.getOptions()); if (getOptionComparator() != null) { Collections.sort(optList, getOptionComparator()); } // for each option in the OptionGroup for (Iterator<Option> it = optList.iterator(); it.hasNext();) { // whether the option is required or not is handled at group level appendOption(buff, it.next(), true); if (it.hasNext()) { buff.append(" | "); } } if (!group.isRequired()) { buff.append("]"); } }
java
private void appendOption(StringBuffer buff, Option option, boolean required) { if (!required) { buff.append("["); } if (option.getOpt() != null) { buff.append("-").append(option.getOpt()); } else { buff.append("--").append(option.getLongOpt()); } // if the Option has a value and a non blank argname if (option.hasArg() && (option.getArgName() == null || option.getArgName().length() != 0)) { buff.append(option.getOpt() == null ? longOptSeparator : " "); buff.append("<").append(option.getArgName() != null ? option.getArgName() : getArgName()).append(">"); } // if the Option is not a required option if (!required) { buff.append("]"); } }
java
/**
 * Prints a pre-formatted usage statement, wrapped to the given width, with
 * continuation lines indented past the syntax prefix and the command name.
 *
 * @param pw            writer to print to
 * @param width         maximum line width
 * @param cmdLineSyntax the usage statement to print
 */
public void printUsage(PrintWriter pw, int width, String cmdLineSyntax) {
    // Indent wrapped lines to just after "<prefix><command> ".
    final int argPos = cmdLineSyntax.indexOf(' ') + 1;
    final int tabStop = getSyntaxPrefix().length() + argPos;
    printWrapped(pw, width, tabStop, getSyntaxPrefix() + cmdLineSyntax);
}
java
/**
 * Renders the two-column help table for the given options into sb: a
 * left-padded "-o,--opt &lt;arg&gt;" column sized to the widest entry, then the
 * wrapped description column.
 *
 * @param sb      target buffer
 * @param width   maximum line width
 * @param options the options to render
 * @param leftPad spaces before the option column
 * @param descPad spaces between the option and description columns
 * @return sb, for chaining
 */
protected StringBuffer renderOptions(StringBuffer sb, int width, Options options, int leftPad, int descPad) {
    final String lpad = createPadding(leftPad);
    final String dpad = createPadding(descPad);
    // first create list containing only <lpad>-a,--aaa where
    // -a is opt and --aaa is long opt; in parallel look for
    // the longest opt string this list will be then used to
    // sort options ascending
    int max = 0;
    List<StringBuffer> prefixList = new ArrayList<StringBuffer>();
    List<Option> optList = options.helpOptions();
    if (getOptionComparator() != null) {
        Collections.sort(optList, getOptionComparator());
    }
    for (Option option : optList) {
        StringBuffer optBuf = new StringBuffer();
        if (option.getOpt() == null) {
            // Long-only option: indent where the short name would be.
            optBuf.append(lpad).append(" ").append(getLongOptPrefix()).append(option.getLongOpt());
        }
        else {
            optBuf.append(lpad).append(getOptPrefix()).append(option.getOpt());
            if (option.hasLongOpt()) {
                optBuf.append(',').append(getLongOptPrefix()).append(option.getLongOpt());
            }
        }
        if (option.hasArg()) {
            String argName = option.getArgName();
            if (argName != null && argName.length() == 0) {
                // if the option has a blank argname
                optBuf.append(' ');
            }
            else {
                optBuf.append(option.hasLongOpt() ? longOptSeparator : " ");
                // Null argName means "use the formatter's default name".
                optBuf.append("<").append(argName != null ? option.getArgName() : getArgName()).append(">");
            }
        }
        prefixList.add(optBuf);
        max = optBuf.length() > max ? optBuf.length() : max;
    }
    int x = 0;
    for (Iterator<Option> it = optList.iterator(); it.hasNext();) {
        Option option = it.next();
        StringBuilder optBuf = new StringBuilder(prefixList.get(x++).toString());
        // Pad every prefix out to the widest one, then add the column gap.
        if (optBuf.length() < max) {
            optBuf.append(createPadding(max - optBuf.length()));
        }
        optBuf.append(dpad);
        int nextLineTabStop = max + descPad;
        if (option.getDescription() != null) {
            optBuf.append(option.getDescription());
        }
        renderWrappedText(sb, width, nextLineTabStop, optBuf.toString());
        if (it.hasNext()) {
            sb.append(getNewLine());
        }
    }
    return sb;
}
java
/**
 * Renders text into sb wrapped to the given width, indenting every
 * continuation line by nextLineTabStop spaces.
 *
 * @param sb              target buffer
 * @param width           maximum line width
 * @param nextLineTabStop indent for wrapped lines (clamped below width)
 * @param text            the text to wrap
 * @return sb, for chaining
 */
protected StringBuffer renderWrappedText(StringBuffer sb, int width, int nextLineTabStop, String text) {
    int pos = findWrapPos(text, width, 0);
    if (pos == -1) {
        // Fits on one line.
        sb.append(rtrim(text));
        return sb;
    }
    sb.append(rtrim(text.substring(0, pos))).append(getNewLine());
    if (nextLineTabStop >= width) {
        // stops infinite loop happening
        nextLineTabStop = 1;
    }
    // all following lines must be padded with nextLineTabStop space characters
    final String padding = createPadding(nextLineTabStop);
    while (true) {
        text = padding + text.substring(pos).trim();
        pos = findWrapPos(text, width, 0);
        if (pos == -1) {
            sb.append(text);
            return sb;
        }
        if (text.length() > width && pos == nextLineTabStop - 1) {
            // The only break found is inside the padding itself; force a
            // hard wrap at the full width to guarantee progress.
            pos = width;
        }
        sb.append(rtrim(text.substring(0, pos))).append(getNewLine());
    }
}
java
/**
 * Reports whether an existing function descriptor has exactly this return
 * type and parameter type list. Types are compared by reference, matching
 * how descriptors are registered.
 *
 * @param existingFd     the descriptor to compare against
 * @param returnType     candidate return type
 * @param parameterTypes candidate parameter types, in order
 * @return true when both signatures match exactly
 */
private static boolean functionMatches(FunctionDescriptor existingFd,
                                       Type returnType,
                                       Type[] parameterTypes) {
    if (returnType != existingFd.m_type
            || parameterTypes.length != existingFd.m_paramTypes.length) {
        return false;
    }
    for (int idx = 0; idx < parameterTypes.length; idx++) {
        if (parameterTypes[idx] != existingFd.m_paramTypes[idx]) {
            return false;
        }
    }
    return true;
}
java
/**
 * Looks up a function descriptor by name among the defined and the saved
 * user-defined functions, returning it only when the signature (return
 * type and parameter types) also matches.
 *
 * @param functionName  lower-cased function name to find
 * @param returnType    expected return type
 * @param parameterType expected parameter types, in order
 * @return the matching descriptor, or null when none matches
 */
private static FunctionDescriptor findFunction(String functionName,
                                               Type returnType,
                                               Type[] parameterType) {
    m_logger.debug("Looking for UDF " + functionName);
    // Check the live definitions first, then the saved (dropped) ones.
    FunctionDescriptor fd = FunctionDescriptor.m_by_LC_name.get(functionName);
    if (fd == null) {
        m_logger.debug(" Not defined in by_LC_name. Maybe it's saved.");
        fd = FunctionDescriptor.m_saved_functions.get(functionName);
    }
    final boolean matches = (fd != null) && functionMatches(fd, returnType, parameterType);
    if (matches) {
        m_logger.debug(" " + functionName + " is defined or saved. id == " + fd.getId());
        return fd;
    }
    m_logger.debug(" " + functionName + " is not defined or saved.");
    return null;
}
java
/**
 * Registers (or re-verifies) the token for a user-defined SQL function and
 * returns the function id in effect. An existing descriptor with the same
 * name and signature is reused; otherwise a new descriptor is created,
 * using the given id when valid or a freshly allocated one.
 *
 * @param functionName       lower-cased function name
 * @param functionId         id from the catalog, or a negative value when
 *                           the function is newly created
 * @param voltReturnType     VoltDB return type
 * @param voltParameterTypes VoltDB parameter types, in order
 * @return the function id actually registered
 */
public static synchronized int registerTokenForUDF(String functionName,
                                                   int functionId,
                                                   VoltType voltReturnType,
                                                   VoltType[] voltParameterTypes) {
    int retFunctionId;
    Type hsqlReturnType = hsqlTypeFromVoltType(voltReturnType);
    Type[] hsqlParameterTypes = hsqlTypeFromVoltType(voltParameterTypes);
    // If the token is already registered in the map, do not bother again.
    FunctionDescriptor oldFd = findFunction(functionName, hsqlReturnType, hsqlParameterTypes);
    if (oldFd != null) {
        // This may replace functionName with itself. This will not be an error.
        FunctionDescriptor.addDefinedFunction(functionName, oldFd);
        retFunctionId = oldFd.getId();
        // If we were given a non-negative function id, it
        // was defined in the catalog. Our re-verification here
        // should have a value which we put into the catalog sometime
        // earlier. So, this earlier value should match the one we
        // were told to return.
        assert((functionId < 0) || (functionId == retFunctionId));
    } else {
        // if the function was not already defined, then
        // if functionId is a valid UDF id or pre-defined SQL function id, then use it
        // otherwise, we want a new number.
        //
        if (functionId > 0) {
            retFunctionId = functionId;
        } else {
            retFunctionId = getNextFunctionId();
        }
        FunctionDescriptor fd = makeFunctionDescriptorFromParts(functionName, retFunctionId, hsqlReturnType, hsqlParameterTypes);
        // if the function id belongs to UDF, put it into the defined_function map
        if (isUserDefinedFunctionId(retFunctionId)) {
            FunctionDescriptor.addDefinedFunction(functionName, fd);
        }
        m_logger.debug(String.format("Added UDF \"%s\"(%d) with %d parameters", functionName, retFunctionId, voltParameterTypes.length));
    }
    // Ensure that m_udfSeqId is larger than all the
    // ones we've seen so far.
    if (m_udfSeqId <= retFunctionId) {
        m_udfSeqId = retFunctionId + 1;
    }
    return retFunctionId;
}
java
/**
 * Maps a VoltDB type to the default HSQL Type of the corresponding SQL
 * parameter type number.
 *
 * @param voltReturnType the VoltDB type to convert
 * @return the equivalent HSQL type with default size
 */
public static Type hsqlTypeFromVoltType(VoltType voltReturnType) {
    final Class<?> javaClass = VoltType.classFromByteValue(voltReturnType.getValue());
    final int sqlTypeNumber = Types.getParameterSQLTypeNumber(javaClass);
    return Type.getDefaultTypeWithSize(sqlTypeNumber);
}
java
/**
 * Array form of {@link #hsqlTypeFromVoltType(VoltType)}: converts each
 * VoltDB type to its HSQL equivalent, preserving order.
 *
 * @param voltParameterTypes the VoltDB types to convert
 * @return a new array of equivalent HSQL types
 */
public static Type[] hsqlTypeFromVoltType(VoltType[] voltParameterTypes) {
    final int count = voltParameterTypes.length;
    Type[] converted = new Type[count];
    for (int idx = 0; idx < count; idx++) {
        converted[idx] = hsqlTypeFromVoltType(voltParameterTypes[idx]);
    }
    return converted;
}
java
/**
 * Allocates one NodeAVLMemoryPointer per table index and links them into
 * the nNext chain starting at nPrimaryNode.
 */
void setNewNodes() {
    final int indexCount = tTable.getIndexCount();
    nPrimaryNode = new NodeAVLMemoryPointer(this);
    // One extra node per additional index, chained off the primary node.
    NodeAVL tail = nPrimaryNode;
    for (int i = 1; i < indexCount; i++) {
        tail.nNext = new NodeAVLMemoryPointer(this);
        tail = tail.nNext;
    }
}
java
/**
 * Ensures the current tail buffer can hold a message of the given size:
 * when the tail is full it is compiled and handed to the executor for
 * asynchronous queueing, and a fresh buffer is started.
 *
 * @param messageSize serialized size of the next message to log
 * @throws IOException declared for callers; buffering errors are handled
 *         by crashing the node from the executor task
 */
private void bufferCatchup(int messageSize) throws IOException {
    // If the current buffer has too many tasks logged, queue it and
    // create a new one.
    if (m_tail != null && m_tail.size() > 0 && messageSize > m_bufferHeadroom) {
        // compile the invocation buffer
        m_tail.compile();
        final RejoinTaskBuffer boundTail = m_tail;
        final Runnable r = new Runnable() {
            @Override
            public void run() {
                try {
                    m_buffers.offer(boundTail.getContainer());
                    if (m_reader.sizeInBytes() > m_overflowLimit * 1024 * 1024) {
                        // we can never catch up, should break rejoin.
                        VoltDB.crashLocalVoltDB("On-disk task log is full. Please reduce " +
                                "workload and try live rejoin again, or use blocking rejoin.");
                    }
                } catch (Throwable t) {
                    VoltDB.crashLocalVoltDB("Error in task log buffering transactions", true, t);
                }
            }
        };
        m_es.execute(r);
        // Reset
        m_tail = null;
        m_tasksPendingInCurrentTail = 0;
    }
    // create a new buffer
    if (m_tail == null) {
        m_tail = new RejoinTaskBuffer(m_partitionId, messageSize);
        m_bufferHeadroom = RejoinTaskBuffer.DEFAULT_BUFFER_SIZE;
    }
}
java
/**
 * Returns the next logged task, or null when no task newer than the
 * snapshot is available. Refills the head buffer asynchronously (keeping
 * up to three buffers in flight) and short-circuits the tail buffer into
 * the head position when it holds the only remaining tasks.
 *
 * @return the next replayable message, or null
 * @throws IOException when the log has been closed
 */
@Override
public TransactionInfoBaseMessage getNextMessage() throws IOException {
    if (m_closed) {
        throw new IOException("Closed");
    }
    if (m_head == null) {
        // Get another buffer asynchronously
        final Runnable r = new Runnable() {
            @Override
            public void run() {
                try {
                    BBContainer cont = m_reader.poll(PersistentBinaryDeque.UNSAFE_CONTAINER_FACTORY);
                    if (cont != null) {
                        m_headBuffers.offer(new RejoinTaskBuffer(cont));
                    }
                } catch (Throwable t) {
                    VoltDB.crashLocalVoltDB("Error retrieving buffer data in task log", true, t);
                } finally {
                    m_pendingPolls.decrementAndGet();
                }
            }
        };
        //Always keep three buffers ready to go
        for (int ii = m_pendingPolls.get() + m_headBuffers.size(); ii < 3; ii++) {
            m_pendingPolls.incrementAndGet();
            m_es.execute(r);
        }
        m_head = m_headBuffers.poll();
    }
    TransactionInfoBaseMessage nextTask = null;
    if (m_head != null) {
        nextTask = m_head.nextTask();
        if (nextTask == null) {
            scheduleDiscard(m_head);
            // current buffer is completely consumed, move to the next
            m_head = null;
        } else {
            m_taskCount--;
        }
    } else if ((m_taskCount - m_tasksPendingInCurrentTail == 0) && m_tail != null) {
        m_tasksPendingInCurrentTail = 0;
        /*
         * there is only one buffer left which hasn't been pushed into the
         * queue yet. set it to head directly, short-circuiting the queue.
         */
        m_tail.compile();
        // NOTE(review): m_head is known null on this branch, so this guard
        // and discard look unreachable — confirm before removing.
        if (m_head != null) {
            scheduleDiscard(m_head);
        }
        m_head = m_tail;
        m_tail = null;
        nextTask = getNextMessage();
    }
    // SPs or fragments that's before the actual snapshot fragment may end up in the task log,
    // because there can be multiple snapshot fragments enabling the task log due to snapshot
    // collision. Need to filter tasks here based on their spHandles.
    if (nextTask != null && nextTask.getSpHandle() > m_snapshotSpHandle) {
        return nextTask;
    } else {
        return null;
    }
}
java
/**
 * Synchronously writes a packet buffer to the client socket. The channel
 * is switched to blocking mode first so the write needs no retry loop.
 * The special {@code closeConn} sentinel buffer is never written.
 *
 * Fix: the original called {@code sock.configureBlocking(true)} before its
 * {@code sock != null} check, so a null (already-closed) socket caused an
 * uncaught NullPointerException instead of a quiet no-op.
 *
 * @param bb the buffer to send, or the {@code closeConn} sentinel
 */
void sendBufferSync(ByteBuffer bb) {
    try {
        // Guard before any dereference: the socket may already be closed.
        // NOTE(review): if another thread can null out sock concurrently,
        // a race window remains — confirm the locking model.
        if (sock == null) {
            return;
        }
        /* configure socket to be blocking
         * so that we dont have to do write in
         * a tight while loop
         */
        sock.configureBlocking(true);
        if (bb != closeConn) {
            sock.write(bb);
            packetSent();
        }
    } catch (IOException ie) {
        LOG.error("Error sending data synchronously ", ie);
    }
}
java
/**
 * Flushes and closes the four-letter-word response writer, then always
 * closes the underlying connection regardless of writer errors.
 *
 * @param pwriter the response writer, possibly null
 */
private void cleanupWriterSocket(PrintWriter pwriter) {
    try {
        if (pwriter == null) {
            return;
        }
        pwriter.flush();
        pwriter.close();
    } catch (Exception e) {
        LOG.info("Error closing PrintWriter ", e);
    } finally {
        // The connection itself is closed even when the writer failed.
        try {
            close();
        } catch (Exception e) {
            LOG.error("Error closing a command socket ", e);
        }
    }
}
java
private boolean readLength(SelectionKey k) throws IOException { // Read the length, now get the buffer int len = lenBuffer.getInt(); if (!initialized && checkFourLetterWord(k, len)) { return false; } if (len < 0 || len > BinaryInputArchive.maxBuffer) { throw new IOException("Len error " + len); } if (zk == null) { throw new IOException("ZooKeeperServer not running"); } incomingBuffer = ByteBuffer.allocate(len); return true; }
java
/**
 * Closes the client socket, shutting down output and input streams first
 * and ignoring the (common) exceptions each step can throw. The multi-step
 * sequence is deliberate: a bare close() does not reliably close the
 * socket in all cases.
 */
private void closeSock() {
    if (sock == null) {
        return;
    }
    // NOTE(review): this message is built (string concatenation) even when
    // debug logging is off — consider an isDebugEnabled() guard like the
    // handlers below.
    LOG.debug("Closed socket connection for client "
            + sock.socket().getRemoteSocketAddress()
            + (sessionId != 0 ?
                    " which had sessionid 0x" + Long.toHexString(sessionId) :
                    " (no session established for client)"));
    try {
        /*
         * The following sequence of code is stupid! You would think that
         * only sock.close() is needed, but alas, it doesn't work that way.
         * If you just do sock.close() there are cases where the socket
         * doesn't actually close...
         */
        sock.socket().shutdownOutput();
    } catch (IOException e) {
        // This is a relatively common exception that we can't avoid
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during output shutdown", e);
        }
    }
    try {
        sock.socket().shutdownInput();
    } catch (IOException e) {
        // This is a relatively common exception that we can't avoid
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during input shutdown", e);
        }
    }
    try {
        sock.socket().close();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during socket close", e);
        }
    }
    try {
        sock.close();
        // XXX The next line doesn't seem to be needed, but some posts
        // to forums suggest that it is needed. Keep in mind if errors in
        // this section arise.
        // factory.selector.wakeup();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during socketchannel close", e);
        }
    }
    sock = null;
}
java
/**
 * Fires one asynchronous Increment transaction at a random tuple id with a
 * random amount in [0, 4].
 */
void increment() {
    long id = rand.nextInt(config.tuples);
    long toIncrement = rand.nextInt(5); // 0 - 4
    try {
        client.callProcedure(new CMCallback(), "Increment", toIncrement, id);
    } catch (IOException e) {
        // This is not ideal error handling for production, but should be
        // harmless in a benchmark like this
        // Back off briefly so a transient connection failure doesn't spin.
        try {
            Thread.sleep(50);
        } catch (Exception e2) {}
    }
}
java
/**
 * Appends a statement to the database log when statement logging is
 * enabled and a log exists. Synchronized so concurrent sessions serialize
 * their writes.
 *
 * @param session   the session issuing the statement
 * @param statement the SQL text to record
 */
public synchronized void writeToLog(Session session, String statement) {
    final boolean loggable = logStatements && log != null;
    if (loggable) {
        log.writeStatement(session, statement);
    }
}
java
/**
 * Opens (via the log) the data file cache backing a text table source.
 *
 * @param table        the text table
 * @param source       the source file specification
 * @param readOnlyData whether the source is opened read-only
 * @param reversed     whether the source field order is reversed
 * @return the opened cache
 */
public DataFileCache openTextCache(Table table, String source,
                                   boolean readOnlyData, boolean reversed) {
    DataFileCache textCache = log.openTextCache(table, source, readOnlyData, reversed);
    return textCache;
}
java
/**
 * Initializes cache sizing and file parameters from database properties.
 * The scale properties are exponents: lookup table length is
 * 2^cache_scale, average row bytes 2^cache_size_scale, and the free-block
 * list limit 2^cache_free_count_scale.
 *
 * @param database     the owning database
 * @param baseFileName base path; ".data" and ".backup" suffixes are added
 */
protected void initParams(Database database, String baseFileName) {
    HsqlDatabaseProperties props = database.getProperties();
    fileName = baseFileName + ".data";
    backupFileName = baseFileName + ".backup";
    this.database = database;
    fa = database.getFileAccess();
    // Exponent properties with (default, min, max) bounds.
    int cacheScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_scale, 14, 8, 18);
    int cacheSizeScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_size_scale, 10, 6, 20);
    int cacheFreeCountScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_free_count_scale, 9, 6, 12);
    incBackup = database.getProperties().isPropertyTrue(HsqlDatabaseProperties.hsqldb_inc_backup);
    cacheFileScale = database.getProperties().getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_file_scale, 8);
    // Only 1 and 8 (or larger, below) are meaningful file scales here;
    // anything else is coerced to 8.
    if (cacheFileScale != 1) {
        cacheFileScale = 8;
    }
    cachedRowPadding = 8;
    if (cacheFileScale > 8) {
        cachedRowPadding = cacheFileScale;
    }
    cacheReadonly = database.isFilesReadOnly();
    int lookupTableLength = 1 << cacheScale;
    int avgRowBytes = 1 << cacheSizeScale;
    maxCacheSize = lookupTableLength * 3;
    maxCacheBytes = maxCacheSize * avgRowBytes;
    // With scale 1 the file is limited to 2GB; otherwise positions are
    // scaled, multiplying the addressable size.
    maxDataFileSize = cacheFileScale == 1 ? Integer.MAX_VALUE
                                          : (long) Integer.MAX_VALUE * cacheFileScale;
    maxFreeBlocks = 1 << cacheFreeCountScale;
    dataFile = null;
    shadowFile = null;
}
java
/**
 * Closes the data file, optionally flushing all cached rows and the file
 * metadata (lost-block size, free position, saved flag) first. When the
 * store is empty, the data and backup files are removed. Any failure is
 * wrapped in a FILE_IO_ERROR.
 *
 * @param write true to save the cache and metadata before closing;
 *              false to close without saving (e.g. during defrag)
 */
public void close(boolean write) {
    SimpleLog appLog = database.logger.appLog;
    try {
        if (cacheReadonly) {
            // Read-only store: nothing to flush, just release the file.
            if (dataFile != null) {
                dataFile.close();
                dataFile = null;
            }
            return;
        }
        StopWatch sw = new StopWatch();
        appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close(" + write + ") : start");
        if (write) {
            cache.saveAll();
            Error.printSystemOut("saveAll: " + sw.elapsedTime());
            appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : save data");
            if (fileModified || freeBlocks.isModified()) {
                // set empty
                dataFile.seek(LONG_EMPTY_SIZE);
                dataFile.writeLong(freeBlocks.getLostBlocksSize());
                // set end
                dataFile.seek(LONG_FREE_POS_POS);
                dataFile.writeLong(fileFreePosition);
                // set saved flag;
                dataFile.seek(FLAGS_POS);
                int flag = BitMap.set(0, FLAG_ISSAVED);
                if (hasRowInfo) {
                    flag = BitMap.set(flag, FLAG_ROWINFO);
                }
                dataFile.writeInt(flag);
                appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : flags");
                //
                if (dataFile.length() != fileFreePosition) {
                    dataFile.seek(fileFreePosition);
                }
                appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : seek end");
                Error.printSystemOut("pos and flags: " + sw.elapsedTime());
            }
        }
        if (dataFile != null) {
            dataFile.close();
            appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : close");
            dataFile = null;
            Error.printSystemOut("close: " + sw.elapsedTime());
        }
        boolean empty = fileFreePosition == INITIAL_FREE_POS;
        if (empty) {
            // Nothing was ever stored: remove both data and backup files.
            fa.removeElement(fileName);
            fa.removeElement(backupFileName);
        }
    } catch (Throwable e) {
        appLog.logContext(e, null);
        throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_DataFileCache_close, new Object[] {
            e, fileName
        });
    }
}
java
/**
 * Defragments the data file: saves all cached rows, rewrites the file via
 * DataFileDefrag, swaps the new file into place, re-opens the cache and
 * finally updates index roots and transaction row ids. No-op on read-only
 * or still-empty stores. Failures are logged and rethrown as HsqlException.
 */
public void defrag() {
    if (cacheReadonly) {
        return;
    }
    if (fileFreePosition == INITIAL_FREE_POS) {
        // Nothing has ever been written; there is nothing to compact.
        return;
    }
    database.logger.appLog.logContext(SimpleLog.LOG_NORMAL, "start");
    try {
        boolean wasNio = dataFile.wasNio();
        cache.saveAll();
        // Rewrite live rows into a fresh file.
        DataFileDefrag dfd = new DataFileDefrag(database, this, fileName);
        dfd.process();
        close(false);
        // Swap the compacted file into place and refresh the backup.
        deleteFile(wasNio);
        renameDataFile(wasNio);
        backupFile();
        database.getProperties().setProperty(HsqlDatabaseProperties.hsqldb_cache_version,
                                             HsqlDatabaseProperties.THIS_CACHE_VERSION);
        database.getProperties().save();
        cache.clear();
        cache = new Cache(this);
        open(cacheReadonly);
        // Row positions changed: indexes and transactions must be remapped.
        dfd.updateTableIndexRoots();
        dfd.updateTransactionRowIDs();
    } catch (Throwable e) {
        database.logger.appLog.logContext(e, null);
        if (e instanceof HsqlException) {
            throw (HsqlException) e;
        } else {
            throw new HsqlException(e, Error.getMessage(ErrorCode.GENERAL_IO_ERROR),
                                    ErrorCode.GENERAL_IO_ERROR);
        }
    }
    database.logger.appLog.logContext(SimpleLog.LOG_NORMAL, "end");
}
java
/**
 * Releases the cached object at file position i and returns its storage
 * space to the free-block list.
 *
 * NOTE(review): the store parameter is unused here — presumably retained
 * for interface compatibility; confirm against callers.
 *
 * @param i     file position of the object to remove
 * @param store unused
 */
public void remove(int i, PersistentStore store) {
    writeLock.lock();
    try {
        CachedObject r = release(i);
        if (r != null) {
            int size = r.getStorageSize();
            freeBlocks.add(i, size);
        }
    } finally {
        writeLock.unlock();
    }
}
java
public void restore(CachedObject object) { writeLock.lock(); try { int i = object.getPos(); cache.put(i, object); // was previously used for text tables if (storeOnInsert) { saveRow(object); } } finally { writeLock.unlock(); } }
java
/**
 * Attempts to delete the data file; when the file survives removal (e.g.
 * it is still locked), its free-position header is reset to the initial
 * value so the file is treated as empty on next open. IO failures are
 * logged and otherwise ignored.
 *
 * @param database the owning database
 * @param filename path of the data file
 */
static void deleteOrResetFreePos(Database database, String filename) {
    ScaledRAFile raFile = null;
    database.getFileAccess().removeElement(filename);
    // OOo related code
    if (database.isStoredFileAccess()) {
        return;
    }
    // OOo end
    // Removal succeeded: nothing left to reset.
    if (!database.getFileAccess().isStreamElement(filename)) {
        return;
    }
    try {
        raFile = new ScaledRAFile(database, filename, false);
        raFile.seek(LONG_FREE_POS_POS);
        raFile.writeLong(INITIAL_FREE_POS);
    } catch (IOException e) {
        database.logger.appLog.logContext(e, null);
    } finally {
        if (raFile != null) {
            try {
                raFile.close();
            } catch (IOException e) {
                database.logger.appLog.logContext(e, null);
            }
        }
    }
}
java
/**
 * Reports whether the fragment identified by the plan hash is one of the
 * rebalance/apply fragments whose execution must be durable.
 *
 * @param planHash the plan hash identifying the fragment
 * @return true for the durable system fragments listed below
 */
public static boolean isDurableFragment(byte[] planHash) {
    final long fragId = VoltSystemProcedure.hashToFragId(planHash);
    final boolean durable = (fragId == PF_prepBalancePartitions)
            || (fragId == PF_balancePartitions)
            || (fragId == PF_balancePartitionsData)
            || (fragId == PF_balancePartitionsClearIndex)
            || (fragId == PF_distribute)
            || (fragId == PF_applyBinaryLog);
    return durable;
}
java
/**
 * Completes this pending call with the given response. Only the first
 * transition out of the RUNNING state wins; later calls are ignored.
 *
 * @param response the client response to publish to waiters
 */
protected void set(ClientResponse response) {
    final boolean won = this.status.compareAndSet(STATUS_RUNNING, STATUS_SUCCESS);
    if (!won) {
        return;
    }
    this.response = response;
    this.latch.countDown();
}
java
private static List<JoinNode> generateInnerJoinOrdersForTree(JoinNode subTree) { // Get a list of the leaf nodes(tables) to permute them List<JoinNode> tableNodes = subTree.generateLeafNodesJoinOrder(); List<List<JoinNode>> joinOrders = PermutationGenerator.generatePurmutations(tableNodes); List<JoinNode> newTrees = new ArrayList<>(); for (List<JoinNode> joinOrder: joinOrders) { newTrees.add(JoinNode.reconstructJoinTreeFromTableNodes(joinOrder, JoinType.INNER)); } //Collect all the join/where conditions to reassign them later AbstractExpression combinedWhereExpr = subTree.getAllFilters(); List<JoinNode> treePermutations = new ArrayList<>(); for (JoinNode newTree : newTrees) { if (combinedWhereExpr != null) { newTree.setWhereExpression(combinedWhereExpr.clone()); } // The new tree root node id must match the original one to be able to reconnect the // subtrees newTree.setId(subTree.getId()); treePermutations.add(newTree); } return treePermutations; }
java
/**
 * Outer-join subtrees are not reorderable: the only valid "ordering" is the
 * tree exactly as given.
 *
 * @param subTree outer-join subtree
 * @return a single-element list containing the subtree unchanged
 */
private static List<JoinNode> generateOuterJoinOrdersForTree(JoinNode subTree) {
    List<JoinNode> singleOrder = new ArrayList<>();
    singleOrder.add(subTree);
    return singleOrder;
}
java
private static List<JoinNode> generateFullJoinOrdersForTree(JoinNode subTree) { assert(subTree != null); List<JoinNode> joinOrders = new ArrayList<>(); if (!(subTree instanceof BranchNode)) { // End of recursion joinOrders.add(subTree); return joinOrders; } BranchNode branchNode = (BranchNode) subTree; // Descend to the left branch assert(branchNode.getLeftNode() != null); List<JoinNode> leftJoinOrders = generateFullJoinOrdersForTree(branchNode.getLeftNode()); assert(!leftJoinOrders.isEmpty()); // Descend to the right branch assert(branchNode.getRightNode() != null); List<JoinNode> rightJoinOrders = generateFullJoinOrdersForTree(branchNode.getRightNode()); assert(!rightJoinOrders.isEmpty()); // Create permutation pairing left and right nodes and the revere variant for (JoinNode leftNode : leftJoinOrders) { for (JoinNode rightNode : rightJoinOrders) { JoinNode resultOne = new BranchNode(branchNode.getId(), branchNode.getJoinType(), (JoinNode) leftNode.clone(), (JoinNode) rightNode.clone()); JoinNode resultTwo = new BranchNode(branchNode.getId(), branchNode.getJoinType(), (JoinNode) rightNode.clone(), (JoinNode) leftNode.clone()); if (branchNode.getJoinExpression() != null) { resultOne.setJoinExpression(branchNode.getJoinExpression().clone()); resultTwo.setJoinExpression(branchNode.getJoinExpression().clone()); } if (branchNode.getWhereExpression() != null) { resultOne.setWhereExpression(branchNode.getWhereExpression().clone()); resultTwo.setWhereExpression(branchNode.getWhereExpression().clone()); } joinOrders.add(resultOne); joinOrders.add(resultTwo); } } return joinOrders; }
java
private void generateMorePlansForJoinTree(JoinNode joinTree) { assert(joinTree != null); // generate the access paths for all nodes generateAccessPaths(joinTree); List<JoinNode> nodes = joinTree.generateAllNodesJoinOrder(); generateSubPlanForJoinNodeRecursively(joinTree, 0, nodes); }
java
/**
 * Populates the access paths of {@code parentNode}'s inner (right) child.
 * For an inner child that is itself a join, only the naive path applies;
 * for a table scan, index paths are generated from the inner and
 * inner-outer join expressions, with FULL-join expressions kept at the
 * join node as post-predicates rather than pushed down as filters.
 * When an NLJ (no inner-outer index use) may be forced by a send/receive
 * between the join and a partitioned inner table, additional NLJ-compatible
 * paths are generated alongside the inner-outer index paths.
 *
 * @param parentNode branch node whose right child's paths are generated
 */
private void generateInnerAccessPaths(BranchNode parentNode) {
    JoinNode innerChildNode = parentNode.getRightNode();
    assert(innerChildNode != null);
    // In case of inner join WHERE and JOIN expressions can be merged
    if (parentNode.getJoinType() == JoinType.INNER) {
        parentNode.m_joinInnerOuterList.addAll(parentNode.m_whereInnerOuterList);
        parentNode.m_whereInnerOuterList.clear();
        parentNode.m_joinInnerList.addAll(parentNode.m_whereInnerList);
        parentNode.m_whereInnerList.clear();
    }
    if (innerChildNode instanceof BranchNode) {
        generateOuterAccessPaths((BranchNode)innerChildNode);
        generateInnerAccessPaths((BranchNode)innerChildNode);
        // The inner node is a join node itself. Only naive access path is possible
        innerChildNode.m_accessPaths.add(
                getRelevantNaivePath(parentNode.m_joinInnerOuterList,
                                     parentNode.m_joinInnerList));
        return;
    }

    // The inner table can have multiple index access paths based on
    // inner and inner-outer join expressions plus the naive one.
    List<AbstractExpression> filterExprs = null;
    List<AbstractExpression> postExprs = null;
    // For the FULL join type, the inner join expressions must stay at the join node and
    // not go down to the inner node as filters (as predicates for SeqScan nodes and/or
    // index expressions for Index Scan). The latter case (IndexScan) won't work for NLJ because
    // the inner join expression will effectively filter out inner tuple prior to the NLJ.
    if (parentNode.getJoinType() != JoinType.FULL) {
        filterExprs = parentNode.m_joinInnerList;
    } else {
        postExprs = parentNode.m_joinInnerList;
    }
    StmtTableScan innerTable = innerChildNode.getTableScan();
    assert(innerTable != null);
    innerChildNode.m_accessPaths.addAll(
            getRelevantAccessPathsForTable(innerTable,
                                           parentNode.m_joinInnerOuterList,
                                           filterExprs,
                                           postExprs));

    // If there are inner expressions AND inner-outer expressions, it could be that there
    // are indexed access paths that use elements of both in the indexing expressions,
    // especially in the case of a compound index.
    // These access paths can not be considered for use with an NLJ because they rely on
    // inner-outer expressions.
    // If there is a possibility that NLIJ will not be an option due to the
    // "special case" processing that puts a send/receive plan between the join node
    // and its inner child node, other access paths need to be considered that use the
    // same indexes as those identified so far but in a simpler, less effective way
    // that does not rely on inner-outer expressions.
    // The following simplistic method of finding these access paths is to force
    // inner-outer expressions to be handled as NLJ-compatible post-filters and repeat
    // the search for access paths.
    // This will typically generate some duplicate access paths, including the naive
    // sequential scan path and any indexed paths that happened to use only the inner
    // expressions.
    // For now, we deal with this redundancy by dropping (and re-generating) all
    // access paths EXCEPT those that reference the inner-outer expressions.
    // TODO: implementing access path hash and equality and possibly using a "Set"
    // would allow deduping as new access paths are added OR
    // the simplified access path search process could be based on
    // the existing indexed access paths -- for each access path that "hasInnerOuterIndexExpression"
    // try to generate and add a simpler access path using the same index,
    // this time with the inner-outer expressions used only as non-indexable post-filters.
    // Don't bother generating these redundant or inferior access paths unless there is
    // an inner-outer expression and a chance that NLIJ will be taken out of the running.
    boolean mayNeedInnerSendReceive =
            ( ! m_partitioning.wasSpecifiedAsSingle()) &&
            (m_partitioning.getCountOfPartitionedTables() > 0) &&
            (parentNode.getJoinType() != JoinType.INNER) &&
            ! innerTable.getIsReplicated();
    // too expensive/complicated to test here? (parentNode.m_leftNode has a replicated result?) &&

    if (mayNeedInnerSendReceive && ! parentNode.m_joinInnerOuterList.isEmpty()) {
        // Keep only the index paths that genuinely depend on inner-outer
        // expressions; everything else is regenerated below in NLJ form.
        List<AccessPath> innerOuterAccessPaths = new ArrayList<>();
        for (AccessPath innerAccessPath : innerChildNode.m_accessPaths) {
            if ((innerAccessPath.index != null) &&
                    hasInnerOuterIndexExpression(innerChildNode.getTableAlias(),
                                                 innerAccessPath.indexExprs,
                                                 innerAccessPath.initialExpr,
                                                 innerAccessPath.endExprs)) {
                innerOuterAccessPaths.add(innerAccessPath);
            }
        }
        if (parentNode.getJoinType() != JoinType.FULL) {
            filterExprs = parentNode.m_joinInnerList;
            postExprs = parentNode.m_joinInnerOuterList;
        } else {
            // For FULL join type the inner join expressions must be part of the post predicate
            // in order to stay at the join node and not be pushed down to the inner node
            filterExprs = null;
            postExprs = new ArrayList<>(parentNode.m_joinInnerList);
            postExprs.addAll(parentNode.m_joinInnerOuterList);
        }
        Collection<AccessPath> nljAccessPaths =
                getRelevantAccessPathsForTable(innerTable,
                                               null,
                                               filterExprs,
                                               postExprs);
        innerChildNode.m_accessPaths.clear();
        innerChildNode.m_accessPaths.addAll(nljAccessPaths);
        innerChildNode.m_accessPaths.addAll(innerOuterAccessPaths);
    }
    assert(innerChildNode.m_accessPaths.size() > 0);
}
java
/**
 * Recursively builds the select sub-plan for a join-tree node.
 * For a branch node the left (outer) and right (inner) sub-plans are built
 * first and joined; a null from any step propagates up as "no plan".
 * For a leaf node a scan plan is built, with a subquery leaf getting its
 * best-cost sub-plan re-linked underneath the scan.
 *
 * @param joinNode node of the join tree to plan
 * @return the sub-plan root, or null when no viable plan exists
 */
private AbstractPlanNode getSelectSubPlanForJoinNode(JoinNode joinNode) {
    assert(joinNode != null);
    if (joinNode instanceof BranchNode) {
        BranchNode branchJoinNode = (BranchNode)joinNode;
        // Outer node
        AbstractPlanNode outerScanPlan =
                getSelectSubPlanForJoinNode(branchJoinNode.getLeftNode());
        if (outerScanPlan == null) {
            return null;
        }

        // Inner Node.
        AbstractPlanNode innerScanPlan =
                getSelectSubPlanForJoinNode((branchJoinNode).getRightNode());
        if (innerScanPlan == null) {
            return null;
        }

        // Join Node
        IndexSortablePlanNode answer =
                getSelectSubPlanForJoin(branchJoinNode, outerScanPlan, innerScanPlan);

        // Propagate information used for order by clauses in window functions
        // and the statement level order by clause. This is only if the
        // branch node is an inner join.
        if ((answer != null)
                && (branchJoinNode.getJoinType() == JoinType.INNER)
                && outerScanPlan instanceof IndexSortablePlanNode) {
            IndexUseForOrderBy indexUseForJoin = answer.indexUse();
            IndexUseForOrderBy indexUseFromScan =
                    ((IndexSortablePlanNode)outerScanPlan).indexUse();
            indexUseForJoin.setWindowFunctionUsesIndex(
                    indexUseFromScan.getWindowFunctionUsesIndex());
            indexUseForJoin.setWindowFunctionIsCompatibleWithOrderBy(
                    indexUseFromScan.isWindowFunctionCompatibleWithOrderBy());
            indexUseForJoin.setFinalExpressionOrderFromIndexScan(
                    indexUseFromScan.getFinalExpressionOrderFromIndexScan());
            indexUseForJoin.setSortOrderFromIndexScan(
                    indexUseFromScan.getSortOrderFromIndexScan());
        }
        if (answer == null) {
            return null;
        }
        return answer.planNode();
    }

    // End of recursion
    AbstractPlanNode scanNode = getAccessPlanForTable(joinNode);
    // Connect the sub-query tree if any
    if (joinNode instanceof SubqueryLeafNode) {
        StmtSubqueryScan tableScan = ((SubqueryLeafNode)joinNode).getSubqueryScan();
        CompiledPlan subQueryPlan = tableScan.getBestCostPlan();
        assert(subQueryPlan != null);
        assert(subQueryPlan.rootPlanGraph != null);
        // The sub-query best cost plan needs to be un-linked from the previous parent plan
        // it's the same child plan that gets re-attached to many parents one at a time
        subQueryPlan.rootPlanGraph.disconnectParents();
        scanNode.addAndLinkChild(subQueryPlan.rootPlanGraph);
    }
    return scanNode;
}
java
/**
 * Splits {@code exprs} by TVE count: expressions referencing exactly one
 * tuple-value expression are returned; all others are appended to
 * {@code otherExprs} (mutated in place).
 *
 * @param exprs      expressions to partition
 * @param otherExprs out-parameter receiving the multi/zero-TVE expressions
 * @return the expressions containing exactly one TVE, in input order
 */
private static List<AbstractExpression> filterSingleTVEExpressions(List<AbstractExpression> exprs,
        List<AbstractExpression> otherExprs) {
    List<AbstractExpression> singleTveOnly = new ArrayList<>();
    for (AbstractExpression candidate : exprs) {
        boolean referencesExactlyOneTve =
                ExpressionUtil.getTupleValueExpressions(candidate).size() == 1;
        if (referencesExactlyOneTve) {
            singleTveOnly.add(candidate);
        } else {
            otherExprs.add(candidate);
        }
    }
    return singleTveOnly;
}
java
/**
 * Shuts down all consumer runners and closes this instance. Idempotent:
 * only the first caller to flip the shutdown flag performs the work.
 */
public void notifyShutdown() {
    if (!m_shutdown.compareAndSet(false, true)) {
        // Shutdown already initiated by another caller.
        return;
    }
    for (KafkaExternalConsumerRunner consumer : m_consumers) {
        consumer.shutdown();
    }
    close();
}
java
/**
 * Runs a DDL statement, optionally transforming it first, and prints the
 * original/transformed pair for debugging.
 *
 * @param ddl          the DDL statement as written
 * @param transformDdl when true, run the transformed form instead
 */
protected void runDDL(String ddl, boolean transformDdl) {
    final String ddlToRun = transformDdl ? transformDDL(ddl) : ddl;
    printTransformedSql(ddl, ddlToRun);
    super.runDDL(ddlToRun);
}
java
/**
 * Maps a PostgreSQL column type name to its VoltDB equivalent.
 *
 * @param columnTypeName PostgreSQL type name to translate
 * @return the mapped VoltDB type name, or the upper-cased input when no
 *         mapping exists
 */
@Override
protected String getVoltColumnTypeName(String columnTypeName) {
    String equivalentTypeName = m_PostgreSQLTypeNames.get(columnTypeName);
    if (equivalentTypeName != null) {
        return equivalentTypeName;
    }
    // Fix: use a locale-independent upper-casing. The default-locale
    // toUpperCase() can mangle SQL type names — e.g. "int" becomes "İNT"
    // under a Turkish default locale — producing an invalid type name.
    return columnTypeName.toUpperCase(java.util.Locale.ROOT);
}
java
/**
 * Counts occurrences of {@code ch} in {@code str} that fall outside
 * single-quoted literals. An odd number of single quotes between two
 * consecutive matches toggles the in-quote state.
 *
 * @param str string to scan
 * @param ch  character to count
 * @return number of unquoted occurrences of {@code ch}
 */
static private int numOccurencesOfCharIn(String str, char ch) {
    boolean insideQuotedLiteral = false;
    int count = 0;
    int prevMatch = 0;
    int index = str.indexOf(ch);
    while (index >= 0) {
        // An odd number of single quotes since the previous match flips
        // whether we are inside a quoted literal.
        if (hasOddNumberOfSingleQuotes(str.substring(prevMatch, index))) {
            insideQuotedLiteral = !insideQuotedLiteral;
        }
        if (!insideQuotedLiteral) {
            count++;
        }
        prevMatch = index;
        index = str.indexOf(ch, index + 1);
    }
    return count;
}
java