code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Finds this node's position among the sorted election children and tries to
 * set a watch on the nearest live lower node.
 *
 * @return true if no live lower node exists (we are the lowest node),
 *         false once a watch has been set on a live lower node
 * @throws KeeperException on ZooKeeper errors
 * @throws InterruptedException if the ZooKeeper call is interrupted
 */
private boolean watchNextLowerNode() throws KeeperException, InterruptedException {
    /*
     * Iterate through the sorted list of children and find the given node,
     * then set up an electionWatcher on the previous node if it exists,
     * otherwise the previous of the previous... until we reach the beginning,
     * then we are the lowest node.
     */
    List<String> children = zk.getChildren(dir, false);
    Collections.sort(children);
    ListIterator<String> iter = children.listIterator();
    String me = null;
    // Go till I find myself.
    while (iter.hasNext()) {
        me = ZKUtil.joinZKPath(dir, iter.next());
        if (me.equals(node)) {
            break;
        }
    }
    assert (me != null);
    // Step back onto our own entry so hasPrevious() walks the lower nodes.
    iter.previous();
    // Until we have previous nodes and we set a watch on a previous node.
    while (iter.hasPrevious()) {
        // Process my lower nodes and put a watch on whatever is live.
        String previous = ZKUtil.joinZKPath(dir, iter.previous());
        if (zk.exists(previous, electionWatcher) != null) {
            return false;
        }
    }
    return true;
}
java
/**
 * Maps the character {@code c} to the replacement string {@code r}.
 *
 * @return this builder, for call chaining
 */
@CanIgnoreReturnValue
public CharEscaperBuilder addEscape(char c, String r) {
    map.put(c, checkNotNull(r));
    // Remember the largest escaped character; used later to size the lookup table.
    if (max < c) {
        max = c;
    }
    return this;
}
java
/**
 * Registers the same replacement string for every character in {@code cs}.
 *
 * @return this builder, for call chaining
 */
@CanIgnoreReturnValue
public CharEscaperBuilder addEscapes(char[] cs, String r) {
    checkNotNull(r);
    for (int i = 0; i < cs.length; i++) {
        addEscape(cs[i], r);
    }
    return this;
}
java
private void deliverReadyTxns() { // First, pull all the sequenced messages, if any. VoltMessage m = m_replaySequencer.poll(); while(m != null) { deliver(m); m = m_replaySequencer.poll(); } // Then, try to pull all the drainable messages, if any. m = m_replaySequencer.drain(); while (m != null) { if (m instanceof Iv2InitiateTaskMessage) { // Send IGNORED response for all SPs Iv2InitiateTaskMessage task = (Iv2InitiateTaskMessage) m; final InitiateResponseMessage response = new InitiateResponseMessage(task); response.setResults(new ClientResponseImpl(ClientResponse.UNEXPECTED_FAILURE, new VoltTable[0], ClientResponseImpl.IGNORED_TRANSACTION)); m_mailbox.send(response.getInitiatorHSId(), response); } m = m_replaySequencer.drain(); } }
java
/**
 * Decides whether a message can be delivered immediately or must first be
 * sequenced for command-log / DR replay.
 *
 * @param message the incoming message
 * @return true when the caller should deliver the message now; false when the
 *         replay sequencer queued or answered it
 */
@Override
public boolean sequenceForReplay(VoltMessage message) {
    boolean canDeliver = false;
    long sequenceWithUniqueId = Long.MIN_VALUE;
    // Command-log replay messages carry the for-replay flag.
    boolean commandLog = (message instanceof TransactionInfoBaseMessage &&
            (((TransactionInfoBaseMessage)message).isForReplay()));
    // DR sentinels arrive as MultiPartitionParticipantMessage.
    boolean sentinel = message instanceof MultiPartitionParticipantMessage;
    boolean replay = commandLog || sentinel;
    // Only the leader sequences; non-leaders just track unique ids below.
    boolean sequenceForReplay = m_isLeader && replay;
    if (replay) {
        sequenceWithUniqueId = ((TransactionInfoBaseMessage)message).getUniqueId();
    }
    if (sequenceForReplay) {
        InitiateResponseMessage dupe = m_replaySequencer.dedupe(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message);
        if (dupe != null) {
            // Duplicate initiate task message, send response
            m_mailbox.send(dupe.getInitiatorHSId(), dupe);
        } else if (!m_replaySequencer.offer(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message)) {
            // Sequencer refused it: not part of replay ordering, deliver directly.
            canDeliver = true;
        } else {
            // Accepted by the sequencer; push out whatever is now ready.
            deliverReadyTxns();
        }
        // If it's a DR sentinel, send an acknowledgement
        if (sentinel && !commandLog) {
            MultiPartitionParticipantMessage mppm = (MultiPartitionParticipantMessage) message;
            final InitiateResponseMessage response = new InitiateResponseMessage(mppm);
            ClientResponseImpl clientResponse =
                    new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
                            new VoltTable[0], ClientResponseImpl.IGNORED_TRANSACTION);
            response.setResults(clientResponse);
            m_mailbox.send(response.getInitiatorHSId(), response);
        }
    } else {
        if (replay) {
            // Update last seen and last polled uniqueId for replicas
            m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
                    (TransactionInfoBaseMessage) message);
            m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
                    (TransactionInfoBaseMessage) message);
        }
        canDeliver = true;
    }
    return canDeliver;
}
java
/**
 * Offers a locally initiated SP invocation: emits trace metadata, creates the
 * SpProcedureTask, logs it to the command log, and (for async command
 * logging) offers the task immediately with a backpressure future.
 */
private void doLocalInitiateOffer(Iv2InitiateTaskMessage msg) {
    final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
    if (traceLog != null) {
        final String threadName = Thread.currentThread().getName(); // Thread name has to be materialized here
        traceLog.add(() -> VoltTrace.meta("process_name", "name", CoreUtils.getHostnameOrAddress()))
                .add(() -> VoltTrace.meta("thread_name", "name", threadName))
                .add(() -> VoltTrace.meta("thread_sort_index", "sort_index", Integer.toString(10000)))
                .add(() -> VoltTrace.beginAsync("initsp",
                        MiscUtils.hsIdPairTxnIdToString(m_mailbox.getHSId(), m_mailbox.getHSId(), msg.getSpHandle(), msg.getClientInterfaceHandle()),
                        "ciHandle", msg.getClientInterfaceHandle(),
                        "txnId", TxnEgo.txnIdToString(msg.getTxnId()),
                        "partition", m_partitionId,
                        "read", msg.isReadOnly(),
                        "name", msg.getStoredProcedureName(),
                        "hsId", CoreUtils.hsIdToString(m_mailbox.getHSId())));
    }
    final String procedureName = msg.getStoredProcedureName();
    final SpProcedureTask task = new SpProcedureTask(m_mailbox, procedureName, m_pendingTasks, msg);
    ListenableFuture<Object> durabilityBackpressureFuture =
            m_cl.log(msg, msg.getSpHandle(), null, m_durabilityListener, task);
    if (traceLog != null && durabilityBackpressureFuture != null) {
        traceLog.add(() -> VoltTrace.beginAsync("durability",
                MiscUtils.hsIdTxnIdToString(m_mailbox.getHSId(), msg.getSpHandle()),
                "txnId", TxnEgo.txnIdToString(msg.getTxnId()),
                "partition", Integer.toString(m_partitionId)));
    }
    //Durability future is always null for sync command logging
    //the transaction will be delivered again by the CL for execution once durable
    //Async command logging has to offer the task immediately with a Future for backpressure
    if (m_cl.canOfferTask()) {
        m_pendingTasks.offer(task.setDurabilityBackpressureFuture(durabilityBackpressureFuture));
    }
}
java
/**
 * Handles a borrow task (an MP read fragment executed on this SP site).
 * Borrows run at the current max scheduled SP handle without advancing it,
 * and their responses must not be buffered.
 */
private void handleBorrowTaskMessage(BorrowTaskMessage message) {
    // borrows do not advance the sp handle. The handle would
    // move backwards anyway once the next message is received
    // from the SP leader.
    long newSpHandle = getMaxScheduledTxnSpHandle();
    Iv2Trace.logFragmentTaskMessage(message.getFragmentTaskMessage(),
            m_mailbox.getHSId(), newSpHandle, true);
    final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
    if (traceLog != null) {
        traceLog.add(() -> VoltTrace.beginAsync("recvfragment",
                MiscUtils.hsIdPairTxnIdToString(m_mailbox.getHSId(), m_mailbox.getHSId(), newSpHandle, 0),
                "txnId", TxnEgo.txnIdToString(message.getTxnId()),
                "partition", m_partitionId,
                "hsId", CoreUtils.hsIdToString(m_mailbox.getHSId())));
    }
    TransactionState txn = m_outstandingTxns.get(message.getTxnId());
    if (txn == null) {
        // If the borrow is the first fragment for a transaction, run it as
        // a single partition fragment; Must not engage/pause this
        // site on a MP transaction before the SP instructs to do so.
        // Do not track the borrow task as outstanding - it completes
        // immediately and is not a valid transaction state for
        // full MP participation (it claims everything can run as SP).
        txn = new BorrowTransactionState(newSpHandle, message);
    }
    // BorrowTask is a read only task embedded in a MP transaction
    // and its response (FragmentResponseMessage) should not be buffered
    if (message.getFragmentTaskMessage().isSysProcTask()) {
        final SysprocBorrowedTask task = new SysprocBorrowedTask(m_mailbox,
                (ParticipantTransactionState)txn,
                m_pendingTasks, message.getFragmentTaskMessage(),
                message.getInputDepMap());
        task.setResponseNotBufferable();
        m_pendingTasks.offer(task);
    } else {
        final BorrowedTask task = new BorrowedTask(m_mailbox,
                (ParticipantTransactionState)txn,
                m_pendingTasks, message.getFragmentTaskMessage(),
                message.getInputDepMap());
        task.setResponseNotBufferable();
        m_pendingTasks.offer(task);
    }
}
java
/**
 * Handles a FragmentTaskMessage for an MP transaction. On the leader the
 * message is copied, stamped with a fresh SP handle, replicated to replicas
 * (tracked by a duplicate counter), then offered locally; on a replica the
 * incoming SP handle is simply recorded before the local offer.
 */
void handleFragmentTaskMessage(FragmentTaskMessage message) {
    FragmentTaskMessage msg = message;
    long newSpHandle;
    //The site has been marked as non-leader. The follow-up batches or fragments are processed here
    if (!message.isForReplica() && (m_isLeader || message.isExecutedOnPreviousLeader())) {
        // message processed on leader
        // Quick hack to make progress...we need to copy the FragmentTaskMessage
        // before we start mucking with its state (SPHANDLE). We need to revisit
        // all the messaging mess at some point.
        msg = new FragmentTaskMessage(message.getInitiatorHSId(),
                message.getCoordinatorHSId(), message);
        //Not going to use the timestamp from the new Ego because the multi-part timestamp is what should be used
        msg.setTimestamp(message.getTimestamp());
        msg.setExecutedOnPreviousLeader(message.isExecutedOnPreviousLeader());
        if (!message.isReadOnly()) {
            TxnEgo ego = advanceTxnEgo();
            newSpHandle = ego.getTxnId();
            if (m_outstandingTxns.get(msg.getTxnId()) == null) {
                updateMaxScheduledTransactionSpHandle(newSpHandle);
            }
        } else {
            newSpHandle = getMaxScheduledTxnSpHandle();
        }
        msg.setSpHandle(newSpHandle);
        msg.setLastSpUniqueId(m_uniqueIdGenerator.getLastUniqueId());
        logRepair(msg);
        if (msg.getInitiateTask() != null) {
            msg.getInitiateTask().setSpHandle(newSpHandle);//set the handle
            //Trigger reserialization so the new handle is used
            msg.setStateForDurability(msg.getInitiateTask(), msg.getInvolvedPartitions());
        }

        /*
         * If there a replicas to send it to, forward it!
         * Unless... it's read only AND not a sysproc. Read only sysprocs may expect to be sent
         * everywhere.
         * In that case don't propagate it to avoid a determinism check and extra messaging overhead
         */
        if (IS_KSAFE_CLUSTER && (!message.isReadOnly() || msg.isSysProcTask())) {
            for (long hsId : m_sendToHSIds) {
                FragmentTaskMessage finalMsg = msg;
                final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
                if (traceLog != null) {
                    traceLog.add(() -> VoltTrace.beginAsync("replicatefragment",
                            MiscUtils.hsIdPairTxnIdToString(m_mailbox.getHSId(), hsId, finalMsg.getSpHandle(), finalMsg.getTxnId()),
                            "txnId", TxnEgo.txnIdToString(finalMsg.getTxnId()),
                            "dest", CoreUtils.hsIdToString(hsId)));
                }
            }
            FragmentTaskMessage replmsg =
                    new FragmentTaskMessage(m_mailbox.getHSId(), m_mailbox.getHSId(), msg);
            replmsg.setForReplica(true);
            replmsg.setTimestamp(msg.getTimestamp());
            // K-safety cluster doesn't always mean partition has replicas,
            // node failure may reduce the number of replicas for each partition.
            if (m_sendToHSIds.length > 0) {
                m_mailbox.send(m_sendToHSIds,replmsg);
            }
            DuplicateCounter counter;
            /*
             * Non-determinism should be impossible to happen with MP fragments.
             * if you see "MP_DETERMINISM_ERROR" as procedure name in the crash logs
             * something has horribly gone wrong.
             */
            if (message.getFragmentTaskType() != FragmentTaskMessage.SYS_PROC_PER_SITE) {
                counter = new DuplicateCounter(
                        msg.getCoordinatorHSId(), msg.getTxnId(),
                        m_replicaHSIds, replmsg);
            } else {
                counter = new SysProcDuplicateCounter(
                        msg.getCoordinatorHSId(), msg.getTxnId(),
                        m_replicaHSIds, replmsg);
            }
            safeAddToDuplicateCounterMap(new DuplicateCounterKey(message.getTxnId(), newSpHandle), counter);
        }
    } else {
        // message processed on replica
        logRepair(msg);
        newSpHandle = msg.getSpHandle();
        setMaxSeenTxnId(newSpHandle);
    }
    Iv2Trace.logFragmentTaskMessage(message, m_mailbox.getHSId(), newSpHandle, false);
    doLocalFragmentOffer(msg);
}
java
/**
 * Releases all MP tasks that were waiting on durability for the given txnId
 * into the run queue, closing their "durability" trace spans first.
 */
public void offerPendingMPTasks(long txnId) {
    Queue<TransactionTask> pendingTasks = m_mpsPendingDurability.get(txnId);
    if (pendingTasks != null) {
        for (TransactionTask task : pendingTasks) {
            if (task instanceof SpProcedureTask) {
                final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
                if (traceLog != null) {
                    traceLog.add(() -> VoltTrace.endAsync("durability",
                            MiscUtils.hsIdTxnIdToString(m_mailbox.getHSId(), task.getSpHandle())));
                }
            } else if (task instanceof FragmentTask) {
                final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPI);
                if (traceLog != null) {
                    traceLog.add(() -> VoltTrace.endAsync("durability",
                            MiscUtils.hsIdTxnIdToString(m_mailbox.getHSId(),
                                    ((FragmentTask) task).m_fragmentMsg.getSpHandle())));
                }
            }
            m_pendingTasks.offer(task);
        }
        m_mpsPendingDurability.remove(txnId);
    }
}
java
private void queueOrOfferMPTask(TransactionTask task) { // The pending map will only have an entry for the transaction if the first fragment is // still pending durability. Queue<TransactionTask> pendingTasks = m_mpsPendingDurability.get(task.getTxnId()); if (pendingTasks != null) { pendingTasks.offer(task); } else { m_pendingTasks.offer(task); } }
java
/**
 * Handles a log-fault message: writes the fault-log entry for the given SP
 * handle, waits for the IO to complete, then advances local txn/unique-id
 * state to the values supplied by the master.
 */
private void handleIv2LogFaultMessage(Iv2LogFaultMessage message) {
    //call the internal log write with the provided SP handle and wait for the fault log IO to complete
    SettableFuture<Boolean> written = writeIv2ViableReplayEntryInternal(message.getSpHandle());
    // Get the Fault Log Status here to ensure the replica completes the log fault task is finished before
    // it starts processing transactions again
    blockFaultLogWriteStatus(written);
    setMaxSeenTxnId(message.getSpHandle());
    // Also initialize the unique ID generator and the last durable unique ID using
    // the value sent by the master
    m_uniqueIdGenerator.updateMostRecentlyGeneratedUniqueId(message.getSpUniqueId());
    m_cl.initializeLastDurableUniqueId(m_durabilityListener, m_uniqueIdGenerator.getLastUniqueId());
}
java
/**
 * Blocks until the fault-log write future completes, logging a warning when
 * the entry could not be written.
 *
 * @param written future resolving to whether the fault-log entry was written;
 *                may be null when replay is not complete (nothing to wait for)
 */
private void blockFaultLogWriteStatus(SettableFuture<Boolean> written) {
    boolean logWritten = false;
    if (written != null) {
        try {
            logWritten = written.get();
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of silently swallowing it, so
            // callers up the stack can observe the interruption.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            if (tmLog.isDebugEnabled()) {
                tmLog.debug("Could not determine fault log state for partition: " + m_partitionId, e);
            }
        }
        if (!logWritten) {
            tmLog.warn("Attempted fault log not written for partition: " + m_partitionId);
        }
    }
}
java
/**
 * Writes a fault-log entry for this partition once replay has completed.
 *
 * @return the write-completion future, or null while replay is in progress
 */
SettableFuture<Boolean> writeIv2ViableReplayEntryInternal(long spHandle) {
    if (!m_replayComplete) {
        // No fault entry is logged until replay finishes.
        return null;
    }
    return m_cl.logIv2Fault(m_mailbox.getHSId(),
            new HashSet<Long>(m_replicaHSIds), m_partitionId, spHandle);
}
java
/**
 * Rebuilds the replica list without any replica hosted on the failed host and
 * installs it on the initiator mailbox.
 */
public void updateReplicasFromMigrationLeaderFailedHost(int failedHostId) {
    List<Long> survivors = new ArrayList<>();
    for (long replicaHSId : m_replicaHSIds) {
        // Keep every replica that does not live on the failed host.
        if (CoreUtils.getHostIdFromHSId(replicaHSId) != failedHostId) {
            survivors.add(replicaHSId);
        }
    }
    ((InitiatorMailbox) m_mailbox).updateReplicas(survivors, null);
}
java
/**
 * Re-sends the open message of every duplicate counter newer than the rejoin
 * snapshot to the newly added replicas, so the rejoining node sees pending work.
 */
public void forwardPendingTaskToRejoinNode(long[] replicasAdded, long snapshotSpHandle) {
    if (tmLog.isDebugEnabled()) {
        tmLog.debug("Forward pending tasks in backlog to rejoin node: " + Arrays.toString(replicasAdded));
    }
    if (replicasAdded.length == 0) {
        return;
    }
    boolean sentAny = false;
    for (Map.Entry<DuplicateCounterKey, DuplicateCounter> entry : m_duplicateCounters.entrySet()) {
        // Only work scheduled after the snapshot point needs forwarding.
        if (snapshotSpHandle < entry.getKey().m_spHandle) {
            if (!sentAny) {
                sentAny = true;
                if (tmLog.isDebugEnabled()) {
                    tmLog.debug("Start forwarding pending tasks to rejoin node.");
                }
            }
            // Then forward any message after the MP txn, I expect them are all Iv2InitiateMessages
            if (tmLog.isDebugEnabled()) {
                tmLog.debug(entry.getValue().getOpenMessage().getMessageInfo());
            }
            m_mailbox.send(replicasAdded, entry.getValue().getOpenMessage());
        }
    }
    if (sentAny && tmLog.isDebugEnabled()) {
        tmLog.debug("Finish forwarding pending tasks to rejoin node.");
    }
}
java
/**
 * On repair, flushes MP read-only transactions: tells replicas to flush their
 * backlogs, marks local MP read-only txns done (removing their duplicate
 * counters), and drops MP reads from the pending-task backlog.
 */
@Override
public void cleanupTransactionBacklogOnRepair() {
    if (m_isLeader && m_sendToHSIds.length > 0) {
        m_mailbox.send(m_sendToHSIds, new MPBacklogFlushMessage());
    }
    Iterator<Entry<Long, TransactionState>> iter = m_outstandingTxns.entrySet().iterator();
    while (iter.hasNext()) {
        Entry<Long, TransactionState> entry = iter.next();
        TransactionState txnState = entry.getValue();
        if (TxnEgo.getPartitionId(entry.getKey()) == MpInitiator.MP_INIT_PID ) {
            if (txnState.isReadOnly()) {
                txnState.setDone();
                // Drop any duplicate counters associated with this txn.
                m_duplicateCounters.entrySet().removeIf((e) -> e.getKey().m_txnId == entry.getKey());
                // Iterator-based removal avoids ConcurrentModificationException.
                iter.remove();
            }
        }
    }
    // flush all RO transactions out of backlog
    m_pendingTasks.removeMPReadTransactions();
}
java
/**
 * Clears all compiled-statement registries and resets the statement-id
 * counter. Synchronized so concurrent sessions see a consistent wipe.
 */
synchronized void reset() {
    schemaMap.clear();
    sqlLookup.clear();
    csidMap.clear();
    sessionUseMap.clear();
    useMap.clear();
    next_cs_id = 0;
}
java
/**
 * Clears bound variable values on every cached compiled statement.
 * NOTE(review): the raw Iterator here is presumably the project's own
 * iterator type rather than java.util.Iterator — confirm before converting
 * to an enhanced for loop.
 */
synchronized void resetStatements() {
    Iterator it = csidMap.values().iterator();
    while (it.hasNext()) {
        Statement cs = (Statement) it.next();
        cs.clearVariables();
    }
}
java
/**
 * Looks up the compiled-statement id registered for the given SQL text in
 * the given schema.
 *
 * @return the statement id, or -1 when the schema or SQL is unknown
 */
private long getStatementID(HsqlName schema, String sql) {
    LongValueHashMap statementsForSchema =
        (LongValueHashMap) schemaMap.get(schema.hashCode());
    return statementsForSchema == null ? -1 : statementsForSchema.get(sql, -1);
}
java
/**
 * Returns the compiled statement for the given id, recompiling it via the
 * system session if it has been invalidated. Returns null when the id is
 * unknown or recompilation fails (in which case the statement is freed).
 */
public synchronized Statement getStatement(Session session, long csid) {
    Statement cs = (Statement) csidMap.get(csid);
    if (cs == null) {
        return null;
    }
    if (!cs.isValid()) {
        String sql = (String) sqlLookup.get(csid);
        // revalidate with the original schema
        try {
            Session sys = database.sessionManager.getSysSession(
                session.currentSchema.name, session.getUser());
            cs = sys.compileStatement(sql);
            cs.setID(csid);
            csidMap.put(csid, cs);
        } catch (Throwable t) {
            // Recompilation failed (e.g. schema object dropped); drop the
            // stale statement rather than propagate the error.
            freeStatement(csid, session.getId(), true);
            return null;
        }
    }
    return cs;
}
java
/**
 * Records that the given session uses statement {@code csid}: bumps the
 * per-session use count and, on the session's first use, the global count.
 */
private void linkSession(long csid, long sessionID) {
    LongKeyIntValueHashMap sessionStatements =
        (LongKeyIntValueHashMap) sessionUseMap.get(sessionID);
    if (sessionStatements == null) {
        sessionStatements = new LongKeyIntValueHashMap();
        sessionUseMap.put(sessionID, sessionStatements);
    }
    int priorCount = sessionStatements.get(csid, 0);
    sessionStatements.put(csid, priorCount + 1);
    // First use by this session: one more session now references the statement.
    if (priorCount == 0) {
        useMap.put(csid, useMap.get(csid, 0) + 1);
    }
}
java
/**
 * Registers a freshly compiled statement. When csid is negative a new id is
 * allocated and the statement's SQL is indexed by schema for later lookup.
 *
 * @return the (possibly newly allocated) statement id
 */
private long registerStatement(long csid, Statement cs) {
    if (csid < 0) {
        csid = nextID();
        int schemaid = cs.getSchemaName().hashCode();
        LongValueHashMap sqlMap = (LongValueHashMap) schemaMap.get(schemaid);
        if (sqlMap == null) {
            sqlMap = new LongValueHashMap();
            schemaMap.put(schemaid, sqlMap);
        }
        sqlMap.put(cs.getSQL(), csid);
        sqlLookup.put(csid, cs.getSQL());
    }
    cs.setID(csid);
    csidMap.put(csid, cs);
    return csid;
}
java
/**
 * Drops a session's statement references, fully freeing any statement whose
 * global use count reaches zero.
 */
synchronized void removeSession(long sessionID) {
    LongKeyIntValueHashMap scsMap;
    long csid;
    Iterator i;

    scsMap = (LongKeyIntValueHashMap) sessionUseMap.remove(sessionID);
    if (scsMap == null) {
        return;
    }
    i = scsMap.keySet().iterator();
    while (i.hasNext()) {
        csid = i.nextLong();
        int usecount = useMap.get(csid, 1) - 1;
        if (usecount == 0) {
            // Last user gone: remove the statement and its SQL index entries.
            Statement cs = (Statement) csidMap.remove(csid);
            if (cs != null) {
                int schemaid = cs.getSchemaName().hashCode();
                LongValueHashMap sqlMap = (LongValueHashMap) schemaMap.get(schemaid);
                String sql = (String) sqlLookup.remove(csid);
                sqlMap.remove(sql);
            }
            useMap.remove(csid);
        } else {
            useMap.put(csid, usecount);
        }
    }
}
java
/**
 * Returns a compiled statement for the SQL in cmd, reusing the cache when the
 * cached copy is valid, and links the statement to the session's use counts.
 */
synchronized Statement compile(Session session, Result cmd) throws Throwable {
    String sql = cmd.getMainString();
    long csid = getStatementID(session.currentSchema, sql);
    Statement cs = (Statement) csidMap.get(csid);
    // Recompile when missing or invalidated.
    // NOTE(review): the !session.isAdmin() clause forces non-admin sessions to
    // recompile every time — confirm this is intended rather than inverted.
    if (cs == null || !cs.isValid() || !session.isAdmin()) {
        Session sys = database.sessionManager.getSysSession(
            session.currentSchema.name, session.getUser());
        cs = sys.compileStatement(sql);
        csid = registerStatement(csid, cs);
    }
    linkSession(csid, session.getId());
    return cs;
}
java
/**
 * Prepares the swap directory for a fresh instance.
 *
 * @throws IOException if the large query swap directory cannot be cleared
 */
private void startupInstance() throws IOException {
    assert (m_blockPathMap.isEmpty());
    try {
        clearSwapDir();
    } catch (Exception e) {
        // Chain the original exception so its stack trace is not lost
        // (previously only e.getMessage() was kept).
        throw new IOException("Unable to clear large query swap directory: " + e.getMessage(), e);
    }
}
java
/**
 * Persists a block's bytes to a new swap file and records its path.
 * The buffer's position is restored before returning.
 *
 * @throws IllegalArgumentException if the block is already stored
 * @throws IOException on write failure
 */
void storeBlock(BlockId blockId, ByteBuffer block) throws IOException {
    synchronized (m_accessLock) {
        if (m_blockPathMap.containsKey(blockId)) {
            throw new IllegalArgumentException("Request to store block that is already stored: " + blockId.toString());
        }
        int origPosition = block.position();
        // Write the whole buffer from the start, not just the remaining bytes.
        block.position(0);
        Path blockPath = makeBlockPath(blockId);
        try (SeekableByteChannel channel = Files.newByteChannel(blockPath, OPEN_OPTIONS, PERMISSIONS)) {
            channel.write(block);
        } finally {
            block.position(origPosition);
        }
        m_blockPathMap.put(blockId, blockPath);
    }
}
java
/**
 * Reads a previously stored block's bytes from its swap file into the given
 * buffer. The buffer's position is restored before returning.
 *
 * @throws IllegalArgumentException if the block is not stored
 * @throws IOException on read failure
 */
void loadBlock(BlockId blockId, ByteBuffer block) throws IOException {
    synchronized (m_accessLock) {
        if (! m_blockPathMap.containsKey(blockId)) {
            throw new IllegalArgumentException("Request to load block that is not stored: " + blockId);
        }
        int origPosition = block.position();
        // Read into the buffer from the start, not the current position.
        block.position(0);
        Path blockPath = m_blockPathMap.get(blockId);
        try (SeekableByteChannel channel = Files.newByteChannel(blockPath)) {
            channel.read(block);
        } finally {
            block.position(origPosition);
        }
    }
}
java
/**
 * Deletes a stored block's swap file and forgets its path.
 *
 * @throws IllegalArgumentException if the block is not stored
 * @throws IOException if the file cannot be deleted
 */
void releaseBlock(BlockId blockId) throws IOException {
    synchronized (m_accessLock) {
        Path storedPath = m_blockPathMap.get(blockId);
        // storeBlock never maps a block to null, so null means "not stored".
        if (storedPath == null) {
            throw new IllegalArgumentException("Request to release block that is not stored: " + blockId);
        }
        Files.delete(storedPath);
        m_blockPathMap.remove(blockId);
    }
}
java
/**
 * Deletes every stored block's swap file and empties the path map. Entries
 * are removed one at a time so the map always reflects what remains on disk.
 */
private void releaseAllBlocks() throws IOException {
    synchronized (m_accessLock) {
        while (! m_blockPathMap.isEmpty()) {
            Map.Entry<BlockId, Path> next = m_blockPathMap.entrySet().iterator().next();
            Files.delete(next.getValue());
            m_blockPathMap.remove(next.getKey());
        }
    }
}
java
/**
 * Resolves the swap-file path for the given block id inside the large query
 * swap directory.
 */
Path makeBlockPath(BlockId id) {
    return m_largeQuerySwapPath.resolve(id.fileNameString());
}
java
/**
 * Describes the given Kinesis stream and returns its shards.
 *
 * @return the stream's shards, or null if the region is unknown, the stream
 *         does not exist, or any error occurs (errors are logged, not thrown)
 */
public static List<Shard> discoverShards(String regionName, String streamName,
        String accessKey, String secretKey, String appName) {
    try {
        Region region = RegionUtils.getRegion(regionName);
        if (region != null) {
            final AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
            AmazonKinesis kinesisClient = new AmazonKinesisClient(credentials,
                    getClientConfigWithUserAgent(appName));
            kinesisClient.setRegion(region);
            DescribeStreamResult result = kinesisClient.describeStream(streamName);
            if (!"ACTIVE".equals(result.getStreamDescription().getStreamStatus())) {
                // NOTE(review): this IllegalArgumentException is caught by the
                // generic catch below, so it is logged and null is returned
                // rather than propagated to the caller — confirm that is intended.
                throw new IllegalArgumentException("Kinesis stream " + streamName + " is not active.");
            }
            return result.getStreamDescription().getShards();
        }
    } catch (ResourceNotFoundException e) {
        LOGGER.warn("Kinesis stream " + streamName + " does not exist.", e);
    } catch (Exception e) {
        LOGGER.warn("Error found while describing the kinesis stream " + streamName, e);
    }
    return null;
}
java
/**
 * Reads a required string property, falling back to the supplied default.
 *
 * @param props importer configuration
 * @param propertyName key to look up
 * @param defaultValue value used when the property is absent (may be null)
 * @return the trimmed, non-empty value
 * @throws IllegalArgumentException if the resolved value is missing or blank
 */
public static String getProperty(Properties props, String propertyName, String defaultValue) {
    String value = props.getProperty(propertyName, defaultValue);
    // Guard against a null resolved value: the previous version NPE'd on
    // trim() when the property was absent and defaultValue was null;
    // report that case as "missing" like any blank value.
    if (value == null || value.trim().isEmpty()) {
        throw new IllegalArgumentException(
                "Property " + propertyName + " is missing in Kinesis importer configuration.");
    }
    return value.trim();
}
java
/**
 * Reads a positive long property, falling back to the default when absent or
 * blank.
 *
 * @param props importer configuration
 * @param propertyName key to look up
 * @param defaultValue value used when the property is absent or blank
 * @return the parsed positive value, or defaultValue
 * @throws IllegalArgumentException if the value is non-numeric or not positive
 */
public static long getPropertyAsLong(Properties props, String propertyName, long defaultValue) {
    String value = props.getProperty(propertyName, "").trim();
    if (value.isEmpty()) {
        return defaultValue;
    }
    try {
        long val = Long.parseLong(value);
        if (val <= 0) {
            throw new IllegalArgumentException(
                    "Value of " + propertyName + " should be positive, but current value is " + val);
        }
        return val;
    } catch (NumberFormatException e) {
        // Preserve the parse failure as the cause instead of discarding it.
        throw new IllegalArgumentException(
                "Property " + propertyName + " must be a number in Kinesis importer configuration.", e);
    }
}
java
/**
 * Adds a child name to this node, lazily creating the child set.
 *
 * @return true if the child was not already present
 */
public synchronized boolean addChild(String child) {
    if (children == null) {
        // let's be conservative on the typical number of children
        children = new HashSet<String>(8);
    }
    return children.add(child);
}
java
/**
 * Returns the HTTP Host header value for this node, computing and caching it
 * on first use: the configured public interface when set, otherwise the first
 * advertised interface from local metadata (falling back to the local
 * address) combined with the HTTP port.
 */
protected String getHostHeader() {
    if (m_hostHeader != null) {
        return m_hostHeader;
    }
    if (!httpAdminListener.m_publicIntf.isEmpty()) {
        m_hostHeader = httpAdminListener.m_publicIntf;
        return m_hostHeader;
    }
    InetAddress addr = null;
    int httpPort = VoltDB.DEFAULT_HTTP_PORT;
    try {
        String localMetadata = VoltDB.instance().getLocalMetadata();
        JSONObject jsObj = new JSONObject(localMetadata);
        JSONArray interfaces = jsObj.getJSONArray("interfaces");
        //The first interface is external interface if specified.
        String iface = interfaces.getString(0);
        addr = InetAddress.getByName(iface);
        httpPort = jsObj.getInt("httpPort");
    } catch (Exception e) {
        // Best effort: fall through to the local address below.
        m_log.warn("Failed to get HTTP interface information.", e);
    }
    if (addr == null) {
        addr = org.voltcore.utils.CoreUtils.getLocalAddress();
    }
    //Make the header string.
    m_hostHeader = addr.getHostAddress() + ":" + httpPort;
    return m_hostHeader;
}
java
/**
 * Serves the live catalog report as an HTML page. I/O failures are logged,
 * not rethrown.
 */
void handleReportPage(HttpServletRequest request, HttpServletResponse response) {
    try {
        String report = ReportMaker.liveReport();
        response.setContentType(HTML_CONTENT_TYPE);
        response.setStatus(HttpServletResponse.SC_OK);
        response.getWriter().print(report);
    } catch (IOException ex) {
        m_log.warn("Failed to get catalog report.", ex);
    }
}
java
/**
 * Reads an int property constrained to a fixed set of allowed values.
 *
 * @param values the allowed values; any other result falls back to defaultValue
 * @return the parsed value when it is in {@code values}, otherwise defaultValue
 */
public int getIntegerProperty(String key, int defaultValue, int[] values) {
    String prop = getProperty(key);
    int value = defaultValue;
    try {
        if (prop != null) {
            value = Integer.parseInt(prop);
        }
    } catch (NumberFormatException e) {
        // Intentional best-effort parse: a malformed value falls back to the default.
    }
    if (ArrayUtil.find(values, value) == -1) {
        return defaultValue;
    }
    return value;
}
java
/**
 * Saves the properties under the configured file name plus the
 * ".properties" suffix.
 *
 * @throws java.io.FileNotFoundException when no file name is configured
 * @throws Exception on any save failure
 */
public void save() throws Exception {
    if (fileName == null || fileName.length() == 0) {
        throw new java.io.FileNotFoundException(
            Error.getMessage(ErrorCode.M_HsqlProperties_load));
    }
    save(fileName + ".properties");
}
java
public void save(String fileString) throws Exception { // oj@openoffice.org fa.createParentDirs(fileString); OutputStream fos = fa.openOutputStreamElement(fileString); FileAccess.FileSync outDescriptor = fa.getFileSync(fos); JavaSystem.saveProperties( stringProps, HsqlDatabaseProperties.PRODUCT_NAME + " " + HsqlDatabaseProperties.THIS_FULL_VERSION, fos); fos.flush(); outDescriptor.sync(); fos.close(); return; }
java
/**
 * Appends a (code, key) pair to the parallel error arrays, growing both by
 * one slot.
 */
private void addError(int code, String key) {
    errorCodes = (int[]) ArrayUtil.resizeArray(errorCodes, errorCodes.length + 1);
    errorKeys = (String[]) ArrayUtil.resizeArray(errorKeys, errorKeys.length + 1);
    errorCodes[errorCodes.length - 1] = code;
    errorKeys[errorKeys.length - 1] = key;
}
java
/**
 * Verifies the supplied password against this user's stored password.
 *
 * @throws HsqlException with SQLSTATE 28000 when the password does not match
 */
public void checkPassword(String value) {
    if (value.equals(password)) {
        return;
    }
    throw Error.error(ErrorCode.X_28000);
}
java
/**
 * Builds the CREATE USER DDL statement for this user, including the
 * double-quoted password.
 *
 * @return SQL of the form: CREATE USER name PASSWORD "password"
 */
public String getCreateUserSQL() {
    // StringBuilder: no synchronization is needed for a method-local buffer.
    StringBuilder sb = new StringBuilder(64);

    sb.append(Tokens.T_CREATE).append(' ');
    sb.append(Tokens.T_USER).append(' ');
    sb.append(getStatementName()).append(' ');
    sb.append(Tokens.T_PASSWORD).append(' ');
    sb.append(StringConverter.toQuotedString(password, '"', true));
    return sb.toString();
}
java
/**
 * Builds the SET SESSION AUTHORIZATION statement for connecting as this user.
 *
 * @return SQL of the form: SET SESSION AUTHORIZATION 'name'
 */
public String getConnectUserSQL() {
    // StringBuilder: no synchronization is needed for a method-local buffer.
    StringBuilder sb = new StringBuilder();

    sb.append(Tokens.T_SET).append(' ');
    sb.append(Tokens.T_SESSION).append(' ');
    sb.append(Tokens.T_AUTHORIZATION).append(' ');
    sb.append(StringConverter.toQuotedString(getNameString(), '\'', true));
    return sb.toString();
}
java
/**
 * Parses the encoded public-suffix trie into a map from domain to suffix
 * type, consuming the input one top-level node at a time.
 */
static ImmutableMap<String, PublicSuffixType> parseTrie(CharSequence encoded) {
    ImmutableMap.Builder<String, PublicSuffixType> builder = ImmutableMap.builder();
    int remainingEnd = encoded.length();
    int offset = 0;
    while (offset < remainingEnd) {
        // Each call consumes one complete node (and its children).
        offset += doParseTrieToBuilder(
            Lists.<CharSequence>newLinkedList(), encoded.subSequence(offset, remainingEnd), builder);
    }
    return builder.build();
}
java
/**
 * Parses one trie node (and, recursively, its children) from the encoded
 * suffix-list representation, adding completed domains to the builder.
 *
 * @param stack ancestor labels, most recent first; joined to form a domain
 * @param encoded remaining encoded input
 * @param builder receives domain-to-type entries
 * @return the number of characters consumed from {@code encoded}
 */
private static int doParseTrieToBuilder(
    List<CharSequence> stack,
    CharSequence encoded,
    ImmutableMap.Builder<String, PublicSuffixType> builder) {
    int encodedLen = encoded.length();
    int idx = 0;
    char c = '\0';

    // Read all of the characters for this node.
    for (; idx < encodedLen; idx++) {
        c = encoded.charAt(idx);
        if (c == '&' || c == '?' || c == '!' || c == ':' || c == ',') {
            break;
        }
    }

    stack.add(0, reverse(encoded.subSequence(0, idx)));

    if (c == '!' || c == '?' || c == ':' || c == ',') {
        // '!' represents an interior node that represents an ICANN entry in the map.
        // '?' represents a leaf node, which represents an ICANN entry in map.
        // ':' represents an interior node that represents a private entry in the map
        // ',' represents a leaf node, which represents a private entry in the map.
        String domain = PREFIX_JOINER.join(stack);
        if (domain.length() > 0) {
            builder.put(domain, PublicSuffixType.fromCode(c));
        }
    }
    idx++;

    if (c != '?' && c != ',') {
        while (idx < encodedLen) {
            // Read all the children
            idx += doParseTrieToBuilder(stack, encoded.subSequence(idx, encodedLen), builder);
            if (encoded.charAt(idx) == '?' || encoded.charAt(idx) == ',') {
                // An extra '?' or ',' after a child node indicates the end of all children of this node.
                idx++;
                break;
            }
        }
    }
    stack.remove(0);
    return idx;
}
java
/**
 * Creates and installs the singleton ExportManager, optionally clearing
 * overflow data first, then registers its stats source.
 *
 * @throws ExportManager.SetupException on initialization failure
 */
public static synchronized void initialize(
        int myHostId,
        CatalogContext catalogContext,
        boolean isRejoin,
        boolean forceCreate,
        HostMessenger messenger,
        List<Pair<Integer, Integer>> partitions)
        throws ExportManager.SetupException {
    ExportManager em = new ExportManager(myHostId, catalogContext, messenger);
    m_self = em;
    if (forceCreate) {
        em.clearOverflowData();
    }
    em.initialize(catalogContext, partitions, isRejoin);
    RealVoltDB db=(RealVoltDB)VoltDB.instance();
    db.getStatsAgent().registerStatsSource(StatsSelector.EXPORT,
            myHostId, // m_siteId,
            em.getExportStats());
}
java
/**
 * Builds the export pipeline for the current catalog: creates the data
 * processor and the export generation over the overflow directory, then
 * readies it for data. Returns early when no tables are exported.
 */
private void initialize(CatalogContext catalogContext,
        List<Pair<Integer, Integer>> localPartitionsToSites, boolean isRejoin) {
    try {
        CatalogMap<Connector> connectors = CatalogUtil.getConnectors(catalogContext);
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("initialize for " + connectors.size() + " connectors.");
            CatalogUtil.dumpConnectors(exportLog, connectors);
        }
        if (!CatalogUtil.hasExportedTables(connectors)) {
            return;
        }
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Creating processor " + m_loaderClass);
        }
        ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet(m_processorConfig);
        m_processor.set(newProcessor);
        File exportOverflowDirectory = new File(VoltDB.instance().getExportOverflowPath());
        ExportGeneration generation = new ExportGeneration(exportOverflowDirectory, m_messenger);
        generation.initialize(m_hostId, catalogContext, connectors, newProcessor,
                localPartitionsToSites, exportOverflowDirectory);
        m_generation.set(generation);
        newProcessor.setExportGeneration(generation);
        newProcessor.readyForData();
    } catch (final ClassNotFoundException e) {
        exportLog.l7dlog( Level.ERROR, LogKeys.export_ExportManager_NoLoaderExtensions.name(), e);
        throw new RuntimeException(e);
    } catch (final Exception e) {
        exportLog.error("Initialize failed with:", e);
        throw new RuntimeException(e);
    }
}
java
/**
 * Swaps the running export processor for a new one built from the new
 * config: shuts down the old processor, unassigns mastership, initializes the
 * generation from the catalog, then restores partition mastership.
 */
private void swapWithNewProcessor(
        final CatalogContext catalogContext,
        ExportGeneration generation,
        CatalogMap<Connector> connectors,
        List<Pair<Integer, Integer>> partitions,
        Map<String, Pair<Properties, Set<String>>> config) {
    ExportDataProcessor oldProcessor = m_processor.get();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Shutdown guestprocessor");
    }
    oldProcessor.shutdown();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Processor shutdown completed, install new export processor");
    }
    generation.unacceptMastership();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Existing export datasources unassigned.");
    }
    try {
        ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet(config);
        //Load any missing tables.
        generation.initializeGenerationFromCatalog(catalogContext, connectors, newProcessor,
                m_hostId, partitions, true);
        for (Pair<Integer, Integer> partition : partitions) {
            generation.updateAckMailboxes(partition.getFirst(), null);
        }
        //We create processor even if we dont have any streams.
        newProcessor.setExportGeneration(generation);
        if (m_startPolling && !config.isEmpty()) {
            newProcessor.startPolling();
        }
        m_processor.getAndSet(newProcessor);
        newProcessor.readyForData();
    } catch (Exception crash) {
        VoltDB.crashLocalVoltDB("Error creating next export processor", true, crash);
    }
    // Reclaim mastership for the partitions this host owns.
    for (int partitionId : m_masterOfPartitions) {
        generation.acceptMastership(partitionId);
    }
}
java
/**
 * Computes (and caches) a message explaining why this expression's content is
 * non-deterministic.
 *
 * @return the cached or newly discovered message, or null when deterministic
 */
@Override
public String calculateContentDeterminismMessage() {
    String ans = getContentDeterminismMessage();
    if (ans != null) {
        return ans;
    }
    if (m_subquery != null) {
        // Propagate the subquery's verdict into our cache.
        updateContentDeterminismMessage(m_subquery.calculateContentDeterminismMessage());
        return getContentDeterminismMessage();
    }
    if (m_columns != null) {
        // First non-deterministic column wins.
        for (AbstractExpression expr : m_columns.values()) {
            String emsg = expr.getContentDeterminismMessage();
            if (emsg != null) {
                updateContentDeterminismMessage(emsg);
                return emsg;
            }
        }
    }
    return null;
}
java
/**
 * Finds the position of the given expression in the order-by list.
 *
 * @return the index of the first equal order-by expression, or -1 if absent
 */
public int getSortIndexOfOrderByExpression(AbstractExpression partitionByExpression) {
    // List.indexOf performs the same equals()-based scan and returns -1 on a miss.
    return m_orderByExpressions.indexOf(partitionByExpression);
}
java
private boolean rewriteSelectStmt() { if (m_mvi != null) { final Table view = m_mvi.getDest(); final String viewName = view.getTypeName(); // Get the map of select stmt's display column index -> view table (column name, column index) m_selectStmt.getFinalProjectionSchema() .resetTableName(viewName, viewName) .toTVEAndFixColumns(m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx.entrySet().stream() .collect(Collectors.toMap(kv -> kv.getKey().getFirst(), Map.Entry::getValue))); // change to display column index-keyed map final Map<Integer, Pair<String, Integer>> colSubIndx = m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx .entrySet().stream().collect(Collectors.toMap(kv -> kv.getKey().getSecond(), Map.Entry::getValue)); ParsedSelectStmt.updateTableNames(m_selectStmt.m_aggResultColumns, viewName); ParsedSelectStmt.fixColumns(m_selectStmt.m_aggResultColumns, colSubIndx); ParsedSelectStmt.updateTableNames(m_selectStmt.m_displayColumns, viewName); ParsedSelectStmt.fixColumns(m_selectStmt.m_displayColumns, colSubIndx); m_selectStmt.rewriteAsMV(view); m_mvi = null; // makes this method re-entrant safe return true; } else { // scans all sub-queries for rewriting opportunities return m_selectStmt.allScans().stream() .map(scan -> scan instanceof StmtSubqueryScan && rewriteTableAlias((StmtSubqueryScan) scan)) .reduce(Boolean::logicalOr).get(); } }
java
/**
 * Attempts an MV rewrite on the statement behind a subquery scan.
 * Only SELECT subqueries are candidates.
 *
 * @return true if the subquery was rewritten
 */
private static boolean rewriteTableAlias(StmtSubqueryScan scan) {
    final AbstractParsedStmt sub = scan.getSubqueryStmt();
    if (!(sub instanceof ParsedSelectStmt)) {
        return false;
    }
    return new MVQueryRewriter((ParsedSelectStmt) sub).rewrite();
}
java
/**
 * Collects the column indices of every TupleValueExpression in the tree,
 * depth-first: left subtree, then right subtree, then arguments. The order
 * of accumulation is significant to callers.
 *
 * @param e     expression tree root (may be null)
 * @param accum list the indices are appended to; also the return value
 */
private static List<Integer> extractTVEIndices(AbstractExpression e, List<Integer> accum) {
    if (e == null) {
        return accum;
    }
    if (e instanceof TupleValueExpression) {
        // Leaf: record the column index and stop descending.
        accum.add(((TupleValueExpression) e).getColumnIndex());
        return accum;
    }
    extractTVEIndices(e.getLeft(), accum);
    extractTVEIndices(e.getRight(), accum);
    if (e.getArgs() != null) {
        for (AbstractExpression arg : e.getArgs()) {
            extractTVEIndices(arg, accum);
        }
    }
    return accum;
}
java
private Map<Pair<String, Integer>, Pair<String, Integer>> gbyMatches(MaterializedViewInfo mv) { final FilterMatcher filter = new FilterMatcher(m_selectStmt.m_joinTree.getJoinExpression(), predicate_of(mv)); // *** Matching criteria/order: *** // 1. Filters match; // 2. Group-by-columns' table is same as MV's source table; // 3. Those group-by-column's columns are same as MV's group-by columns; // 4. Select stmt's group-by column names match with MV's // 5. Each column's aggregation type match, in the sense of set equality; if (filter.match() && gbyTablesEqual(mv) && gbyColumnsMatch(mv)) { return getViewColumnMaps(mv); } else { return null; } }
java
/**
 * Maps every materialized view of every given table to its destination
 * (view) table.
 */
private static Map<MaterializedViewInfo, Table> getMviAndViews(List<Table> tbls) {
    return tbls.stream()
            .flatMap(tbl -> {
                // getViews() only exposes iterator(); adapt it to a Stream.
                final Iterable<MaterializedViewInfo> views = () -> tbl.getViews().iterator();
                return StreamSupport.stream(views.spliterator(), false);
            })
            .collect(Collectors.toMap(mv -> mv, MaterializedViewInfo::getDest));
}
java
/**
 * Deep-copies an expression tree, replacing every ParameterValueExpression
 * node with a clone of its original (pre-parameterization) value expression.
 * Children are transformed first, then attached to the cloned node.
 */
private static AbstractExpression transformExpressionRidofPVE(AbstractExpression src) {
    AbstractExpression left = src.getLeft(), right = src.getRight();
    if (left != null) {
        left = transformExpressionRidofPVE(left);
    }
    if (right != null) {
        right = transformExpressionRidofPVE(right);
    }
    final AbstractExpression dst;
    if (src instanceof ParameterValueExpression) {
        // NOTE(review): the disabled assert below suggests getOriginalValue() can be
        // null, which would NPE on the next line — confirm all PVEs reaching here
        // carry an original value.
        // assert(((ParameterValueExpression) src).getOriginalValue() != null);
        dst = ((ParameterValueExpression) src).getOriginalValue().clone();
    } else {
        dst = src.clone();
    }
    dst.setLeft(left);
    dst.setRight(right);
    return dst;
}
java
/**
 * Parses the MV's group-by expressions from their JSON form.
 *
 * @return the parsed expressions, or an empty list when the JSON is malformed
 */
private static List<AbstractExpression> getGbyExpressions(MaterializedViewInfo mv) {
    List<AbstractExpression> exprs;
    try {
        exprs = AbstractExpression.fromJSONArrayString(mv.getGroupbyexpressionsjson(), null);
    } catch (JSONException e) {
        // Malformed/absent JSON degrades to "no group-by expressions".
        exprs = new ArrayList<>();
    }
    return exprs;
}
java
private boolean isNpTxn(Iv2InitiateTaskMessage msg) { return msg.getStoredProcedureName().startsWith("@") && msg.getStoredProcedureName().equalsIgnoreCase("@BalancePartitions") && (byte) msg.getParameters()[1] != 1; // clearIndex is MP, normal rebalance is NP }
java
/**
 * Extracts the source and destination partition ids of the first partition
 * pair in an @BalancePartitions request (JSON text in parameter 0).
 *
 * @return the pair of partition ids, or {@code null} when the JSON cannot be
 *         parsed — callers must handle null
 */
private Set<Integer> getBalancePartitions(Iv2InitiateTaskMessage msg) {
    try {
        JSONObject jsObj = new JSONObject((String) msg.getParameters()[0]);
        BalancePartitionsRequest request = new BalancePartitionsRequest(jsObj);
        // Only the first pair is consulted; src and dest together identify the move.
        return Sets.newHashSet(request.partitionPairs.get(0).srcPartition,
                               request.partitionPairs.get(0).destPartition);
    } catch (JSONException e) {
        hostLog.warn("Unable to determine partitions for @BalancePartitions", e);
        return null;
    }
}
java
/**
 * Handles the response for an initiated MP transaction. Three paths:
 * (1) misrouted every-partition responses are re-sent to the (possibly
 * migrated) leader; (2) responses tracked by a DuplicateCounter (every-site
 * sysprocs) are folded into the counter and only forwarded when all replicas
 * agree; (3) ordinary MP responses are forwarded to the initiator and a
 * synthetic CompleteTransactionMessage is appended to the repair log.
 * The repair-log truncation handle deliberately lags by one committed txn.
 */
public void handleInitiateResponseMessage(InitiateResponseMessage message) {
    final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.MPI);
    if (traceLog != null) {
        traceLog.add(() -> VoltTrace.endAsync("initmp", message.getTxnId()));
    }
    DuplicateCounter counter = m_duplicateCounters.get(message.getTxnId());

    // A transaction may be routed back here for EveryPartitionTask via leader migration
    if (counter != null && message.isMisrouted()) {
        tmLog.info("The message on the partition is misrouted. TxnID: " +
                TxnEgo.txnIdToString(message.getTxnId()));
        Long newLeader = m_leaderMigrationMap.get(message.m_sourceHSId);
        if (newLeader != null) {
            // Update the DuplicateCounter with new replica
            counter.updateReplica(message.m_sourceHSId, newLeader);
            m_leaderMigrationMap.remove(message.m_sourceHSId);
            // Leader migration has updated the leader, send the request to the new leader
            m_mailbox.send(newLeader, counter.getOpenMessage());
        } else {
            // Leader migration not done yet.
            m_mailbox.send(message.m_sourceHSId, counter.getOpenMessage());
        }
        return;
    }
    if (counter != null) {
        int result = counter.offer(message);
        if (result == DuplicateCounter.DONE) {
            m_duplicateCounters.remove(message.getTxnId());
            // Only advance the truncation point on committed transactions that sent fragments to SPIs.
            // See ENG-4211 & ENG-14563
            if (message.shouldCommit() && message.haveSentMpFragment()) {
                m_repairLogTruncationHandle = m_repairLogAwaitingCommit;
                m_repairLogAwaitingCommit = message.getTxnId();
            }
            m_outstandingTxns.remove(message.getTxnId());
            m_mailbox.send(counter.m_destinationId, message);
        } else if (result == DuplicateCounter.MISMATCH) {
            // Replicas disagreed on results: unrecoverable.
            VoltDB.crashLocalVoltDB("HASH MISMATCH running every-site system procedure.", true, null);
        } else if (result == DuplicateCounter.ABORT) {
            VoltDB.crashLocalVoltDB("PARTIAL ROLLBACK/ABORT running every-site system procedure.", true, null);
        }
        // doing duplicate suppresion: all done.
    } else {
        // Only advance the truncation point on committed transactions that sent fragments to SPIs.
        if (message.shouldCommit() && message.haveSentMpFragment()) {
            m_repairLogTruncationHandle = m_repairLogAwaitingCommit;
            m_repairLogAwaitingCommit = message.getTxnId();
        }
        MpTransactionState txn = (MpTransactionState) m_outstandingTxns.remove(message.getTxnId());
        assert(txn != null);
        // the initiatorHSId is the ClientInterface mailbox. Yeah. I know.
        m_mailbox.send(message.getInitiatorHSId(), message);
        // We actually completed this MP transaction.  Create a fake CompleteTransactionMessage
        // to send to our local repair log so that the fate of this transaction is never forgotten
        // even if all the masters somehow die before forwarding Complete on to their replicas.
        CompleteTransactionMessage ctm = new CompleteTransactionMessage(m_mailbox.getHSId(),
                message.m_sourceHSId, message.getTxnId(), message.isReadOnly(), 0,
                !message.shouldCommit(), false, false, false, txn.isNPartTxn(),
                message.m_isFromNonRestartableSysproc, false);
        ctm.setTruncationHandle(m_repairLogTruncationHandle);
        // dump it in the repair log
        // hacky castage
        ((MpInitiatorMailbox) m_mailbox).deliverToRepairLog(ctm);
    }
}
java
/**
 * Queues an end-of-log marker for this partition as an MPI task.
 */
public void handleEOLMessage() {
    final Iv2EndOfLogMessage eolMsg = new Iv2EndOfLogMessage(m_partitionId);
    final MPIEndOfLogTransactionState txnState = new MPIEndOfLogTransactionState(eolMsg);
    m_pendingTasks.offer(new MPIEndOfLogTask(m_mailbox, m_pendingTasks, txnState, m_iv2Masters));
}
java
/**
 * Loads the pro-edition NpProcedureTask class reflectively. In community
 * builds the class is absent: the loader logs on pro (where absence is an
 * error) and stays silent otherwise. The constructor signature registered
 * here must match NpProcedureTask's actual constructor.
 */
private static ProClass<MpProcedureTask> loadNpProcedureTaskClass() {
    return ProClass
            .<MpProcedureTask>load("org.voltdb.iv2.NpProcedureTask", "N-Partition",
                    MiscUtils.isPro() ? ProClass.HANDLER_LOG : ProClass.HANDLER_IGNORE)
            .errorHandler(tmLog::error)
            .useConstructorFor(Mailbox.class, String.class, TransactionTaskQueue.class,
                    Iv2InitiateTaskMessage.class, Map.class, long.class, boolean.class, int.class);
}
java
void safeAddToDuplicateCounterMap(long dpKey, DuplicateCounter counter) { DuplicateCounter existingDC = m_duplicateCounters.get(dpKey); if (existingDC != null) { // this is a collision and is bad existingDC.logWithCollidingDuplicateCounters(counter); VoltDB.crashGlobalVoltDB("DUPLICATE COUNTER MISMATCH: two duplicate counter keys collided.", true, null); } else { m_duplicateCounters.put(dpKey, counter); } }
java
/**
 * The ADMINISTRABLE_ROLE_AUTHORIZATIONS system table.
 *
 * Follows the two-phase HSQLDB system-table pattern: on the first call the
 * cached slot is null, so the empty table (schema + primary key) is created
 * and returned immediately with no rows; subsequent calls populate it —
 * here only for admin sessions, via insertRoles with grantable=true.
 */
Table ADMINISTRABLE_ROLE_AUTHORIZATIONS() {
    Table t = sysTables[ADMINISTRABLE_ROLE_AUTHORIZATIONS];
    if (t == null) {
        t = createBlankTable(
            sysTableHsqlNames[ADMINISTRABLE_ROLE_AUTHORIZATIONS]);
        addColumn(t, "GRANTEE", SQL_IDENTIFIER);
        addColumn(t, "ROLE_NAME", SQL_IDENTIFIER);
        addColumn(t, "IS_GRANTABLE", SQL_IDENTIFIER);

        // Primary key over all three columns.
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
            sysTableHsqlNames[ADMINISTRABLE_ROLE_AUTHORIZATIONS].name, false,
            SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[] { 0, 1, 2 }, false);
        return t;
    }
    if (session.isAdmin()) {
        insertRoles(t, session.getGrantee(), true);
    }
    return t;
}
java
/**
 * The ROUTINE_ROUTINE_USAGE system table: one row per (specific routine,
 * routine it references) pair visible to the current grantee.
 *
 * Uses the standard two-phase pattern: first call builds and returns the
 * empty table; later calls populate it by walking every accessible routine's
 * specific routines and their FUNCTION/PROCEDURE references.
 */
Table ROUTINE_ROUTINE_USAGE() {
    Table t = sysTables[ROUTINE_ROUTINE_USAGE];
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[ROUTINE_ROUTINE_USAGE]);
        addColumn(t, "SPECIFIC_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "SPECIFIC_SCHEMA", SQL_IDENTIFIER);
        addColumn(t, "SPECIFIC_NAME", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_SCHEMA", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_NAME", SQL_IDENTIFIER);

        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
            sysTableHsqlNames[ROUTINE_ROUTINE_USAGE].name, false,
            SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[] { 0, 1, 2, 3, 4, 5 }, false);
        return t;
    }

    // column number mappings
    final int specific_catalog = 0;
    final int specific_schema = 1;
    final int specific_name = 2;
    final int routine_catalog = 3;
    final int routine_schema = 4;
    final int routine_name = 5;

    // NOTE(review): the local declaration of `store` below is commented out, yet
    // `store` is used by insertSys further down — presumably it is an inherited
    // field or this was edited; confirm it still resolves.
    // PersistentStore store = database.persistentStoreCollection.getStore(t);
    Iterator it;
    Object[] row;

    it = database.schemaManager.databaseObjectIterator(
        SchemaObject.ROUTINE);

    while (it.hasNext()) {
        RoutineSchema routine = (RoutineSchema) it.next();

        if (!session.getGrantee().isAccessible(routine)) {
            continue;
        }

        Routine[] specifics = routine.getSpecificRoutines();

        for (int m = 0; m < specifics.length; m++) {
            OrderedHashSet set = specifics[m].getReferences();

            for (int i = 0; i < set.size(); i++) {
                HsqlName refName = (HsqlName) set.get(i);

                // Only references to functions and procedures are reported.
                if (refName.type != SchemaObject.FUNCTION
                        && refName.type != SchemaObject.PROCEDURE) {
                    continue;
                }

                if (!session.getGrantee().isAccessible(refName)) {
                    continue;
                }

                row = t.getEmptyRowData();
                row[specific_catalog] = database.getCatalogName().name;
                row[specific_schema] = specifics[m].getSchemaName().name;
                row[specific_name] = specifics[m].getName().name;
                row[routine_catalog] = database.getCatalogName().name;
                row[routine_schema] = refName.schema.name;
                row[routine_name] = refName.name;

                try {
                    t.insertSys(store, row);
                } catch (HsqlException e) {
                    // NOTE(review): exception deliberately ignored — presumably
                    // duplicate-row inserts; confirm nothing else can be thrown here.
                }
            }
        }
    }

    return t;
}
java
/**
 * Permanently closes this export data source and deletes its on-disk state
 * (committed buffers and the .ad file). Synchronously marks the source
 * closed, clears ack mailboxes, forces mastership off and ends any pending
 * poll; the actual disk cleanup runs on the source's executor, which is
 * shut down when done.
 *
 * @return future completing when the asynchronous cleanup has run
 */
public ListenableFuture<?> closeAndDelete() {
    // We're going away, so shut ourselves from the external world
    m_closed = true;
    m_ackMailboxRefs.set(null);

    // Export mastership should have been released: force it.
    m_mastershipAccepted.set(false); // FIXME: necessary? Old processor should have been shut down.

    // Returning null indicates end of stream
    try {
        if (m_pollTask != null) {
            m_pollTask.setFuture(null);
        }
    } catch (RejectedExecutionException reex) {
        // Ignore, {@code GuestProcessor} was closed
    }
    m_pollTask = null;

    return m_es.submit(new Runnable() {
        @Override
        public void run() {
            try {
                // Discard the pending container, shortcutting the standard discard logic
                AckingContainer ack = m_pendingContainer.getAndSet(null);
                if (ack != null) {
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Discard pending container, lastSeqNo: " + ack.getLastSeqNo());
                    }
                    ack.internalDiscard();
                }
                m_committedBuffers.closeAndDelete();
                m_adFile.delete();
            } catch (IOException e) {
                exportLog.rateLimitedLog(60, Level.WARN, e, "Error closing commit buffers");
            } finally {
                // The executor is no longer needed once cleanup finishes.
                m_es.shutdown();
            }
        }
    });
}
java
public void setPendingContainer(AckingContainer container) { Preconditions.checkNotNull(m_pendingContainer.get() != null, "Pending container must be null."); if (m_closed) { // A very slow export decoder must have noticed the export processor shutting down exportLog.info("Discarding stale pending container"); container.internalDiscard(); } else { m_pendingContainer.set(container); } }
java
/**
 * Processes an ack received from a remote (master) replica. Executed on this
 * source's single-thread executor so it serializes with mastership changes;
 * the ack is ignored once this replica has itself become master or the
 * executor is shut down.
 *
 * @param seq sequence number being acked by the remote master
 */
public void remoteAck(final long seq) {
    //In replicated only master will be doing this.
    m_es.execute(new Runnable() {
        @Override
        public void run() {
            try {
                // ENG-12282: A race condition between export data source
                // master promotion and getting acks from the previous
                // failed master can occur. The failed master could have
                // sent out an ack with Long.MIN and fails immediately after
                // that, which causes a new master to be elected. The
                // election and the receiving of this ack message happens on
                // two different threads on the new master. If it's promoted
                // while processing the ack, the ack may call `m_onDrain`
                // while the other thread is polling buffers, which may
                // never get discarded.
                //
                // Now that we are on the same thread, check to see if we
                // are already promoted to be the master. If so, ignore the
                // ack.
                if (!m_es.isShutdown() && !m_mastershipAccepted.get()) {
                    setCommittedSeqNo(seq);
                    ackImpl(seq);
                }
            } catch (Exception e) {
                exportLog.error("Error acking export buffer", e);
            } catch (Error e) {
                // An Error here is unrecoverable for the process.
                VoltDB.crashLocalVoltDB("Error acking export buffer", true, e);
            }
        }
    });
}
java
private void handleDrainedSource() throws IOException { if (!inCatalog() && m_committedBuffers.isEmpty()) { //Returning null indicates end of stream try { if (m_pollTask != null) { m_pollTask.setFuture(null); } } catch (RejectedExecutionException reex) { // Ignore, {@code GuestProcessor} was closed } m_pollTask = null; m_generation.onSourceDrained(m_partitionId, m_tableName); return; } }
java
/**
 * Accepts export mastership for this data source. No-ops when the mastership
 * runnable is not yet installed or mastership was already accepted; otherwise
 * the transition runs on the source's executor, where the compareAndSet
 * guards against a concurrent acceptance.
 */
public synchronized void acceptMastership() {
    if (m_onMastership == null) {
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Mastership Runnable not yet set for table " + getTableName() +
                    " partition " + getPartitionId());
        }
        return;
    }
    if (m_mastershipAccepted.get()) {
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Export table " + getTableName() +
                    " mastership already accepted for partition " + getPartitionId());
        }
        return;
    }
    m_es.execute(new Runnable() {
        @Override
        public void run() {
            try {
                // Skip if the source was closed or its executor shut down in the meantime.
                if (!m_es.isShutdown() || !m_closed) {
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Export table " + getTableName() +
                                " accepting mastership for partition " + getPartitionId());
                    }
                    if (m_mastershipAccepted.compareAndSet(false, true)) {
                        // Either get enough responses or have received TRANSFER_MASTER event,
                        // clear the response sender HSids.
                        m_queryResponses.clear();
                        m_onMastership.run();
                    }
                }
            } catch (Exception e) {
                exportLog.error("Error in accepting mastership", e);
            }
        }
    });
}
java
public void setOnMastership(Runnable toBeRunOnMastership) { Preconditions.checkNotNull(toBeRunOnMastership, "mastership runnable is null"); m_onMastership = toBeRunOnMastership; // If connector "replicated" property is set to true then every // replicated export stream is its own master if (m_runEveryWhere) { //export stream for run-everywhere clients doesn't need ack mailbox m_ackMailboxRefs.set(null); acceptMastership(); } }
java
/**
 * Answers a gap query on the source's executor: reports the last sequence
 * number of the tracked range containing {@code gapStart}, or Long.MIN_VALUE
 * when no such range exists.
 */
public void handleQueryMessage(final long senderHSId, long requestId, long gapStart) {
    m_es.execute(() -> {
        final Pair<Long, Long> range = m_gapTracker.getRangeContaining(gapStart);
        final long lastSeq = (range == null) ? Long.MIN_VALUE : range.getSecond();
        sendQueryResponse(senderHSId, requestId, lastSeq);
    });
}
java
/**
 * Re-derives the sequence-number state after a rejoin or recovery.
 * Rejoin advances the last released seqNo to just before the first tracked
 * gap entry (when any exists); recovery advances it to at least the snapshot's
 * initial sequence number. Committed/unpolled counters follow from that.
 *
 * @param initialSequenceNumber sequence number restored from the snapshot
 * @param isRejoin              true for rejoin, false for recovery
 */
private void resetStateInRejoinOrRecover(long initialSequenceNumber, boolean isRejoin) {
    if (isRejoin) {
        if (!m_gapTracker.isEmpty()) {
            m_lastReleasedSeqNo = Math.max(m_lastReleasedSeqNo, m_gapTracker.getFirstSeqNo() - 1);
        }
    } else {
        m_lastReleasedSeqNo = Math.max(m_lastReleasedSeqNo, initialSequenceNumber);
    }
    // Rejoin or recovery should be on a transaction boundary (except maybe in a gap situation)
    m_committedSeqNo = m_lastReleasedSeqNo;
    m_firstUnpolledSeqNo = m_lastReleasedSeqNo + 1;
    m_tuplesPending.set(m_gapTracker.sizeInSequence());
}
java
/**
 * Recovers the wall-clock timestamp embedded in a transaction id: the bits
 * above the counter and initiator fields hold milliseconds since the VoltDB
 * epoch.
 */
public static Date getDateFromTransactionId(long txnId) {
    final long millisSinceVoltEpoch = txnId >> (COUNTER_BITS + INITIATORID_BITS);
    return new Date(VOLT_EPOCH + millisSinceVoltEpoch);
}
java
/**
 * Attempts to restore this rejoining host's previous partition placement.
 * Out-of-range requested partitions (e.g. from an elastically-removed node)
 * are dropped before mutation. The mutated topology is written to ZooKeeper
 * only when the local partition count matches the configured sites-per-host.
 *
 * @return the recovered topology, or null when mutation failed
 */
private AbstractTopology recoverPartitions(AbstractTopology topology, String haGroup,
        Set<Integer> recoverPartitions) {
    long version = topology.version;
    if (!recoverPartitions.isEmpty()) {
        // In rejoin case, partition list from the rejoining node could be out of range if the rejoining
        // host is a previously elastic removed node or some other used nodes, if out of range, do not restore
        if (Collections.max(recoverPartitions) > Collections.max(m_cartographer.getPartitions())) {
            recoverPartitions.clear();
        }
    }
    AbstractTopology recoveredTopo = AbstractTopology.mutateRecoverTopology(topology,
            m_messenger.getLiveHostIds(),
            m_messenger.getHostId(),
            haGroup,
            recoverPartitions);
    if (recoveredTopo == null) {
        return null;
    }
    List<Integer> partitions = Lists.newArrayList(recoveredTopo.getPartitionIdList(m_messenger.getHostId()));
    if (partitions != null && partitions.size() == m_catalogContext.getNodeSettings().getLocalSitesCount()) {
        TopologyZKUtils.updateTopologyToZK(m_messenger.getZK(), recoveredTopo);
    }
    // Version bump plus a non-empty request means the old placement was honored.
    if (version < recoveredTopo.version && !recoverPartitions.isEmpty()) {
        consoleLog.info("Partition placement layout has been restored for rejoining.");
    }
    return recoveredTopo;
}
java
private boolean stopRejoiningHost() { // The host failure notification could come before mesh determination, wait for the determination try { m_meshDeterminationLatch.await(); } catch (InterruptedException e) { } if (m_rejoining) { VoltDB.crashLocalVoltDB("Another node failed before this node could finish rejoining. " + "As a result, the rejoin operation has been canceled. Please try again."); return true; } return false; }
java
/**
 * Ensures every SP partition whose local initiator is the leader also holds
 * export mastership for that partition. The MP initiator is skipped.
 */
private void checkExportStreamMastership() {
    for (Initiator initiator : m_iv2Initiators.values()) {
        if (initiator.getPartitionId() == MpInitiator.MP_INIT_PID) {
            continue; // MP initiator carries no export streams
        }
        final SpInitiator spInitiator = (SpInitiator) initiator;
        if (spInitiator.isLeader()) {
            ExportManager.instance().takeMastership(spInitiator.getPartitionId());
        }
    }
}
java
/**
 * Schedules the daily system-info log task to fire just after log4j's
 * DailyRollingFileAppender rolls its file. The appender's roll time is read
 * via reflection from its private "nextCheck" field (there is no public
 * accessor); the task is scheduled 30s after that time. If no daily appender
 * exists or reflection fails, nothing is scheduled.
 */
void scheduleDailyLoggingWorkInNextCheckTime() {
    // Find the (last) DailyRollingFileAppender on the root logger, if any.
    DailyRollingFileAppender dailyAppender = null;
    Enumeration<?> appenders = Logger.getRootLogger().getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = (Appender) appenders.nextElement();
        if (appender instanceof DailyRollingFileAppender) {
            dailyAppender = (DailyRollingFileAppender) appender;
        }
    }
    final DailyRollingFileAppender dailyRollingFileAppender = dailyAppender;

    // Reflectively expose the appender's private roll-time field.
    Field field = null;
    if (dailyRollingFileAppender != null) {
        try {
            field = dailyRollingFileAppender.getClass().getDeclaredField("nextCheck");
            field.setAccessible(true);
        } catch (NoSuchFieldException e) {
            hostLog.error("Failed to set daily system info logging: " + e.getMessage());
        }
    }
    final Field nextCheckField = field;
    long nextCheck = System.currentTimeMillis();
    // the next part may throw exception, current time is the default value
    if (dailyRollingFileAppender != null && nextCheckField != null) {
        try {
            nextCheck = nextCheckField.getLong(dailyRollingFileAppender);
            // Fire 30 seconds after the roll so the new log file exists.
            scheduleWork(new DailyLogTask(),
                    nextCheck - System.currentTimeMillis() + 30 * 1000, 0, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            hostLog.error("Failed to set daily system info logging: " + e.getMessage());
        }
    }
}
java
/**
 * Registers all recurring background tasks: JMX stats broadcast, login
 * counter reset, small/medium/large system stats samples, the export stream
 * mastership check, enterprise maintenance tasks, and the GC inspector.
 * Each handle is kept in m_periodicWorks so it can be cancelled on shutdown.
 */
private void schedulePeriodicWorks() {
    // JMX stats broadcast
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            // A null here was causing a steady stream of annoying but apparently inconsequential
            // NPEs during a debug session of an unrelated unit test.
            if (m_statsManager != null) {
                m_statsManager.sendNotification();
            }
        }
    }, 0, StatsManager.POLL_INTERVAL, TimeUnit.MILLISECONDS));

    // clear login count
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            // Hop to the shared executor so the periodic thread is not blocked.
            ScheduledExecutorService es = VoltDB.instance().getSES(false);
            if (es != null && !es.isShutdown()) {
                es.submit(new Runnable() {
                    @Override
                    public void run() {
                        long timestamp = System.currentTimeMillis();
                        m_flc.checkCounter(timestamp);
                    }
                });
            }
        }
    }, 0, 10, TimeUnit.SECONDS));

    // small stats samples
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            SystemStatsCollector.asyncSampleSystemNow(false, false);
        }
    }, 0, 5, TimeUnit.SECONDS));

    // medium stats samples
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            SystemStatsCollector.asyncSampleSystemNow(true, false);
        }
    }, 0, 1, TimeUnit.MINUTES));

    // large stats samples
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            SystemStatsCollector.asyncSampleSystemNow(true, true);
        }
    }, 0, 6, TimeUnit.MINUTES));

    // export stream master check
    m_periodicWorks.add(scheduleWork(new Runnable() {
        @Override
        public void run() {
            checkExportStreamMastership();
        }
    }, 0, 1, TimeUnit.MINUTES));

    // other enterprise setup
    EnterpriseMaintenance em = EnterpriseMaintenance.get();
    if (em != null) {
        em.setupMaintenaceTasks();
    }

    GCInspector.instance.start(m_periodicPriorityWorkThread, m_gcStats);
}
java
/**
 * A host is eligible to lead when it owns partition 0 itself, or when any
 * host that owns partition 0 is in its partition group.
 */
private boolean determineIfEligibleAsLeader(Collection<Integer> partitions,
                                            Set<Integer> partitionGroupPeers,
                                            AbstractTopology topology) {
    if (partitions.contains(Integer.valueOf(0))) {
        return true;
    }
    for (Integer hostWithPartitionZero : topology.getHostIdList(0)) {
        if (partitionGroupPeers.contains(hostWithPartitionZero)) {
            return true;
        }
    }
    return false;
}
java
/**
 * Startup continuation: runs snapshot restore (or reports completion
 * immediately when there is no restore agent), then kicks off the
 * rejoin/join coordinator if present, crashing the node on join failure.
 * Finally marks the instance running.
 */
@Override
public void run() {
    if (m_restoreAgent != null) {
        // start restore process
        m_restoreAgent.restore();
    } else {
        // No restore needed: report completion with no replayed txns.
        onSnapshotRestoreCompletion();
        onReplayCompletion(Long.MIN_VALUE, m_iv2InitiatorStartingTxnIds);
    }

    // Start the rejoin coordinator
    if (m_joinCoordinator != null) {
        try {
            m_statusTracker.set(NodeState.REJOINING);
            if (!m_joinCoordinator.startJoin(m_catalogContext.database)) {
                VoltDB.crashLocalVoltDB("Failed to join the cluster", true, null);
            }
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Failed to join the cluster", true, e);
        }
    }

    m_isRunning = true;
}
java
/**
 * Deletes the temporary catalog jar left in the config directory by a prior
 * catalog update, if one exists. No-op when the directory is absent.
 */
@Override
public void cleanUpTempCatalogJar() {
    final File configInfoDir = getConfigDirectory();
    if (!configInfoDir.exists()) {
        return;
    }
    final File tempJar =
            new VoltFile(configInfoDir.getPath(), InMemoryJarfile.TMP_CATALOG_JAR_FILENAME);
    if (tempJar.exists()) {
        tempJar.delete();
    }
}
java
/**
 * Shuts down every IV2 initiator, highest partition id first
 * (descending map order). No-op when initiators were never created.
 */
private void shutdownInitiators() {
    if (m_iv2Initiators == null) {
        return;
    }
    // Idiom fix: Iterable.forEach with a method reference — the intermediate
    // .stream() added nothing.
    m_iv2Initiators.descendingMap().values().forEach(Initiator::shutdown);
}
java
public void createRuntimeReport(PrintStream out) { // This function may be running in its own thread. out.print("MIME-Version: 1.0\n"); out.print("Content-type: multipart/mixed; boundary=\"reportsection\""); out.print("\n\n--reportsection\nContent-Type: text/plain\n\nClientInterface Report\n"); if (m_clientInterface != null) { out.print(m_clientInterface.toString() + "\n"); } }
java
/**
 * Brings up the DR producer, when configured: waits for global agreement,
 * wires the DR gateway into every initiator (flagging the lowest site id),
 * then completes initialization. Any failure crashes the node after logging
 * ports in use.
 */
private void initializeDRProducer() {
    try {
        if (m_producerDRGateway != null) {
            m_producerDRGateway.startAndWaitForGlobalAgreement();

            for (Initiator iv2init : m_iv2Initiators.values()) {
                iv2init.initDRGateway(m_config.m_startAction,
                                      m_producerDRGateway,
                                      isLowestSiteId(iv2init));
            }

            m_producerDRGateway.completeInitialization();
        }
    } catch (Exception ex) {
        CoreUtils.printPortsInUse(hostLog);
        VoltDB.crashLocalVoltDB("Failed to initialize DR producer", false, ex);
    }
}
java
static public long computeMinimumHeapRqt(int tableCount, int sitesPerHost, int kfactor) { long baseRqt = 384; long tableRqt = 10 * tableCount; // K-safety Heap consumption drop to 8 MB (per node) // Snapshot cost 32 MB (per node) // Theoretically, 40 MB (per node) should be enough long rejoinRqt = (kfactor > 0) ? 128 * sitesPerHost : 0; return baseRqt + tableRqt + rejoinRqt; }
java
/**
 * Marks every uncommitted action of the given session in this chain as
 * prepared (two-phase commit preparation).
 */
synchronized void prepareCommit(Session session) {
    for (RowActionBase action = this; action != null; action = action.next) {
        if (action.session == session && action.commitTimestamp == 0) {
            action.prepared = true;
        }
    }
}
java
/**
 * Rolls back this session's uncommitted actions whose actionTimestamp is at
 * or after the given savepoint timestamp (actionTimestamp == 0 entries are
 * included). Rolled-back actions are marked and un-prepared.
 *
 * NOTE(review): commitTimestamp is set from session.actionTimestamp, not the
 * {@code timestamp} argument — presumably intentional (stamps the rollback
 * moment); confirm against the engine's timestamp semantics.
 */
synchronized void rollback(Session session, long timestamp) {
    RowActionBase action = this;

    do {
        if (action.session == session && action.commitTimestamp == 0) {
            if (action.actionTimestamp >= timestamp
                    || action.actionTimestamp == 0) {
                action.commitTimestamp = session.actionTimestamp;
                action.rolledback      = true;
                action.prepared        = false;
            }
        }

        action = action.next;
    } while (action != null);
}
java
/**
 * Returns the action type of the last action in this chain committed at the
 * given timestamp, or ACTION_NONE when no action matches.
 */
synchronized int getCommitType(long timestamp) {
    int type = ACTION_NONE;
    // Walk the whole chain; a later match overwrites an earlier one.
    for (RowActionBase action = this; action != null; action = action.next) {
        if (action.commitTimestamp == timestamp) {
            type = action.type;
        }
    }
    return type;
}
java
/**
 * Decides whether the given session can commit its changes to this row.
 *
 * For READ COMMITTED, the reference timestamp is moved up to this session's
 * own uncommitted action timestamp. The chain is then scanned: a prepared
 * action of another session blocks the commit outright; other sessions'
 * uncommitted actions are collected into {@code set} (potential conflicts);
 * the latest foreign commit timestamp must precede the reference timestamp.
 *
 * @param session committing session
 * @param set     out-parameter collecting sessions with uncommitted
 *                conflicting actions
 * @return true when no committed foreign action is at or after the
 *         reference timestamp
 */
synchronized boolean canCommit(Session session, OrderedHashSet set) {

    RowActionBase action;
    long          timestamp       = session.transactionTimestamp;
    long          commitTimestamp = 0;
    final boolean readCommitted = session.isolationMode
                                  == SessionInterface.TX_READ_COMMITTED;

    action = this;

    if (readCommitted) {
        do {
            if (action.session == session) {

                // for READ_COMMITTED, use action timestamp for later conflicts
                if (action.commitTimestamp == 0) {
                    timestamp = action.actionTimestamp;
                }
            }

            action = action.next;
        } while (action != null);

        action = this;
    }

    do {
        // Skip rolled-back and no-op entries.
        if (action.rolledback || action.type == ACTION_NONE) {
            action = action.next;

            continue;
        }

        if (action.session != session) {
            if (action.prepared) {
                return false;
            }

            if (action.commitTimestamp == 0 && action.actionTimestamp != 0) {
                set.add(action.session);
            } else if (action.commitTimestamp > commitTimestamp) {
                commitTimestamp = action.commitTimestamp;
            }
        }

        action = action.next;
    } while (action != null);

    return commitTimestamp < timestamp;
}
java
/**
 * Removes rolled-back actions from this chain after a rollback, rebuilding
 * the list from the surviving entries. If nothing survives, the row either
 * becomes a no-op (it existed before, i.e. head action was a DELETE) or is
 * marked for final deletion (it was newly inserted). Chains already in
 * DELETE_FINAL or NONE state are left untouched.
 */
synchronized void mergeRollback(Row row) {

    RowActionBase action = this;
    RowActionBase head   = null;
    RowActionBase tail   = null;

    if (type == RowActionBase.ACTION_DELETE_FINAL
            || type == RowActionBase.ACTION_NONE) {
        return;
    }

    do {
        if (action.rolledback) {
            // Truncate the rebuilt list at a rolled-back entry.
            if (tail != null) {
                tail.next = null;
            }
        } else {
            if (head == null) {
                head = tail = action;
            } else {
                tail.next = action;
                tail      = action;
            }
        }

        action = action.next;
    } while (action != null);

    if (head == null) {
        // No surviving actions: the row's fate depends on whether it pre-existed.
        boolean exists = (type == RowActionBase.ACTION_DELETE);

        if (exists) {
            setAsNoOp(row);
        } else {
            setAsDeleteFinal();
        }
    } else {
        if (head != this) {
            setAsAction(head);
        }
    }
}
java
/**
 * On a WebHDFS CREATE request, applies the configured block replication
 * factor. When the query already carries a replication parameter, or one is
 * configured, a warning is logged; the configured factor is appended to the
 * query only when the request does not already specify one.
 *
 * @throws URISyntaxException if the rebuilt URI is malformed
 */
private void adjustReplicationFactorForURI(HttpPut httpPut) throws URISyntaxException {
    final String query = httpPut.getURI().getQuery();
    if (StringUtils.isEmpty(query) || !query.contains("op=CREATE")) {
        return;
    }
    final boolean queryHasReplication = query.contains("replication=");
    final boolean haveConfiguredFactor = !StringUtils.isEmpty(m_blockReplication);
    if (!queryHasReplication && !haveConfiguredFactor) {
        return;
    }
    rateLimitedLogWarn(m_logger, "Set block replication factor in the target system.");
    if (haveConfiguredFactor && !queryHasReplication) {
        final URI oldUri = httpPut.getURI();
        final String newQuery = query + "&replication=" + m_blockReplication;
        httpPut.setURI(new URI(oldUri.getScheme(), oldUri.getAuthority(), oldUri.getPath(),
                newQuery, oldUri.getFragment()));
    }
}
java
/**
 * Signs an OAuth-style request: sorts the parameters by name, builds the
 * signature base string (METHOD \n host \n path \n sorted-params), computes
 * the HMAC with the configured algorithm and secret, and appends the
 * base64/percent-encoded signature as an extra parameter.
 *
 * @param uri    request URI (host and path feed the base string)
 * @param params parameters to sign (not mutated; a sorted copy is used)
 * @return sorted parameters plus the signature pair, or null when the HMAC
 *         cannot be initialized
 */
private List<NameValuePair> sign(URI uri, final List<NameValuePair> params) {
    Preconditions.checkNotNull(m_secret);

    // Sort a copy by parameter name; signature base strings require canonical order.
    final List<NameValuePair> sortedParams = Lists.newArrayList(params);
    Collections.sort(sortedParams, new Comparator<NameValuePair>() {
        @Override
        public int compare(NameValuePair left, NameValuePair right) {
            return left.getName().compareTo(right.getName());
        }
    });

    // name[=value] pairs joined with '&'; valueless params carry no '='.
    final StringBuilder paramSb = new StringBuilder();
    String separator = "";
    for (NameValuePair param : sortedParams) {
        paramSb.append(separator).append(param.getName());
        if (param.getValue() != null) {
            paramSb.append("=").append(param.getValue());
        }
        separator = "&";
    }

    // Base string: METHOD, host, path (or "/" when empty), then the params.
    final StringBuilder baseSb = new StringBuilder();
    baseSb.append(m_method).append('\n');
    baseSb.append(uri.getHost()).append('\n');
    baseSb.append(uri.getPath().isEmpty() ? '/' : uri.getPath()).append('\n');
    baseSb.append(paramSb.toString());

    final Mac hmac;
    final Key key;
    try {
        hmac = Mac.getInstance(m_signatureMethod);
        key = new SecretKeySpec(m_secret.getBytes(Charsets.UTF_8), m_signatureMethod);
        hmac.init(key);
    } catch (NoSuchAlgorithmException e) {
        // should never happen
        rateLimitedLogError(m_logger, "Fail to get HMAC instance %s",
                Throwables.getStackTraceAsString(e));
        return null;
    } catch (InvalidKeyException e) {
        rateLimitedLogError(m_logger, "Fail to sign the message %s",
                Throwables.getStackTraceAsString(e));
        return null;
    }
    sortedParams.add(new BasicNameValuePair(m_signatureName,
            NVPairsDecoder.percentEncode(Encoder.base64Encode(
                    hmac.doFinal(baseSb.toString().getBytes(Charsets.UTF_8))))));
    return sortedParams;
}
java
/**
 * Returns a lazy pre-order (parent before children) traversal of the tree
 * rooted at {@code root}. A fresh iterator is created per iteration; the
 * tree itself is never copied.
 *
 * @param root non-null traversal root
 */
public final FluentIterable<T> preOrderTraversal(final T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
        @Override
        public UnmodifiableIterator<T> iterator() {
            return preOrderIterator(root);
        }
    };
}
java
/**
 * Returns a lazy breadth-first (level by level) traversal of the tree rooted
 * at {@code root}. A fresh iterator is created per iteration; the tree
 * itself is never copied.
 *
 * @param root non-null traversal root
 */
public final FluentIterable<T> breadthFirstTraversal(final T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
        @Override
        public UnmodifiableIterator<T> iterator() {
            return new BreadthFirstIterator(root);
        }
    };
}
java
/**
 * Returns the current database user name via "CALL USER()".
 *
 * @throws SQLException on any database access error
 */
public String getUserName() throws SQLException {
    // BUG FIX: try-with-resources — the original leaked the ResultSet when
    // next() or getString() threw, since close() was only on the success path.
    try (ResultSet rs = execute("CALL USER()")) {
        rs.next();
        return rs.getString(1);
    }
}
java
/**
 * Reports whether the database is read-only, via "CALL isReadOnlyDatabase()".
 *
 * @throws SQLException on any database access error
 */
public boolean isReadOnly() throws SQLException {
    // BUG FIX: try-with-resources — the original leaked the ResultSet when
    // next() or getBoolean() threw, since close() was only on the success path.
    try (ResultSet rs = execute("CALL isReadOnlyDatabase()")) {
        rs.next();
        return rs.getBoolean(1);
    }
}
java
/**
 * Builds a query prefix: the given fragment followed by the shared
 * always-true WHERE clause, in a buffer presized to avoid growth.
 */
private StringBuffer toQueryPrefixNoSelect(String t) {
    return new StringBuffer(255).append(t).append(whereTrue);
}
java
/**
 * Static factory for a constant-value expression of the given VoltDB type
 * holding the given textual value.
 */
public static ConstantValueExpression makeExpression(VoltType dataType, String value) {
    final ConstantValueExpression expr = new ConstantValueExpression();
    expr.setValueType(dataType);
    expr.setValue(value);
    return expr;
}
java
/**
 * Executes an UPDATE: iterates the target range, computes each row's new
 * data, enforces the updatable-view CHECK condition when the target is a
 * view over the base table, collects (row, newData) pairs, then applies
 * them in one batch.
 *
 * @return update-count result
 */
Result executeUpdateStatement(Session session) {

    int            count          = 0;
    Expression[]   colExpressions = updateExpressions;
    HashMappedList rowset         = new HashMappedList();
    Type[]         colTypes       = baseTable.getColumnTypes();
    RangeIteratorBase it = RangeVariable.getIterator(session,
        targetRangeVariables);
    Expression checkCondition = null;

    if (targetTable != baseTable) {
        // Updatable view: the view's WHERE acts as a CHECK on the new data.
        checkCondition =
            ((TableDerived) targetTable).getQueryExpression()
                .getMainSelect().checkQueryCondition;
    }

    while (it.next()) {
        session.sessionData.startRowProcessing();

        Row      row  = it.getCurrentRow();
        Object[] data = row.getData();
        Object[] newData = getUpdatedData(session, baseTable,
                                          updateColumnMap, colExpressions,
                                          colTypes, data);

        if (checkCondition != null) {
            it.currentData = newData;

            boolean check = checkCondition.testCondition(session);

            if (!check) {
                // New row would fall outside the view: WITH CHECK OPTION violation.
                throw Error.error(ErrorCode.X_44000);
            }
        }

        rowset.add(row, newData);
    }

/* debug 190
    if (rowset.size() == 0) {
        System.out.println(targetTable.getName().name + " zero update: session "
                           + session.getId());
    } else if (rowset.size() >1) {
       System.out.println("multiple update: session "
                          + session.getId() + ", " + rowset.size());
   }

//* debug 190 */
    count = update(session, baseTable, rowset);

    return Result.getUpdateCountResult(count);
}
java