code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Kills the socket of every live connection to the given host, then
 * reports the host as failed so the rest of the mesh can react.
 */
public void closeForeignHostSocket(int hostId) {
    for (ForeignHost fh : m_foreignHosts.get(hostId)) {
        if (fh.isUp()) {
            fh.killSocket();
        }
    }
    reportForeignHostFailed(hostId);
}
java
/**
 * Severs the network link between two hosts. Only acts when this host is
 * one of the pair; it then cuts every connection to the other host.
 */
public void cutLink(int hostIdA, int hostIdB) {
    if (m_localHostId == hostIdA) {
        for (ForeignHost fh : m_foreignHosts.get(hostIdB)) {
            fh.cutLink();
        }
    }
    if (m_localHostId == hostIdB) {
        for (ForeignHost fh : m_foreignHosts.get(hostIdA)) {
            fh.cutLink();
        }
    }
}
java
/**
 * Executes the SQL held in the command widget (or in {@code ifHuge} when
 * the statement is 4096 chars or longer) and renders either the result
 * set or the update count into the result grid. SQL errors are caught
 * and displayed in the grid instead of being propagated.
 */
void execute() {
    String sCmd = null;

    // Oversized statements are stashed in ifHuge; otherwise read the widget.
    if (4096 <= ifHuge.length()) {
        sCmd = ifHuge;
    } else {
        sCmd = txtCommand.getText();
    }

    // Magic sentinel triggers the built-in performance test instead of SQL.
    if (sCmd.startsWith("-->>>TEST<<<--")) {
        testPerformance();

        return;
    }

    // Single-cell scratch array reused for grid headers and rows.
    String[] g = new String[1];

    lTime = System.currentTimeMillis();

    try {
        if (sStatement == null) {
            return;
        }

        sStatement.execute(sCmd);

        // lTime now holds the elapsed execution time in milliseconds.
        lTime = System.currentTimeMillis() - lTime;

        int r = sStatement.getUpdateCount();

        // -1 means the statement produced a ResultSet rather than a count.
        if (r == -1) {
            formatResultSet(sStatement.getResultSet());
        } else {
            g[0] = "update count";

            gResult.setHead(g);

            g[0] = String.valueOf(r);

            gResult.addRow(g);
        }

        addToRecent(txtCommand.getText());
    } catch (SQLException e) {
        lTime = System.currentTimeMillis() - lTime;
        g[0]  = "SQL Error";

        gResult.setHead(g);

        // Render message, vendor code and SQLSTATE into a single grid cell.
        String s = e.getMessage();

        s += " / Error Code: " + e.getErrorCode();
        s += " / State: " + e.getSQLState();
        g[0] = s;

        gResult.addRow(g);
    }

    updateResult();
    // Explicit GC after each statement; presumably to keep the GUI's
    // footprint small — TODO confirm this is still wanted.
    System.gc();
}
java
/**
 * Writes every row of the result set as a record, optionally preceded by
 * a header record of column names.
 */
public void writeAll(java.sql.ResultSet rs, boolean includeColumnNames) throws SQLException, IOException {
    if (includeColumnNames) {
        writeColumnNames(rs);
    }
    for (; rs.next(); ) {
        writeNext(resultService.getColumnValues(rs));
    }
}
java
/**
 * Writes one POSIX pax Interchange-Format record of the form
 * "LEN KEY=VALUE\n", where LEN is the decimal length in characters of
 * the whole record including the LEN field itself. Because the length
 * field's own width depends on the total, the width is derived from
 * banded thresholds below.
 *
 * @throws TarMalformatException if key/value are null or empty, or the
 *         record would be too large to represent
 */
public void addRecord(String key, String value) throws TarMalformatException, IOException {
    if (key == null || value == null || key.length() < 1 || value.length() < 1) {
        throw new TarMalformatException(
            RB.singleton.getString(RB.ZERO_WRITE));
    }

    int lenWithoutIlen = key.length() + value.length() + 3;
    // "Ilen" means Initial Length field.  +3 = SPACE + = + \n

    int lenW = 0;    // lenW = Length With initial-length-field

    // Each band adds the number of digits the length field itself takes.
    // e.g. lenWithoutIlen < 8 means the total (including 1 digit) is < 9,
    // still representable with a single digit.
    if (lenWithoutIlen < 8) {
        lenW = lenWithoutIlen + 1;    // Takes just 1 char to report total
    } else if (lenWithoutIlen < 97) {
        lenW = lenWithoutIlen + 2;    // Takes 2 chars to report this total
    } else if (lenWithoutIlen < 996) {
        lenW = lenWithoutIlen + 3;    // Takes 3...
    } else if (lenWithoutIlen < 9995) {
        lenW = lenWithoutIlen + 4;    // ditto
    } else if (lenWithoutIlen < 99994) {
        lenW = lenWithoutIlen + 5;
    } else {
        throw new TarMalformatException(
            RB.singleton.getString(RB.PIF_TOOBIG, 99991));
    }

    writer.write(Integer.toString(lenW));
    writer.write(' ');
    writer.write(key);
    writer.write('=');
    writer.write(value);
    writer.write('\n');
    // NOTE(review): flush is likely a no-op if writer wraps a
    // ByteArrayOutputStream, but harmless.
    writer.flush();    // Does this do anything with a BAOS?
}
java
/**
 * Marks this instance as shut down and, when it owns its executor
 * service, shuts the executor down and waits (effectively forever) for
 * in-flight work to drain.
 */
synchronized public void shutdown() {
    m_shutdown.set(true);
    if (m_isExecutorServiceLocal) {
        try {
            m_es.shutdown();
            m_es.awaitTermination(365, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            // BUGFIX: restore the interrupt status instead of silently
            // swallowing it, so callers up-stack can observe the interrupt.
            Thread.currentThread().interrupt();
            repairLog.warn("Unexpected interrupted exception", e);
        }
    }
}
java
/**
 * Creates a BabySitter backed by its own single-thread executor and
 * blocks for the initial child list. The sitter is marked as owning the
 * executor so shutdown() will tear it down.
 */
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb)
        throws InterruptedException, ExecutionException {
    final ExecutorService es =
            CoreUtils.getCachedSingleThreadExecutor("Babysitter-" + dir, 15000);
    final Pair<BabySitter, List<String>> result = blockingFactory(zk, dir, cb, es);
    result.getFirst().m_isExecutorServiceLocal = true;
    return result;
}
java
/**
 * Creates a BabySitter on the caller-supplied executor and synchronously
 * runs the first watch pass so the caller receives the initial children.
 *
 * @throws ExecutionException wrapping any failure of the initial pass
 */
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    final BabySitter sitter = new BabySitter(zk, dir, cb, es);
    final List<String> initialChildren;
    try {
        initialChildren = sitter.m_eventHandler.call();
    } catch (Exception e) {
        throw new ExecutionException(e);
    }
    return new Pair<BabySitter, List<String>>(sitter, initialChildren);
}
java
/**
 * Creates a BabySitter and schedules its first watch pass asynchronously
 * on the supplied executor, returning immediately.
 */
public static BabySitter nonblockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    final BabySitter sitter = new BabySitter(zk, dir, cb, es);
    sitter.m_es.submit(sitter.m_eventHandler);
    return sitter;
}
java
/**
 * Verifies this statement is usable. Throws X_07501 if the statement
 * itself is closed; if the parent connection has died, closes this
 * statement as a side effect and throws X_08503.
 */
void checkClosed() throws SQLException {

    if (isClosed) {
        throw Util.sqlException(ErrorCode.X_07501);
    }

    if (connection.isClosed) {
        close();

        throw Util.sqlException(ErrorCode.X_08503);
    }
}
java
/**
 * Walks the chain of results attached to {@code resultIn} after an
 * execute, collecting warnings into {@code rootWarning}, the last error
 * into {@code errorResult}, the last DATA result into
 * {@code generatedResult}, and finally wraps {@code resultIn} itself in
 * a JDBCResultSet when it is a data result.
 */
void performPostExecute() throws SQLException {

    resultOut.clearLobResults();

    generatedResult = null;

    if (resultIn == null) {
        return;
    }

    Result current = resultIn;

    // Unlink each chained result as it is visited so the chain is consumed.
    while (current.getChainedResult() != null) {
        current = current.getUnlinkChainedResult();

        if (current.getType() == ResultConstants.WARNING) {
            SQLWarning w = Util.sqlWarning(current);

            if (rootWarning == null) {
                rootWarning = w;
            } else {
                // NOTE(review): this sets the next warning on the root each
                // time rather than appending to the chain tail — later
                // warnings appear to replace earlier non-root ones; confirm.
                rootWarning.setNextWarning(w);
            }
        } else if (current.getType() == ResultConstants.ERROR) {
            errorResult = current;
        } else if (current.getType() == ResultConstants.DATA) {
            generatedResult = current;
        }
    }

    if (resultIn.isData()) {
        currentResultSet = new JDBCResultSet(connection.sessionProxy,
                this, resultIn, resultIn.metaData, connection.connProperties);
    }
}
java
/**
 * JDBC getMoreResults support. This implementation only ever has at most
 * one result set: the first call (counter 0) reports it available; any
 * later call closes the current result set (unless the caller asked to
 * KEEP_CURRENT_RESULT), discards resultIn, and reports no more results.
 */
boolean getMoreResults(int current) throws SQLException {

    checkClosed();

    if (resultIn == null || !resultIn.isData()) {
        return false;
    }

    // First call: the single data result is "the next result".
    if (resultSetCounter == 0) {
        resultSetCounter++;

        return true;
    }

    if (currentResultSet != null && current != KEEP_CURRENT_RESULT) {
        currentResultSet.close();
    }

    resultIn = null;

    return false;
}
java
/**
 * Closes any open result sets and drops all cached result state so this
 * statement can be reused or discarded.
 */
void closeResultData() throws SQLException {

    if (currentResultSet != null) {
        currentResultSet.close();
    }

    if (generatedResultSet != null) {
        generatedResultSet.close();
    }

    generatedResultSet = null;
    generatedResult    = null;
    resultIn           = null;
}
java
public boolean compatibleWithTable(VoltTable table) { String candidateName = getTableName(table); // table can't have the same name as the view if (candidateName.equals(viewName)) { return false; } // view is for a different table if (candidateName.equals(srcTableName) == false) { return false; } try { // ignore ret value here - just looking to not throw int groupColIndex = table.getColumnIndex(groupColName); VoltType groupColType = table.getColumnType(groupColIndex); if (groupColType == VoltType.DECIMAL) { // no longer a good type to group return false; } // check the sum col is still value int sumColIndex = table.getColumnIndex(sumColName); VoltType sumColType = table.getColumnType(sumColIndex); if ((sumColType == VoltType.TINYINT) || (sumColType == VoltType.SMALLINT) || (sumColType == VoltType.INTEGER)) { return true; } else { // no longer a good type to sum return false; } } catch (IllegalArgumentException e) { // column index is bad return false; } }
java
@Beta @GwtIncompatible // concurrency public static ThreadFactory platformThreadFactory() { if (!isAppEngine()) { return Executors.defaultThreadFactory(); } try { return (ThreadFactory) Class.forName("com.google_voltpatches.appengine.api.ThreadManager") .getMethod("currentRequestThreadFactory") .invoke(null); } catch (IllegalAccessException e) { throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e); } catch (ClassNotFoundException e) { throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e); } catch (NoSuchMethodException e) { throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e); } catch (InvocationTargetException e) { throw Throwables.propagate(e.getCause()); } }
java
/**
 * Scans the given directories for snapshot files (restricted to the
 * named snapshots when any are given) and prints a corruption report
 * for each snapshot found, or an error when none are found.
 */
public static void verifySnapshots(
        final List<String> directories, final Set<String> snapshotNames) {
    // Restrict the scan to the requested snapshots when names were supplied.
    final FileFilter filter = snapshotNames.isEmpty()
            ? new SnapshotFilter()
            : new SpecificSnapshotFilter(snapshotNames);

    final Map<String, Snapshot> snapshots = new HashMap<String, Snapshot>();
    for (String directory : directories) {
        SnapshotUtil.retrieveSnapshotFiles(new File(directory), snapshots, filter, true,
                SnapshotPathType.SNAP_PATH, CONSOLE_LOG);
    }

    if (snapshots.isEmpty()) {
        System.out.println("Snapshot corrupted");
        System.out.println("No files found");
    }

    for (Snapshot s : snapshots.values()) {
        System.out.println(SnapshotUtil.generateSnapshotReport(s.getTxnId(), s).getSecond());
    }
}
java
/**
 * Returns the lowest value that the histogram considers equivalent to
 * the given one, i.e. the base value of the value's bucket slot.
 */
public long lowestEquivalentValue(final long value) {
    final int bucket = getBucketIndex(value);
    final int subBucket = getSubBucketIndex(value, bucket);
    return valueFromIndex(bucket, subBucket);
}
java
/**
 * Computes the mean of all recorded values, using the median-equivalent
 * value of each histogram slot weighted by its count. Returns 0.0 for an
 * empty histogram.
 */
public double getMean() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    recordedValuesIterator.reset();
    double totalValue = 0;
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue iterationValue = recordedValuesIterator.next();
        // Weight each slot's representative (median-equivalent) value by
        // the number of samples recorded at it.
        totalValue += medianEquivalentValue(iterationValue.getValueIteratedTo())
                * iterationValue.getCountAtValueIteratedTo();
    }
    return (totalValue * 1.0) / getTotalCount();
}
java
/**
 * Computes the population standard deviation of all recorded values
 * around {@link #getMean()}, weighting each histogram slot's squared
 * deviation by its count. Returns 0.0 for an empty histogram.
 */
public double getStdDeviation() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    final double mean = getMean();
    double geometric_deviation_total = 0.0;
    recordedValuesIterator.reset();
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue iterationValue = recordedValuesIterator.next();
        // BUGFIX: use a primitive double; the original boxed Double here
        // allocated an object per recorded slot for no benefit.
        double deviation = (medianEquivalentValue(iterationValue.getValueIteratedTo()) * 1.0) - mean;
        geometric_deviation_total += (deviation * deviation)
                * iterationValue.getCountAddedInThisIterationStep();
    }
    double std_deviation = Math.sqrt(geometric_deviation_total / getTotalCount());
    return std_deviation;
}
java
public void reestablishTotalCount() { // On overflow, the totalCount accumulated counter will (always) not match the total of counts long totalCounted = 0; for (int i = 0; i < countsArrayLength; i++) { totalCounted += getCountAtIndex(i); } setTotalCount(totalCounted); }
java
/**
 * Copies the subquery's output columns onto the derived table and builds
 * its primary key; when requested, also builds a full index covering
 * every column in order.
 */
static void setTableColumnsForSubquery(Table table, QueryExpression queryExpression, boolean fullIndex) {

    table.columnList  = queryExpression.getColumns();
    table.columnCount = queryExpression.getColumnCount();

    table.createPrimaryKey();

    if (fullIndex) {
        // An identity column map (0, 1, 2, ...) covers every column.
        int[] allColumns = table.getNewColumnMap();

        ArrayUtil.fillSequence(allColumns);

        table.fullIndex = table.createIndexForColumns(allColumns);
    }
}
java
/**
 * Evaluates one candidate plan: applies scan determinization, computes
 * its cost from per-node statistics, and keeps it as the best plan seen
 * so far when it is cheaper than the current best. Always writes the
 * plan out for debugging via outputPlan.
 */
public void considerCandidatePlan(CompiledPlan plan, AbstractParsedStmt parsedStmt) {
    //System.out.println(String.format("[Raw plan]:%n%s", rawplan.rootPlanGraph.toExplainPlanString()));

    // run the set of microptimizations, which may return many plans (or not)
    ScanDeterminizer.apply(plan, m_detMode);

    // add in the sql to the plan
    plan.sql = m_sql;

    // compute resource usage using the single stats collector
    m_stats = new PlanStatistics();
    AbstractPlanNode planGraph = plan.rootPlanGraph;

    // compute statistics about a plan
    planGraph.computeEstimatesRecursively(m_stats, m_estimates, m_paramHints);

    // compute the cost based on the resources using the current cost model
    plan.cost = m_costModel.getPlanCost(m_stats);

    // filename for debug output (also consumes a plan id)
    String filename = String.valueOf(m_planId++);

    //* enable for debug */ System.out.println("DEBUG [new plan]: Cost:" + plan.cost + plan.rootPlanGraph.toExplainPlanString());

    // find the minimum cost plan
    if (m_bestPlan == null || plan.cost < m_bestPlan.cost) {
        // free the PlanColumns held by the previous best plan
        m_bestPlan = plan;
        m_bestFilename = filename;

        //* enable for debug */ System.out.println("DEBUG [Best plan] updated ***\n");
    }

    outputPlan(plan, planGraph, filename);
}
java
/**
 * Creates (or falls back to a no-op) PartitionDRGateway for a partition,
 * initializes it, and registers it in the immutable gateway map.
 * A failed init crashes the local VoltDB node.
 */
public static PartitionDRGateway getInstance(int partitionId,
        ProducerDRGateway producerGateway, StartAction startAction) {
    // if this is a primary cluster in a DR-enabled scenario
    // try to load the real version of this class
    PartitionDRGateway pdrg = null;
    if (producerGateway != null) {
        pdrg = tryToLoadProVersion();
    }
    // Fall back to the community no-op gateway when pro is unavailable.
    if (pdrg == null) {
        pdrg = new PartitionDRGateway();
    }

    // init the instance and return
    try {
        pdrg.init(partitionId, producerGateway, startAction);
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
    }

    // Regarding apparent lack of thread safety: this is called serially
    // while looping over the SPIs during database initialization.
    // The map is rebuilt copy-on-write rather than mutated in place.
    assert !m_partitionDRGateways.containsKey(partitionId);
    ImmutableMap.Builder<Integer, PartitionDRGateway> builder = ImmutableMap.builder();
    builder.putAll(m_partitionDRGateways);
    builder.put(partitionId, pdrg);
    m_partitionDRGateways = builder.build();

    return pdrg;
}
java
/**
 * Returns the permission list for the given user. When auth is disabled
 * the global permission list is returned; an unknown or null user name
 * yields an empty array.
 */
public String[] getUserPermissionList(String userName) {
    if (!m_enabled) {
        return m_perm_list;
    }
    if (userName == null) {
        return new String[] {};
    }
    AuthUser user = getUser(userName);
    return (user == null) ? new String[] {} : user.m_permissions_list;
}
java
public void callProcedure(AuthUser user, boolean isAdmin, int timeout, ProcedureCallback cb, String procName, Object[] args) { // since we know the caller, this is safe assert(cb != null); StoredProcedureInvocation task = new StoredProcedureInvocation(); task.setProcName(procName); task.setParams(args); if (timeout != BatchTimeoutOverrideType.NO_TIMEOUT) { task.setBatchTimeout(timeout); } InternalAdapterTaskAttributes kattrs = new InternalAdapterTaskAttributes( DEFAULT_INTERNAL_ADAPTER_NAME, isAdmin, connectionId()); assert(m_dispatcher != null); // JHH: I have no idea why we need to do this, but CL crashes if we don't. Sigh. try { task = MiscUtils.roundTripForCL(task); } catch (Exception e) { String msg = String.format("Cannot invoke procedure %s. failed to create task: %s", procName, e.getMessage()); m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, msg); ClientResponseImpl cri = new ClientResponseImpl(ClientResponse.UNEXPECTED_FAILURE, new VoltTable[0], msg); try { cb.clientCallback(cri); } catch (Exception e1) { throw new IllegalStateException(e1); } } createTransaction(kattrs, cb, task, user); }
java
/**
 * Trace helper: prints the message and numeric value on one line, but
 * only when the TRACESYSTEMOUT build flag is enabled.
 */
public static void printSystemOut(String message1, long message2) {
    if (!TRACESYSTEMOUT) {
        return;
    }
    System.out.print(message1);
    System.out.println(message2);
}
java
/**
 * Returns true when the given SQL type code accepts a precision (or
 * length) specification: character/binary/LOB types, DECIMAL/NUMERIC,
 * FLOAT, datetime types with fractional seconds, all INTERVAL variants,
 * and GEOGRAPHY.
 */
public static boolean acceptsPrecision(int type) {

    switch (type) {

        case Types.SQL_BINARY :
        case Types.SQL_BIT :
        case Types.SQL_BIT_VARYING :
        case Types.SQL_BLOB :
        case Types.SQL_CHAR :
        case Types.SQL_NCHAR :
        case Types.SQL_CLOB :
        case Types.NCLOB :
        case Types.SQL_VARBINARY :
        case Types.SQL_VARCHAR :
        case Types.SQL_NVARCHAR :
        case Types.VARCHAR_IGNORECASE :
        case Types.SQL_DECIMAL :
        case Types.SQL_NUMERIC :
        case Types.SQL_FLOAT :
        case Types.SQL_TIME :
        case Types.SQL_TIMESTAMP :
        case Types.SQL_INTERVAL_YEAR :
        case Types.SQL_INTERVAL_YEAR_TO_MONTH :
        case Types.SQL_INTERVAL_MONTH :
        case Types.SQL_INTERVAL_DAY :
        case Types.SQL_INTERVAL_DAY_TO_HOUR :
        case Types.SQL_INTERVAL_DAY_TO_MINUTE :
        case Types.SQL_INTERVAL_DAY_TO_SECOND :
        case Types.SQL_INTERVAL_HOUR :
        case Types.SQL_INTERVAL_HOUR_TO_MINUTE :
        case Types.SQL_INTERVAL_HOUR_TO_SECOND :
        case Types.SQL_INTERVAL_MINUTE :
        case Types.SQL_INTERVAL_MINUTE_TO_SECOND :
        case Types.SQL_INTERVAL_SECOND :
        case Types.VOLT_GEOGRAPHY :
            return true;

        default :
            return false;
    }
}
java
/**
 * Returns an iterable that cycles indefinitely over the given elements.
 * The varargs are copied into a list, so later mutation of the caller's
 * array does not affect the cycle.
 */
public static <T> Iterable<T> cycle(T... elements) {
    return cycle(Lists.newArrayList(elements));
}
java
@Override protected void coreLoadCatalog(long timestamp, final byte[] catalogBytes) throws EEException { LOG.trace("Loading Application Catalog..."); int errorCode = 0; errorCode = nativeLoadCatalog(pointer, timestamp, catalogBytes); checkErrorCode(errorCode); //LOG.info("Loaded Catalog."); }
java
/**
 * Applies a catalog diff to the native execution engine. The diff text
 * is passed down as bytes; checkErrorCode converts any non-success
 * status into an EEException.
 */
@Override
public void coreUpdateCatalog(long timestamp, boolean isStreamUpdate, final String catalogDiffs) throws EEException {
    LOG.trace("Loading Application Catalog...");
    // Declare-and-assign: the original's "int errorCode = 0" followed by
    // immediate reassignment was redundant.
    final int errorCode =
            nativeUpdateCatalog(pointer, timestamp, isStreamUpdate, getStringBytes(catalogDiffs));
    checkErrorCode(errorCode);
}
java
/**
 * Reads per-fragment timing data out of the stats buffer written by the
 * EE. Buffer layout: 1 byte timing on/off switch, then an int count of
 * succeeded fragments, then one long execution time per succeeded
 * fragment, optionally followed by the time of the fragment that failed.
 *
 * @param executionTimesOut filled with the per-fragment times when
 *        non-null; must be sized for at least the succeeded count
 * @return the number of fragments that succeeded
 */
@Override
public int extractPerFragmentStats(int batchSize, long[] executionTimesOut) {
    m_perFragmentStatsBuffer.clear();
    // Discard the first byte since it is the timing on/off switch.
    m_perFragmentStatsBuffer.get();
    int succeededFragmentsCount = m_perFragmentStatsBuffer.getInt();
    if (executionTimesOut != null) {
        assert(executionTimesOut.length >= succeededFragmentsCount);
        for (int i = 0; i < succeededFragmentsCount; i++) {
            executionTimesOut[i] = m_perFragmentStatsBuffer.getLong();
        }
        // This is the time for the failed fragment.
        if (succeededFragmentsCount < executionTimesOut.length) {
            executionTimesOut[succeededFragmentsCount] = m_perFragmentStatsBuffer.getLong();
        }
    }
    return succeededFragmentsCount;
}
java
@Override public VoltTable[] getStats( final StatsSelector selector, final int locators[], final boolean interval, final Long now) { //Clear is destructive, do it before the native call m_nextDeserializer.clear(); final int numResults = nativeGetStats(pointer, selector.ordinal(), locators, interval, now); if (numResults == -1) { throwExceptionForError(ERRORCODE_ERROR); } try { m_nextDeserializer.readInt();//Ignore the length of the result tables final VoltTable results[] = new VoltTable[numResults]; for (int ii = 0; ii < numResults; ii++) { int len = m_nextDeserializer.readInt(); byte[] bufCopy = new byte[len]; m_nextDeserializer.readFully(bufCopy, 0, len); // This Table should be readonly (true), but table stats need to be updated // Stream stats until Stream stats are deprecated from Table stats results[ii] = PrivateVoltTableFactory.createVoltTableFromBuffer(ByteBuffer.wrap(bufCopy), false); } return results; } catch (final IOException ex) { LOG.error("Failed to deserialze result table for getStats" + ex); throw new EEException(ERRORCODE_WRONG_SERIALIZED_BYTES); } }
java
/**
 * Stores a large temp table block, blocking until the store task
 * completes on the block service.
 */
public boolean storeLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getStoreTask(blockId, block));
}
java
/**
 * Loads a large temp table block into the supplied buffer, blocking
 * until the load task completes on the block service.
 */
public boolean loadLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getLoadTask(blockId, block));
}
java
/**
 * Releases a large temp table block, blocking until the release task
 * completes on the block service.
 */
public boolean releaseLargeTempTableBlock(long siteId, long blockCounter) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getReleaseTask(blockId));
}
java
/**
 * Returns the SQL text of every planned statement in this batch, decoded
 * from its stored UTF-8 bytes, in batch order.
 */
public List<String> getSQLStatements() {
    final List<String> sqlTexts = new ArrayList<>(plannedStatements.size());
    for (AdHocPlannedStatement stmt : plannedStatements) {
        sqlTexts.add(new String(stmt.sql, Constants.UTF8ENCODING));
    }
    return sqlTexts;
}
java
/**
 * Returns true when no statement in the batch carries a collector
 * (multi-partition) fragment, i.e. the whole batch can run single-partition.
 */
public boolean isSinglePartitionCompatible() {
    for (AdHocPlannedStatement stmt : plannedStatements) {
        if (stmt.core.collectorFragment != null) {
            return false;
        }
    }
    return true;
}
java
/**
 * Serializes the whole batch into a ByteBuffer: first the (typed) user
 * parameter set, then a short statement count, then each planned
 * statement. User-supplied parameters are type-coerced against the
 * statements' declared parameter types, and the total parameter count
 * must exactly match the number supplied.
 *
 * @throws VoltTypeException when too few or too many user parameters
 *         were supplied, or a parameter cannot be coerced
 */
public ByteBuffer flattenPlanArrayToBuffer() throws IOException {
    int size = 0; // sizeof batch

    ParameterSet userParamCache = null;
    if (userParamSet == null) {
        userParamCache = ParameterSet.emptyParameterSet();
    } else {
        Object[] typedUserParams = new Object[userParamSet.length];
        int ii = 0;
        for (AdHocPlannedStatement cs : plannedStatements) {
            for (VoltType paramType : cs.core.parameterTypes) {
                if (ii >= typedUserParams.length) {
                    String errorMsg =
                        "Too few actual arguments were passed for the parameters in the sql statement(s): (" +
                        typedUserParams.length + " vs. " + ii + ")";
                    // Volt-TYPE-Exception is slightly cheating, here, should there be a more general VoltArgumentException?
                    throw new VoltTypeException(errorMsg);
                }
                typedUserParams[ii] =
                        ParameterConverter.tryToMakeCompatible(paramType.classFromType(),
                                                               userParamSet[ii]);
                // System.out.println("DEBUG typed parameter: " + work.userParamSet[ii] +
                //         "using type: " + paramType + "as: " + typedUserParams[ii]);
                ii++;
            }
        }
        // Each parameter referenced in each statements should be represented
        // exactly once in userParams.
        if (ii < typedUserParams.length) {
            // Volt-TYPE-Exception is slightly cheating, here, should there be a more general VoltArgumentException?
            String errorMsg =
                "Too many actual arguments were passed for the parameters in the sql statement(s): (" +
                typedUserParams.length + " vs. " + ii + ")";
            throw new VoltTypeException(errorMsg);
        }
        userParamCache = ParameterSet.fromArrayNoCopy(typedUserParams);
    }
    size += userParamCache.getSerializedSize();

    size += 2; // sizeof batch (short statement count)

    for (AdHocPlannedStatement cs : plannedStatements) {
        size += cs.getSerializedSize();
    }

    ByteBuffer buf = ByteBuffer.allocate(size);

    // Write in the same order sizes were accumulated above.
    userParamCache.flattenToBuffer(buf);

    buf.putShort((short) plannedStatements.size());

    for (AdHocPlannedStatement cs : plannedStatements) {
        cs.flattenToBuffer(buf);
    }
    return buf;
}
java
/**
 * Produces an explain-plan for the i-th planned statement: loads the
 * aggregator fragment's JSON plan, reattaches the collector fragment for
 * multi-partition plans, and returns either the human-readable explain
 * string or (when getJSONString is set) the pretty-printed JSON.
 * JSON failures are reported in the returned string, not thrown.
 */
public String explainStatement(int i, Database db, boolean getJSONString) {
    AdHocPlannedStatement plannedStatement = plannedStatements.get(i);
    String aggplan = new String(plannedStatement.core.aggregatorFragment, Constants.UTF8ENCODING);
    PlanNodeTree pnt = new PlanNodeTree();
    try {
        String result = null;
        JSONObject jobj = new JSONObject(aggplan);
        if (getJSONString) {
            result = jobj.toString(4);
        }
        pnt.loadFromJSONPlan(jobj, db);

        if (plannedStatement.core.collectorFragment != null) {
            // multi-partition query plan
            String collplan = new String(plannedStatement.core.collectorFragment,
                    Constants.UTF8ENCODING);
            PlanNodeTree collpnt = new PlanNodeTree();
            // reattach plan fragments
            JSONObject jobMP = new JSONObject(collplan);
            collpnt.loadFromJSONPlan(jobMP, db);
            assert(collpnt.getRootPlanNode() instanceof SendPlanNode);
            pnt.getRootPlanNode().reattachFragment(collpnt.getRootPlanNode());
            if (getJSONString) {
                result += "\n" + jobMP.toString(4);
            }
        }
        if (! getJSONString) {
            result = pnt.getRootPlanNode().toExplainPlanString();
        }
        return result;
    } catch (JSONException e) {
        // NOTE(review): stdout print of the exception looks like leftover
        // debugging; the message is also returned to the caller below.
        System.out.println(e);
        return "Internal Error (JSONException): " + e.getMessage();
    }
}
java
/**
 * Returns the compiler cache for a catalog version, keyed by the hex
 * encoding of its hash; creates and registers a fresh cache on first use.
 */
public synchronized static AdHocCompilerCache getCacheForCatalogHash(byte[] catalogHash) {
    final String hashString = Encoder.hexEncode(catalogHash);
    AdHocCompilerCache cache = m_catalogHashMatch.getIfPresent(hashString);
    if (cache == null) {
        cache = new AdHocCompilerCache();
        m_catalogHashMatch.put(hashString, cache);
    }
    return cache;
}
java
synchronized void printStats() { String line1 = String.format("CACHE STATS - Literals: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n", m_literalHits, m_literalQueries, (m_literalHits * 100.0) / m_literalQueries, m_literalInsertions, m_literalEvictions); String line2 = String.format("CACHE STATS - Plans: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n", m_planHits, m_planQueries, (m_planHits * 100.0) /m_planQueries, m_planInsertions, m_planEvictions); System.out.print(line1 + line2); System.out.flush(); // reset these m_literalHits = 0; m_literalQueries = 0; m_literalInsertions = 0; m_literalEvictions = 0; m_planHits = 0; m_planQueries = 0; m_planInsertions = 0; m_planEvictions = 0; }
java
/**
 * Inserts a freshly planned statement into both cache levels: the L2
 * "core" cache keyed by the parameterized-query token, and the L1
 * literal cache keyed by the exact SQL text. When an equivalent bound
 * plan already exists in L2, its core is reused for the L1 entry so the
 * two levels share one plan object.
 */
public synchronized void put(String sql, String parsedToken,
        AdHocPlannedStatement planIn, String[] extractedLiterals,
        boolean hasUserQuestionMarkParameters, boolean hasAutoParameterizedException) {
    assert(sql != null);
    assert(parsedToken != null);
    assert(planIn != null);

    AdHocPlannedStatement plan = planIn;

    assert(new String(plan.sql, Constants.UTF8ENCODING).equals(sql));
    // hasUserQuestionMarkParameters and hasAutoParameterizedException can not be true at the same time
    // it means that a query can not be both user parameterized query and auto parameterized query.
    assert(!hasUserQuestionMarkParameters || !hasAutoParameterizedException);

    // uncomment this to get some raw stdout cache performance stats every 5s
    //startPeriodicStatsPrinting();

    // deal with L2 cache
    if (! hasAutoParameterizedException) {
        BoundPlan matched = null;
        BoundPlan unmatched = new BoundPlan(planIn.core,
                planIn.parameterBindings(extractedLiterals));

        // deal with the parameterized plan cache first
        List<BoundPlan> boundVariants = m_coreCache.get(parsedToken);
        if (boundVariants == null) {
            boundVariants = new ArrayList<BoundPlan>();
            m_coreCache.put(parsedToken, boundVariants);
            // Note that there is an edge case in which more than one plan is getting counted as one
            // "plan insertion". This only happens when two different plans arose from the same parameterized
            // query (token) because one invocation used the correct constants to trigger an expression index and
            // another invocation did not. These are not counted separately (which would have to happen below
            // after each call to boundVariants.add) because they are not evicted separately.
            // It seems saner to use consistent units when counting insertions vs. evictions.
            ++m_planInsertions;
        } else {
            // Look for an existing bound plan equivalent to the new one.
            for (BoundPlan boundPlan : boundVariants) {
                if (boundPlan.equals(unmatched)) {
                    matched = boundPlan;
                    break;
                }
            }
            if (matched != null) {
                // if a different core is found, reuse it
                // this is useful when updating the literal cache
                if (unmatched.m_core != matched.m_core) {
                    plan = new AdHocPlannedStatement(planIn, matched.m_core);
                    plan.setBoundConstants(matched.m_constants);
                }
            }
        }
        if (matched == null) {
            // Don't count insertions (of possibly repeated tokens) here
            // -- see the comment above where only UNIQUE token insertions are being counted, instead.
            boundVariants.add(unmatched);
        }
    }

    // then deal with the L1 cache
    if (! hasUserQuestionMarkParameters) {
        AdHocPlannedStatement cachedPlan = m_literalCache.get(sql);
        if (cachedPlan == null) {
            //* enable to debug */ System.out.println("DEBUG: Caching literal '" + sql + "'");
            m_literalCache.put(sql, plan);
            ++m_literalInsertions;
        } else {
            assert(cachedPlan.equals(plan));
        }
    }
}
java
/**
 * Lazily starts a timer that dumps cache statistics every five seconds.
 * Subsequent calls are no-ops once the timer exists.
 */
public void startPeriodicStatsPrinting() {
    if (m_statsTimer != null) {
        return;
    }
    m_statsTimer = new Timer();
    m_statsTimer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            printStats();
        }
    }, 5000, 5000);
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Date getDate(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Object getObject(String parameterName, Map<String,Class<?>> map) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Time getTime(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public void setNString(String parameterName, String value) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
// Not supported by this driver: verifies the statement is open, then throws.
@Override
public void setURL(String parameterName, URL val) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
/**
 * Encodes a MigratePartitionLeader request as the string
 * "&lt;Long.MAX_VALUE&gt;/&lt;hsid&gt;&lt;suffix&gt;" for storage in the leader cache.
 */
public static String suffixHSIdsWithMigratePartitionLeaderRequest(Long HSId) {
    final StringBuilder encoded = new StringBuilder();
    encoded.append(Long.MAX_VALUE)
           .append('/')
           .append(HSId.longValue())
           .append(migrate_partition_leader_suffix);
    return encoded.toString();
}
java
/**
 * Installs the partition watch on the site executor and blocks until
 * installation completes, propagating any failure via ExecutionException.
 */
public void startPartitionWatch() throws InterruptedException, ExecutionException {
    m_es.submit(new PartitionWatchEvent()).get();
}
java
/**
 * Ensures the watched ZK root node exists and installs the data watch on
 * it. If another node created it concurrently (NodeExistsException),
 * simply installs the watch on the existing node.
 */
private void processPartitionWatchEvent() throws KeeperException, InterruptedException {
    try {
        m_zk.create(m_rootNode, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        m_zk.getData(m_rootNode, m_childWatch, null);
    } catch (KeeperException.NodeExistsException e) {
        // Lost the creation race — the node is there, just watch it.
        m_zk.getData(m_rootNode, m_childWatch, null);
    }
}
java
/**
 * Converts an arbitrary Java object to this numeric type's default
 * representation. Numbers are first normalized (BigInteger→BigDecimal,
 * Float→Double, Byte/Short→Integer) and then dispatched through the
 * typeCode switch; Strings are converted via SQL_VARCHAR; anything else
 * raises X_42561 (incompatible type).
 */
public Object convertToDefaultType(SessionInterface session, Object a) {

    if (a == null) {
        return a;
    }

    Type otherType;

    if (a instanceof Number) {
        // Normalize to one of Integer / Long / Double / BigDecimal.
        if (a instanceof BigInteger) {
            a = new BigDecimal((BigInteger) a);
        } else if (a instanceof Float) {
            a = new Double(((Float) a).doubleValue());
        } else if (a instanceof Byte) {
            a = ValuePool.getInt(((Byte) a).intValue());
        } else if (a instanceof Short) {
            a = ValuePool.getInt(((Short) a).intValue());
        }

        // NOTE(review): otherType is assigned here but the switch below
        // always returns or throws for Number inputs, so these assignments
        // appear to be dead on this path.
        if (a instanceof Integer) {
            otherType = Type.SQL_INTEGER;
        } else if (a instanceof Long) {
            otherType = Type.SQL_BIGINT;
        } else if (a instanceof Double) {
            otherType = Type.SQL_DOUBLE;
        } else if (a instanceof BigDecimal) {
            // BEGIN Cherry-picked code change from hsqldb-2.2.8
            otherType = Type.SQL_DECIMAL_DEFAULT;
            /*
            if (typeCode == Types.SQL_DECIMAL || typeCode == Types.SQL_NUMERIC) {
                return convertToTypeLimits(session, a);
            }

            BigDecimal val = (BigDecimal) a;

            otherType = getNumberType(Types.SQL_DECIMAL, JavaSystem.precision(val), scale);
            */
            // END Cherry-picked code change from hsqldb-2.2.8
        } else {
            throw Error.error(ErrorCode.X_42561);
        }

        // BEGIN Cherry-picked code change from hsqldb-2.2.8
        switch (typeCode) {

            case Types.TINYINT :
            case Types.SQL_SMALLINT :
            case Types.SQL_INTEGER :
                return convertToInt(session, a, Types.INTEGER);

            case Types.SQL_BIGINT :
                return convertToLong(session, a);

            case Types.SQL_REAL :
            case Types.SQL_FLOAT :
            case Types.SQL_DOUBLE :
                return convertToDouble(a);

            case Types.SQL_NUMERIC :
            case Types.SQL_DECIMAL : {
                a = convertToDecimal(a);

                BigDecimal dec = (BigDecimal) a;

                // Rescale to this type's declared scale.
                if (scale != dec.scale()) {
                    dec = dec.setScale(scale, BigDecimal.ROUND_HALF_DOWN);
                }

                return dec;
            }
            default :
                throw Error.error(ErrorCode.X_42561);
        }
        // END Cherry-picked code change from hsqldb-2.2.8
    } else if (a instanceof String) {
        otherType = Type.SQL_VARCHAR;
    } else {
        throw Error.error(ErrorCode.X_42561);
    }

    return convertToType(session, a, otherType);
}
java
/**
 * Converts a Number to a pooled Double. BigDecimal inputs get a range
 * check: the double value is nudged by the value's signum and compared
 * back against the original; if the comparison doesn't land on the
 * expected side, the value didn't survive the round trip and X_22003
 * (numeric value out of range) is raised.
 */
private static Double convertToDouble(Object a) {

    double value;

    if (a instanceof java.lang.Double) {
        return (Double) a;
    } else if (a instanceof BigDecimal) {
        BigDecimal bd = (BigDecimal) a;

        value = bd.doubleValue();

        int        signum = bd.signum();
        BigDecimal bdd    = new BigDecimal(value + signum);

        if (bdd.compareTo(bd) != signum) {
            throw Error.error(ErrorCode.X_22003);
        }
    } else {
        value = ((Number) a).doubleValue();
    }

    return ValuePool.getDouble(Double.doubleToLongBits(value));
}
java
/**
 * Computes a MOD b for this numeric type, returning null when either
 * operand is null and raising X_22012 on division by zero.
 */
public Object mod(Object a, Object b) {

    if (a == null || b == null) {
        return null;
    }

    switch (typeCode) {

        case Types.SQL_REAL :
        case Types.SQL_FLOAT :
        case Types.SQL_DOUBLE : {
            double ad = ((Number) a).doubleValue();
            double bd = ((Number) b).doubleValue();

            if (bd == 0) {
                throw Error.error(ErrorCode.X_22012);
            }

            return ValuePool.getDouble(Double.doubleToLongBits(ad % bd));
        }
        case Types.SQL_DECIMAL : {
            // BUGFIX: the original tested b.equals(0), which compares a
            // BigDecimal to an Integer and is therefore ALWAYS false, so a
            // zero divisor slipped through to remainder() and surfaced as an
            // ArithmeticException instead of SQL error 22012. signum()
            // detects zero regardless of scale.
            if (((BigDecimal) b).signum() == 0) {
                throw Error.error(ErrorCode.X_22012);
            }

            return ValuePool.getBigDecimal(((BigDecimal) a).remainder((BigDecimal) b));
        }
        case Types.TINYINT :
        case Types.SQL_SMALLINT :
        case Types.SQL_INTEGER : {
            int ai = ((Number) a).intValue();
            int bi = ((Number) b).intValue();

            if (bi == 0) {
                throw Error.error(ErrorCode.X_22012);
            }

            return ValuePool.getInt(ai % bi);
        }
        case Types.SQL_BIGINT : {
            long al = ((Number) a).longValue();
            long bl = ((Number) b).longValue();

            if (bl == 0) {
                throw Error.error(ErrorCode.X_22012);
            }

            return ValuePool.getLong(al % bl);
        }
        default :
            throw Error.runtimeError(ErrorCode.U_S0500, "NumberType");
    }
}
java
/**
 * Appends a single character to the buffer, at least doubling the
 * backing array when it is full.
 */
public synchronized void write(int c) throws IOException {
    checkClosed();

    final int required = count + 1;
    if (required > buf.length) {
        buf = copyOf(buf, Math.max(buf.length << 1, required));
    }
    buf[count] = (char) c;
    count      = required;
}
java
/**
 * Starts a partition-leader migration when the incoming invocation is
 * {@code @MigratePartitionLeader} targeting this partition.
 *
 * Steps: validate the target partition and new-leader host, demote this
 * scheduler (checkpoint + clear leader flag), record the new leader HSId and
 * STARTED status, then publish the appointment (tagged as a
 * MigratePartitionLeader request) to the iv2appointees leader cache in ZK.
 *
 * @param msg the invocation message; ignored unless it names
 *            {@code @MigratePartitionLeader}
 */
private void initiateSPIMigrationIfRequested(Iv2InitiateTaskMessage msg) {
    if (!"@MigratePartitionLeader".equals(msg.getStoredProcedureName())) {
        return;
    }

    // params[1] = target partition id, params[2] = target host id.
    final Object[] params = msg.getParameters();
    int pid = Integer.parseInt(params[1].toString());
    if (pid != m_partitionId) {
        tmLog.warn(String.format("@MigratePartitionLeader executed at a wrong partition %d for partition %d.", m_partitionId, pid));
        return;
    }

    RealVoltDB db = (RealVoltDB)VoltDB.instance();
    int hostId = Integer.parseInt(params[2].toString());
    Long newLeaderHSId = db.getCartographer().getHSIDForPartitionHost(hostId, pid);
    if (newLeaderHSId == null || newLeaderHSId == m_hsId) {
        tmLog.warn(String.format("@MigratePartitionLeader the partition leader is already on the host %d or the host id is invalid.", hostId));
        return;
    }

    // Demote this site: checkpoint current work and drop the leader flag
    // before announcing the new appointee.
    SpScheduler scheduler = (SpScheduler)m_scheduler;
    scheduler.checkPointMigratePartitionLeader();
    scheduler.m_isLeader = false;
    m_newLeaderHSID = newLeaderHSId;
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.STARTED;

    // Publish the new appointee to ZK; failure here is fatal to this node.
    LeaderCache leaderAppointee = new LeaderCache(m_messenger.getZK(),
            "initiateSPIMigrationIfRequested-" + m_partitionId, VoltZK.iv2appointees);
    try {
        leaderAppointee.start(true);
        leaderAppointee.put(pid, LeaderCache.suffixHSIdsWithMigratePartitionLeaderRequest(newLeaderHSId));
    } catch (InterruptedException | ExecutionException | KeeperException e) {
        VoltDB.crashLocalVoltDB("fail to start MigratePartitionLeader",true, e);
    } finally {
        try {
            leaderAppointee.shutdown();
        } catch (InterruptedException e) {
        }
    }

    tmLog.info("MigratePartitionLeader for partition " + pid + " to " + CoreUtils.hsIdToString(newLeaderHSId));

    //notify the new leader right away if the current leader has drained all transactions.
    notifyNewLeaderOfTxnDoneIfNeeded();
}
java
/**
 * Detects an initiate-task message that arrived at the wrong site during a
 * leader migration and bounces it back for restart.
 *
 * @param message the incoming initiate-task message
 * @return true if the message was misrouted and a misrouted response was
 *         delivered; false if it should be processed normally here
 */
private boolean checkMisroutedIv2IntiateTaskMessage(Iv2InitiateTaskMessage message) {
    // Replica-bound work is never considered misrouted here.
    if (message.isForReplica()) {
        return false;
    }

    if (m_scheduler.isLeader() && m_migratePartitionLeaderStatus != MigratePartitionLeaderStatus.TXN_RESTART) {
        //At this point, the message is sent to partition leader
        return false;
    }

    //At this point, the message is misrouted.
    //(1) If a site has been demoted via @MigratePartitionLeader, the messages which are sent to the leader will be restarted.
    //(2) If a site becomes new leader via @MigratePartitionLeader. Transactions will be restarted before it gets notification from old
    //    leader that transactions on older leader have been drained.
    InitiateResponseMessage response = new InitiateResponseMessage(message);
    response.setMisrouted(message.getStoredProcedureInvocation());
    response.m_sourceHSId = getHSId();
    deliver(response);
    if (tmLog.isDebugEnabled()) {
        tmLog.debug("Sending message back on:" + CoreUtils.hsIdToString(m_hsId) +
                " isLeader:" + m_scheduler.isLeader() +
                " status:" + m_migratePartitionLeaderStatus + "\n" + message);
    }

    //notify the new partition leader that the old leader has completed the Txns if needed.
    notifyNewLeaderOfTxnDoneIfNeeded();
    return true;
}
java
/**
 * Handles a fragment that arrived at a site which is no longer (or not yet)
 * the partition leader due to @MigratePartitionLeader.
 *
 * A fragment for an unknown transaction is bounced back with a restartable
 * TransactionRestartException; a follow-up fragment of an in-flight
 * transaction is marked so it continues on this (previous-leader) site.
 *
 * @param message the incoming fragment
 * @return true if the fragment was rejected for restart; false if it will
 *         be processed here
 */
private boolean checkMisroutedFragmentTaskMessage(FragmentTaskMessage message) {
    if (m_scheduler.isLeader() || message.isForReplica()) {
        return false;
    }

    TransactionState txnState = (((SpScheduler)m_scheduler).getTransactionState(message.getTxnId()));
    // If a fragment is part of a transaction which have not been seen on this site, restart it.
    if (txnState == null) {
        FragmentResponseMessage response = new FragmentResponseMessage(message, getHSId());
        TransactionRestartException restart = new TransactionRestartException(
                "Transaction being restarted due to MigratePartitionLeader.", message.getTxnId());
        restart.setMisrouted(true);
        response.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, restart);
        response.m_sourceHSId = getHSId();
        response.setPartitionId(m_partitionId);
        if (tmLog.isDebugEnabled()) {
            tmLog.debug("misRoutedFragMsg on site:" + CoreUtils.hsIdToString(getHSId()) + "\n" + message);
        }
        deliver(response);
        return true;
    }

    // A transaction may have multiple batches or fragments. If the first batch or fragment has already been
    // processed, the follow-up batches or fragments should also be processed on this site.
    // NOTE(review): this condition is always true here -- the guard at the
    // top already returned when isLeader() or isForReplica() held.
    if (!m_scheduler.isLeader() && !message.isForReplica()) {
        message.setExecutedOnPreviousLeader(true);
        txnState.setLeaderMigrationInvolved();
        if (tmLog.isDebugEnabled()) {
            tmLog.debug("Follow-up fragment will be processed on " + CoreUtils.hsIdToString(getHSId()) + "\n" + message);
        }
    }
    return false;
}
java
/**
 * Serves a repair-log request, deferring it while the connection to a
 * declared dead host may still be delivering messages.
 *
 * If the request names a dead host, it is either re-queued behind any
 * messages that host could still enqueue (when the ForeignHost is known to
 * be gone) or retried on a 10 ms timer until the connection has closed.
 * Otherwise the repair-log contents are sent back to the requester.
 *
 * @param message an Iv2RepairLogRequestMessage
 */
private void handleLogRequest(VoltMessage message) {
    Iv2RepairLogRequestMessage req = (Iv2RepairLogRequestMessage)message;
    // It is possible for a dead host to queue messages after a repair request is processed
    // so make sure this can't happen by re-queuing this message after we know the dead host is gone
    // Since we are not checking validateForeignHostId on the PicoNetwork thread, it is possible for
    // the PicoNetwork thread to validateForeignHostId and queue a message behind this repair message.
    // Further, we loose visibility to the ForeignHost as soon as HostMessenger marks the host invalid
    // even though the PicoNetwork thread could still be alive so we will skeptically
    // (NOTE(review): upstream comment ends mid-sentence; presumably "...retry
    // until the connection is confirmed closed" -- confirm against history.)
    int deadHostId = req.getDeadHostId();
    if (deadHostId != Integer.MAX_VALUE) {
        if (m_messenger.canCompleteRepair(deadHostId)) {
            // Make sure we are the last in the task queue when we know the ForeignHost is gone
            req.disableDeadHostCheck();
            deliver(message);
        }
        else {
            // Log only every 100th retry after the first 100 to avoid spam.
            if (req.getRepairRetryCount() > 100 && req.getRepairRetryCount() % 100 == 0) {
                hostLog.warn("Repair Request for dead host " + deadHostId +
                        " has not been processed yet because connection has not closed");
            }
            Runnable retryRepair = new Runnable() {
                @Override
                public void run() {
                    InitiatorMailbox.this.deliver(message);
                }
            };
            VoltDB.instance().scheduleWork(retryRepair, 10, -1, TimeUnit.MILLISECONDS);
            // the repair message will be resubmitted shortly when the ForeignHosts to the dead host have been removed
        }
        return;
    }

    List<Iv2RepairLogResponseMessage> logs = m_repairLog.contents(req.getRequestId(),
            req.isMPIRequest());
    if (req.isMPIRequest()) {
        m_scheduler.cleanupTransactionBacklogOnRepair();
    }
    for (Iv2RepairLogResponseMessage log : logs) {
        send(message.m_sourceHSId, log);
    }
}
java
/**
 * Updates this (new leader) site's migration status on receipt of a
 * drain/reset notification from the previous leader.
 *
 * Ordering matters: if the drain notice arrives before this site is
 * promoted, the state becomes TXN_DRAINED; if it arrives after promotion
 * (state TXN_RESTART), transaction restarting stops (state NONE).
 *
 * @param message notification from the prior leader, or a status reset when
 *                the prior leader's host died
 */
private void setMigratePartitionLeaderStatus(MigratePartitionLeaderMessage message) {
    //The host with old partition leader is down.
    if (message.isStatusReset()) {
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        return;
    }

    if (m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus.NONE) {
        //txn draining notification from the old leader arrives before this site is promoted
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.TXN_DRAINED;
    } else if (m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus.TXN_RESTART) {
        //if the new leader has been promoted, stop restarting txns.
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    }

    tmLog.info("MigratePartitionLeader new leader " + CoreUtils.hsIdToString(m_hsId) +
            " is notified by previous leader " + CoreUtils.hsIdToString(message.getPriorLeaderHSID()) +
            ". status:" + m_migratePartitionLeaderStatus);
}
java
/**
 * Sets this site's migration status at promotion time (or clears it).
 *
 * When enabling: if the previous leader already reported its transactions
 * drained (TXN_DRAINED), migration completes immediately; otherwise the
 * site enters TXN_RESTART and bounces transactions until the drain
 * notification arrives.
 *
 * @param migratePartitionLeader true when this site is being promoted as
 *                               part of a leader migration; false to reset
 */
public void setMigratePartitionLeaderStatus(boolean migratePartitionLeader) {
    if (!migratePartitionLeader) {
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        m_newLeaderHSID = Long.MIN_VALUE;
        return;
    }

    //The previous leader has already drained all txns
    if (m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus.TXN_DRAINED) {
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        tmLog.info("MigratePartitionLeader transactions on previous partition leader are drained. New leader:" +
                CoreUtils.hsIdToString(m_hsId) + " status:" + m_migratePartitionLeaderStatus);
        return;
    }

    //Wait for the notification from old partition leader
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.TXN_RESTART;
    tmLog.info("MigratePartitionLeader restart txns on new leader:" +
            CoreUtils.hsIdToString(m_hsId) + " status:" + m_migratePartitionLeaderStatus);
}
java
/**
 * If this (previous leader) site has drained all transactions up to the
 * migration checkpoint, notifies the new leader and resets local migration
 * state. No-op when no migration is in flight or draining is incomplete.
 */
public void notifyNewLeaderOfTxnDoneIfNeeded() {
    //return quickly to avoid performance hit
    // Long.MIN_VALUE is the "no migration pending" sentinel.
    if (m_newLeaderHSID == Long.MIN_VALUE ) {
        return;
    }

    SpScheduler scheduler = (SpScheduler)m_scheduler;
    if (!scheduler.txnDoneBeforeCheckPoint()) {
        return;
    }

    MigratePartitionLeaderMessage message = new MigratePartitionLeaderMessage(m_hsId, m_newLeaderHSID);
    send(message.getNewLeaderHSID(), message);

    //reset status on the old partition leader
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    m_repairLog.setLeaderState(false);
    tmLog.info("MigratePartitionLeader previous leader " + CoreUtils.hsIdToString(m_hsId) +
            " notifies new leader " + CoreUtils.hsIdToString(m_newLeaderHSID) +
            " transactions are drained." + " status:" + m_migratePartitionLeaderStatus);
    m_newLeaderHSID = Long.MIN_VALUE;
}
java
/**
 * Restores this site to normal leadership, abandoning any in-flight
 * migration: re-asserts the leader flag on scheduler and repair log, clears
 * the migration status, and clears the pending new-leader HSId sentinel.
 */
public void resetMigratePartitionLeaderStatus() {
    m_scheduler.m_isLeader = true;
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    m_repairLog.setLeaderState(true);
    m_newLeaderHSID = Long.MIN_VALUE;
}
java
/**
 * Looks up an Option by short or long name, ignoring leading hyphens.
 *
 * @param opt option token as it appeared on the command line
 * @return the matching Option, or null if none is registered
 */
private Option resolveOption(String opt) {
    final String name = Util.stripLeadingHyphens(opt);

    for (Option candidate : options) {
        // Match against either the short or the long form.
        if (name.equals(candidate.getOpt()) || name.equals(candidate.getLongOpt())) {
            return candidate;
        }
    }

    return null;
}
java
/**
 * Returns the remaining (non-option) arguments as a fresh array.
 *
 * @return the leftover arguments, in order
 */
public String[] getArgs() {
    // toArray fills and returns the supplied array when it is exactly sized.
    return args.toArray(new String[args.size()]);
}
java
/**
 * Recursively finds the scan node over this materialized-view table and
 * rewires the plan for re-aggregation.
 *
 * When found: in a join plan (reAggNode non-null) the scan child is replaced
 * by reAggNode; the scan node itself receives the in-lined projection and is
 * remembered in m_scanNode.
 *
 * @param node      subtree root to search
 * @param reAggNode replacement re-aggregation node, or null for the
 *                  single-table case
 * @return true once the MV scan node was found and processed
 */
public boolean processScanNodeWithReAggNode(AbstractPlanNode node, AbstractPlanNode reAggNode) {
    // MV table scan node can not be in in-lined nodes.
    for (int i = 0; i < node.getChildCount(); i++) {
        AbstractPlanNode child = node.getChild(i);

        if (child instanceof AbstractScanPlanNode) {
            AbstractScanPlanNode scanNode = (AbstractScanPlanNode) child;
            if (!scanNode.getTargetTableName().equals(getMVTableName())) {
                continue;
            }
            if (reAggNode != null) {
                // Join query case.
                node.setAndLinkChild(i, reAggNode);
            }
            // Process scan node.
            // Set up the scan plan node's scan columns. Add in-line projection node for scan node.
            scanNode.addInlinePlanNode(m_scanInlinedProjectionNode);
            m_scanNode = scanNode;
            return true;
        } else {
            // Keep searching deeper; stop as soon as one branch succeeds.
            boolean replaced = processScanNodeWithReAggNode(child, reAggNode);
            if (replaced) {
                return true;
            }
        }
    }
    return false;
}
java
/**
 * Resolves column references throughout the query specification: join
 * conditions (each against only the preceding range variables), the WHERE
 * condition, visible select columns (which may accept sequences unless the
 * query is DISTINCT or grouped), ORDER BY expressions, and - via the VoltDB
 * extension - GROUP BY expressions.
 */
private void resolveColumnReferences() {

    if (isDistinctSelect || isGrouped) {
        acceptsSequences = false;
    }

    for (int i = 0; i < rangeVariables.length; i++) {
        Expression e = rangeVariables[i].nonIndexJoinCondition;

        if (e == null) {
            continue;
        }

        // A join condition may only reference range variables up to and
        // including its own position (i + 1 as the exclusive bound).
        resolveColumnReferencesAndAllocate(e, i + 1, false);
    }

    resolveColumnReferencesAndAllocate(queryCondition,
                                       rangeVariables.length, false);

    for (int i = 0; i < indexLimitVisible; i++) {
        resolveColumnReferencesAndAllocate(exprColumns[i],
                                           rangeVariables.length,
                                           acceptsSequences);
    }

    // Non-visible expressions (ORDER BY range) never accept sequences.
    for (int i = indexLimitVisible; i < indexStartOrderBy; i++) {
        resolveColumnReferencesAndAllocate(exprColumns[i],
                                           rangeVariables.length, false);
    }

    /************************* Volt DB Extensions *************************/
    resolveColumnReferencesInGroupBy();
    /**********************************************************************/

    resolveColumnReferencesInOrderBy(sortAndSlice);
}
java
/**
 * Computes the maximum number of rows to fetch, combining LIMIT and OFFSET.
 *
 * In the simple-limit case the result is offset + limit (saturating at
 * Integer.MAX_VALUE). Otherwise all rows must be materialized
 * (Integer.MAX_VALUE) - except that a hard LIMIT 0 is honored as 0 via the
 * VoltDB extension below.
 *
 * @param session  current session (for evaluating limit expressions)
 * @param rowCount externally imposed maximum; 0 means "no external cap"
 * @return the effective row cap for result building
 */
private int getMaxRowCount(Session session, int rowCount) {

    int limitStart = getLimitStart(session);
    int limitCount = getLimitCount(session, rowCount);

    if (simpleLimit) {
        if (rowCount == 0) {
            rowCount = limitCount;
        }

        // A VoltDB extension to support LIMIT 0
        // (the upstream test treated rowCount == 0 as unlimited; VoltDB
        // keeps 0 meaning zero rows.)
        if (rowCount > Integer.MAX_VALUE - limitStart) {
        /* disable 1 line ...
        if (rowCount == 0 || rowCount > Integer.MAX_VALUE - limitStart) {
        ... disabled 1 line */
        // End of VoltDB extension
            rowCount = Integer.MAX_VALUE;
        } else {
            rowCount += limitStart;
        }
    } else {
        rowCount = Integer.MAX_VALUE;
        // A VoltDB extension to support LIMIT 0
        // limitCount == 0 can be enforced/optimized as rowCount == 0 regardless of offset
        // even in non-simpleLimit cases (SELECT DISTINCT, GROUP BY, and/or ORDER BY).
        // This is an optimal handling of a hard-coded LIMIT 0, but it really shouldn't be the ONLY
        // enforcement for zero LIMITs -- what about "LIMIT ?" with 0 passed later as a parameter?
        // The HSQL executor ("HSQL back end") also needs runtime enforcement of zero limits.
        // The VoltDB executor has such enforcement.
        if (limitCount == 0) {
            rowCount = 0;
        }
        // End of VoltDB extension
    }

    return rowCount;
}
java
/**
 * Debug helper: prints the query's SQL (best effort) and every entry of
 * exprColumns to stdout, annotating the section boundaries (visible columns,
 * ORDER BY start, aggregates start, end-of-visible).
 *
 * @param header caption printed before the dump
 */
protected void dumpExprColumns(String header){
    System.out.println("\n\n*********************************************");
    System.out.println(header);
    try {
        System.out.println(getSQL());
    } catch (Exception e) {
        // Best effort only: getSQL may fail on partially-built statements;
        // the dump proceeds without it.
    }
    for (int i = 0; i < exprColumns.length; ++i) {
        if (i == 0) System.out.println("Visible columns:");
        if (i == indexStartOrderBy) System.out.println("start order by:");
        if (i == indexStartAggregates) System.out.println("start aggregates:");
        if (i == indexLimitVisible) System.out.println("After limit of visible columns:");
        System.out.println(i + ": " + exprColumns[i]);
    }
    System.out.println("\n\n");
}
java
/**
 * Folds one EE cache report into the running statistics.
 *
 * Note the mixed semantics: eeCacheSize and partitionId are gauges
 * (overwritten), while hits/misses/invocations are counters (accumulated).
 *
 * @param eeCacheSize current cache level reported by the EE
 * @param hits        cache hits since the last report
 * @param misses      cache misses since the last report
 * @param partitionId partition this EE serves
 */
public void updateEECacheStats(long eeCacheSize, long hits,
        long misses, int partitionId) {
    m_cache1Level = eeCacheSize;
    m_cache1Hits += hits;
    m_cacheMisses += misses;
    m_invocations += hits + misses;
    m_partitionId = partitionId;
}
java
/**
 * Finishes one planner timing sample and records cache usage.
 *
 * If a start time is pending, the elapsed nanoseconds are folded into the
 * total/min/max (and interval min/max) accumulators; negative deltas
 * (clock anomalies) are discarded, with a log line when larger than 1 s.
 * Cache levels are gauges; the CacheUse outcome bumps the matching counter.
 *
 * @param cache1Size  current level of cache 1 (gauge)
 * @param cache2Size  current level of cache 2 (gauge)
 * @param cacheUse    outcome of this planning invocation
 * @param partitionId partition attribution for the sample
 */
public void endStatsCollection(long cache1Size, long cache2Size, CacheUse cacheUse, long partitionId) {
    if (m_currentStartTime != null) {
        long delta = System.nanoTime() - m_currentStartTime;
        if (delta < 0) {
            // Ignore negative elapsed time; only log egregious cases.
            if (Math.abs(delta) > 1000000000) {
                log.info("Planner statistics recorded a negative planning time larger than one second: " + delta);
            }
        }
        else {
            m_totalPlanningTime += delta;
            m_minPlanningTime = Math.min(delta, m_minPlanningTime);
            m_maxPlanningTime = Math.max(delta, m_maxPlanningTime);
            m_lastMinPlanningTime = Math.min(delta, m_lastMinPlanningTime);
            m_lastMaxPlanningTime = Math.max(delta, m_lastMaxPlanningTime);
        }
        m_currentStartTime = null;
    }

    m_cache1Level = cache1Size;
    m_cache2Level = cache2Size;

    switch(cacheUse) {
    case HIT1:
        m_cache1Hits++;
        break;
    case HIT2:
        m_cache2Hits++;
        break;
    case MISS:
        m_cacheMisses++;
        break;
    case FAIL:
        m_failures++;
        break;
    }
    m_invocations++;
    m_partitionId = partitionId;
}
java
/**
 * Fills one stats row from the accumulated planner counters.
 *
 * In interval mode, deltas since the previous poll are reported and the
 * "last" snapshots are advanced; min/max are reset to their identity values
 * for the next interval.
 */
@Override
protected void updateStatsRow(Object rowKey, Object rowValues[]) {
    super.updateStatsRow(rowKey, rowValues);
    // (The original also wrote PARTITION_ID here; that was redundant with
    // the identical assignment below and has been removed.)

    long totalTimedExecutionTime = m_totalPlanningTime;
    long minExecutionTime = m_minPlanningTime;
    long maxExecutionTime = m_maxPlanningTime;
    long cache1Level = m_cache1Level;
    long cache2Level = m_cache2Level;
    long cache1Hits = m_cache1Hits;
    long cache2Hits = m_cache2Hits;
    long cacheMisses = m_cacheMisses;
    long failureCount = m_failures;
    if (m_interval) {
        // Report deltas since the last poll and roll the snapshots forward.
        totalTimedExecutionTime = m_totalPlanningTime - m_lastTimedPlanningTime;
        m_lastTimedPlanningTime = m_totalPlanningTime;

        minExecutionTime = m_lastMinPlanningTime;
        maxExecutionTime = m_lastMaxPlanningTime;
        m_lastMinPlanningTime = Long.MAX_VALUE;
        m_lastMaxPlanningTime = Long.MIN_VALUE;

        cache1Level = m_cache1Level - m_lastCache1Level;
        m_lastCache1Level = m_cache1Level;
        cache2Level = m_cache2Level - m_lastCache2Level;
        m_lastCache2Level = m_cache2Level;
        cache1Hits = m_cache1Hits - m_lastCache1Hits;
        m_lastCache1Hits = m_cache1Hits;
        cache2Hits = m_cache2Hits - m_lastCache2Hits;
        m_lastCache2Hits = m_cache2Hits;
        cacheMisses = m_cacheMisses - m_lastCacheMisses;
        m_lastCacheMisses = m_cacheMisses;
        failureCount = m_failures - m_lastFailures;
        m_lastFailures = m_failures;
        m_lastInvocations = m_invocations;
    }

    rowValues[columnNameToIndex.get(VoltSystemProcedure.CNAME_SITE_ID)] = m_siteId;
    rowValues[columnNameToIndex.get("PARTITION_ID")] = m_partitionId;
    rowValues[columnNameToIndex.get("CACHE1_LEVEL")] = cache1Level;
    rowValues[columnNameToIndex.get("CACHE2_LEVEL")] = cache2Level;
    rowValues[columnNameToIndex.get("CACHE1_HITS" )] = cache1Hits;
    rowValues[columnNameToIndex.get("CACHE2_HITS" )] = cache2Hits;
    rowValues[columnNameToIndex.get("CACHE_MISSES")] = cacheMisses;
    rowValues[columnNameToIndex.get("PLAN_TIME_MIN")] = minExecutionTime;
    rowValues[columnNameToIndex.get("PLAN_TIME_MAX")] = maxExecutionTime;
    if (getSampleCount() != 0) {
        rowValues[columnNameToIndex.get("PLAN_TIME_AVG")] =
                (totalTimedExecutionTime / getSampleCount());
    } else {
        rowValues[columnNameToIndex.get("PLAN_TIME_AVG")] = 0L;
    }
    rowValues[columnNameToIndex.get("FAILURES")] = failureCount;
}
java
/**
 * Appends a Bootstrap-style label span for {@code text} to the builder.
 *
 * Produces {@code <span class='label label[-color] l-TEXT'>text</span>},
 * where TEXT is the text with spaces replaced by underscores so it is a
 * usable CSS class token.
 *
 * @param sb    target builder (mutated)
 * @param color optional label color suffix; null omits the "-color" part
 * @param text  label text, also used (underscored) as a class name
 */
static void tag(StringBuilder sb, String color, String text) {
    sb.append("<span class='label label");
    if (color != null) {
        sb.append("-").append(color);
    }
    sb.append(" l-")
      .append(text.replace(' ', '_'))
      .append("'>")
      .append(text)
      .append("</span>");
}
java
/**
 * Renders the full catalog report HTML by filling the bundled template's
 * ##PLACEHOLDER## slots (stats, procedures, schema, sizes, cluster config,
 * platform properties, version, DDL, timestamp, telemetry key).
 *
 * Platform property collection is kicked off asynchronously first so it is
 * (likely) warm by the time ##PLATFORM## is substituted.
 *
 * @param catalog      compiled catalog to report on
 * @param minHeap      minimum heap estimate for the stats section
 * @param isPro        whether this is an enterprise build
 * @param hostCount    cluster host count
 * @param sitesPerHost sites per host
 * @param kfactor      replication factor
 * @param warnings     compiler feedback to surface in the stats section
 * @param autoGenDDL   canonical DDL to embed (HTML-escaped)
 * @return the completed report HTML
 * @throws IOException if the template resource cannot be read
 */
public static String report(Catalog catalog, long minHeap, boolean isPro, int hostCount,
        int sitesPerHost, int kfactor, ArrayList<Feedback> warnings, String autoGenDDL)
                throws IOException {
    // asynchronously get platform properties
    new Thread() {
        @Override
        public void run() {
            PlatformProperties.getPlatformProperties();
        }
    }.start();

    URL url = Resources.getResource(ReportMaker.class, "template.html");
    String contents = Resources.toString(url, Charsets.UTF_8);

    Cluster cluster = catalog.getClusters().get("cluster");
    assert(cluster != null);
    Database db = cluster.getDatabases().get("database");
    assert(db != null);

    String statsData = getStatsHTML(db, minHeap, warnings);
    contents = contents.replace("##STATS##", statsData);

    // generateProceduresTable needs to happen before generateSchemaTable
    // because some metadata used in the later is generated in the former
    String procData = generateProceduresTable(db.getTables(), db.getProcedures());
    contents = contents.replace("##PROCS##", procData);

    String schemaData = generateSchemaTable(db);
    contents = contents.replace("##SCHEMA##", schemaData);

    DatabaseSizes sizes = CatalogSizing.getCatalogSizes(db, DrRoleType.XDCR.value().equals(cluster.getDrrole()));

    String sizeData = generateSizeTable(sizes);
    contents = contents.replace("##SIZES##", sizeData);

    String clusterConfig = generateClusterConfiguration(isPro, hostCount, sitesPerHost, kfactor);
    contents = contents.replace("##CLUSTERCONFIG##", clusterConfig);

    String sizeSummary = generateSizeSummary(sizes);
    contents = contents.replace("##SIZESUMMARY##", sizeSummary);

    String heapSummary = generateRecommendedServerSettings(sizes);
    contents = contents.replace("##RECOMMENDEDSERVERSETTINGS##", heapSummary);

    String platformData = PlatformProperties.getPlatformProperties().toHTML();
    contents = contents.replace("##PLATFORM##", platformData);

    contents = contents.replace("##VERSION##", VoltDB.instance().getVersionString());

    contents = contents.replace("##DDL##", escapeHtml4(autoGenDDL));

    DateFormat df = new SimpleDateFormat("d MMM yyyy HH:mm:ss z");
    contents = contents.replace("##TIMESTAMP##", df.format(m_timestamp));

    // Embed version + time in the telemetry URL's key parameter.
    String msg = Encoder.hexEncode(VoltDB.instance().getVersionString() + "," + System.currentTimeMillis());
    contents = contents.replace("get.py?a=KEY&", String.format("get.py?a=%s&", msg));

    return contents;
}
java
/**
 * Produces the "live" variant of the catalog report: loads the static report
 * baked into the catalog jar, un-comments its resource section, injects the
 * running platform properties, and switches the telemetry URL's build flag
 * to enterprise/community as appropriate.
 *
 * @return the adjusted report HTML
 */
public static String liveReport() {
    byte[] reportbytes = VoltDB.instance().getCatalogContext().getFileInJar(VoltCompiler.CATLOG_REPORT);
    String report = new String(reportbytes, Charsets.UTF_8);

    // remove commented out code
    report = report.replace("<!--##RESOURCES", "");
    report = report.replace("##RESOURCES-->", "");

    // inject the cluster overview
    //String clusterStr = "<h4>System Overview</h4>\n<p>" + getLiveSystemOverview() + "</p><br/>\n";
    //report = report.replace("<!--##CLUSTER##-->", clusterStr);

    // inject the running system platform properties
    PlatformProperties pp = PlatformProperties.getPlatformProperties();
    String ppStr = "<h4>Cluster Platform</h4>\n<p>" + pp.toHTML() + "</p><br/>\n";
    report = report.replace("<!--##PLATFORM2##-->", ppStr);

    // change the live/static var to live
    if (VoltDB.instance().getConfig().m_isEnterprise) {
        report = report.replace("&b=r&", "&b=e&");
    }
    else {
        report = report.replace("&b=r&", "&b=c&");
    }

    return report;
}
java
private static boolean turnOffClientInterface() { // we don't expect this to ever fail, but if it does, skip to dying immediately VoltDBInterface vdbInstance = instance(); if (vdbInstance != null) { ClientInterface ci = vdbInstance.getClientInterface(); if (ci != null) { if (!ci.ceaseAllPublicFacingTrafficImmediately()) { return false; } } } return true; }
java
/**
 * Best-effort SNMP crash notification. Silently returns when the message is
 * blank or the SNMP machinery is unavailable; any failure while sending is
 * logged rather than propagated (we are already crashing).
 *
 * @param msg crash description; ignored if null/blank
 */
private static void sendCrashSNMPTrap(String msg) {
    if (msg == null || msg.trim().isEmpty()) {
        return;
    }

    VoltDBInterface vdb = instance();
    if (vdb == null) {
        return;
    }

    SnmpTrapSender sender = vdb.getSnmpTrapSender();
    if (sender == null) {
        return;
    }

    try {
        sender.crash(msg);
    } catch (Throwable t) {
        // Never let trap delivery interfere with the crash path itself.
        new VoltLogger("HOST").warn("failed to issue a crash SNMP trap", t);
    }
}
java
/**
 * Crashes the entire cluster: records test hooks, sends an SNMP trap, shuts
 * off client traffic, poisons the rest of the cluster, then (in the finally
 * block, unconditionally) crashes the local node.
 *
 * @param errMsg     reason for the crash
 * @param stackTrace whether the local crash should dump stacks
 * @param t          optional causing throwable
 */
public static void crashGlobalVoltDB(String errMsg, boolean stackTrace, Throwable t) {
    // for test code
    wasCrashCalled = true;
    crashMessage = errMsg;
    if (ignoreCrash) {
        throw new AssertionError("Faux crash of VoltDB successful.");
    }
    // end test code

    // send a snmp trap crash notification
    sendCrashSNMPTrap(errMsg);
    try {
        // turn off client interface as fast as possible
        // we don't expect this to ever fail, but if it does, skip to dying immediately
        if (!turnOffClientInterface()) {
            return; // this will jump to the finally block and die faster
        }
        // instruct the rest of the cluster to die
        instance().getHostMessenger().sendPoisonPill(errMsg);
        // give the pill a chance to make it through the network buffer
        Thread.sleep(500);
    } catch (Exception e) {
        e.printStackTrace();
        // sleep even on exception in case the pill got sent before the exception
        try { Thread.sleep(500); } catch (InterruptedException e2) {}
    }
    // finally block does its best to ensure death, no matter what context this
    // is called in
    finally {
        crashLocalVoltDB(errMsg, stackTrace, t);
    }
}
java
/**
 * Entry point for the VoltDB server process: parses and validates the
 * command line, then either runs the GET CLI path or initializes and runs
 * the server. Running out of Java heap in the main thread crashes the node
 * with a clear message.
 *
 * @param args command-line arguments
 */
public static void main(String[] args) {
    //Thread.setDefaultUncaughtExceptionHandler(new VoltUncaughtExceptionHandler());
    Configuration config = new Configuration(args);
    try {
        if (!config.validate()) {
            System.exit(-1);
        } else {
            if (config.m_startAction == StartAction.GET) {
                cli(config);
            } else {
                initialize(config);
                instance().run();
            }
        }
    }
    catch (OutOfMemoryError e) {
        String errmsg = "VoltDB Main thread: ran out of Java memory. This node will shut down.";
        VoltDB.crashLocalVoltDB(errmsg, false, e);
    }
}
java
/**
 * Resolves the interface DR should bind to: the configured DR interface when
 * present, else the external interface, else the empty string.
 *
 * @return the replication interface name, never null
 */
public static String getDefaultReplicationInterface() {
    final String drInterface = m_config.m_drInterface;
    if (drInterface != null && !drInterface.isEmpty()) {
        return drInterface;
    }

    final String external = m_config.m_externalInterface;
    return (external == null) ? "" : external;
}
java
/**
 * Drops every map entry whose counter currently reads zero.
 *
 * Note: the read and the removal are not atomic with respect to concurrent
 * increments, matching the original iterator-based behavior.
 */
public void removeAllZeros() {
    map.entrySet().removeIf(entry -> {
        AtomicLong counter = entry.getValue();
        return counter != null && counter.get() == 0L;
    });
}
java
/**
 * Asynchronously invokes the named single-partition procedure once per
 * partition, delegating to the underlying runner.
 *
 * @param procedureName procedure to invoke on every partition
 * @param params        procedure arguments (the partition key is supplied
 *                      per-partition by the runner)
 * @return future completing with one response per partition
 */
public CompletableFuture<ClientResponseWithPartitionKey[]> callAllPartitionProcedure(String procedureName,
        Object... params) {
    return m_runner.callAllPartitionProcedure(procedureName, params);
}
java
/**
 * Returns a rewound duplicate of the table's backing buffer.
 *
 * The duplicate shares the underlying bytes (no copy) but has independent
 * position/limit, so callers can read freely without disturbing the table's
 * own cursor.
 *
 * @param vt table whose serialized bytes are wanted
 * @return a ByteBuffer positioned at 0 over the table's data
 */
public static ByteBuffer getTableDataReference(VoltTable vt) {
    ByteBuffer buf = vt.m_buffer.duplicate();
    buf.rewind();
    return buf;
}
java
/**
 * Reserves {@code requiredPermits} and returns how long (microseconds) the
 * caller must wait before proceeding.
 *
 * The wait returned covers only the previously accumulated debt
 * (nextFreeTicketMicros as of entry); the cost of THIS reservation is pushed
 * onto nextFreeTicketMicros, i.e. charged to the next caller - the classic
 * rate-limiter bookkeeping.
 *
 * @param requiredPermits permits requested
 * @param nowMicros       current time in microseconds
 * @return microseconds the caller should sleep (0 if none)
 */
private long reserveNextTicket(double requiredPermits, long nowMicros) {
    resync(nowMicros);

    // Debt accrued before this request; this is what the caller pays.
    long microsToNextFreeTicket = Math.max(0, nextFreeTicketMicros - nowMicros);

    // Spend stored permits first; only fresh permits cost stable interval time.
    double storedPermitsToSpend = Math.min(requiredPermits, this.storedPermits);
    double freshPermits = requiredPermits - storedPermitsToSpend;

    long waitMicros = storedPermitsToWaitTime(this.storedPermits, storedPermitsToSpend)
            + (long) (freshPermits * stableIntervalMicros);

    this.nextFreeTicketMicros = nextFreeTicketMicros + waitMicros;
    this.storedPermits -= storedPermitsToSpend;
    return microsToNextFreeTicket;
}
java
/**
 * Registers all options of a group, tracking requiredness at the group
 * level (individual options inside a group are forced non-required).
 *
 * @param group the option group to add
 * @return this Options instance, for chaining
 */
public Options addOptionGroup(OptionGroup group) {
    if (group.isRequired()) {
        requiredOpts.add(group);
    }

    for (Option option : group.getOptions()) {
        // an Option cannot be required if it is in an
        // OptionGroup, either the group is required or
        // nothing is required
        option.setRequired(false);
        addOption(option);

        optionGroups.put(option.getKey(), group);
    }

    return this;
}
java
/**
 * Adds a no-argument option with only a short name.
 *
 * @param opt         short option name
 * @param description help text
 * @return this Options instance, for chaining
 */
public Options addOption(String opt, String description) {
    addOption(opt, null, false, description);
    return this;
}
java
/**
 * Adds an option with only a short name.
 *
 * @param opt         short option name
 * @param hasArg      whether the option takes an argument value
 * @param description help text
 * @return this Options instance, for chaining
 */
public Options addOption(String opt, boolean hasArg, String description) {
    addOption(opt, null, hasArg, description);
    return this;
}
java
/**
 * Adds an option with short and long names.
 *
 * @param opt         short option name
 * @param longOpt     long option name (may be null)
 * @param hasArg      whether the option takes an argument value
 * @param description help text
 * @return this Options instance, for chaining
 */
public Options addOption(String opt, String longOpt, boolean hasArg, String description) {
    addOption(new Option(opt, longOpt, hasArg, description));
    return this;
}
java
/**
 * Produces a field-by-field copy of this command line.
 *
 * Base-class fields are copied first, then the derived-class test/launch
 * fields; the javaProperties map is deep-copied into a fresh TreeMap.
 * Fields marked "final in baseclass" cannot be assigned and are noted
 * inline (m_commitLogDir is re-pointed at /tmp for the copy).
 *
 * @return a new CommandLine mirroring this one
 */
public CommandLine makeCopy() {
    CommandLine cl = new CommandLine(m_startAction);
    // first copy the base class fields
    cl.m_ipcPort = m_ipcPort;
    cl.m_backend = m_backend;
    cl.m_leader = m_leader;
    cl.m_pathToCatalog = m_pathToCatalog;
    cl.m_pathToDeployment = m_pathToDeployment;
    cl.m_pathToLicense = m_pathToLicense;
    cl.m_noLoadLibVOLTDB = m_noLoadLibVOLTDB;
    cl.m_zkInterface = m_zkInterface;
    cl.m_port = m_port;
    cl.m_adminPort = m_adminPort;
    cl.m_internalPort = m_internalPort;
    cl.m_externalInterface = m_externalInterface;
    cl.m_internalInterface = m_internalInterface;
    cl.m_drAgentPortStart = m_drAgentPortStart;
    cl.m_httpPort = m_httpPort;
    cl.m_drPublicHost = m_drPublicHost;
    cl.m_drPublicPort = m_drPublicPort;
    // final in baseclass: cl.m_isEnterprise = m_isEnterprise;
    cl.m_deadHostTimeoutMS = m_deadHostTimeoutMS;
    cl.m_startMode = m_startMode;
    cl.m_selectedRejoinInterface = m_selectedRejoinInterface;
    cl.m_quietAdhoc = m_quietAdhoc;
    // final in baseclass: cl.m_commitLogDir = new File("/tmp");
    cl.m_timestampTestingSalt = m_timestampTestingSalt;
    cl.m_isRejoinTest = m_isRejoinTest;
    cl.m_tag = m_tag;
    cl.m_vemTag = m_vemTag;
    cl.m_versionStringOverrideForTest = m_versionStringOverrideForTest;
    cl.m_versionCompatibilityRegexOverrideForTest = m_versionCompatibilityRegexOverrideForTest;
    cl.m_buildStringOverrideForTest = m_buildStringOverrideForTest;
    cl.m_forceVoltdbCreate = m_forceVoltdbCreate;
    cl.m_userSchemas = m_userSchemas;
    cl.m_stagedClassesPaths = m_stagedClassesPaths;

    // second, copy the derived class fields
    cl.includeTestOpts = includeTestOpts;
    cl.debugPort = debugPort;
    cl.zkport = zkport;
    cl.buildDir = buildDir;
    cl.volt_root = volt_root;
    cl.java_library_path = java_library_path;
    cl.rmi_host_name = rmi_host_name;
    cl.log4j = log4j;
    cl.gcRollover = gcRollover;
    cl.voltFilePrefix = voltFilePrefix;
    cl.initialHeap = initialHeap;
    cl.maxHeap = maxHeap;
    cl.classPath = classPath;
    cl.javaExecutable = javaExecutable;
    cl.jmxPort = jmxPort;
    cl.jmxHost = jmxHost;
    cl.customCmdLn = customCmdLn;
    cl.m_isPaused = m_isPaused;
    cl.m_meshBrokers = m_meshBrokers;
    // Coordinators are snapshotted into an immutable sorted set.
    cl.m_coordinators = ImmutableSortedSet.copyOf(m_coordinators);
    cl.m_hostCount = m_hostCount;
    cl.m_enableAdd = m_enableAdd;
    cl.m_voltdbRoot = m_voltdbRoot;
    cl.m_newCli = m_newCli;
    cl.m_sslEnable = m_sslEnable;
    cl.m_sslExternal = m_sslExternal;
    cl.m_sslInternal = m_sslInternal;
    cl.m_placementGroup = m_placementGroup;
    // deep copy the property map if it exists
    if (javaProperties != null) {
        cl.javaProperties = new TreeMap<>();
        for (Entry<String, String> e : javaProperties.entrySet()) {
            cl.javaProperties.put(e.getKey(), e.getValue());
        }
    }
    cl.m_missingHostCount = m_missingHostCount;
    return cl;
}
java
/**
 * Wraps two XML expression elements in an "operation" element.
 *
 * If either operand is null the other is returned unchanged (both null
 * returns null); if the operator name or element id is missing, null is
 * returned.
 *
 * @param opName      operator type attribute value
 * @param opElementId unique id attribute for the new element
 * @param first       left operand (may be null)
 * @param second      right operand (may be null)
 * @return the combined element, a lone operand, or null
 */
public static VoltXMLElement mergeTwoElementsUsingOperator(String opName, String opElementId,
        VoltXMLElement first, VoltXMLElement second) {
    if (first == null) {
        return second;
    }
    if (second == null) {
        return first;
    }
    if (opName == null || opElementId == null) {
        return null;
    }

    VoltXMLElement operation = new VoltXMLElement("operation");
    operation.attributes.put("id", opElementId);
    operation.attributes.put("optype", opName);
    operation.children.add(first);
    operation.children.add(second);
    return operation;
}
java
/**
 * Builds the pair of XML elements representing a LIMIT clause: an empty
 * "offset" element followed by a "limit" element carrying the literal limit
 * both as an attribute and as a BIGINT value child.
 *
 * @param limit              the literal limit value
 * @param limitValueElementId id for the value child; null aborts (returns null)
 * @return [offset, limit] elements, or null when no id was supplied
 */
public static List<VoltXMLElement> buildLimitElements(int limit, String limitValueElementId) {
    if (limitValueElementId == null) {
        return null;
    }
    List<VoltXMLElement> retval = new ArrayList<VoltXMLElement>();
    retval.add( new VoltXMLElement("offset") );

    VoltXMLElement limitElement = new VoltXMLElement("limit");
    String strLimit = String.valueOf( limit );
    limitElement.attributes.put( "limit", strLimit );
    // The value child is a non-parameter BIGINT literal.
    limitElement.children.add( buildValueElement( limitValueElementId, false, strLimit, "BIGINT" ) );
    retval.add( limitElement );
    return retval;
}
java
/**
 * Builds a join-condition element comparing a column expression against a
 * parameter placeholder value, combined under the given operator.
 *
 * @param opName              operator type (e.g. "equal")
 * @param leftElement         column-side expression element
 * @param valueParamElementId id for the parameter value element
 * @param opElementId         id for the resulting operation element
 * @return the combined operation element (see mergeTwoElementsUsingOperator
 *         for null handling)
 */
public static VoltXMLElement buildColumnParamJoincondElement(String opName,
        VoltXMLElement leftElement, String valueParamElementId, String opElementId) {
    VoltXMLElement valueParamElement = buildValueElement(valueParamElementId);
    return mergeTwoElementsUsingOperator(opName, opElementId, leftElement, valueParamElement);
}
java
/**
 * Builds a "parameter" XML element with id, index, and value-type
 * attributes.
 *
 * @param elementId unique element id
 * @param index     parameter position, as a string
 * @param valueType SQL value type name
 * @return the populated parameter element
 */
public static VoltXMLElement buildParamElement(String elementId, String index, String valueType) {
    VoltXMLElement param = new VoltXMLElement("parameter");

    param.attributes.put("id", elementId);
    param.attributes.put("index", index);
    param.attributes.put("valuetype", valueType);

    return param;
}
java
/**
 * Reconstructs this index-scan node from its JSON plan representation:
 * lookup type, sort direction, optional offset-rank flag, purpose, target
 * index (resolved against the catalog), and the various expression fields
 * (end, initial, search keys, compare-not-distinct flags, skip-null
 * predicate).
 *
 * @param jobj serialized plan node
 * @param db   catalog database used to resolve the target index
 * @throws JSONException on malformed input
 */
@Override
public void loadFromJSONObject( JSONObject jobj, Database db ) throws JSONException {
    super.loadFromJSONObject(jobj, db);

    m_lookupType = IndexLookupType.get( jobj.getString( Members.LOOKUP_TYPE.name() ) );
    m_sortDirection = SortDirectionType.get( jobj.getString( Members.SORT_DIRECTION.name() ) );
    // Optional field: absent means no offset-rank optimization.
    if (jobj.has(Members.HAS_OFFSET_RANK.name())) {
        m_hasOffsetRankOptimization = jobj.getBoolean(Members.HAS_OFFSET_RANK.name());
    }
    m_purpose = jobj.has(Members.PURPOSE.name()) ?
            jobj.getInt(Members.PURPOSE.name()) : FOR_SCANNING_PERFORMANCE_OR_ORDERING;
    m_targetIndexName = jobj.getString(Members.TARGET_INDEX_NAME.name());
    m_catalogIndex = db.getTables().get(super.m_targetTableName).getIndexes().get(m_targetIndexName);
    // load end_expression
    m_endExpression = AbstractExpression.fromJSONChild(jobj, Members.END_EXPRESSION.name(), m_tableScan);

    // load initial_expression
    m_initialExpression = AbstractExpression.fromJSONChild(jobj, Members.INITIAL_EXPRESSION.name(), m_tableScan);

    // load searchkey_expressions
    AbstractExpression.loadFromJSONArrayChild(m_searchkeyExpressions, jobj,
            Members.SEARCHKEY_EXPRESSIONS.name(), m_tableScan);
    // load COMPARE_NOTDISTINCT flag vector
    loadBooleanArrayFromJSONObject(jobj, Members.COMPARE_NOTDISTINCT.name(), m_compareNotDistinct);

    // load skip_null_predicate
    m_skip_null_predicate = AbstractExpression.fromJSONChild(jobj, Members.SKIP_NULL_PREDICATE.name(), m_tableScan);
}
java
public boolean isPredicatesOptimizableForAggregate() { // for reverse scan, need to examine "added" predicates List<AbstractExpression> predicates = ExpressionUtil.uncombinePredicate(m_predicate); // if the size of predicates doesn't equal 1, can't be our added artifact predicates if (predicates.size() != 1) { return false; } // examin the possible "added" predicates: NOT NULL expr. AbstractExpression expr = predicates.get(0); if (expr.getExpressionType() != ExpressionType.OPERATOR_NOT) { return false; } if (expr.getLeft().getExpressionType() != ExpressionType.OPERATOR_IS_NULL) { return false; } // Not reverse scan. if (m_lookupType != IndexLookupType.LT && m_lookupType != IndexLookupType.LTE) { return false; } return true; }
java