code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Wraps every task in the given collection via {@code wrapTask} and returns
 * the wrapped tasks as an immutable list, preserving iteration order.
 *
 * @param tasks the callables to wrap; not modified
 * @return an ImmutableList of the wrapped callables
 */
private <T> ImmutableList<Callable<T>> wrapTasks(Collection<? extends Callable<T>> tasks) {
    // 'final' dropped from the declaration: private methods cannot be
    // overridden, so the modifier was redundant.
    ImmutableList.Builder<Callable<T>> builder = ImmutableList.builder();
    for (Callable<T> task : tasks) {
        builder.add(wrapTask(task));
    }
    return builder.build();
}
java
/**
 * (Re)loads procedure runners from the given catalog context. System
 * procedures are always reloaded; user procedures are loaded from the
 * catalog jar on init/replay, or taken pre-prepared from the context on a
 * live catalog update. Any failure loading user procedures crashes the
 * local VoltDB node.
 *
 * @param catalogContext source of default procs, planner tool and catalog
 * @param isInitOrReplay true during initialization or command-log replay
 */
public void loadProcedures(CatalogContext catalogContext, boolean isInitOrReplay) {
    m_defaultProcManager = catalogContext.m_defaultProcs;
    // default proc caches clear on catalog update
    m_defaultProcCache.clear();
    m_plannerTool = catalogContext.m_ptool;
    // reload all system procedures from beginning
    m_sysProcs = loadSystemProcedures(catalogContext, m_site);
    try {
        if (isInitOrReplay) {
            // reload user procedures
            m_userProcs = loadUserProcedureRunners(catalogContext.database.getProcedures(),
                    catalogContext.getCatalogJar().getLoader(), null, m_site);
        } else {
            // When catalog updates, only user procedures needs to be reloaded.
            m_userProcs = catalogContext.getPreparedUserProcedureRunners(m_site);
        }
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Error trying to load user procedures: " + e.getMessage());
    }
}
java
/**
 * Builds the pattern for one procedure modifier clause: either
 * "ALLOW <comma-list-of-user-names>" or "PARTITION ON TABLE <t> COLUMN <c>"
 * with an optional "PARAMETER <n>" and an optional second
 * "AND ON TABLE <t2> COLUMN <c2> [PARAMETER <n2>]" part for two-partition
 * transactions.
 *
 * @param captureTokens true to capture the interesting tokens as groups,
 *        false to merely recognize the clause
 */
private static SQLPatternPart makeInnerProcedureModifierClausePattern(boolean captureTokens) {
    return SPF.oneOf(
            SPF.clause(
                    SPF.token("allow"),
                    SPF.group(captureTokens, SPF.commaList(SPF.userName()))
            ),
            SPF.clause(
                    SPF.token("partition"), SPF.token("on"), SPF.token("table"),
                    SPF.group(captureTokens, SPF.databaseObjectName()),
                    SPF.token("column"),
                    SPF.group(captureTokens, SPF.databaseObjectName()),
                    SPF.optional(
                            SPF.clause(
                                    SPF.token("parameter"),
                                    SPF.group(captureTokens, SPF.integer())
                            )
                    ),
                    // parse a two-partition transaction clause
                    SPF.optional(
                            SPF.clause(
                                    SPF.token("and"), SPF.token("on"), SPF.token("table"),
                                    SPF.group(captureTokens, SPF.databaseObjectName()),
                                    SPF.token("column"),
                                    SPF.group(captureTokens, SPF.databaseObjectName()),
                                    SPF.optional(
                                            SPF.clause(
                                                    SPF.token("parameter"),
                                                    SPF.group(captureTokens, SPF.integer())
                                            )
                                    )
                            )
                    )
            )
    );
}
java
static SQLPatternPart unparsedProcedureModifierClauses() { // Force the leading space to go inside the repeat block. return SPF.capture(SPF.repeat(makeInnerProcedureModifierClausePattern(false))).withFlags(SQLPatternFactory.ADD_LEADING_SPACE_TO_CHILD); }
java
/**
 * Builds the pattern for one stream modifier clause: either
 * "EXPORT TO TARGET <target>" or "PARTITION ON COLUMN <column>".
 *
 * @param captureTokens true to capture the object names as groups
 */
private static SQLPatternPart makeInnerStreamModifierClausePattern(boolean captureTokens) {
    // EXPORT TO TARGET <target-name>
    SQLPatternPart exportClause = SPF.clause(
            SPF.token("export"), SPF.token("to"), SPF.token("target"),
            SPF.group(captureTokens, SPF.databaseObjectName()));
    // PARTITION ON COLUMN <column-name>
    SQLPatternPart partitionClause = SPF.clause(
            SPF.token("partition"), SPF.token("on"), SPF.token("column"),
            SPF.group(captureTokens, SPF.databaseObjectName()));
    return SPF.oneOf(exportClause, partitionClause);
}
java
private static SQLPatternPart unparsedStreamModifierClauses() { // Force the leading space to go inside the repeat block. return SPF.capture(SPF.repeat(makeInnerStreamModifierClausePattern(false))).withFlags(SQLPatternFactory.ADD_LEADING_SPACE_TO_CHILD); }
java
/**
 * Splits the parameter text of an EXEC command into individual parameter
 * strings. Quoted strings containing whitespace or commas are temporarily
 * replaced by a sentinel so they are not split at their internal separators,
 * then restored in order after splitting.
 *
 * @param paramText raw text following the command/procedure name
 * @return the parameter strings, in order of appearance
 */
private static List<String> parseExecParameters(String paramText) {
    final String SafeParamStringValuePattern = "#(SQL_PARSER_SAFE_PARAMSTRING)";
    // Find all quoted strings.
    // Mask out strings that contain whitespace or commas
    // that must not be confused with parameter separators.
    // "Safe" strings that don't contain these characters don't need to be masked
    // but they DO need to be found and explicitly skipped so that their closing
    // quotes don't trigger a false positive for the START of an unsafe string.
    // Skipping is accomplished by resetting paramText to an offset substring
    // after copying the skipped (or substituted) text to a string builder.
    ArrayList<String> originalString = new ArrayList<>();
    Matcher stringMatcher = SingleQuotedString.matcher(paramText);
    StringBuilder safeText = new StringBuilder();
    while (stringMatcher.find()) {
        // Save anything before the found string.
        safeText.append(paramText.substring(0, stringMatcher.start()));
        String asMatched = stringMatcher.group();
        if (SingleQuotedStringContainingParameterSeparators.matcher(asMatched).matches()) {
            // The matched string is unsafe, provide cover for it in safeText.
            originalString.add(asMatched);
            safeText.append(SafeParamStringValuePattern);
        } else {
            // The matched string is safe. Add it to safeText.
            safeText.append(asMatched);
        }
        paramText = paramText.substring(stringMatcher.end());
        stringMatcher = SingleQuotedString.matcher(paramText);
    }
    // Save anything after the last found string.
    safeText.append(paramText);
    ArrayList<String> params = new ArrayList<>();
    int subCount = 0;
    int neededSubs = originalString.size();
    // Split the params at the separators
    String[] split = safeText.toString().split("[\\s,]+");
    for (String fragment : split) {
        if (fragment.isEmpty()) {
            continue; // ignore effects of leading or trailing separators
        }
        // Replace each substitution in order exactly once.
        if (subCount < neededSubs) {
            // Substituted strings will normally take up an entire parameter,
            // but some cases like parameters containing escaped single quotes
            // may require multiple serial substitutions.
            while (fragment.indexOf(SafeParamStringValuePattern) > -1) {
                fragment = fragment.replace(SafeParamStringValuePattern,
                        originalString.get(subCount));
                ++subCount;
            }
        }
        params.add(fragment);
    }
    // Every masked string must have been restored exactly once.
    assert(subCount == neededSubs);
    return params;
}
java
/**
 * Parses a RECALL command. Returns a result carrying the 0-based recall line
 * number on success, a result carrying an error message for a malformed
 * RECALL, or null when the statement is not a RECALL command at all.
 *
 * @param statement the raw input line
 * @param lineMax the largest valid 0-based history line index
 */
public static ParseRecallResults parseRecallStatement(String statement, int lineMax) {
    Matcher matcher = RecallToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        String lineNumberText = matcher.group(2);
        String error;
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            // In a valid command, both "trailings" groups should be empty.
            if (trailings.equals(";")) {
                try {
                    int line = Integer.parseInt(lineNumberText) - 1;
                    // Out-of-range numbers reuse the NumberFormatException
                    // path to produce the same error message as non-numbers.
                    if (line < 0 || line > lineMax) {
                        throw new NumberFormatException();
                    }
                    // Return the recall line number.
                    return new ParseRecallResults(line);
                } catch (NumberFormatException e) {
                    error = "Invalid RECALL line number argument: '" + lineNumberText + "'";
                }
            }
            // For an invalid form of the command,
            // return an approximation of the garbage input.
            else {
                error = "Invalid RECALL line number argument: '" + lineNumberText + " " + trailings + "'";
            }
        }
        else if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            error = "Incomplete RECALL command. RECALL expects a line number argument.";
        }
        else {
            error = "Invalid RECALL command: a space and line number are required after 'recall'";
        }
        return new ParseRecallResults(error);
    }
    return null;
}
java
/**
 * Parses a FILE command in one of its three forms: plain "file", "file
 * -batch", or "file -inlinebatch <delimiter>". Returns a FileInfo per
 * referenced file (exactly one for inline batch), or null when the input is
 * not a FILE command.
 *
 * @param parentContext the FileInfo of the script containing this command
 * @param statement the raw input line
 * @throws SQLParser.Exception on a missing delimiter or missing file name
 */
public static List<FileInfo> parseFileStatement(FileInfo parentContext, String statement) {
    Matcher fileMatcher = FileToken.matcher(statement);
    if (! fileMatcher.lookingAt()) {
        // This input does not start with FILE,
        // so it's not a file command, it's something else.
        // Return to caller a null and no errors.
        return null;
    }
    String remainder = statement.substring(fileMatcher.end(), statement.length());
    List<FileInfo> filesInfo = new ArrayList<>();
    Matcher inlineBatchMatcher = DashInlineBatchToken.matcher(remainder);
    if (inlineBatchMatcher.lookingAt()) {
        remainder = remainder.substring(inlineBatchMatcher.end(), remainder.length());
        Matcher delimiterMatcher = DelimiterToken.matcher(remainder);
        // use matches here (not lookingAt) because we want to match
        // all of the remainder, not just beginning
        if (delimiterMatcher.matches()) {
            String delimiter = delimiterMatcher.group(1);
            filesInfo.add(new FileInfo(parentContext, FileOption.INLINEBATCH, delimiter));
            return filesInfo;
        }
        throw new SQLParser.Exception(
                "Did not find valid delimiter for \"file -inlinebatch\" command.");
    }
    // It is either a plain or a -batch file command.
    FileOption option = FileOption.PLAIN;
    Matcher batchMatcher = DashBatchToken.matcher(remainder);
    if (batchMatcher.lookingAt()) {
        option = FileOption.BATCH;
        remainder = remainder.substring(batchMatcher.end(), remainder.length());
    }
    // remove spaces before and after filenames
    remainder = remainder.trim();
    // split filenames assuming they are separated by space ignoring spaces within quotes
    // tests for parsing in TestSqlCmdInterface.java
    List<String> filenames = new ArrayList<>();
    Pattern regex = Pattern.compile("[^\\s\']+|'[^']*'");
    Matcher regexMatcher = regex.matcher(remainder);
    while (regexMatcher.find()) {
        filenames.add(regexMatcher.group());
    }
    for (String filename : filenames) {
        Matcher filenameMatcher = FilenameToken.matcher(filename);
        // Use matches to match all input, not just beginning
        if (filenameMatcher.matches()) {
            filename = filenameMatcher.group(1);
            // Trim whitespace from beginning and end of the file name.
            // User may have wanted quoted whitespace at the beginning or end
            // of the file name, but that seems very unlikely.
            filename = filename.trim();
            if (filename.startsWith("~")) {
                filename = filename.replaceFirst("~", System.getProperty("user.home"));
            }
            filesInfo.add(new FileInfo(parentContext, option, filename));
        }
    }
    // If no filename, or a filename of only spaces, then throw an error.
    if ( filesInfo.size() == 0 ) {
        String msg = String.format("Did not find valid file name in \"file%s\" command.",
                option == FileOption.BATCH ? " -batch" : "");
        throw new SQLParser.Exception(msg);
    }
    return filesInfo;
}
java
public static String parseShowStatementSubcommand(String statement) { Matcher matcher = ShowToken.matcher(statement); if (matcher.matches()) { String commandWordTerminator = matcher.group(1); if (OneWhitespace.matcher(commandWordTerminator).matches()) { String trailings = matcher.group(3) + ";" + matcher.group(4); // In a valid command, both "trailings" groups should be empty. if (trailings.equals(";")) { // Return the subcommand keyword -- possibly a valid one. return matcher.group(2); } // For an invalid form of the command, // return an approximation of the garbage input. return matcher.group(2) + " " + trailings; } if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) { return commandWordTerminator; // EOL or ; reached before subcommand } } return null; }
java
public static String parseHelpStatement(String statement) { Matcher matcher = HelpToken.matcher(statement); if (matcher.matches()) { String commandWordTerminator = matcher.group(1); if (OneWhitespace.matcher(commandWordTerminator).matches()) { String trailings = matcher.group(3) + ";" + matcher.group(4); // In a valid command, both "trailings" groups should be empty. if (trailings.equals(";")) { // Return the subcommand keyword -- possibly a valid one. return matcher.group(2); } // For an invalid form of the command, // return an approximation of the garbage input. return matcher.group(2) + " " + trailings; } if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) { return ""; // EOL or ; reached before subcommand } return matcher.group(1).trim(); } return null; }
java
/**
 * Extracts the digit payload from a single-quoted hex literal (X'...').
 *
 * @return the digits between the quotes, or null when paramString is not a
 *         single-quoted hex literal
 */
public static String getDigitsFromHexLiteral(String paramString) {
    Matcher matcher = SingleQuotedHexLiteral.matcher(paramString);
    return matcher.matches() ? matcher.group(1) : null;
}
java
public static long hexDigitsToLong(String hexDigits) throws SQLParser.Exception { // BigInteger.longValue() will truncate to the lowest 64 bits, // so we need to explicitly check if there's too many digits. if (hexDigits.length() > 16) { throw new SQLParser.Exception("Too many hexadecimal digits for BIGINT value"); } if (hexDigits.length() == 0) { throw new SQLParser.Exception("Zero hexadecimal digits is invalid for BIGINT value"); } // The method // Long.parseLong(<digits>, <radix>); // Doesn't quite do what we want---it expects a '-' to // indicate negative values, and doesn't want the sign bit set // in the hex digits. // // Once we support Java 1.8, we can use Long.parseUnsignedLong(<digits>, 16) // instead. long val = new BigInteger(hexDigits, 16).longValue(); return val; }
java
/**
 * Parses an EXEC/EXECUTE call against a known procedure catalog.
 * The procedures map (name -> parameter count -> parameter types) must be
 * non-null on this path; callers that do not need parameter types go through
 * the internal variant with null.
 */
public static ExecuteCallResults parseExecuteCall(
        String statement,
        Map<String,Map<Integer, List<String>>> procedures) throws SQLParser.Exception {
    assert(procedures != null);
    return parseExecuteCallInternal(statement, procedures);
}
java
/**
 * Parses an EXEC/EXECUTE call: extracts the procedure name and parameter
 * strings, then (when a procedure catalog is supplied) resolves the expected
 * parameter types by procedure name and argument count.
 *
 * @return the parse results, or null when the statement is not an EXECUTE
 * @throws SQLParser.Exception for an incomplete/invalid command, an unknown
 *         procedure, or a parameter-count mismatch
 */
private static ExecuteCallResults parseExecuteCallInternal(
        String statement, Map<String,Map<Integer, List<String>>> procedures
        ) throws SQLParser.Exception {
    Matcher matcher = ExecuteCallPreamble.matcher(statement);
    if ( ! matcher.lookingAt()) {
        return null;
    }
    String commandWordTerminator = matcher.group(1);
    if (OneWhitespace.matcher(commandWordTerminator).matches() ||
            // Might as well accept a comma delimiter anywhere in the exec command,
            // even near the start
            commandWordTerminator.equals(",")) {
        ExecuteCallResults results = new ExecuteCallResults();
        String rawParams = statement.substring(matcher.end());
        results.params = parseExecParameters(rawParams);
        // The first "parameter" is actually the procedure name.
        results.procedure = results.params.remove(0);
        // TestSqlCmdInterface passes procedures==null because it
        // doesn't need/want the param types.
        if (procedures == null) {
            results.paramTypes = null;
            return results;
        }
        Map<Integer, List<String>> signature = procedures.get(results.procedure);
        if (signature == null) {
            throw new SQLParser.Exception("Undefined procedure: %s", results.procedure);
        }
        results.paramTypes = signature.get(results.params.size());
        if (results.paramTypes == null || results.params.size() != results.paramTypes.size()) {
            String expectedSizes = "";
            for (Integer expectedSize : signature.keySet()) {
                expectedSizes += expectedSize + ", ";
            }
            throw new SQLParser.Exception(
                    "Invalid parameter count for procedure: %s (expected: %s received: %d)",
                    results.procedure, expectedSizes, results.params.size());
        }
        return results;
    }
    if (commandWordTerminator.equals(";")) {
        // EOL or ; reached before subcommand
        throw new SQLParser.Exception(
                "Incomplete EXECUTE command. EXECUTE requires a procedure name argument.");
    }
    throw new SQLParser.Exception(
            "Invalid EXECUTE command. unexpected input: '" + commandWordTerminator + "'.");
}
java
public static boolean appearsToBeValidDDLBatch(String batch) { BufferedReader reader = new BufferedReader(new StringReader(batch)); String line; try { while ((line = reader.readLine()) != null) { if (isWholeLineComment(line)) { continue; } line = line.trim(); if (line.equals("")) continue; // we have a non-blank line that contains more than just a comment. return queryIsDDL(line); } } catch (IOException e) { // This should never happen for a StringReader assert(false); } // trivial empty batch: no lines are non-blank or non-comments return true; }
java
/**
 * Parses an ECHO command. Returns the text to echo ("" when the command has
 * no argument), or null when the statement is not an ECHO command.
 */
public static String parseEchoStatement(String statement) {
    Matcher matcher = EchoToken.matcher(statement);
    if (!matcher.matches()) {
        return null;
    }
    // A whitespace terminator means the echo text follows in group 2;
    // anything else (EOL, ';') echoes an empty line.
    boolean hasArgument = OneWhitespace.matcher(matcher.group(1)).matches();
    return hasArgument ? matcher.group(2) : "";
}
java
/**
 * Parses an ECHOERROR command. Returns the text to write to stderr ("" when
 * the command has no argument), or null when the statement is not an
 * ECHOERROR command.
 */
public static String parseEchoErrorStatement(String statement) {
    Matcher matcher = EchoErrorToken.matcher(statement);
    if (!matcher.matches()) {
        return null;
    }
    // A whitespace terminator means the message text follows in group 2;
    // anything else (EOL, ';') produces an empty line.
    boolean hasArgument = OneWhitespace.matcher(matcher.group(1)).matches();
    return hasArgument ? matcher.group(2) : "";
}
java
public static String parseDescribeStatement(String statement) { Matcher matcher = DescribeToken.matcher(statement); if (matcher.matches()) { String commandWordTerminator = matcher.group(1); if (OneWhitespace.matcher(commandWordTerminator).matches()) { String trailings = matcher.group(3) + ";" + matcher.group(4); // In a valid command, both "trailings" groups should be empty. if (trailings.equals(";")) { // Return the subcommand keyword -- possibly a valid one. return matcher.group(2); } // For an invalid form of the command, // return an approximation of the garbage input. return matcher.group(2) + " " + trailings; } if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) { return commandWordTerminator; // EOL or ; reached before subcommand } } return null; }
java
/**
 * Resolves ORDER BY items of a UNION query against the union's output
 * columns: an integer literal is treated as a 1-based output column
 * position; a column reference is matched by name. Unresolvable items raise
 * error X_42576.
 *
 * NOTE(review): the method name misspells "References"; renaming would break
 * existing callers, so it is left as-is.
 */
void resolveColumnRefernecesInUnionOrderBy() {
    int orderCount = sortAndSlice.getOrderLength();
    if (orderCount == 0) {
        return;
    }
    String[] unionColumnNames = getColumnNames();
    for (int i = 0; i < orderCount; i++) {
        Expression sort = (Expression) sortAndSlice.exprList.get(i);
        Expression e = sort.getLeftNode();
        if (e.getType() == OpTypes.VALUE) {
            if (e.getDataType().typeCode == Types.SQL_INTEGER) {
                // 1-based ordinal reference into the output columns.
                int index = ((Integer) e.getValue(null)).intValue();
                if (0 < index && index <= unionColumnNames.length) {
                    sort.getLeftNode().queryTableColumnIndex = index - 1;
                    continue;
                }
            }
        } else if (e.getType() == OpTypes.COLUMN) {
            // Named reference, matched against the union column names.
            int index = ArrayUtil.find(unionColumnNames, e.getColumnName());
            if (index >= 0) {
                sort.getLeftNode().queryTableColumnIndex = index;
                continue;
            }
        }
        // Neither a valid ordinal nor a matching name.
        throw Error.error(ErrorCode.X_42576);
    }
    sortAndSlice.prepare(null);
}
java
/**
 * Installs the result column name list: directly on the derived result table
 * when one exists, otherwise delegating to the left query expression.
 */
public void setTableColumnNames(HashMappedList list) {
    if (resultTable != null) {
        ((TableDerived) resultTable).columnList = list;
        return;
    }
    leftQueryExpression.setTableColumnNames(list);
}
java
/**
 * Marks this query expression as the top-level statement result. Statements
 * referencing sequences are rejected (error X_42598) before the result set
 * configuration is applied.
 */
public void setAsTopLevel() {
    if (compileContext.getSequences().length > 0) {
        throw Error.error(ErrorCode.X_42598);
    }
    isTopLevel = true;
    setReturningResultSet();
}
java
/**
 * Configures this expression to return a result set. For a corresponding
 * union the result is session-scoped with unreferenced columns; otherwise
 * the configuration is delegated to the left query expression.
 */
void setReturningResultSet() {
    if (unionCorresponding) {
        persistenceScope = TableBase.SCOPE_SESSION;
        columnMode = TableBase.COLUMNS_UNREFERENCED;
        return;
    }
    leftQueryExpression.setReturningResultSet();
}
java
/**
 * Schedules printStatistics to run periodically on the benchmark scheduler,
 * using the configured display interval (seconds) as both the initial delay
 * and the inter-run delay.
 */
public void schedulePeriodicStats() {
    // Method reference replaces the old anonymous Runnable; the surrounding
    // code already uses Java 8 lambdas.
    m_scheduler.scheduleWithFixedDelay(this::printStatistics,
            m_config.displayinterval,
            m_config.displayinterval,
            TimeUnit.SECONDS);
}
java
/**
 * Fetches final client statistics and prints the workload summary:
 * throughput, optionally the latency percentiles / server latency /
 * histogram (when latency reporting is enabled), then writes the summary
 * CSV if a stats file was configured.
 *
 * @throws Exception when fetching stats or writing the CSV fails
 */
public synchronized void printResults() throws Exception {
    ClientStats stats = m_fullStatsContext.fetch().getStats();
    System.out.print(HORIZONTAL_RULE);
    System.out.println(" Client Workload Statistics");
    System.out.println(HORIZONTAL_RULE);
    System.out.printf("Average throughput: %,9d txns/sec\n", stats.getTxnThroughput());
    if(m_config.latencyreport) {
        System.out.printf("Average latency: %,9.2f ms\n", stats.getAverageLatency());
        System.out.printf("10th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.1));
        System.out.printf("25th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.25));
        System.out.printf("50th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.5));
        System.out.printf("75th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.75));
        System.out.printf("90th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.9));
        System.out.printf("95th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.95));
        System.out.printf("99th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.99));
        System.out.printf("99.5th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.995));
        System.out.printf("99.9th percentile latency: %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.999));
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" System Server Statistics");
        System.out.println(HORIZONTAL_RULE);
        System.out.printf("Reported Internal Avg Latency: %,9.2f ms\n", stats.getAverageInternalLatency());
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" Latency Histogram");
        System.out.println(HORIZONTAL_RULE);
        System.out.println(stats.latencyHistoReport());
    }
    // 4. Write stats to file if requested
    m_client.writeSummaryCSV(stats, m_config.statsfile);
}
java
private void shutdown() { // Stop the stats printer, the bid generator and the nibble deleter. m_scheduler.shutdown(); try { m_scheduler.awaitTermination(60, TimeUnit.SECONDS); } catch (InterruptedException e) { e.printStackTrace(); } try { // block until all outstanding txns return m_client.drain(); // close down the client connections m_client.close(); } catch (IOException | InterruptedException e) { e.printStackTrace(); } }
java
/**
 * Issues one fire-and-forget GetHighestBidForLocation call for a random
 * device id at a random geographic point.
 */
private void requestAd() {
    // Math.floorMod always yields a value in [0, NUM_DEVICES). The old
    // Math.abs(nextLong()) % NUM_DEVICES could produce a negative id when
    // nextLong() returned Long.MIN_VALUE (Math.abs of which is still
    // negative).
    long deviceId = Math.floorMod(m_rand.nextLong(), AdBrokerBenchmark.NUM_DEVICES);
    GeographyPointValue point = getRandomPoint();
    try {
        m_client.callProcedure(new NullCallback(), "GetHighestBidForLocation", deviceId, point);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
java
/**
 * For a subquery that is single-partitioned over partitioned tables (and
 * does not need two fragments), promotes its single-partitioning expression
 * into the parent's value-equivalence map and equivalence sets, one entry
 * per partitioning column.
 *
 * @param valueEquivalence expression -> its equivalence set, updated in place
 * @param eqSets the collection of equivalence sets, updated in place
 */
public void promoteSinglePartitionInfo(
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set< Set<AbstractExpression> > eqSets) {
    assert(getScanPartitioning() != null);
    if (getScanPartitioning().getCountOfPartitionedTables() == 0 ||
            getScanPartitioning().requiresTwoFragments()) {
        return;
    }
    // This subquery is a single partitioned query on partitioned tables
    // promoting the single partition expression up to its parent level.
    AbstractExpression spExpr = getScanPartitioning().singlePartitioningExpression();
    for (SchemaColumn col: m_partitioningColumns) {
        AbstractExpression tveKey = col.getExpression();
        assert(tveKey instanceof TupleValueExpression);
        Set<AbstractExpression> values = null;
        // Reuse an existing equivalence set when one is already registered
        // for the column, the partitioning expression, or a sibling column;
        // otherwise start a fresh set.
        if (valueEquivalence.containsKey(tveKey)) {
            values = valueEquivalence.get(tveKey);
        }
        else if (valueEquivalence.containsKey(spExpr)) {
            values = valueEquivalence.get(spExpr);
        }
        else {
            for (SchemaColumn otherCol: m_partitioningColumns) {
                if (col != otherCol &&
                        valueEquivalence.containsKey(otherCol.getExpression())) {
                    values = valueEquivalence.get(otherCol.getExpression());
                    break;
                }
            }
            if (values == null) {
                values = new HashSet<>();
            }
        }
        updateEqualSets(values, valueEquivalence, eqSets, tveKey, spExpr);
    }
}
java
/**
 * Adds spExpr and tveKey to the given equivalence set and registers the set
 * under both expressions in the value-equivalence map. When the set object
 * is already a member of eqSets it is removed before mutation and re-added
 * afterwards, since adding elements changes the set's hash code.
 */
private void updateEqualSets(Set<AbstractExpression> values,
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set< Set<AbstractExpression> > eqSets,
        AbstractExpression tveKey, AbstractExpression spExpr) {
    boolean hasLegacyValues = false;
    if (eqSets.contains(values)) {
        // Remove before mutating: the set's hash changes with its contents.
        eqSets.remove(values);
        hasLegacyValues = true;
    }
    values.add(spExpr);
    values.add(tveKey);
    if (hasLegacyValues) {
        eqSets.add(values);
    }
    valueEquivalence.put(spExpr, values);
    valueEquivalence.put(tveKey, values);
}
java
/**
 * A subquery counts as replicated only when every table scan in the
 * subquery statement is replicated.
 */
@Override
public boolean getIsReplicated() {
    for (StmtTableScan scan : m_subqueryStmt.allScans()) {
        if (!scan.getIsReplicated()) {
            // A single partitioned scan makes the whole subquery partitioned.
            return false;
        }
    }
    return true;
}
java
/**
 * Builds a TVE referencing the output column at the given position: the
 * derived table's alias doubles as the table name, and the column alias
 * doubles as the column name.
 */
public TupleValueExpression getOutputExpression(int index) {
    SchemaColumn column = getSchemaColumn(index);
    String tableAlias = getTableAlias();
    String columnAlias = column.getColumnAlias();
    return new TupleValueExpression(
            tableAlias, tableAlias, columnAlias, columnAlias, index);
}
java
/**
 * Builds one bound of the range filter that replaces a prefix LIKE:
 * (leftExpr rangeComparator 'comparand'), with the comparand wrapped as a
 * STRING constant sized to its character length.
 */
static private ComparisonExpression rangeFilterFromPrefixLike(AbstractExpression leftExpr,
        ExpressionType rangeComparator, String comparand) {
    ConstantValueExpression bound = new ConstantValueExpression();
    bound.setValueType(VoltType.STRING);
    bound.setValue(comparand);
    bound.setValueSize(comparand.length());
    return new ComparisonExpression(rangeComparator, leftExpr, bound);
}
java
/**
 * Rewrites the table name/alias of every column in this schema in place,
 * keeping each column's own name and alias.
 *
 * NOTE(review): the second forEach mutates the keys of m_columnsMapHelper in
 * place. If that map is hash-based and reset() affects hashCode/equals, key
 * lookups could break afterwards — confirm SchemaColumn hashing ignores the
 * reset fields, or rebuild the map instead.
 *
 * @return this schema, for call chaining
 */
public NodeSchema resetTableName(String tbName, String tbAlias) {
    m_columns.forEach(sc -> sc.reset(tbName, tbAlias, sc.getColumnName(), sc.getColumnAlias()));
    m_columnsMapHelper.forEach((k, v) -> k.reset(tbName, tbAlias, k.getColumnName(), k.getColumnAlias()));
    return this;
}
java
/**
 * Appends a column to the schema and records its position in the
 * column-to-index helper map.
 */
public void addColumn(SchemaColumn column) {
    // The current size is the index the column will occupy after add().
    int size = m_columns.size();
    m_columnsMapHelper.put(column, size);
    m_columns.add(column);
}
java
/**
 * Looks up the stored column matching the given table/column names and
 * aliases.
 *
 * @return the stored SchemaColumn, or null when no match exists
 */
public SchemaColumn find(String tableName, String tableAlias, String columnName, String columnAlias) {
    // Build a probe column carrying the search keys.
    SchemaColumn probe = new SchemaColumn(tableName, tableAlias, columnName, columnAlias);
    int index = findIndexOfColumn(probe);
    return (index == -1) ? null : m_columns.get(index);
}
java
/**
 * Sorts the columns in the half-open range [fromIndex, toIndex) in place by
 * TVE index.
 */
void sortByTveIndex(int fromIndex, int toIndex) {
    // List.sort on the subList view (Java 8) replaces the older
    // Collections.sort helper; the subList writes through to m_columns.
    m_columns.subList(fromIndex, toIndex).sort(TVE_IDX_COMPARE);
}
java
/**
 * Compares two schemas by column names only (ignoring types and sizes).
 *
 * @return true when otherSchema is non-null, has the same length, and every
 *         column pair compares equal by name
 */
public boolean equalsOnlyNames(NodeSchema otherSchema) {
    if (otherSchema == null || otherSchema.size() != size()) {
        return false;
    }
    for (int i = 0; i < size(); ++i) {
        if (otherSchema.getColumn(i).compareNames(m_columns.get(i)) != 0) {
            return false;
        }
    }
    return true;
}
java
/**
 * Returns a copy of this schema in which each column has been replaced by
 * its TVE form at the corresponding position.
 */
NodeSchema copyAndReplaceWithTVE() {
    NodeSchema replaced = new NodeSchema();
    for (int position = 0; position < m_columns.size(); ++position) {
        replaced.addColumn(m_columns.get(position).copyAndReplaceWithTVE(position));
    }
    return replaced;
}
java
public boolean harmonize(NodeSchema otherSchema, String schemaKindName) { if (size() != otherSchema.size()) { throw new PlanningErrorException( "The " + schemaKindName + "schema and the statement output schemas have different lengths."); } boolean changedSomething = false; for (int idx = 0; idx < size(); idx += 1) { SchemaColumn myColumn = getColumn(idx); SchemaColumn otherColumn = otherSchema.getColumn(idx); VoltType myType = myColumn.getValueType(); VoltType otherType = otherColumn.getValueType(); VoltType commonType = myType; if (! myType.canExactlyRepresentAnyValueOf(otherType)) { if (otherType.canExactlyRepresentAnyValueOf(myType)) { commonType = otherType; } else { throw new PlanningErrorException( "The " + schemaKindName + " column type and the statement output type for column " + idx + " are incompatible."); } } if (myType != commonType) { changedSomething = true; myColumn.setValueType(commonType); } // Now determine the length, and the "in bytes" flag if needed assert (myType.isVariableLength() == otherType.isVariableLength()); // The type will be one of: // - fixed size // - VARCHAR (need special logic for bytes vs. chars) // - Some other variable length type int commonSize; if (! myType.isVariableLength()) { commonSize = myType.getLengthInBytesForFixedTypesWithoutCheck(); } else if (myType == VoltType.STRING) { boolean myInBytes = myColumn.getInBytes(); boolean otherInBytes = otherColumn.getInBytes(); if (myInBytes == otherInBytes) { commonSize = Math.max(myColumn.getValueSize(), otherColumn.getValueSize()); } else { // one is in bytes and the other is in characters int mySizeInBytes = (myColumn.getInBytes() ? 1 : 4) * myColumn.getValueSize(); int otherSizeInBytes = (otherColumn.getInBytes() ? 1 : 4) * otherColumn.getValueSize(); if (! 
myColumn.getInBytes()) { myColumn.setInBytes(true); changedSomething = true; } commonSize = Math.max(mySizeInBytes, otherSizeInBytes); if (commonSize > VoltType.MAX_VALUE_LENGTH) { commonSize = VoltType.MAX_VALUE_LENGTH; } } } else { commonSize = Math.max(myColumn.getValueSize(), otherColumn.getValueSize()); } if (commonSize != myColumn.getValueSize()) { myColumn.setValueSize(commonSize); changedSomething = true; } } return changedSomething; }
java
/**
 * Bulk setter used while iterating a histogram: records the value range for
 * this iteration step, the per-value and per-step counts, the cumulative
 * count and value totals, the percentile reached, and the integer-to-double
 * conversion ratio.
 */
void set(final long valueIteratedTo, final long valueIteratedFrom,
        final long countAtValueIteratedTo, final long countInThisIterationStep,
        final long totalCountToThisValue, final long totalValueToThisValue,
        final double percentile, final double percentileLevelIteratedTo,
        double integerToDoubleValueConversionRatio) {
    this.valueIteratedTo = valueIteratedTo;
    this.valueIteratedFrom = valueIteratedFrom;
    this.countAtValueIteratedTo = countAtValueIteratedTo;
    this.countAddedInThisIterationStep = countInThisIterationStep;
    this.totalCountToThisValue = totalCountToThisValue;
    this.totalValueToThisValue = totalValueToThisValue;
    this.percentile = percentile;
    this.percentileLevelIteratedTo = percentileLevelIteratedTo;
    this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio;
}
java
@Override public List<AbstractExpression> bindingToIndexedExpression(AbstractExpression expr) { if (m_originalValue == null || ! m_originalValue.equals(expr)) { return null; } // This parameter's value was matched, so return this as one bound parameter. List<AbstractExpression> result = new ArrayList<AbstractExpression>(); result.add(this); return result; }
java
/**
 * Returns the parameter at the given index, deserializing from the
 * serialized form (on a duplicated buffer, leaving the original position
 * untouched) when present, otherwise reading the materialized ParameterSet.
 *
 * @throws RuntimeException wrapping any failure, e.g. a bad index
 */
Object getParameterAtIndex(int partitionIndex) {
    try {
        if (serializedParams != null) {
            return ParameterSet.getParameterAtIndex(partitionIndex, serializedParams.duplicate());
        } else {
            return params.get().getParam(partitionIndex);
        }
    } catch (Exception ex) {
        throw new RuntimeException("Invalid partitionIndex: " + partitionIndex, ex);
    }
}
java
/**
 * Serializes this invocation in the ORIGINAL wire-protocol layout: version
 * byte, procedure name as varbinary, client handle, then the parameters.
 * Asserts the number of bytes written matches
 * getSerializedSizeForOriginalVersion().
 */
public void flattenToBufferForOriginalVersion(ByteBuffer buf) throws IOException {
    assert((params != null) || (serializedParams != null)); // for self-check assertion
    int startPosition = buf.position();
    buf.put(ProcedureInvocationType.ORIGINAL.getValue());
    SerializationHelper.writeVarbinary(getProcNameBytes(), buf);
    buf.putLong(clientHandle);
    serializeParams(buf);
    int len = buf.position() - startPosition;
    assert(len == getSerializedSizeForOriginalVersion());
}
java
/**
 * Records a submitted (in-flight) offset. The first non-negative offset
 * initializes the committed/safe/submitted state. When the offset would
 * overrun the commit-tracking window, blocks up to m_gapFullWait ms waiting
 * for commit() to advance the safe point.
 *
 * NOTE(review): the wait() is not re-checked in a loop, so a spurious or
 * unrelated wakeup lets the submit proceed early; presumably the timed wait
 * makes this acceptable — confirm.
 */
@Override
public synchronized void submit(long offset) {
    if (submittedOffset == -1L && offset >= 0) {
        // First submission seeds all tracking state at this offset.
        committedOffsets[idx(offset)] = safeOffset = submittedOffset = offset;
    }
    if (firstOffset == -1L) {
        firstOffset = offset;
    }
    if ((offset - safeOffset) >= committedOffsets.length) {
        // Window full: park until commit() frees space or the wait times out.
        offerOffset = offset;
        try {
            wait(m_gapFullWait);
        } catch (InterruptedException e) {
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS, Level.WARN, e,
                    "CommitTracker wait was interrupted for group " + consumerGroup
                    + " topic " + topic + " partition " + partition);
        }
    }
    if (offset > submittedOffset) {
        submittedOffset = offset;
    }
}
java
/**
 * Records a committed offset and advances the safe (contiguously committed)
 * offset as far as possible. If the commit is a full window ahead, the safe
 * point is force-moved (with a warning). Wakes a submitter parked in
 * submit() when the window regains space.
 *
 * NOTE(review): ggap is never decremented inside the advance loop, so that
 * loop is effectively bounded only by the adjacency test on the ring —
 * confirm this is intended.
 *
 * @return the safe offset after this commit
 */
@Override
public synchronized long commit(long offset) {
    if (offset <= submittedOffset && offset > safeOffset) {
        int ggap = (int)Math.min(committedOffsets.length, offset-safeOffset);
        if (ggap == committedOffsets.length) {
            // The ring is full: forcibly slide the safe point forward.
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS ,Level.WARN, null,
                    "CommitTracker moving topic commit point from %d to %d for topic "
                    + topic + " partition " + partition + " group:" + consumerGroup,
                    safeOffset, (offset - committedOffsets.length + 1)
                    );
            safeOffset = offset - committedOffsets.length + 1;
            committedOffsets[idx(safeOffset)] = safeOffset;
        }
        committedOffsets[idx(offset)] = offset;
        // Advance the safe point across contiguous committed offsets.
        while (ggap > 0 && committedOffsets[idx(safeOffset)]+1 == committedOffsets[idx(safeOffset+1)]) {
            ++safeOffset;
        }
        if (offerOffset >=0 && (offerOffset-safeOffset) < committedOffsets.length) {
            // Window has space again: release the parked submitter.
            offerOffset = -1L;
            notify();
        }
    }
    if (offset == firstOffset) {
        firstOffsetCommitted = true;
    }
    return safeOffset;
}
java
/**
 * Rate-limited logging: emits the message at the given level only when at
 * least m_maxLogIntervalMillis has elapsed since the last emission.
 *
 * NOTE(review): this is double-checked locking on m_lastLogTime; unless that
 * field is volatile, the unsynchronized outer read may see a stale value.
 * The worst case appears to be an occasional extra or suppressed log line —
 * confirm that is acceptable.
 */
public void log(long now, Level level, Throwable cause, String stemformat, Object...args) {
    if (now - m_lastLogTime > m_maxLogIntervalMillis) {
        synchronized (this) {
            // Re-check under the lock before formatting and emitting.
            if (now - m_lastLogTime > m_maxLogIntervalMillis) {
                String message = formatMessage(cause, stemformat, args);
                switch(level) {
                    case DEBUG: m_logger.debug(message); break;
                    case ERROR: m_logger.error(message); break;
                    case FATAL: m_logger.fatal(message); break;
                    case INFO: m_logger.info(message); break;
                    case TRACE: m_logger.trace(message); break;
                    case WARN: m_logger.warn(message); break;
                }
                m_lastLogTime = now;
            }
        }
    }
}
java
/**
 * Notifies the rejoin coordinator that this partition has received its first
 * fragment, and records that the response was sent.
 */
private void sendFirstFragResponse() {
    if (ELASTICLOG.isDebugEnabled()) {
        ELASTICLOG.debug("P" + m_partitionId + " sending first fragment response to coordinator "
                + CoreUtils.hsIdToString(m_coordinatorHsId));
    }
    RejoinMessage msg = new RejoinMessage(m_mailbox.getHSId(),
            RejoinMessage.Type.FIRST_FRAGMENT_RECEIVED);
    m_mailbox.send(m_coordinatorHsId, msg);
    m_firstFragResponseSent = true;
}
java
private void runForBlockingDataTransfer(SiteProcedureConnection siteConnection) { boolean sourcesReady = false; RestoreWork restoreWork = m_dataSink.poll(m_snapshotBufferAllocator); if (restoreWork != null) { restoreBlock(restoreWork, siteConnection); sourcesReady = true; } // The completion monitor may fire even if m_dataSink has not reached EOF in the case that there's no // replicated table in the database, so check for both conditions. if (m_dataSink.isEOF() || m_snapshotCompletionMonitor.isDone()) { // No more data from this data sink, close and remove it from the list m_dataSink.close(); if (m_streamSnapshotMb != null) { VoltDB.instance().getHostMessenger().removeMailbox(m_streamSnapshotMb.getHSId()); m_streamSnapshotMb = null; ELASTICLOG.debug(m_whoami + " data transfer is finished"); } if (m_snapshotCompletionMonitor.isDone()) { try { SnapshotCompletionEvent event = m_snapshotCompletionMonitor.get(); siteConnection.setDRProtocolVersion(event.drVersion); assert(event != null); ELASTICLOG.debug("P" + m_partitionId + " noticed data transfer completion"); m_completionAction.setSnapshotTxnId(event.multipartTxnId); setJoinComplete(siteConnection, event.exportSequenceNumbers, event.drSequenceNumbers, event.drMixedClusterSizeConsumerState, false /* requireExistingSequenceNumbers */, event.clusterCreateTime); } catch (InterruptedException e) { // isDone() already returned true, this shouldn't happen VoltDB.crashLocalVoltDB("Impossible interruption happend", true, e); } catch (ExecutionException e) { VoltDB.crashLocalVoltDB("Error waiting for snapshot to finish", true, e); } } else { m_taskQueue.offer(this); } } else { // The sources are not set up yet, don't block the site, // return here and retry later. returnToTaskQueue(sourcesReady); } }
java
/**
 * Resolves a service of the requested type from the bundle identified by
 * the given URI; delegates directly to the bundle registry.
 */
public <T> T getService(URI bundleURI, Class<T> svcClazz) {
    final T service = m_bundles.getService(bundleURI, svcClazz);
    return service;
}
java
// Sets this row's storage position and propagates it to every node
// in the AVL node chain.
public void setPos(int pos) {
    position = pos;
    for (NodeAVL node = nPrimaryNode; node != null; node = node.nNext) {
        ((NodeAVLDisk) node).iData = position;
    }
}
java
// Allocates a fresh chain of disk AVL nodes, one per index on the table.
void setNewNodes() {
    final int indexCount = tTable.getIndexCount();
    nPrimaryNode = new NodeAVLDisk(this, 0);
    NodeAVL tail = nPrimaryNode;
    for (int i = 1; i < indexCount; i++) {
        tail.nNext = new NodeAVLDisk(this, i);
        tail = tail.nNext;
    }
}
java
// Serializes this row: the node chain first, then — only when the row data
// itself changed — the column data and end marker.
// NOTE(review): IOException is silently swallowed, so a failed write is
// invisible to callers and hasDataChanged may stay true; this looks like a
// deliberate best-effort in the original code — confirm before changing.
public void write(RowOutputInterface out) {
    try {
        writeNodes(out);
        if (hasDataChanged) {
            out.writeData(rowData, tTable.colTypes);
            out.writeEnd();
            hasDataChanged = false;
        }
    } catch (IOException e) {}
}
java
// Writes the row's storage size followed by each AVL node in the chain,
// then clears the nodes-changed flag.
private void writeNodes(RowOutputInterface out) throws IOException {
    out.writeSize(storageSize);
    for (NodeAVL node = nPrimaryNode; node != null; node = node.nNext) {
        node.write(out);
    }
    hasNodesChanged = false;
}
java
// Serializes this exception into the buffer as:
//   [int total-size-minus-4][byte type ordinal][int msg length][msg bytes][subclass payload]
// NOTE(review): m_message.getBytes() uses the platform default charset; if
// the deserializer assumes UTF-8 this is a latent encoding bug — confirm.
public void serializeToBuffer(ByteBuffer b) {
    assert(getSerializedSize() <= b.remaining());
    b.putInt(getSerializedSize() - 4);
    b.put((byte)getExceptionType().ordinal());
    if (m_message != null) {
        final byte messageBytes[] = m_message.getBytes();
        b.putInt(messageBytes.length);
        b.put(messageBytes);
    } else {
        // No message: record a zero length so the layout stays fixed.
        b.putInt(0);
    }
    // Subclass-specific payload.
    p_serializeToBuffer(b);
}
java
// Appends the three columns common to every stats table:
// collection timestamp, host id, and host name.
protected void populateColumnSchema(ArrayList<ColumnInfo> columns) {
    columns.add(new ColumnInfo("TIMESTAMP", VoltType.BIGINT));
    columns.add(new ColumnInfo(VoltSystemProcedure.CNAME_HOST_ID, VoltSystemProcedure.CTYPE_ID));
    columns.add(new ColumnInfo("HOSTNAME", VoltType.STRING));
}
java
/**
 * Materializes one stats row per key produced by the row-key iterator,
 * stamping the shared timestamp first.
 */
public Object[][] getStatsRows(boolean interval, final Long now) {
    this.now = now;
    /*
     * Synchronizing on this allows derived classes to maintain thread safety
     */
    synchronized (this) {
        final ArrayList<Object[]> rows = new ArrayList<Object[]>();
        for (Iterator<Object> keys = getStatsRowKeyIterator(interval); keys.hasNext(); ) {
            final Object[] values = new Object[columns.size()];
            updateStatsRow(keys.next(), values);
            rows.add(values);
        }
        return rows.toArray(new Object[0][]);
    }
}
java
// Fills in the leading columns shared by all stats rows: timestamp,
// host id, and hostname (matching populateColumnSchema's column order).
protected void updateStatsRow(Object rowKey, Object rowValues[]) {
    rowValues[0] = now;
    rowValues[1] = m_hostId;
    rowValues[2] = m_hostname;
}
java
@Override public long deserialize(DataTree dt, Map<Long, Long> sessions) throws IOException { // we run through 100 snapshots (not all of them) // if we cannot get it running within 100 snapshots // we should give up List<File> snapList = findNValidSnapshots(100); if (snapList.size() == 0) { return -1L; } File snap = null; boolean foundValid = false; for (int i = 0; i < snapList.size(); i++) { snap = snapList.get(i); InputStream snapIS = null; CheckedInputStream crcIn = null; try { LOG.info("Reading snapshot " + snap); snapIS = new BufferedInputStream(new FileInputStream(snap)); crcIn = new CheckedInputStream(snapIS, new Adler32()); InputArchive ia = BinaryInputArchive.getArchive(crcIn); deserialize(dt,sessions, ia); long checkSum = crcIn.getChecksum().getValue(); long val = ia.readLong("val"); if (val != checkSum) { throw new IOException("CRC corruption in snapshot : " + snap); } foundValid = true; break; } catch(IOException e) { LOG.warn("problem reading snap file " + snap, e); } finally { if (snapIS != null) snapIS.close(); if (crcIn != null) crcIn.close(); } } if (!foundValid) { throw new IOException("Not able to find valid snapshots in " + snapDir); } dt.lastProcessedZxid = Util.getZxidFromName(snap.getName(), "snapshot"); return dt.lastProcessedZxid; }
java
// Reads a serialized snapshot from the archive into the data tree:
// validates the file-header magic number, then delegates the actual tree
// and session deserialization to SerializeUtils.
public void deserialize(DataTree dt, Map<Long, Long> sessions,
        InputArchive ia) throws IOException {
    FileHeader header = new FileHeader();
    header.deserialize(ia, "fileheader");
    if (header.getMagic() != SNAP_MAGIC) {
        throw new IOException("mismatching magic headers "
                + header.getMagic() + " != " + FileSnap.SNAP_MAGIC);
    }
    SerializeUtils.deserializeSnapshot(dt,ia,sessions);
}
java
// Returns the most recent valid snapshot file, or null if none exists.
@Override
public File findMostRecentSnapshot() throws IOException {
    final List<File> files = findNValidSnapshots(1);
    return files.isEmpty() ? null : files.get(0);
}
java
// Returns up to n snapshot files in the order produced by
// Util.sortDataDir; unlike findNValidSnapshots, does no validation.
public List<File> findNRecentSnapshots(int n) throws IOException {
    final List<File> sorted = Util.sortDataDir(snapDir.listFiles(), "snapshot", false);
    final List<File> result = new ArrayList<File>();
    for (File f : sorted) {
        if (result.size() == n) {
            break;
        }
        result.add(f);
    }
    return result;
}
java
// Writes the snapshot file header followed by the full data tree and
// session table.
protected void serialize(DataTree dt,Map<Long, Long> sessions,
        OutputArchive oa, FileHeader header) throws IOException {
    // this is really a programmatic error and not something that can
    // happen at runtime
    if(header==null)
        throw new IllegalStateException(
                "Snapshot's not open for writing: uninitialized header");
    header.serialize(oa, "fileheader");
    SerializeUtils.serializeSnapshot(dt,oa,sessions);
}
java
@Override public synchronized void serialize(DataTree dt, Map<Long, Long> sessions, File snapShot) throws IOException { if (!close) { OutputStream sessOS = new BufferedOutputStream(new FileOutputStream(snapShot)); CheckedOutputStream crcOut = new CheckedOutputStream(sessOS, new Adler32()); //CheckedOutputStream cout = new CheckedOutputStream() OutputArchive oa = BinaryOutputArchive.getArchive(crcOut); FileHeader header = new FileHeader(SNAP_MAGIC, VERSION, dbId); serialize(dt,sessions,oa, header); long val = crcOut.getChecksum().getValue(); oa.writeLong(val, "val"); oa.writeString("/", "path"); sessOS.flush(); crcOut.close(); sessOS.close(); } }
java
/**
 * Takes a system statistics sample right now and appends it to the
 * short-term history — optionally also to the medium and long-term
 * histories — trimming each to at most historySize entries.
 * Returns the sample, or null if one could not be produced.
 */
public static Datum sampleSystemNow(final boolean medium, final boolean large) {
    final Datum sample = generateCurrentSample();
    if (sample == null) {
        return null;
    }
    historyS.addLast(sample);
    if (historyS.size() > historySize) {
        historyS.removeFirst();
    }
    if (medium) {
        historyM.addLast(sample);
        if (historyM.size() > historySize) {
            historyM.removeFirst();
        }
    }
    if (large) {
        historyL.addLast(sample);
        if (historyL.size() > historySize) {
            historyL.removeFirst();
        }
    }
    return sample;
}
java
public static synchronized void asyncSampleSystemNow(final boolean medium, final boolean large) { // slow mode starts an async thread if (mode == GetRSSMode.PS) { if (thread != null) { if (thread.isAlive()) return; else thread = null; } thread = new Thread(new Runnable() { @Override public void run() { sampleSystemNow(medium, large); } }); thread.start(); } // fast mode doesn't spawn a thread else { sampleSystemNow(medium, large); } }
java
// One-time setup for stats collection: determines this JVM's pid, total
// RAM, and the cheapest working strategy for reading resident set size.
private static synchronized void initialize() {
    PlatformProperties pp = PlatformProperties.getPlatformProperties();

    // The runtime MXBean name is conventionally "pid@hostname".
    String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    String pidString = processName.substring(0, processName.indexOf('@'));
    pid = Integer.valueOf(pidString);
    initialized = true;

    // get the RSS and other stats from scraping "ps" from the command line
    PSScraper.PSData psdata = PSScraper.getPSData(pid);
    assert(psdata.rss > 0);

    // figure out how much memory this thing has
    memorysize = pp.ramInMegabytes;
    assert(memorysize > 0);

    // now try to figure out the best way to get the rss size
    long rss = -1;

    // try the mac method
    try {
        rss = ExecutionEngine.nativeGetRSS();
    }
    // This catch is broad to specifically include the UnsatisfiedLinkError that arises when
    // using the hsqldb backend on linux -- along with any other exceptions that might arise.
    // Otherwise, the hsql backend would get an annoying report to stdout
    // as the useless stats thread got needlessly killed.
    catch (Throwable e) { }
    if (rss > 0) mode = GetRSSMode.MACOSX_NATIVE;

    // try procfs — checked after the native path, so on a system where both
    // work procfs wins. NOTE(review): presumably an intentional preference
    // ordering; confirm before reordering.
    rss = getRSSFromProcFS();
    if (rss > 0) mode = GetRSSMode.PROCFS;

    // notify users if stats collection might be slow
    if (mode == GetRSSMode.PS) {
        VoltLogger logger = new VoltLogger("HOST");
        logger.warn("System statistics will be collected in a sub-optimal "
                + "manner because either procfs couldn't be read from or "
                + "the native library couldn't be loaded.");
    }
}
java
/**
 * Reads this process's resident set size from /proc/&lt;pid&gt;/stat.
 * Field index 23 of the stat line is rss in pages; the conversion assumes
 * 4 KB pages (typical Linux). Returns -1 on any failure, e.g. on systems
 * without procfs.
 *
 * Improvement: try-with-resources replaces the manual try/finally close.
 */
private static long getRSSFromProcFS() {
    try (FileInputStream fis =
            new FileInputStream(new File(String.format("/proc/%d/stat", pid)))) {
        BufferedReader r = new BufferedReader(new InputStreamReader(fis));
        String stats = r.readLine();
        String[] parts = stats.split(" ");
        // NOTE(review): 4 * 1024 assumes a 4096-byte page size — wrong under
        // huge or nonstandard page configurations.
        return Long.parseLong(parts[23]) * 4 * 1024;
    } catch (Exception e) {
        // Any failure (no procfs, short line, parse error) means "unknown".
        return -1;
    }
}
java
// Builds one statistics sample using the RSS-reading strategy selected by
// initialize(); initializes lazily on first use.
private static synchronized Datum generateCurrentSample() {
    // Code used to fake system statistics by tests
    if (testStatsProducer!=null) {
        return testStatsProducer.getCurrentStatsData();
    }

    // get this info once
    if (!initialized) initialize();

    long rss = -1;
    switch (mode) {
        case MACOSX_NATIVE:
            rss = ExecutionEngine.nativeGetRSS();
            break;
        case PROCFS:
            rss = getRSSFromProcFS();
            break;
        case PS:
            rss = PSScraper.getPSData(pid).rss;
            break;
    }

    // create a new Datum which adds java stats
    Datum d = new Datum(rss);
    return d;
}
java
/**
 * Renders the recent memory-usage history (used Java heap, RSS, and
 * RSS + unused Java headroom) as a Google Chart URL.
 *
 * BUGFIX: the byte-to-megabyte divisors were written as {@code 1204 * 1024}
 * (a typo for {@code 1024 * 1024}), overstating values by ~1.7%.
 */
public static synchronized String getGoogleChartURL(int minutes, int width, int height, String timeLabel) {
    // Pick the history granularity appropriate for the requested window.
    ArrayDeque<Datum> history = historyS;
    if (minutes > 2) history = historyM;
    if (minutes > 30) history = historyL;

    HTMLChartHelper chart = new HTMLChartHelper();
    chart.width = width;
    chart.height = height;
    chart.timeLabel = timeLabel;

    HTMLChartHelper.DataSet Jds = new HTMLChartHelper.DataSet();
    chart.data.add(Jds);
    Jds.title = "UsedJava";
    Jds.belowcolor = "ff9999";

    HTMLChartHelper.DataSet Rds = new HTMLChartHelper.DataSet();
    chart.data.add(Rds);
    Rds.title = "RSS";
    Rds.belowcolor = "ff0000";

    HTMLChartHelper.DataSet RUds = new HTMLChartHelper.DataSet();
    chart.data.add(RUds);
    RUds.title = "RSS+UnusedJava";
    RUds.dashlength = 6;
    RUds.spacelength = 3;
    RUds.thickness = 2;
    RUds.belowcolor = "ffffff";

    // Drop samples older than the requested window; bucket timestamps into
    // 30 buckets across the window.
    long cropts = System.currentTimeMillis();
    cropts -= (60 * 1000 * minutes);
    long modulo = (60 * 1000 * minutes) / 30;

    double maxmemdatum = 0;

    for (Datum d : history) {
        if (d.timestamp < cropts) continue;

        double javaused = d.javausedheapmem + d.javausedsysmem;
        double javaunused = SystemStatsCollector.javamaxheapmem - d.javausedheapmem;
        // bytes -> megabytes (was "/= 1204 * 1024")
        javaused /= 1024 * 1024;
        javaunused /= 1024 * 1024;
        double rss = d.rss / 1024 / 1024;
        long ts = (d.timestamp / modulo) * modulo;

        if ((rss + javaunused) > maxmemdatum)
            maxmemdatum = rss + javaunused;

        RUds.append(ts, rss + javaunused);
        Rds.append(ts, rss);
        Jds.append(ts, javaused);
    }

    // Round the y-axis maximum up to the next power-of-two megabytes.
    chart.megsMax = 2;
    while (chart.megsMax < maxmemdatum)
        chart.megsMax *= 2;

    return chart.getURL(minutes);
}
java
// Micro-benchmark entry point: measures latency and success rate of the
// three RSS-sampling strategies ("ps" scrape, procfs read, native call).
public static void main(String[] args) {
    int repeat = 1000;
    long start, duration, correct;
    double per;

    // pid from the runtime MXBean name, conventionally "pid@hostname"
    String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    String pidString = processName.substring(0, processName.indexOf('@'));
    pid = Integer.valueOf(pidString);

    // ETHAN (11/7/2018): If loading the native library does not have to succeed, why load?
    // org.voltdb.NativeLibraryLoader.loadVoltDB(false);

    // test the default fallback performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = PSScraper.getPSData(pid).rss;
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per \"ps\" call / %d / %d correct\n", per, correct, repeat);

    // test linux procfs performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = getRSSFromProcFS();
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per procfs read / %d / %d correct\n", per, correct, repeat);

    // test mac performance
    start = System.currentTimeMillis();
    correct = 0;
    for (int i = 0; i < repeat; i++) {
        long rss = ExecutionEngine.nativeGetRSS();
        if (rss > 0) correct++;
    }
    duration = System.currentTimeMillis() - start;
    per = duration / (double) repeat;
    System.out.printf("%.2f ms per ee.nativeGetRSS call / %d / %d correct\n", per, correct, repeat);
}
java
// Rolls back the tail of the session's row action list (indices start..end)
// to the state as of the given timestamp, then merges and discards the
// rolled-back actions.
void rollbackPartial(Session session, int start, long timestamp) {
    Object[] list = session.rowActionList.getArray();
    int limit = session.rowActionList.size();

    if (start == limit) {
        return;
    }

    for (int i = start; i < limit; i++) {
        RowAction action = (RowAction) list[i];

        if (action != null) {
            action.rollback(session, timestamp);
        } else {
            // NOTE(review): a null entry here is unexpected — this println is
            // diagnostic output left in by the original author.
            System.out.println("null action in rollback " + start);
        }
    }

    // rolled back transactions can always be merged as they have never been
    // seen by other sessions
    mergeRolledBackTransaction(session.rowActionList.getArray(), start, limit);
    rowActionMapRemoveTransaction(session.rowActionList.getArray(), start, limit, false);
    session.rowActionList.setSize(start);
}
java
// A row with no pending action is visible to every session; otherwise
// visibility is decided by the action itself.
public boolean canRead(Session session, Row row) {
    synchronized (row) {
        final RowAction action = row.rowAction;
        return action == null || action.canRead(session);
    }
}
java
// Attaches any pending transaction action (looked up by row position) to a
// row freshly fetched from storage; no-op if the row already carries one.
public void setTransactionInfo(CachedObject object) {
    final Row row = (Row) object;
    if (row.rowAction == null) {
        row.rowAction = (RowAction) rowActionMap.get(row.position);
    }
}
java
// Merges the rollback of each action in list[start..limit) into its row,
// fetching disk-backed rows from the session's row store when the action
// has no in-memory row. Actions that did nothing (or are final deletes)
// are skipped.
void mergeRolledBackTransaction(Object[] list, int start, int limit) {
    for (int i = start; i < limit; i++) {
        RowAction rowact = (RowAction) list[i];

        if (rowact == null || rowact.type == RowActionBase.ACTION_NONE
                || rowact.type == RowActionBase.ACTION_DELETE_FINAL) {
            continue;
        }

        Row row = rowact.memoryRow;

        if (row == null) {
            PersistentStore store =
                rowact.session.sessionData.getRowStore(rowact.table);

            row = (Row) store.get(rowact.getPos(), false);
        }

        // NOTE(review): a vanished row is silently skipped — presumably it
        // was already discarded; confirm.
        if (row == null) {
            continue;
        }

        synchronized (row) {
            rowact.mergeRollback(row);
        }
    }

    // } catch (Throwable t) {
    //     System.out.println("throw in merge");
    //     t.printStackTrace();
    // }
}
java
// Appends a committed transaction's action list and its commit timestamp to
// the committed queues; both queues are kept in commit-timestamp order and
// are guarded by the timestamps deque's monitor.
void addToCommittedQueue(Session session, Object[] list) {

    synchronized (committedTransactionTimestamps) {

        // add the txList according to commit timestamp
        committedTransactions.addLast(list);

        // get session commit timestamp
        committedTransactionTimestamps.addLast(session.actionTimestamp);
/* debug 190
        if (committedTransactions.size() > 64) {
            System.out.println("******* excessive transaction queue");
        }
// debug 190 */
    }
}
java
// Drains committed transactions whose commit timestamp precedes the oldest
// live transaction — no live transaction can still see their pre-images —
// merging each into the row store and removing its map entries.
void mergeExpiredTransactions(Session session) {

    long timestamp = getFirstLiveTransactionTimestamp();

    while (true) {
        long commitTimestamp = 0;
        Object[] actions = null;

        synchronized (committedTransactionTimestamps) {
            if (committedTransactionTimestamps.isEmpty()) {
                break;
            }

            commitTimestamp = committedTransactionTimestamps.getFirst();

            // Only transactions older than every live transaction can be merged.
            if (commitTimestamp < timestamp) {
                committedTransactionTimestamps.removeFirst();

                actions = (Object[]) committedTransactions.removeFirst();
            } else {
                break;
            }
        }

        mergeTransaction(session, actions, 0, actions.length, commitTimestamp);
        rowActionMapRemoveTransaction(actions, 0, actions.length, true);
    }
}
java
/**
 * Ends a session's transaction: clears its live-transaction timestamp and
 * merges any committed transactions no live transaction can still see.
 *
 * FIX: acquire writeLock BEFORE entering the try block — in the original, a
 * failing {@code lock()} would still run the finally and {@code unlock()}
 * a lock that was never held.
 */
void endTransaction(Session session) {
    writeLock.lock();
    try {
        long timestamp = session.transactionTimestamp;

        synchronized (liveTransactionTimestamps) {
            session.isTransaction = false;

            int index = liveTransactionTimestamps.indexOf(timestamp);
            liveTransactionTimestamps.remove(index);
        }

        mergeExpiredTransactions(session);
    } finally {
        writeLock.unlock();
    }
}
java
// Collects the row actions of every session into a single array ordered by
// action timestamp (a k-way merge across the per-session lists, which are
// each already timestamp-ordered).
RowAction[] getRowActionList() {

    try {
        writeLock.lock();

        Session[]   sessions = database.sessionManager.getAllSessions();
        // per-session cursor into its rowActionList
        int[]       tIndex   = new int[sessions.length];
        RowAction[] rowActions;
        int         rowActionCount = 0;

        {
            // Pre-size the result to the total number of actions.
            int actioncount = 0;

            for (int i = 0; i < sessions.length; i++) {
                actioncount += sessions[i].getTransactionSize();
            }

            rowActions = new RowAction[actioncount];
        }

        while (true) {
            boolean found       = false;
            long    minChangeNo = Long.MAX_VALUE;
            int     sessionIndex = 0;

            // find the lowest available SCN across all sessions
            for (int i = 0; i < sessions.length; i++) {
                int tSize = sessions[i].getTransactionSize();

                if (tIndex[i] < tSize) {
                    RowAction current =
                        (RowAction) sessions[i].rowActionList.get(tIndex[i]);

                    if (current.actionTimestamp < minChangeNo) {
                        minChangeNo  = current.actionTimestamp;
                        sessionIndex = i;
                    }

                    found = true;
                }
            }

            if (!found) {
                break;
            }

            HsqlArrayList currentList = sessions[sessionIndex].rowActionList;

            // Drain this session's run of consecutive timestamps before
            // re-scanning for the next minimum.
            for (; tIndex[sessionIndex] < currentList.size(); ) {
                RowAction current =
                    (RowAction) currentList.get(tIndex[sessionIndex]);

                // if the next change no is in this session, continue adding
                if (current.actionTimestamp == minChangeNo + 1) {
                    minChangeNo++;
                }

                if (current.actionTimestamp == minChangeNo) {
                    rowActions[rowActionCount++] = current;

                    tIndex[sessionIndex]++;
                } else {
                    break;
                }
            }
        }

        return rowActions;
    } finally {
        writeLock.unlock();
    }
}
java
// Builds a searchable index keyed by the positions of all rows that
// currently have pending transaction actions.
public DoubleIntIndex getTransactionIDList() {
    writeLock.lock();
    try {
        final DoubleIntIndex lookup = new DoubleIntIndex(10, false);
        lookup.setKeysSearchTarget();

        final Iterator it = this.rowActionMap.keySet().iterator();
        while (it.hasNext()) {
            lookup.addUnique(it.nextInt(), 0);
        }
        return lookup;
    } finally {
        writeLock.unlock();
    }
}
java
// Rewrites the positions of all pending row actions using the supplied
// old-position -> new-position lookup, rebuilding the action map keyed by
// the new positions. Used after row positions change (e.g. defrag).
public void convertTransactionIDs(DoubleIntIndex lookup) {
    writeLock.lock();
    try {
        // Snapshot all actions before clearing the map.
        RowAction[] list = new RowAction[rowActionMap.size()];
        Iterator it = this.rowActionMap.values().iterator();

        for (int i = 0; it.hasNext(); i++) {
            list[i] = (RowAction) it.next();
        }

        rowActionMap.clear();

        for (int i = 0; i < list.length; i++) {
            int pos = lookup.lookupFirstEqual(list[i].getPos());

            list[i].setPos(pos);
            rowActionMap.put(pos, list[i]);
        }
    } finally {
        writeLock.unlock();
    }
}
java
// Factory: maps a wire-format message-type id to a fresh, empty message
// instance ready for deserialization. Returns null for unknown ids.
@Override
protected VoltMessage instantiate_local(byte messageType) {
    // instantiate a new message instance according to the id
    VoltMessage message = null;

    switch (messageType) {
        case INITIATE_TASK_ID:
            message = new InitiateTaskMessage();
            break;
        case INITIATE_RESPONSE_ID:
            message = new InitiateResponseMessage();
            break;
        case FRAGMENT_TASK_ID:
            message = new FragmentTaskMessage();
            break;
        case FRAGMENT_RESPONSE_ID:
            message = new FragmentResponseMessage();
            break;
        case PARTICIPANT_NOTICE_ID:
            message = new MultiPartitionParticipantMessage();
            break;
        case COALESCED_HEARTBEAT_ID:
            message = new CoalescedHeartbeatMessage();
            break;
        case COMPLETE_TRANSACTION_ID:
            message = new CompleteTransactionMessage();
            break;
        case COMPLETE_TRANSACTION_RESPONSE_ID:
            message = new CompleteTransactionResponseMessage();
            break;
        case IV2_INITIATE_TASK_ID:
            message = new Iv2InitiateTaskMessage();
            break;
        case IV2_REPAIR_LOG_REQUEST:
            message = new Iv2RepairLogRequestMessage();
            break;
        case IV2_REPAIR_LOG_RESPONSE:
            message = new Iv2RepairLogResponseMessage();
            break;
        case REJOIN_RESPONSE_ID:
            message = new RejoinMessage();
            break;
        case REJOIN_DATA_ID:
            message = new RejoinDataMessage();
            break;
        case REJOIN_DATA_ACK_ID:
            message = new RejoinDataAckMessage();
            break;
        case FRAGMENT_TASK_LOG_ID:
            message = new FragmentTaskLogMessage();
            break;
        case IV2_LOG_FAULT_ID:
            message = new Iv2LogFaultMessage();
            break;
        case IV2_EOL_ID:
            message = new Iv2EndOfLogMessage();
            break;
        case DUMP:
            message = new DumpMessage();
            break;
        case MP_REPLAY_ID:
            message = new MpReplayMessage();
            break;
        case MP_REPLAY_ACK_ID:
            message = new MpReplayAckMessage();
            break;
        case SNAPSHOT_CHECK_REQUEST_ID:
            message = new SnapshotCheckRequestMessage();
            break;
        case SNAPSHOT_CHECK_RESPONSE_ID:
            message = new SnapshotCheckResponseMessage();
            break;
        case IV2_REPAIR_LOG_TRUNCATION:
            message = new RepairLogTruncationMessage();
            break;
        case DR2_MULTIPART_TASK_ID:
            message = new Dr2MultipartTaskMessage();
            break;
        case DR2_MULTIPART_RESPONSE_ID:
            message = new Dr2MultipartResponseMessage();
            break;
        case DUMMY_TRANSACTION_TASK_ID:
            message = new DummyTransactionTaskMessage();
            break;
        case DUMMY_TRANSACTION_RESPONSE_ID:
            message = new DummyTransactionResponseMessage();
            break;
        case Migrate_Partition_Leader_MESSAGE_ID:
            message = new MigratePartitionLeaderMessage();
            break;
        case DUMP_PLAN_ID:
            message = new DumpPlanThenExitMessage();
            break;
        case FLUSH_RO_TXN_MESSAGE_ID:
            message = new MPBacklogFlushMessage();
            break;
        default:
            // Unknown id: caller is responsible for handling null.
            message = null;
    }

    return message;
}
java
// Releases all manager references so the database object's structures can
// be garbage collected; schemaManager gets an explicit clear first.
void clearStructures() {

    if (schemaManager != null) {
        schemaManager.clearStructures();
    }

    granteeManager  = null;
    userManager     = null;
    nameManager     = null;
    schemaManager   = null;
    sessionManager  = null;
    dbInfo          = null;
}
java
// Builds the full SQL script that recreates this database — settings,
// grantees, schema objects, optional index roots, initial schemas, rights,
// and properties — as a single-column result set of SQL commands.
public Result getScript(boolean indexRoots) {

    Result r = Result.newSingleColumnResult("COMMAND", Type.SQL_VARCHAR);
    String[] list = getSettingsSQL();

    addRows(r, list);

    list = getGranteeManager().getSQL();

    addRows(r, list);

    // schemas and schema objects such as tables, sequences, etc.
    list = schemaManager.getSQLArray();

    addRows(r, list);

    // index roots
    if (indexRoots) {
        list = schemaManager.getIndexRootsSQL();

        addRows(r, list);
    }

    // user session start schema names
    list = getUserManager().getInitialSchemaSQL();

    addRows(r, list);

    // grantee rights
    list = getGranteeManager().getRightstSQL();

    addRows(r, list);

    list = getPropertiesSQL();

    addRows(r, list);

    return r;
}
java
// Parses a window specification "( [PARTITION BY expr, ...] [ORDER BY ...] )"
// following an OVER clause and builds the windowed expression, carrying over
// the aggregate's argument nodes and DISTINCT flag when one is present.
private Expression readWindowSpecification(int tokenT, Expression aggExpr) {
    SortAndSlice sortAndSlice = null;
    readThis(Tokens.OPENBRACKET);
    List<Expression> partitionByList = new ArrayList<>();
    if (token.tokenType == Tokens.PARTITION) {
        read();
        readThis(Tokens.BY);
        // comma-separated list of partition expressions
        while (true) {
            Expression partitionExpr = XreadValueExpression();
            partitionByList.add(partitionExpr);
            if (token.tokenType == Tokens.COMMA) {
                read();
                continue;
            }
            break;
        }
    }
    if (token.tokenType == Tokens.ORDER) {
        // order by clause
        read();
        readThis(Tokens.BY);
        sortAndSlice = XreadOrderBy();
    }
    readThis(Tokens.CLOSEBRACKET);
    // We don't really care about aggExpr any more.  It has the
    // aggregate expression as a non-windowed expression.  We do
    // care about its parameters and whether it's specified as
    // unique though.
    assert(aggExpr == null || aggExpr instanceof ExpressionAggregate);
    Expression nodes[];
    boolean isDistinct;
    if (aggExpr != null) {
        ExpressionAggregate winAggExpr = (ExpressionAggregate)aggExpr;
        nodes = winAggExpr.nodes;
        isDistinct = winAggExpr.isDistinctAggregate;
    } else {
        // e.g. RANK()-style window functions with no aggregate argument
        nodes = Expression.emptyExpressionArray;
        isDistinct = false;
    }
    ExpressionWindowed windowedExpr = new ExpressionWindowed(tokenT, nodes, isDistinct,
            sortAndSlice, partitionByList);
    return windowedExpr;
}
java
// Parses the right side of a "STARTS WITH" predicate. A parameterized
// pattern is rewritten as a range scan (left >= ? AND left <= ? || '\uffff');
// a literal pattern becomes a dedicated ExpressionStartsWith node.
private ExpressionLogical XStartsWithPredicateRightPart(Expression left) {
    readThis(Tokens.WITH);

    if (token.tokenType == Tokens.QUESTION) {
        // handle user parameter case
        Expression right = XreadRowValuePredicand();
        if (left.isParam() && right.isParam()) {
            // again make sure the left side is valid
            throw Error.error(ErrorCode.X_42567);
        }
        /** In this case, we make the right parameter as the lower bound,
         * and the right parameter concatenating a special char (greater than any other chars) as the upper bound.
         * It now becomes a range scan for all the strings with right parameter as its prefix.
         */
        Expression l = new ExpressionLogical(OpTypes.GREATER_EQUAL, left, right);
        Expression r = new ExpressionLogical(OpTypes.SMALLER_EQUAL, left,
                new ExpressionArithmetic(OpTypes.CONCAT, right,
                        new ExpressionValue("\uffff", Type.SQL_CHAR)));
        return new ExpressionLogical(OpTypes.AND, l, r);
    } else {
        // handle plain string value and the column
        Expression right = XreadStringValueExpression();
        return new ExpressionStartsWith(left, right, this.isCheckOrTriggerCondition);
    }
}
java
// Reads a row-value constructor, trying each alternative in precedence
// order: explicit row constructor, row/common value expression, and
// finally a boolean value expression.
Expression XreadRowValueConstructor() {
    Expression e = XreadExplicitRowValueConstructorOrNull();

    if (e == null) {
        e = XreadRowOrCommonValueExpression();
    }

    if (e == null) {
        e = XreadBooleanValueExpression();
    }

    return e;
}
java
// Reads an explicit row value constructor — "( ... )" containing either a
// subquery (TABLE/VALUES/SELECT) or a row element list, or "ROW ( ... )".
// Returns null when the current token starts neither form.
Expression XreadExplicitRowValueConstructorOrNull() {

    Expression e;

    switch (token.tokenType) {

        case Tokens.OPENBRACKET : {
            read();

            // Remember where we are; we must look past any nested brackets
            // to see whether a subquery keyword follows.
            int position = getPosition();
            int brackets = readOpenBrackets();

            switch (token.tokenType) {

                case Tokens.TABLE :
                case Tokens.VALUES :
                case Tokens.SELECT :
                    // Subquery: re-parse from the bracket as a row subquery.
                    rewind(position);

                    SubQuery sq = XreadSubqueryBody(false,
                                                    OpTypes.ROW_SUBQUERY);

                    readThis(Tokens.CLOSEBRACKET);

                    return new Expression(OpTypes.ROW_SUBQUERY, sq);

                default :
                    // Plain parenthesized row element list.
                    rewind(position);

                    e = XreadRowElementList(true);

                    readThis(Tokens.CLOSEBRACKET);

                    return e;
            }
        }
        case Tokens.ROW : {
            read();
            readThis(Tokens.OPENBRACKET);

            e = XreadRowElementList(false);

            readThis(Tokens.CLOSEBRACKET);

            return e;
        }
    }

    return null;
}
java
// Recursively parses the WHEN/THEN arms of a CASE expression into a chain
// of CASEWHEN/ALTERNATIVE nodes. When l is non-null this is a "simple"
// CASE (CASE l WHEN v ...), and each WHEN value is compared against l;
// comma-separated WHEN values are OR-ed together. A missing ELSE yields a
// typed NULL alternative.
private Expression readCaseWhen(final Expression l) {

    readThis(Tokens.WHEN);

    Expression condition = null;

    if (l == null) {
        // searched CASE: the WHEN clause is a full boolean expression
        condition = XreadBooleanValueExpression();
    } else {
        while (true) {
            Expression newCondition = XreadPredicateRightPart(l);

            // If no predicate was consumed, it's a plain value: compare
            // for equality against the CASE operand.
            if (l == newCondition) {
                newCondition =
                    new ExpressionLogical(l, XreadRowValuePredicand());
            }

            if (condition == null) {
                condition = newCondition;
            } else {
                condition = new ExpressionLogical(OpTypes.OR, condition,
                                                  newCondition);
            }

            if (token.tokenType == Tokens.COMMA) {
                read();
            } else {
                break;
            }
        }
    }

    readThis(Tokens.THEN);

    Expression current  = XreadValueExpression();
    Expression elseExpr = null;

    if (token.tokenType == Tokens.WHEN) {
        // another WHEN arm: recurse, nesting it as this arm's alternative
        elseExpr = readCaseWhen(l);
    } else if (token.tokenType == Tokens.ELSE) {
        read();

        elseExpr = XreadValueExpression();

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);
    } else {
        // no ELSE: default alternative is NULL
        elseExpr = new ExpressionValue((Object) null, Type.SQL_ALL_TYPES);

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);
    }

    Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE, current,
        elseExpr);
    Expression casewhen = new ExpressionOp(OpTypes.CASEWHEN, condition,
        alternatives);

    return casewhen;
}
java
// Parses the CASEWHEN(cond, then, else) function form into a CASEWHEN
// expression node with an ALTERNATIVE(then, else) child.
private Expression readCaseWhenExpression() {

    Expression l = null;

    read();
    readThis(Tokens.OPENBRACKET);

    l = XreadBooleanValueExpression();

    readThis(Tokens.COMMA);

    Expression thenelse = XreadRowValueExpression();

    readThis(Tokens.COMMA);

    // the 'then' branch pairs with the 'else' branch just read
    thenelse = new ExpressionOp(OpTypes.ALTERNATIVE, thenelse,
                                XreadValueExpression());
    l = new ExpressionOp(OpTypes.CASEWHEN, l, thenelse);

    readThis(Tokens.CLOSEBRACKET);

    return l;
}
java
// Parses CAST(expr AS type) or CONVERT(expr, type) into a typed
// ExpressionOp; dynamic parameters get the target type assigned directly.
// NOTE(review): XreadValueExpressionOrNull may return null, which would NPE
// at l.isParam() — presumably the grammar prevents reaching here with an
// empty operand; confirm.
private Expression readCastExpression() {

    boolean isConvert = token.tokenType == Tokens.CONVERT;

    read();
    readThis(Tokens.OPENBRACKET);

    Expression l = this.XreadValueExpressionOrNull();

    if (isConvert) {
        readThis(Tokens.COMMA);
    } else {
        readThis(Tokens.AS);
    }

    Type typeObject = readTypeDefinition(true);

    if (l.isParam()) {
        l.setDataType(session, typeObject);
    }

    l = new ExpressionOp(l, typeObject);

    readThis(Tokens.CLOSEBRACKET);

    return l;
}
java
// Parses NULLIF(a, b), rewriting it as CASEWHEN(a = b, NULL, a).
private Expression readNullIfExpression() {

    // turn into a CASEWHEN
    read();
    readThis(Tokens.OPENBRACKET);

    Expression c = XreadValueExpression();

    readThis(Tokens.COMMA);

    // ALTERNATIVE(NULL, a): result is NULL when equal, otherwise a
    Expression thenelse =
        new ExpressionOp(OpTypes.ALTERNATIVE,
                         new ExpressionValue((Object) null, (Type) null), c);

    // equality comparison a = b
    c = new ExpressionLogical(c, XreadValueExpression());
    c = new ExpressionOp(OpTypes.CASEWHEN, c, thenelse);

    readThis(Tokens.CLOSEBRACKET);

    return c;
}
java
// Parses COALESCE(e1, e2, ...), rewriting it as a right-leaning chain of
// CASEWHEN(e IS NULL, NULL-alt, e) nodes; the last argument is spliced in
// as the final alternative ("leaf") of the chain.
private Expression readCoalesceExpression() {

    Expression c = null;

    // turn into a CASEWHEN
    read();
    readThis(Tokens.OPENBRACKET);

    // "leaf" tracks the deepest ALTERNATIVE node, whose left child is
    // overwritten by the next argument (or the final one).
    Expression leaf = null;

    while (true) {
        Expression current = XreadValueExpression();

        if (leaf != null && token.tokenType == Tokens.CLOSEBRACKET) {
            readThis(Tokens.CLOSEBRACKET);
            leaf.setLeftNode(current);
            break;
        }

        Expression condition = new ExpressionLogical(OpTypes.IS_NULL,
            current);
        Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE,
            new ExpressionValue((Object) null, (Type) null), current);
        Expression casewhen = new ExpressionOp(OpTypes.CASEWHEN, condition,
            alternatives);

        if (c == null) {
            c = casewhen;
        } else {
            leaf.setLeftNode(casewhen);
        }

        leaf = alternatives;

        readThis(Tokens.COMMA);
    }

    return c;
}
java
// Compiles a top-level query (cursor specification), consuming an optional
// trailing FOR READ ONLY / FOR UPDATE [OF columns] clause.
StatementDMQL compileCursorSpecification() {

    QueryExpression queryExpression = XreadQueryExpression();

    queryExpression.setAsTopLevel();
    queryExpression.resolve(session);

    if (token.tokenType == Tokens.FOR) {
        read();

        if (token.tokenType == Tokens.READ) {
            read();
            readThis(Tokens.ONLY);
        } else {
            readThis(Tokens.UPDATE);

            if (token.tokenType == Tokens.OF) {
                readThis(Tokens.OF);

                // column list is parsed to consume the tokens; the names
                // themselves are not used here
                OrderedHashSet colNames = readColumnNameList(null, false);
            }
        }
    }

    StatementDMQL cs = new StatementQuery(session, queryExpression,
                                          compileContext);

    return cs;
}
java
public long toLong() { byte[] data = getBytes(); if (data == null || data.length <= 0 || data.length > 8) { // Assume that we're in a numeric context and that the user // made a typo entering a hex string. throw Error.error(ErrorCode.X_42585); // malformed numeric constant } byte[] dataWithLeadingZeros = new byte[] {0, 0, 0, 0, 0, 0, 0, 0}; int lenDiff = 8 - data.length; for (int j = lenDiff; j < 8; ++j) { dataWithLeadingZeros[j] = data[j - lenDiff]; } BigInteger bi = new BigInteger(dataWithLeadingZeros); return bi.longValue(); }
java
// Command-line entry: loads statements from the file named by args[0]
// and prints each statement's SQL text; prints the error and exits on
// any failure to load.
public static void main(String args[]) {
    final Statement[] stmts;
    try {
        stmts = getStatements(args[0]);
    } catch (Throwable e) {
        System.out.println(e.getMessage());
        return;
    }
    for (Statement s : stmts) {
        System.out.print(s.statement);
    }
}
java
// Registers a callback and a BabySitter watcher on the leader-election
// ZooKeeper directory for the given partition; optionally blocks until
// the watcher has initialized.
void watchPartition(int pid, ExecutorService es, boolean shouldBlock)
        throws InterruptedException, ExecutionException {
    final String dir = LeaderElector.electionDirForPartition(VoltZK.leaders_initiators, pid);
    m_callbacks.put(pid, new PartitionCallback(pid));

    final BabySitter sitter = shouldBlock
            ? BabySitter.blockingFactory(m_zk, dir, m_callbacks.get(pid), es).getFirst()
            : BabySitter.nonblockingFactory(m_zk, dir, m_callbacks.get(pid), es);
    m_partitionWatchers.put(pid, sitter);
}
java
/**
 * Returns the partition count captured at cluster startup.
 *
 * @throws IllegalAccessException if called after startup, when the cached
 *         value may no longer be authoritative
 */
private int getInitialPartitionCount() throws IllegalAccessException {
    final AppointerState state = m_state.get();
    final boolean starting =
            state == AppointerState.INIT || state == AppointerState.CLUSTER_START;
    if (!starting) {
        throw new IllegalAccessException("Getting cached partition count after cluster " +
                "startup");
    }
    return m_initialPartitionCount;
}
java
// Records a partition leader change in the partition's callback: shifts the
// current leader to previous and stores the new leader plus the migration
// flag. No-op if the partition is unknown or the leader is unchanged.
public void updatePartitionLeader(int partitionId, long newMasterHISD, boolean isLeaderMigrated) {
    final PartitionCallback cb = m_callbacks.get(partitionId);
    if (cb == null || cb.m_currentLeader == newMasterHISD) {
        return;
    }
    cb.m_previousLeader = cb.m_currentLeader;
    cb.m_currentLeader = newMasterHISD;
    cb.m_isLeaderMigrated = isLeaderMigrated;
}
java
// Compares two strings — using the configured collator when one is set,
// plain String ordering otherwise — normalizing the result to -1, 0, or 1.
public int compare(String a, String b) {
    final int raw = (collator == null) ? a.compareTo(b) : collator.compare(a, b);
    return Integer.signum(raw);
}
java
// Allocates a free block of at least rowSize from the free-space lookup.
// Returns the block's key (scaled position) or -1 if none is big enough.
int get(int rowSize) {

    if (lookup.size() == 0) {
        return -1;
    }

    // best-fit: first block whose size >= rowSize
    int index = lookup.findFirstGreaterEqualKeyIndex(rowSize);

    if (index == -1) {
        return -1;
    }

    // statistics for successful requests only - to be used later for midSize
    requestCount++;

    requestSize += rowSize;

    int length     = lookup.getValue(index);
    int difference = length - rowSize;
    int key        = lookup.getKey(index);

    lookup.remove(index);

    // Leftover space: re-add large remainders to the pool; small ones are
    // abandoned and counted as lost free space.
    if (difference >= midSize) {
        int pos = key + (rowSize / scale);

        lookup.add(pos, difference);
    } else {
        lostFreeBlockSize += difference;
    }

    return key;
}
java