code
stringlengths
73
34.1k
label
stringclasses
1 value
// Appends the SQL DDL text of this FOREIGN KEY constraint to the buffer:
// optional "CONSTRAINT <name>", "FOREIGN KEY (refCols) REFERENCES <main>(mainCols)",
// plus ON DELETE / ON UPDATE actions when they are not NO ACTION.
// Append order matters - it must produce valid SQL token order.
private void getFKStatement(StringBuffer a) { if (!getName().isReservedName()) { a.append(Tokens.T_CONSTRAINT).append(' '); a.append(getName().statementName); a.append(' '); } a.append(Tokens.T_FOREIGN).append(' ').append(Tokens.T_KEY); int[] col = getRefColumns(); getColumnList(getRef(), col, col.length, a); a.append(' ').append(Tokens.T_REFERENCES).append(' '); a.append(getMain().getName().getSchemaQualifiedStatementName()); col = getMainColumns(); getColumnList(getMain(), col, col.length, a); if (getDeleteAction() != Constraint.NO_ACTION) { a.append(' ').append(Tokens.T_ON).append(' ').append( Tokens.T_DELETE).append(' '); a.append(getDeleteActionString()); } if (getUpdateAction() != Constraint.NO_ACTION) { a.append(' ').append(Tokens.T_ON).append(' ').append( Tokens.T_UPDATE).append(' '); a.append(getUpdateActionString()); } }
java
/**
 * Appends a parenthesized, comma-separated list of the statement names of the
 * first {@code len} columns referenced by {@code col} to the buffer.
 */
private static void getColumnList(Table t, int[] col, int len, StringBuffer a) {
    a.append('(');

    for (int i = 0; i < len; i++) {
        if (i > 0) {
            a.append(',');
        }

        a.append(t.getColumn(col[i]).getName().statementName);
    }

    a.append(')');
}
java
/**
 * Maps a referential-action code to its SQL keyword text.
 * Unknown codes fall back to "NO ACTION".
 */
private static String getActionString(int action) {
    if (action == Constraint.RESTRICT) {
        return Tokens.T_RESTRICT;
    }

    if (action == Constraint.CASCADE) {
        return Tokens.T_CASCADE;
    }

    if (action == Constraint.SET_DEFAULT) {
        return Tokens.T_SET + ' ' + Tokens.T_DEFAULT;
    }

    if (action == Constraint.SET_NULL) {
        return Tokens.T_SET + ' ' + Tokens.T_NULL;
    }

    return Tokens.T_NO + ' ' + Tokens.T_ACTION;
}
java
/**
 * Returns true when this is a UNIQUE constraint over exactly the given
 * column set (order-insensitive comparison).
 */
boolean isUniqueWithColumns(int[] cols) {
    boolean sameShape = constType == UNIQUE
                        && core.mainCols.length == cols.length;

    return sameShape
           && ArrayUtil.haveEqualSets(core.mainCols, cols, cols.length);
}
java
/**
 * Returns true when this MAIN/FOREIGN_KEY constraint links the same pair of
 * tables over the same (order-insensitive) column sets as the arguments.
 */
boolean isEquivalent(Table mainTable, int[] mainCols, Table refTable, int[] refCols) {
    boolean fkType = constType == Constraint.MAIN
                     || constType == Constraint.FOREIGN_KEY;

    if (!fkType) {
        return false;
    }

    if (core.mainTable != mainTable || core.refTable != refTable) {
        return false;
    }

    return ArrayUtil.areEqualSets(core.mainCols, mainCols)
           && ArrayUtil.areEqualSets(core.refCols, refCols);
}
java
// Rebinds this constraint from oldTable to newTable after a table rebuild:
// re-resolves the main/ref index by name on the new table and shifts the
// stored column positions by `adjust` from `colIndex` onward (column
// add/drop). CHECK constraints are recompiled against the new table.
// The main and ref sides are handled independently because a self-referencing
// table matches both branches.
void updateTable(Session session, Table oldTable, Table newTable, int colIndex, int adjust) { if (oldTable == core.mainTable) { core.mainTable = newTable; if (core.mainIndex != null) { core.mainIndex = core.mainTable.getIndex(core.mainIndex.getName().name); core.mainCols = ArrayUtil.toAdjustedColumnArray(core.mainCols, colIndex, adjust); } } if (oldTable == core.refTable) { core.refTable = newTable; if (core.refIndex != null) { core.refIndex = core.refTable.getIndex(core.refIndex.getName().name); core.refCols = ArrayUtil.toAdjustedColumnArray(core.refCols, colIndex, adjust); } } // CHECK if (constType == CHECK) { recompile(session, newTable); } }
java
// Validates one row being inserted against this constraint.
// CHECK: evaluates the check expression (NOT NULL checks are handled elsewhere).
// FOREIGN KEY: a row passes if (a) nulls exempt it under the MATCH rules
// (MATCH SIMPLE, single column, or all-null under MATCH FULL), (b) a matching
// row exists in the main table's index, or (c) the table is self-referencing
// and the row matches itself. Otherwise throws X_23502 naming the constraint
// and the main table.
void checkInsert(Session session, Table table, Object[] row) { switch (constType) { case CHECK : if (!isNotNull) { checkCheckConstraint(session, table, row); } return; case FOREIGN_KEY : PersistentStore store = session.sessionData.getRowStore(core.mainTable); if (ArrayUtil.hasNull(row, core.refCols)) { if (core.matchType == OpTypes.MATCH_SIMPLE) { return; } if (core.refCols.length == 1) { return; } if (ArrayUtil.hasAllNull(row, core.refCols)) { return; } // core.matchType == OpTypes.MATCH_FULL } else if (core.mainIndex.exists(session, store, row, core.refCols)) { return; } else if (core.mainTable == core.refTable) { // special case: self referencing table and self referencing row int compare = core.mainIndex.compareRowNonUnique(row, core.refCols, row); if (compare == 0) { return; } } String[] info = new String[] { core.refName.name, core.mainTable.getName().name }; throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info); } }
java
/**
 * Checks that the referencing columns of the given row have a matching row
 * in the main table. Rows containing any null are exempt and yield false;
 * a found match yields true; otherwise X_23502 is thrown.
 */
boolean checkHasMainRef(Session session, Object[] row) {
    if (ArrayUtil.hasNull(row, core.refCols)) {
        return false;
    }

    PersistentStore mainStore =
        session.sessionData.getRowStore(core.mainTable);

    if (core.mainIndex.exists(session, mainStore, row, core.refCols)) {
        return true;
    }

    String[] info = new String[] {
        core.refName.name, core.mainTable.getName().name
    };

    throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
}
java
/**
 * Scans every row of {@code table} and verifies it satisfies this
 * referential constraint: either the row is exempt under the MATCH rules
 * for nulls, or a matching row exists under the main index.
 * Throws X_23502 on the first violating row.
 */
void checkReferencedRows(Session session, Table table, int[] rowColArray) {

    Index           mainIndex = getMainIndex();
    PersistentStore store     = session.sessionData.getRowStore(table);
    RowIterator     it        = table.rowIterator(session);

    while (true) {
        Row row = it.getNextRow();

        if (row == null) {
            break;
        }

        Object[] rowData = row.getData();

        if (ArrayUtil.hasNull(rowData, rowColArray)) {
            if (core.matchType == OpTypes.MATCH_SIMPLE) {
                continue;
            }
        } else if (mainIndex.exists(session, store, rowData, rowColArray)) {
            continue;
        }

        if (ArrayUtil.hasAllNull(rowData, rowColArray)) {
            continue;
        }

        // Build a printable list of the offending column values.
        // BUG FIX: the column type must be looked up by the actual column
        // position rowColArray[i], not by the loop counter i - the old code
        // paired value rowData[rowColArray[i]] with type getColumnTypes()[i].
        // NOTE(review): this diagnostic is computed but not included in the
        // thrown error, matching the original behavior.
        StringBuilder colValues = new StringBuilder();

        for (int i = 0; i < rowColArray.length; i++) {
            Object o = rowData[rowColArray[i]];

            colValues.append(
                table.getColumnTypes()[rowColArray[i]].convertToString(o));
            colValues.append(',');
        }

        String[] info = new String[] {
            getName().name, getMain().getName().name
        };

        throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
}
java
// Picks an open-addressing table size for a set of setSize elements: the
// smallest power of two whose occupancy stays at or below DESIRED_LOAD_FACTOR.
// setSize == 1 is special-cased to 2 so the table always has a free slot.
// NOTE(review): no guard against tableSize overflowing to a negative value
// for very large setSize - presumably callers bound setSize; verify upstream.
@VisibleForTesting static int chooseTableSize(int setSize) { if (setSize == 1) { return 2; } // Correct the size for open addressing to match desired load factor. // Round up to the next highest power of 2. int tableSize = Integer.highestOneBit(setSize - 1) << 1; while (tableSize * DESIRED_LOAD_FACTOR < setSize) { tableSize <<= 1; } return tableSize; }
java
// Decodes one export row's field values into strings via per-column
// StringFieldDecoders. Decoders are built lazily per catalog generation from
// the column type map and cached in m_fieldDecoders; the destination array
// `to` is reused when large enough, otherwise reallocated. Decoding starts
// at m_firstFieldOffset to skip export metadata columns.
// NOTE(review): the containsKey/put sequence is not atomic - assumes decode()
// for a given generation is not called concurrently; confirm with callers.
@Override public String[] decode(long generation, String tableName, List<VoltType> types, List<String> names, String[] to, Object[] fields) throws RuntimeException { Preconditions.checkArgument( fields != null && fields.length > m_firstFieldOffset, "null or inapropriately sized export row array" ); /* * Builds a list of string formatters that reflects the row * column types. */ StringFieldDecoder [] fieldDecoders; if (!m_fieldDecoders.containsKey(generation)) { int fieldCount = 0; Map<String, DecodeType> typeMap = getTypeMap(generation, types, names); ImmutableList.Builder<StringFieldDecoder> lb = ImmutableList.builder(); for (org.voltdb.exportclient.decode.DecodeType dt: typeMap.values()) { lb.add(dt.accept(decodingVisitor, fieldCount++, null)); } fieldDecoders = lb.build().toArray(new StringFieldDecoder[0]); m_fieldDecoders.put(generation, fieldDecoders); } else { fieldDecoders = m_fieldDecoders.get(generation); } if (to == null || to.length < fieldDecoders.length) { to = new String[fieldDecoders.length]; } for ( int i = m_firstFieldOffset, j = 0; i < fields.length && j < fieldDecoders.length; ++i, ++j ) { fieldDecoders[j].decode(to, fields[i]); } return to; }
java
// Looks up and removes the in-flight transaction record for a client
// interface handle. The partition is recovered from the handle itself; on a
// hit, backpressure is released and the outstanding-txn counter decremented.
// Returns null (after logging for the missing-partition case) when no record
// is found.
Iv2InFlight findHandle(long ciHandle) { assert(!shouldCheckThreadIdAssertion() || m_expectedThreadId == Thread.currentThread().getId()); /* * Check the partition specific queue of handles */ int partitionId = getPartIdFromHandle(ciHandle); PartitionInFlightTracker partitionStuff = m_trackerMap.get(partitionId); if (partitionStuff == null) { // whoa, bad tmLog.error("Unable to find handle list for partition: " + partitionId + ", client interface handle: " + ciHandle); return null; } Iv2InFlight inFlight = partitionStuff.m_inFlights.remove(ciHandle); if (inFlight != null) { m_acg.reduceBackpressure(inFlight.m_messageSize); m_outstandingTxns--; return inFlight; } return null; }
java
// Releases accounting (backpressure and outstanding-txn count) for every
// still-in-flight transaction across all partition trackers.
// NOTE(review): the tracker maps are not cleared here - calling this twice
// would double-release; presumably invoked once during teardown - confirm.
void freeOutstandingTxns() { assert(!shouldCheckThreadIdAssertion() || m_expectedThreadId == Thread.currentThread().getId()); for (PartitionInFlightTracker tracker : m_trackerMap.values()) { for (Iv2InFlight inflight : tracker.m_inFlights.values()) { m_outstandingTxns--; m_acg.reduceBackpressure(inflight.m_messageSize); } } }
java
void loadSchema(Reader reader, Database db, DdlProceduresToLoad whichProcs) throws VoltCompiler.VoltCompilerException { int currLineNo = 1; DDLStatement stmt = getNextStatement(reader, m_compiler, currLineNo); while (stmt != null) { // Some statements are processed by VoltDB and the rest are handled by HSQL. processVoltDBStatements(db, whichProcs, stmt); stmt = getNextStatement(reader, m_compiler, stmt.endLineNo); } try { reader.close(); } catch (IOException e) { throw m_compiler.new VoltCompilerException("Error closing schema file"); } // process extra classes m_tracker.addExtraClasses(m_classMatcher.getMatchedClassList()); // possibly save some memory m_classMatcher.clear(); }
java
/**
 * Produces the DDL that either creates the DR conflict tables (XDCR mode)
 * or drops them if present (non-XDCR mode).
 */
private String generateDDLForDRConflictsTable(Database currentDB,
        Database previousDBIfAny, boolean isCurrentXDCR) {
    final StringBuilder ddl = new StringBuilder();

    if (!isCurrentXDCR) {
        dropDRConflictTablesIfNeeded(ddl);
    } else {
        createDRConflictTables(ddl, previousDBIfAny);
    }

    return ddl.toString();
}
java
// Parses a CREATE STREAM statement: marks the named table as a stream in the
// schema XML, records at most one PARTITION ON COLUMN clause and at most one
// EXPORT TO TARGET clause (duplicates raise VoltCompilerException), defaults
// the export target when none is given, and rejects streams over DR-enabled
// tables. A non-matching statement yields a syntax-error exception quoting
// the expected syntax.
private void processCreateStreamStatement(DDLStatement stmt, Database db, DdlProceduresToLoad whichProcs) throws VoltCompilerException { String statement = stmt.statement; Matcher statementMatcher = SQLParser.matchCreateStream(statement); if (statementMatcher.matches()) { // check the table portion String tableName = checkIdentifierStart(statementMatcher.group(1), statement); String targetName = null; String columnName = null; // Parse the EXPORT and PARTITION clauses. if ((statementMatcher.groupCount() > 1) && (statementMatcher.group(2) != null) && (!statementMatcher.group(2).isEmpty())) { String clauses = statementMatcher.group(2); Matcher matcher = SQLParser.matchAnyCreateStreamStatementClause(clauses); int start = 0; while ( matcher.find(start)) { start = matcher.end(); if (matcher.group(1) != null) { // Add target info if it's an Export clause. Only one is allowed if (targetName != null) { throw m_compiler.new VoltCompilerException( "Only one Export clause is allowed for CREATE STREAM."); } targetName = matcher.group(1); } else { // Add partition info if it's a PARTITION clause. Only one is allowed. if (columnName != null) { throw m_compiler.new VoltCompilerException( "Only one PARTITION clause is allowed for CREATE STREAM."); } columnName = matcher.group(2); } } } VoltXMLElement tableXML = m_schema.findChild("table", tableName.toUpperCase()); if (tableXML != null) { tableXML.attributes.put("stream", "true"); } else { throw m_compiler.new VoltCompilerException(String.format( "Invalid STREAM statement: table %s does not exist", tableName)); } // process partition if specified if (columnName != null) { tableXML.attributes.put("partitioncolumn", columnName.toUpperCase()); // Column validity check done by VoltCompiler in post-processing // mark the table as dirty for the purposes of caching sql statements m_compiler.markTableAsDirty(tableName); } // process export targetName = (targetName != null) ? 
checkIdentifierStart( targetName, statement) : Constants.DEFAULT_EXPORT_CONNECTOR_NAME; if (tableXML.attributes.containsKey("drTable") && "ENABLE".equals(tableXML.attributes.get("drTable"))) { throw m_compiler.new VoltCompilerException(String.format( "Invalid CREATE STREAM statement: table %s is a DR table.", tableName)); } else { tableXML.attributes.put("export", targetName); } } else { throw m_compiler.new VoltCompilerException(String.format("Invalid CREATE STREAM statement: \"%s\", " + "expected syntax: CREATE STREAM <table> [PARTITION ON COLUMN <column-name>] [EXPORT TO TARGET <target>] (column datatype, ...); ", statement.substring(0, statement.length() - 1))); } }
java
/**
 * Walks the schema XML and synchronizes the tracker's partition, export, and
 * DR registrations with each table element's attributes. A migrate target
 * substitutes for an empty export attribute.
 */
private void fillTrackerFromXML() {
    for (VoltXMLElement e : m_schema.children) {
        if (!e.name.equals("table")) {
            continue;
        }

        String tableName     = e.attributes.get("name");
        String partitionCol  = e.attributes.get("partitioncolumn");
        String export        = e.attributes.get("export");
        String drTable       = e.attributes.get("drTable");
        String migrateTarget = e.attributes.get("migrateExport");

        if (StringUtil.isEmpty(export)) {
            export = migrateTarget;
        }

        final boolean isStream = e.attributes.get("stream") != null;

        if (partitionCol == null) {
            m_tracker.removePartition(tableName);
        } else {
            m_tracker.addPartition(tableName, partitionCol);
        }

        if (StringUtil.isEmpty(export)) {
            m_tracker.removeExportedTable(tableName, isStream);
        } else {
            m_tracker.addExportedTable(tableName, export, isStream);
        }

        if (drTable != null) {
            m_tracker.addDRedTable(tableName, drTable);
        }
    }
}
java
// Decides whether two catalog indexes are duplicates: same type/countable/
// unique/assumeunique attributes, same column count, identical expression
// JSON, identical base-table column order, and matching partial-index
// predicates. Simple column indexes share an empty expression string, so the
// per-column base-table-order comparison is what distinguishes them.
private static boolean indexesAreDups(Index idx1, Index idx2) { // same attributes? if (idx1.getType() != idx2.getType()) { return false; } if (idx1.getCountable() != idx2.getCountable()) { return false; } if (idx1.getUnique() != idx2.getUnique()) { return false; } if (idx1.getAssumeunique() != idx2.getAssumeunique()) { return false; } // same column count? if (idx1.getColumns().size() != idx2.getColumns().size()) { return false; } //TODO: For index types like HASH that support only random access vs. scanned ranges, indexes on different // permutations of the same list of columns/expressions could be considered dupes. This code skips that edge // case optimization in favor of using a simpler more exact permutation-sensitive algorithm for all indexes. if ( ! (idx1.getExpressionsjson().equals(idx2.getExpressionsjson()))) { return false; } // Simple column indexes have identical empty expression strings so need to be distinguished other ways. // More complex expression indexes that have the same expression strings always have the same set of (base) // columns referenced in the same order, but we fall through and check them, anyway. // sort in index order the columns of idx1, each identified by its index in the base table int[] idx1baseTableOrder = new int[idx1.getColumns().size()]; for (ColumnRef cref : idx1.getColumns()) { int index = cref.getIndex(); int baseTableIndex = cref.getColumn().getIndex(); idx1baseTableOrder[index] = baseTableIndex; } // sort in index order the columns of idx2, each identified by its index in the base table int[] idx2baseTableOrder = new int[idx2.getColumns().size()]; for (ColumnRef cref : idx2.getColumns()) { int index = cref.getIndex(); int baseTableIndex = cref.getColumn().getIndex(); idx2baseTableOrder[index] = baseTableIndex; } // Duplicate indexes have identical columns in identical order. if ( ! 
Arrays.equals(idx1baseTableOrder, idx2baseTableOrder) ) { return false; } // Check the predicates if (idx1.getPredicatejson().length() > 0) { return idx1.getPredicatejson().equals(idx2.getPredicatejson()); } if (idx2.getPredicatejson().length() > 0) { return idx2.getPredicatejson().equals(idx1.getPredicatejson()); } return true; }
java
// Translates one XML constraint node into catalog state for the table.
// LIMIT sets a tuple limit (with optional delete statement); CHECK and
// FOREIGN_KEY are warned about and ignored (VoltDB does not enforce them);
// NOT_NULL is handled by table metadata inspection; MAIN should never occur.
// PRIMARY_KEY/UNIQUE fall through to create a catalog constraint backed by
// the (possibly dedup-replaced) index, which is forced unique.
private void addConstraintToCatalog(Table table, VoltXMLElement node, Map<String, String> indexReplacementMap, Map<String, Index> indexMap) throws VoltCompilerException { assert node.name.equals("constraint"); String name = node.attributes.get("name"); String typeName = node.attributes.get("constrainttype"); ConstraintType type = ConstraintType.valueOf(typeName); String tableName = table.getTypeName(); if (type == ConstraintType.LIMIT) { int tupleLimit = Integer.parseInt(node.attributes.get("rowslimit")); if (tupleLimit < 0) { throw m_compiler.new VoltCompilerException("Invalid constraint limit number '" + tupleLimit + "'"); } if (tableLimitConstraintCounter.contains(tableName)) { throw m_compiler.new VoltCompilerException("Too many table limit constraints for table " + tableName); } else { tableLimitConstraintCounter.add(tableName); } table.setTuplelimit(tupleLimit); String deleteStmt = node.attributes.get("rowslimitdeletestmt"); if (deleteStmt != null) { Statement catStmt = table.getTuplelimitdeletestmt().add("limit_delete"); catStmt.setSqltext(deleteStmt); validateTupleLimitDeleteStmt(catStmt); } return; } if (type == ConstraintType.CHECK) { String msg = "VoltDB does not enforce check constraints. "; msg += "Constraint on table " + tableName + " will be ignored."; m_compiler.addWarn(msg); return; } else if (type == ConstraintType.FOREIGN_KEY) { String msg = "VoltDB does not enforce foreign key references and constraints. 
"; msg += "Constraint on table " + tableName + " will be ignored."; m_compiler.addWarn(msg); return; } else if (type == ConstraintType.MAIN) { // should never see these assert(false); } else if (type == ConstraintType.NOT_NULL) { // these get handled by table metadata inspection return; } else if (type != ConstraintType.PRIMARY_KEY && type != ConstraintType.UNIQUE) { throw m_compiler.new VoltCompilerException("Invalid constraint type '" + typeName + "'"); } // else, create the unique index below // primary key code is in other places as well // The constraint is backed by an index, therefore we need to create it // TODO: We need to be able to use indexes for foreign keys. I am purposely // leaving those out right now because HSQLDB just makes too many of them. Constraint catalog_const = table.getConstraints().add(name); String indexName = node.attributes.get("index"); assert(indexName != null); // handle replacements from duplicate index pruning if (indexReplacementMap.containsKey(indexName)) { indexName = indexReplacementMap.get(indexName); } Index catalog_index = indexMap.get(indexName); // Attach the index to the catalog constraint (catalog_const). if (catalog_index != null) { catalog_const.setIndex(catalog_index); // This may be redundant. catalog_index.setUnique(true); boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique")); catalog_index.setAssumeunique(assumeUnique); } catalog_const.setType(type.getValue()); }
java
// Parses a partial-index predicate from its XML form. First rejects any
// column reference to a table other than the index's base table (the
// AbstractParsedStmt parser would assert otherwise), then parses the
// expression tree and validates it is legal for indexes/materialized views.
// NOTE(review): msg stays a StringBuffer - isValidExprForIndexesAndMVs
// presumably requires that type; confirm before changing to StringBuilder.
private static AbstractExpression buildPartialIndexPredicate( AbstractParsedStmt dummy, String indexName, VoltXMLElement predicateXML, Table table, VoltCompiler compiler) throws VoltCompilerException { // Make sure all column expressions refer to the same index table // before we can parse the XML to avoid the AbstractParsedStmt // exception/assertion String tableName = table.getTypeName(); assert(tableName != null); StringBuffer msg = new StringBuffer("Partial index \"" + indexName + "\" "); // Make sure all column expressions refer the index table List<VoltXMLElement> columnRefs= predicateXML.findChildrenRecursively("columnref"); for (VoltXMLElement columnRef : columnRefs) { String columnRefTableName = columnRef.attributes.get("table"); if (columnRefTableName != null && !tableName.equals(columnRefTableName)) { msg.append("with expression(s) involving other tables is not supported."); throw compiler.new VoltCompilerException(msg.toString()); } } // Now it safe to parse the expression tree AbstractExpression predicate = dummy.parseExpressionTree(predicateXML); if ( ! predicate.isValidExprForIndexesAndMVs(msg, false) ) { throw compiler.new VoltCompilerException(msg.toString()); } return predicate; }
java
// Unsupported operation in this LobManager variant - always raises an
// internal runtime error.
public Result getLob(Session session, long lobID, long offset, long length) { throw Error.runtimeError(ErrorCode.U_S0500, "LobManager"); }
java
/**
 * Marks this connection closed and returns the native connection to the
 * pool; any disposal failure is rethrown as a SQLException.
 */
@Override
public void close() throws SQLException {
    // The flag assignment cannot fail, so it is hoisted out of the try.
    isClosed = true;
    try {
        JDBC4ClientConnectionPool.dispose(NativeConnection);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
java
// SQL ARRAY creation is not supported by this driver; fails after the
// closed-connection check.
@Override public Array createArrayOf(String typeName, Object[] elements) throws SQLException { checkClosed(); throw SQLError.noSupport(); }
java
/**
 * Creates a plain statement bound to this connection; any construction
 * failure is rethrown as a SQLException.
 */
@Override
public Statement createStatement() throws SQLException {
    checkClosed();

    JDBC4Statement statement;
    try {
        statement = new JDBC4Statement(this);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
    return statement;
}
java
// SQL STRUCT creation is not supported by this driver; fails after the
// closed-connection check.
@Override public Struct createStruct(String typeName, Object[] attributes) throws SQLException { checkClosed(); throw SQLError.noSupport(); }
java
// Generated-key retrieval by column index is not supported; fails after the
// closed-connection check.
@Override public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { checkClosed(); throw SQLError.noSupport(); }
java
/**
 * Supports only read-only result sets of scroll-insensitive or forward-only
 * type, which are delegated to the basic prepareStatement; anything else is
 * unsupported.
 */
@Override
public PreparedStatement prepareStatement(String sql, int resultSetType,
        int resultSetConcurrency) throws SQLException {
    boolean supportedType = resultSetType == ResultSet.TYPE_SCROLL_INSENSITIVE
                            || resultSetType == ResultSet.TYPE_FORWARD_ONLY;
    boolean readOnly = resultSetConcurrency == ResultSet.CONCUR_READ_ONLY;

    if (supportedType && readOnly) {
        return prepareStatement(sql);
    }

    checkClosed();
    throw SQLError.noSupport();
}
java
// Holdability-specifying prepares are not supported; fails after the
// closed-connection check.
@Override public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { checkClosed(); throw SQLError.noSupport(); }
java
/**
 * Rollback is not supported; by default (ROLLBACK_THROW_EXCEPTION property
 * unset or "true") this throws, otherwise it is a silent no-op.
 */
@Override
public void rollback() throws SQLException {
    checkClosed();

    String throwOnRollback =
        props.getProperty(ROLLBACK_THROW_EXCEPTION, "true");

    if (throwOnRollback.equalsIgnoreCase("true")) {
        throw SQLError.noSupport();
    }
}
java
// Auto-commit is effectively always on; attempting to disable it throws
// unless the COMMIT_THROW_EXCEPTION property opts out, in which case the
// flag is simply recorded.
@Override public void setAutoCommit(boolean autoCommit) throws SQLException { checkClosed(); // Always true - error out only if the client is trying to set something else if (!autoCommit && (props.getProperty(COMMIT_THROW_EXCEPTION, "true").equalsIgnoreCase("true"))) { throw SQLError.noSupport(); } else { this.autoCommit = autoCommit; } }
java
/**
 * setReadOnly is only tolerated (as a no-op) when the "enableSetReadOnly"
 * property is true; otherwise it throws as unsupported.
 */
@Override
public void setReadOnly(boolean readOnly) throws SQLException {
    checkClosed();

    boolean allowed = Boolean.parseBoolean(
        props.getProperty("enableSetReadOnly", "false"));

    if (!allowed) {
        throw SQLError.noSupport();
    }
}
java
// Custom type maps are not supported; fails after the closed-connection check.
@Override public void setTypeMap(Map<String,Class<?>> map) throws SQLException { checkClosed(); throw SQLError.noSupport(); }
java
// Delegates statistics persistence to the underlying native client connection.
@Override public void saveStatistics(ClientStats stats, String file) throws IOException { this.NativeConnection.saveStatistics(stats, file); }
java
/**
 * Walks upward from the given quota node, deleting each empty node, and
 * stops at the first non-empty ancestor or at the quota root. Always
 * returns true. (Iterative form of the original tail recursion.)
 */
private static boolean trimProcQuotas(ZooKeeper zk, String path)
        throws KeeperException, IOException, InterruptedException {
    String current = path;

    while (!Quotas.quotaZookeeper.equals(current)) {
        List<String> children = zk.getChildren(current, false);

        if (!children.isEmpty()) {
            return true;
        }

        zk.delete(current, -1);
        current = current.substring(0, current.lastIndexOf('/'));
    }

    return true;
}
java
// Removes a quota on a path: clears just the byte limit, just the node-count
// limit, or - when both flags are set - deletes the quota subtree outright
// and trims now-empty ancestors. Missing quota nodes are reported and
// treated as success. Always returns true.
// NOTE(review): new String(data) uses the platform default charset -
// presumably the stat data is ASCII; confirm before relying on it.
public static boolean delQuota(ZooKeeper zk, String path, boolean bytes, boolean numNodes) throws KeeperException, IOException, InterruptedException { String parentPath = Quotas.quotaZookeeper + path; String quotaPath = Quotas.quotaZookeeper + path + "/" + Quotas.limitNode; if (zk.exists(quotaPath, false) == null) { System.out.println("Quota does not exist for " + path); return true; } byte[] data = null; try { data = zk.getData(quotaPath, false, new Stat()); } catch (KeeperException.NoNodeException ne) { System.err.println("quota does not exist for " + path); return true; } StatsTrack strack = new StatsTrack(new String(data)); if (bytes && !numNodes) { strack.setBytes(-1L); zk.setData(quotaPath, strack.toString().getBytes(), -1); } else if (!bytes && numNodes) { strack.setCount(-1); zk.setData(quotaPath, strack.toString().getBytes(), -1); } else if (bytes && numNodes) { // delete till you can find a node with more than // one child List<String> children = zk.getChildren(parentPath, false); // delete the direct children first for (String child : children) { zk.delete(parentPath + "/" + child, -1); } // cut the tree till there is more than one child trimProcQuotas(zk, parentPath); } return true; }
java
private static int generateCrudPKeyWhereClause(Column partitioncolumn, Constraint pkey, StringBuilder sb) { // Sort the catalog index columns by index column order. ArrayList<ColumnRef> indexColumns = new ArrayList<ColumnRef>(pkey.getIndex().getColumns().size()); for (ColumnRef c : pkey.getIndex().getColumns()) { indexColumns.add(c); } Collections.sort(indexColumns, new ColumnRefComparator()); boolean first = true; int partitionOffset = -1; sb.append(" WHERE "); for (ColumnRef pkc : indexColumns) { if (!first) sb.append(" AND "); first = false; sb.append("(" + pkc.getColumn().getName() + " = ?" + ")"); if (pkc.getColumn() == partitioncolumn) { partitionOffset = pkc.getIndex(); } } return partitionOffset; }
java
private static void generateCrudExpressionColumns(Table table, StringBuilder sb) { boolean first = true; // Sort the catalog table columns by column order. ArrayList<Column> tableColumns = new ArrayList<Column>(table.getColumns().size()); for (Column c : table.getColumns()) { tableColumns.add(c); } Collections.sort(tableColumns, new TableColumnComparator()); for (Column c : tableColumns) { if (!first) sb.append(", "); first = false; sb.append(c.getName() + " = ?"); } }
java
// Boots an in-process VoltDB server: builds a single-host deployment (no
// command logging, DDL schema, HTTP on 8080, JSON API), writes it to a temp
// file, then starts the server thread and blocks until initialization
// completes. Falls back to ./license.xml when no license path was set.
// NOTE(review): a temp-file failure calls System.exit(-1) - intentional for
// this test-harness class, but verify that is acceptable for all callers.
public InProcessVoltDBServer start() { DeploymentBuilder depBuilder = new DeploymentBuilder(sitesPerHost, 1, 0); depBuilder.setEnableCommandLogging(false); depBuilder.setUseDDLSchema(true); depBuilder.setHTTPDPort(8080); depBuilder.setJSONAPIEnabled(true); VoltDB.Configuration config = new VoltDB.Configuration(); if (pathToLicense != null) { config.m_pathToLicense = pathToLicense; } else { config.m_pathToLicense = "./license.xml"; } File tempDeployment = null; try { tempDeployment = File.createTempFile("volt_deployment_", ".xml"); } catch (IOException e) { e.printStackTrace(); System.exit(-1); } depBuilder.writeXML(tempDeployment.getAbsolutePath()); config.m_pathToDeployment = tempDeployment.getAbsolutePath(); server = new ServerThread(config); server.start(); server.waitForInitialization(); return this; }
java
// (Re)compiles this view's defining query. Falls back to the session's
// default schema when the original compile-time schema no longer exists,
// parses the view subquery, derives or validates the column list (X_42593 on
// a count mismatch), links nested subqueries back to this view, records the
// referenced schema object names, and resolves the base table and WITH CHECK
// OPTION expression per the check mode. Statement order is significant: the
// session schema must be set before parsing.
public void compile(Session session) { if (!database.schemaManager.schemaExists(compileTimeSchema.name)) { compileTimeSchema = session.getSchemaHsqlName(null); } session.setSchema(compileTimeSchema.name); ParserDQL p = new ParserDQL(session, new Scanner(statement)); p.read(); viewSubQuery = p.XreadViewSubquery(this); queryExpression = viewSubQuery.queryExpression; if (getColumnCount() == 0) { if (columnNames == null) { columnNames = viewSubQuery.queryExpression.getResultColumnNames(); } if (columnNames.length != viewSubQuery.queryExpression.getColumnCount()) { throw Error.error(ErrorCode.X_42593, tableName.statementName); } TableUtil.setColumnsInSchemaTable( this, columnNames, queryExpression.getColumnTypes()); } // viewSubqueries = p.compileContext.getSubqueries(); for (int i = 0; i < viewSubqueries.length; i++) { if (viewSubqueries[i].parentView == null) { viewSubqueries[i].parentView = this; } } // viewSubQuery.getTable().view = this; viewSubQuery.getTable().columnList = columnList; schemaObjectNames = p.compileContext.getSchemaObjectNames(); baseTable = queryExpression.getBaseTable(); if (baseTable == null) { return; } switch (check) { case SchemaObject.ViewCheckModes.CHECK_NONE : break; case SchemaObject.ViewCheckModes.CHECK_LOCAL : checkExpression = queryExpression.getCheckCondition(); break; case SchemaObject.ViewCheckModes.CHECK_CASCADE : break; default : throw Error.runtimeError(ErrorCode.U_S0500, "View"); } }
java
public static Pair<InMemoryJarfile, String> loadAndUpgradeCatalogFromJar(byte[] catalogBytes, boolean isXDCR) throws IOException { // Throws IOException on load failure. InMemoryJarfile jarfile = loadInMemoryJarFile(catalogBytes); return loadAndUpgradeCatalogFromJar(jarfile, isXDCR); }
java
// Runs the compiler's version check against the jar, upgrading the catalog
// in place if needed (the jarfile argument may be mutated). Returns the jar
// paired with the version it was upgraded from (null-or-empty semantics
// defined by upgradeCatalogAsNeeded).
public static Pair<InMemoryJarfile, String> loadAndUpgradeCatalogFromJar(InMemoryJarfile jarfile, boolean isXDCR) throws IOException { // Let VoltCompiler do a version check and upgrade the catalog on the fly. // I.e. jarfile may be modified. VoltCompiler compiler = new VoltCompiler(isXDCR); String upgradedFromVersion = compiler.upgradeCatalogAsNeeded(jarfile); return new Pair<>(jarfile, upgradedFromVersion); }
java
/**
 * Extracts the serialized catalog entry from the jar and decodes it as UTF-8.
 */
public static String getSerializedCatalogStringFromJar(InMemoryJarfile jarfile) {
    byte[] catalogBytes = jarfile.get(CatalogUtil.CATALOG_FILENAME);
    return new String(catalogBytes, Constants.UTF8ENCODING);
}
java
public static String[] getBuildInfoFromJar(InMemoryJarfile jarfile) throws IOException { // Read the raw build info bytes. byte[] buildInfoBytes = jarfile.get(CATALOG_BUILDINFO_FILENAME); if (buildInfoBytes == null) { throw new IOException("Catalog build information not found - please build your application using the current version of VoltDB."); } // Convert the bytes to a string and split by lines. String buildInfo; buildInfo = new String(buildInfoBytes, Constants.UTF8ENCODING); String[] buildInfoLines = buildInfo.split("\n"); // Sanity check the number of lines and the version string. if (buildInfoLines.length < 1) { throw new IOException("Catalog build info has no version string."); } String versionFromCatalog = buildInfoLines[0].trim(); if (!CatalogUtil.isCatalogVersionValid(versionFromCatalog)) { throw new IOException(String.format( "Catalog build info version (%s) is bad.", versionFromCatalog)); } // Trim leading/trailing whitespace. for (int i = 0; i < buildInfoLines.length; ++i) { buildInfoLines[i] = buildInfoLines[i].trim(); } return buildInfoLines; }
java
public static String getAutoGenDDLFromJar(InMemoryJarfile jarfile) throws IOException { // Read the raw auto generated ddl bytes. byte[] ddlBytes = jarfile.get(VoltCompiler.AUTOGEN_DDL_FILE_NAME); if (ddlBytes == null) { throw new IOException("Auto generated schema DDL not found - please make sure the database is initialized with valid schema."); } String ddl = new String(ddlBytes, StandardCharsets.UTF_8); return ddl.trim(); }
java
/**
 * Returns a deep copy of the jar with every default catalog artifact removed;
 * the input jar is left untouched.
 */
public static InMemoryJarfile getCatalogJarWithoutDefaultArtifacts(final InMemoryJarfile jarfile) {
    final InMemoryJarfile stripped = jarfile.deepCopy();
    for (String artifact : CATALOG_DEFAULT_ARTIFACTS) {
        stripped.remove(artifact);
    }
    return stripped;
}
java
/**
 * Wraps catalog bytes in an InMemoryJarfile and validates that the catalog
 * entry is present, throwing IOException otherwise.
 */
public static InMemoryJarfile loadInMemoryJarFile(byte[] catalogBytes) throws IOException {
    assert catalogBytes != null;

    final InMemoryJarfile jar = new InMemoryJarfile(catalogBytes);
    if (!jar.containsKey(CATALOG_FILENAME)) {
        throw new IOException("Database catalog not found - please build your application using the current version of VoltDB.");
    }
    return jar;
}
java
public static boolean isSnapshotablePersistentTableView(Database db, Table table) { Table materializer = table.getMaterializer(); if (materializer == null) { // Return false if it is not a materialized view. return false; } if (CatalogUtil.isTableExportOnly(db, materializer)) { // The view source table should not be a streamed table. return false; } if (! table.getIsreplicated() && table.getPartitioncolumn() == null) { // If the view table is implicitly partitioned (maybe was not in snapshot), // its maintenance is not turned off during the snapshot restore process. // Let it take care of its own data by itself. // Do not attempt to restore data for it. return false; } return true; }
java
public static boolean isSnapshotableStreamedTableView(Database db, Table table) { Table materializer = table.getMaterializer(); if (materializer == null) { // Return false if it is not a materialized view. return false; } if (! CatalogUtil.isTableExportOnly(db, materializer)) { // Test if the view source table is a streamed table. return false; } // Non-partitioned export table are not allowed so it should not get here. Column sourcePartitionColumn = materializer.getPartitioncolumn(); if (sourcePartitionColumn == null) { return false; } // Make sure the partition column is present in the view. // Export table views are special, we use column names to match.. Column pc = table.getColumns().get(sourcePartitionColumn.getName()); if (pc == null) { return false; } return true; }
java
/**
 * Packs the relative indexes of the fragment's grandparent (bits 32+),
 * parent (bits 16+), and the fragment itself (low bits) into one long id.
 */
public static long getUniqueIdForFragment(PlanFragment frag) {
    CatalogType parent = frag.getParent();

    long uniqueId = ((long) parent.getParent().getRelativeIndex()) << 32;
    uniqueId += ((long) parent.getRelativeIndex()) << 16;
    uniqueId += frag.getRelativeIndex();

    return uniqueId;
}
java
// Returns the catalog items sorted by the named field, using a TreeMap keyed
// by the field value. Asserts (once, on the first item) that the field exists.
// NOTE(review): TreeMap.put replaces on equal keys, so items sharing a
// sort-field value would be silently dropped - presumably the sort fields
// used here are unique per map; confirm with callers.
public static <T extends CatalogType> List<T> getSortedCatalogItems(CatalogMap<T> items, String sortFieldName) { assert(items != null); assert(sortFieldName != null); // build a treemap based on the field value TreeMap<Object, T> map = new TreeMap<>(); boolean hasField = false; for (T item : items) { // check the first time through for the field if (hasField == false) { hasField = ArrayUtils.contains(item.getFields(), sortFieldName); } assert(hasField == true); map.put(item.getField(sortFieldName), item); } // create a sorted list from the map ArrayList<T> retval = new ArrayList<>(); for (T item : map.values()) { retval.add(item); } return retval; }
java
/**
 * Variant of the returning overload that appends the sorted items to the
 * caller-supplied list instead.
 */
public static <T extends CatalogType> void getSortedCatalogItems(CatalogMap<T> items, String sortFieldName, List<T> result) {
    List<T> sorted = getSortedCatalogItems(items, sortFieldName);
    result.addAll(sorted);
}
java
public static Index getPrimaryKeyIndex(Table catalogTable) throws Exception { // We first need to find the pkey constraint Constraint catalog_constraint = null; for (Constraint c : catalogTable.getConstraints()) { if (c.getType() == ConstraintType.PRIMARY_KEY.getValue()) { catalog_constraint = c; break; } } if (catalog_constraint == null) { throw new Exception("ERROR: Table '" + catalogTable.getTypeName() + "' does not have a PRIMARY KEY constraint"); } // And then grab the index that it is using return (catalog_constraint.getIndex()); }
java
public static Collection<Column> getPrimaryKeyColumns(Table catalogTable) { Collection<Column> columns = new ArrayList<>(); Index catalog_idx = null; try { catalog_idx = CatalogUtil.getPrimaryKeyIndex(catalogTable); } catch (Exception ex) { // IGNORE return (columns); } assert(catalog_idx != null); for (ColumnRef catalog_col_ref : getSortedCatalogItems(catalog_idx.getColumns(), "index")) { columns.add(catalog_col_ref.getColumn()); } return (columns); }
java
public static boolean isTableExportOnly(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) { int type = table.getTabletype(); if (TableType.isInvalidType(type)) { // This implementation uses connectors instead of just looking at the tableType // because snapshots or catalogs from pre-9.0 versions (DR) will not have this new tableType field. for (Connector connector : database.getConnectors()) { // iterate the connector tableinfo list looking for tableIndex // tableInfo has a reference to a table - can compare the reference // to the desired table by looking at the relative index. ick. for (ConnectorTableInfo tableInfo : connector.getTableinfo()) { if (tableInfo.getTable().getRelativeIndex() == table.getRelativeIndex()) { return true; } } } // Found no connectors return false; } else { return TableType.isStream(type); } }
java
/**
 * True when at least one view in the database names this table as its
 * materializer (i.e. the table is a view source).
 */
public static boolean isTableMaterializeViewSource(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) {
    for (Table candidate : database.getTables()) {
        Table source = candidate.getMaterializer();
        if (source != null && source.getRelativeIndex() == table.getRelativeIndex()) {
            return true;
        }
    }
    return false;
}
java
/**
 * Collect every view whose materializer is the given table.
 */
public static List<Table> getMaterializeViews(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) {
    ArrayList<Table> views = new ArrayList<>();
    for (Table candidate : database.getTables()) {
        Table source = candidate.getMaterializer();
        if (source != null && source.getRelativeIndex() == table.getRelativeIndex()) {
            views.add(candidate);
        }
    }
    return views;
}
java
public static boolean isCatalogCompatible(String catalogVersionStr) { if (catalogVersionStr == null || catalogVersionStr.isEmpty()) { return false; } //Check that it is a properly formed verstion string Object[] catalogVersion = MiscUtils.parseVersionString(catalogVersionStr); if (catalogVersion == null) { throw new IllegalArgumentException("Invalid version string " + catalogVersionStr); } if (!catalogVersionStr.equals(VoltDB.instance().getVersionString())) { return false; } return true; }
java
public static boolean isCatalogVersionValid(String catalogVersionStr) { // Do we have a version string? if (catalogVersionStr == null || catalogVersionStr.isEmpty()) { return false; } //Check that it is a properly formed version string Object[] catalogVersion = MiscUtils.parseVersionString(catalogVersionStr); if (catalogVersion == null) { return false; } // It's valid. return true; }
java
/**
 * Apply a deployment configuration to the catalog in a fixed sequence of
 * steps. Returns null on success, or an error-message string on any failure
 * (the caller decides whether to crash).
 *
 * @param isPlaceHolderCatalog true when building the dummy startup catalog;
 *        user/export/import/SNMP steps are skipped in that case.
 */
public static String compileDeployment(Catalog catalog, DeploymentType deployment, boolean isPlaceHolderCatalog) {
    String errmsg = null;
    try {
        validateDeployment(catalog, deployment);
        // add our hacky Deployment to the catalog
        if (catalog.getClusters().get("cluster").getDeployment().get("deployment") == null) {
            catalog.getClusters().get("cluster").getDeployment().add("deployment");
        }
        // set the cluster info
        setClusterInfo(catalog, deployment);
        // Set the snapshot schedule
        setSnapshotInfo(catalog, deployment.getSnapshot());
        // Set enable security
        setSecurityEnabled(catalog, deployment.getSecurity());
        // set the users info
        // We'll skip this when building the dummy catalog on startup
        // so that we don't spew misleading user/role warnings
        if (!isPlaceHolderCatalog) {
            setUsersInfo(catalog, deployment.getUsers());
        }
        // set the HTTPD info
        setHTTPDInfo(catalog, deployment.getHttpd(), deployment.getSsl());
        setDrInfo(catalog, deployment.getDr(), deployment.getCluster(), isPlaceHolderCatalog);
        if (!isPlaceHolderCatalog) {
            setExportInfo(catalog, deployment.getExport());
            setImportInfo(catalog, deployment.getImport());
            setSnmpInfo(deployment.getSnmp());
        }
        setCommandLogInfo( catalog, deployment.getCommandlog());
        // This is here so we can update our local list of paths.
        // It would not be needed if validateResourceMonitorInfo didn't live here.
        VoltDB.instance().loadLegacyPathProperties(deployment);
        setupPaths(deployment.getPaths());
        validateResourceMonitorInfo(deployment);
    } catch (Exception e) {
        // Anything that goes wrong anywhere in trying to handle the deployment file
        // should return an error, and let the caller decide what to do (crash or not,
        // for example).
        errmsg = "Error validating deployment configuration: " + e.getMessage();
        hostLog.error(errmsg);
        return errmsg;
    }
    return null;
}
java
public static DeploymentType parseDeployment(String deploymentURL) { // get the URL/path for the deployment and prep an InputStream InputStream deployIS = null; try { URL deployURL = new URL(deploymentURL); deployIS = deployURL.openStream(); } catch (MalformedURLException ex) { // Invalid URL. Try as a file. try { deployIS = new FileInputStream(deploymentURL); } catch (FileNotFoundException e) { deployIS = null; } } catch (IOException ioex) { deployIS = null; } // make sure the file exists if (deployIS == null) { hostLog.error("Could not locate deployment info at given URL: " + deploymentURL); return null; } else { hostLog.info("URL of deployment info: " + deploymentURL); } return getDeployment(deployIS); }
java
public static DeploymentType parseDeploymentFromString(String deploymentString) { ByteArrayInputStream byteIS; byteIS = new ByteArrayInputStream(deploymentString.getBytes(Constants.UTF8ENCODING)); // get deployment info from xml file return getDeployment(byteIS); }
java
/**
 * Serialize a DeploymentType back to its XML text form, validating against
 * the deployment schema. Returns null on file-not-found or schema-validation
 * problems (logged); other JAXB failures are rethrown as RuntimeException.
 *
 * @param indent whether to pretty-print the XML output.
 */
public static String getDeployment(DeploymentType deployment, boolean indent) throws IOException {
    try {
        // The JAXB context and schema are initialized elsewhere; bail out if
        // either is missing.
        if (m_jc == null || m_schema == null) {
            throw new RuntimeException("Error schema validation.");
        }
        Marshaller marshaller = m_jc.createMarshaller();
        marshaller.setSchema(m_schema);
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.valueOf(indent));
        StringWriter sw = new StringWriter();
        marshaller.marshal(new JAXBElement<>(new QName("","deployment"), DeploymentType.class, deployment), sw);
        return sw.toString();
    } catch (JAXBException e) {
        // Convert some linked exceptions to more friendly errors.
        if (e.getLinkedException() instanceof java.io.FileNotFoundException) {
            hostLog.error(e.getLinkedException().getMessage());
            return null;
        } else if (e.getLinkedException() instanceof org.xml.sax.SAXParseException) {
            hostLog.error("Error schema validating deployment.xml file. " + e.getLinkedException().getMessage());
            return null;
        } else {
            throw new RuntimeException(e);
        }
    }
}
java
/**
 * Reject a deployment that enables security without at least one user in the
 * built-in ADMINISTRATOR role.
 */
private static void validateDeployment(Catalog catalog, DeploymentType deployment) {
    // Nothing to validate unless security is switched on.
    if (deployment.getSecurity() == null || !deployment.getSecurity().isEnabled()) {
        return;
    }
    String msg = "Cannot enable security without defining at least one user in the built-in ADMINISTRATOR role in the deployment file.";
    if (deployment.getUsers() == null) {
        throw new RuntimeException(msg);
    }
    boolean foundAdminUser = false;
    for (UsersType.User user : deployment.getUsers().getUser()) {
        if (user.getRoles() == null) {
            continue;
        }
        for (String role : extractUserRoles(user)) {
            if (role.equalsIgnoreCase(ADMIN)) {
                foundAdminUser = true;
                break;
            }
        }
    }
    if (!foundAdminUser) {
        throw new RuntimeException(msg);
    }
}
java
private static void setClusterInfo(Catalog catalog, DeploymentType deployment) { ClusterType cluster = deployment.getCluster(); int kFactor = cluster.getKfactor(); Cluster catCluster = catalog.getClusters().get("cluster"); // copy the deployment info that is currently not recorded anywhere else Deployment catDeploy = catCluster.getDeployment().get("deployment"); catDeploy.setKfactor(kFactor); if (deployment.getPartitionDetection().isEnabled()) { catCluster.setNetworkpartition(true); } else { catCluster.setNetworkpartition(false); } setSystemSettings(deployment, catDeploy); catCluster.setHeartbeattimeout(deployment.getHeartbeat().getTimeout()); // copy schema modification behavior from xml to catalog if (cluster.getSchema() != null) { catCluster.setUseddlschema(cluster.getSchema() == SchemaType.DDL); } else { // Don't think we can get here, deployment schema guarantees a default value hostLog.warn("Schema modification setting not found. " + "Forcing default behavior of UpdateCatalog to modify database schema."); catCluster.setUseddlschema(false); } }
java
/**
 * Process the deployment's import section: build a processor configuration
 * for each enabled connector and cross-validate the Kafka ones.
 */
private static void setImportInfo(Catalog catalog, ImportType importType) {
    if (importType == null) {
        return;
    }
    List<String> moduleList = new ArrayList<>();
    List<ImportConfigurationType> kafkaConfigs = new ArrayList<>();
    for (ImportConfigurationType config : importType.getConfiguration()) {
        // Disabled connectors are ignored entirely.
        if (!config.isEnabled()) {
            continue;
        }
        if (config.getType().equals(ServerImportEnum.KAFKA)) {
            kafkaConfigs.add(config);
        }
        // Track distinct importer modules (deduplicated).
        if (!moduleList.contains(config.getModule())) {
            moduleList.add(config.getModule());
        }
        buildImportProcessorConfiguration(config, true);
    }
    // Kafka importers need extra cross-configuration validation.
    validateKafkaConfig(kafkaConfigs);
}
java
private static void validateKafkaConfig(List<ImportConfigurationType> configs) { if (configs.isEmpty()) { return; } // We associate each group id with the set of topics that belong to it HashMap<String, HashSet<String>> groupidToTopics = new HashMap<>(); for (ImportConfigurationType config : configs) { String groupid = ""; HashSet<String> topics = new HashSet<>(); // Fetch topics and group id from each configuration for (PropertyType pt : config.getProperty()) { if (pt.getName().equals("topics")) { topics.addAll(Arrays.asList(pt.getValue().split("\\s*,\\s*"))); } else if (pt.getName().equals("groupid")) { groupid = pt.getValue(); } } if (groupidToTopics.containsKey(groupid)) { // Under this group id, we first union the set of already-stored topics with the set of newly-seen topics. HashSet<String> union = new HashSet<>(groupidToTopics.get(groupid)); union.addAll(topics); if (union.size() == (topics.size() + groupidToTopics.get(groupid).size())) { groupidToTopics.put(groupid, union); } else { // If the size of the union doesn't equal to the sum of sizes of newly-seen topic set and // already-stored topic set, those two sets must overlap with each other, which means that // there must be two configurations having the same group id and overlapping sets of topics. // Thus, we throw the RuntimeException. throw new RuntimeException("Invalid import configuration. Two Kafka entries have the same groupid and topic."); } } else { groupidToTopics.put(groupid, topics); } } }
java
private static void setSnmpInfo(SnmpType snmpType) { if (snmpType == null || !snmpType.isEnabled()) { return; } //Validate Snmp Configuration. if (snmpType.getTarget() == null || snmpType.getTarget().trim().length() == 0) { throw new IllegalArgumentException("Target must be specified for SNMP configuration."); } if (snmpType.getAuthkey() != null && snmpType.getAuthkey().length() < 8) { throw new IllegalArgumentException("SNMP Authkey must be > 8 characters."); } if (snmpType.getPrivacykey() != null && snmpType.getPrivacykey().length() < 8) { throw new IllegalArgumentException("SNMP Privacy Key must be > 8 characters."); } }
java
/**
 * Pull Kafka-10 importer configurations out of the processor map, merge the
 * ones that share a broker list (or zookeeper) and group id, and put the
 * merged configurations back. Other importer configurations are untouched.
 *
 * @throws NullPointerException (via Preconditions) when a configuration has
 *         no import module defined.
 */
private static void mergeKafka10ImportConfigurations(Map<String, ImportConfiguration> processorConfig) {
    if (processorConfig.isEmpty()) {
        return;
    }
    // Pass 1: extract kafka-10 configurations, removing them from the map.
    Map<String, ImportConfiguration> kafka10ProcessorConfigs = new HashMap<>();
    Iterator<Map.Entry<String, ImportConfiguration>> iter = processorConfig.entrySet().iterator();
    while (iter.hasNext()) {
        String configName = iter.next().getKey();
        ImportConfiguration importConfig = processorConfig.get(configName);
        Properties properties = importConfig.getmoduleProperties();
        String importBundleJar = properties.getProperty(ImportDataProcessor.IMPORT_MODULE);
        Preconditions.checkNotNull(importBundleJar, "Import source is undefined or custom import plugin class missing.");
        // handle special cases for kafka 10 and maybe later versions:
        // the bundle jar name encodes the version, e.g. ...kafkastream10.jar
        String[] bundleJar = importBundleJar.split("kafkastream");
        if (bundleJar.length > 1) {
            String version = bundleJar[1].substring(0, bundleJar[1].indexOf(".jar"));
            if (!version.isEmpty()) {
                // NOTE(review): assumes the version segment is numeric;
                // a non-numeric value would throw NumberFormatException here.
                int versionNumber = Integer.parseInt(version);
                if (versionNumber == 10) {
                    kafka10ProcessorConfigs.put(configName, importConfig);
                    iter.remove();
                }
            }
        }
    }
    if (kafka10ProcessorConfigs.isEmpty()) {
        return;
    }
    // Pass 2: organize the kafka10 importers by broker list and group id.
    // All importers must be configured by either broker list or zookeeper in
    // the same group, otherwise they cannot be correctly merged.
    Map<String, ImportConfiguration> mergedConfigs = new HashMap<>();
    iter = kafka10ProcessorConfigs.entrySet().iterator();
    while (iter.hasNext()) {
        ImportConfiguration importConfig = iter.next().getValue();
        Properties props = importConfig.getmoduleProperties();
        String brokers = props.getProperty("brokers");
        String groupid = props.getProperty("groupid", "voltdb");
        if (brokers == null) {
            brokers = props.getProperty("zookeeper");
        }
        // Merge key: broker (or zookeeper) address plus consumer group id.
        String brokersGroup = brokers + "_" + groupid;
        ImportConfiguration config = mergedConfigs.get(brokersGroup);
        if (config == null) {
            mergedConfigs.put(brokersGroup, importConfig);
        } else {
            config.mergeProperties(props);
        }
    }
    processorConfig.putAll(mergedConfigs);
}
java
/**
 * Push the security-enabled flag and the authentication provider from the
 * deployment into the catalog.
 */
private static void setSecurityEnabled( Catalog catalog, SecurityType security) {
    Cluster cluster = catalog.getClusters().get("cluster");
    cluster.setSecurityenabled(security.isEnabled());
    Database database = cluster.getDatabases().get("database");
    database.setSecurityprovider(security.getProvider().value());
}
java
/**
 * Copy the automatic snapshot schedule from the deployment into the catalog,
 * sanitizing bad frequency/prefix/retain values (each fall-back is logged).
 */
private static void setSnapshotInfo(Catalog catalog, SnapshotType snapshotSettings) {
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    // Create the "default" schedule entry on first use.
    SnapshotSchedule schedule = db.getSnapshotschedule().get("default");
    if (schedule == null) {
        schedule = db.getSnapshotschedule().add("default");
    }
    schedule.setEnabled(snapshotSettings.isEnabled());
    // Frequency must end with a unit suffix: s(econds), m(inutes), h(ours).
    String frequency = snapshotSettings.getFrequency();
    if (!frequency.endsWith("s") && !frequency.endsWith("m") && !frequency.endsWith("h")) {
        hostLog.error(
                "Snapshot frequency " + frequency
                + " needs to end with time unit specified"
                + " that is one of [s, m, h] (seconds, minutes, hours)"
                + " Defaulting snapshot frequency to 10m.");
        frequency = "10m";
    }
    int frequencyInt = 0;
    // Numeric part of the frequency: everything before the unit suffix.
    String frequencySubstring = frequency.substring(0, frequency.length() - 1);
    try {
        frequencyInt = Integer.parseInt(frequencySubstring);
    } catch (Exception e) {
        hostLog.error("Frequency " + frequencySubstring + " is not an integer. Defaulting frequency to 10m.");
        frequency = "10m";
        frequencyInt = 10;
    }
    // Prefix must be non-empty and may not contain '-' or ','.
    String prefix = snapshotSettings.getPrefix();
    if (prefix == null || prefix.isEmpty()) {
        hostLog.error("Snapshot prefix " + prefix + " is not a valid prefix. Using prefix of 'SNAPSHOTNONCE' ");
        prefix = "SNAPSHOTNONCE";
    }
    if (prefix.contains("-") || prefix.contains(",")) {
        String oldprefix = prefix;
        prefix = prefix.replaceAll("-", "_");
        prefix = prefix.replaceAll(",", "_");
        hostLog.error("Snapshot prefix " + oldprefix + " cannot include , or -." + " Using the prefix: " + prefix + " instead.");
    }
    // Keep at least one snapshot.
    int retain = snapshotSettings.getRetain();
    if (retain < 1) {
        hostLog.error("Snapshot retain value " + retain + " is not a valid value. Must be 1 or greater." + " Defaulting snapshot retain to 1.");
        retain = 1;
    }
    schedule.setFrequencyunit(
            frequency.substring(frequency.length() - 1, frequency.length()));
    schedule.setFrequencyvalue(frequencyInt);
    schedule.setPrefix(prefix);
    schedule.setRetain(retain);
}
java
private static void setupPaths( PathsType paths) { File voltDbRoot; // Handles default voltdbroot (and completely missing "paths" element). voltDbRoot = getVoltDbRoot(paths); //Snapshot setupSnapshotPaths(paths.getSnapshots(), voltDbRoot); //export overflow setupExportOverflow(paths.getExportoverflow(), voltDbRoot); // only use these directories in the enterprise version setupCommandLog(paths.getCommandlog(), voltDbRoot); setupCommandLogSnapshot(paths.getCommandlogsnapshot(), voltDbRoot); setupDROverflow(paths.getDroverflow(), voltDbRoot); setupLargeQuerySwap(paths.getLargequeryswap(), voltDbRoot); }
java
/**
 * Resolve the voltdbroot directory (falling back to the default when none is
 * configured), creating and validating it as needed.
 *
 * The original duplicated the create-if-missing logic in both branches; it
 * is now shared. NOTE(review): getVoltDBRootPath is now called once instead
 * of twice — assumed idempotent, which the old double-call relied on anyway.
 */
public static File getVoltDbRoot(PathsType paths) {
    // Determine the configured root path, if any.
    String configuredRoot = null;
    if (paths != null && paths.getVoltdbroot() != null) {
        configuredRoot = VoltDB.instance().getVoltDBRootPath(paths.getVoltdbroot());
    }
    File voltDbRoot = (configuredRoot == null)
            ? new VoltFile(VoltDB.DBROOT)
            : new VoltFile(configuredRoot);
    // Create the directory when missing; failure is fatal-logged either way.
    if (!voltDbRoot.exists()) {
        hostLog.info("Creating voltdbroot directory: " + voltDbRoot.getAbsolutePath());
        if (!voltDbRoot.mkdirs()) {
            hostLog.fatal("Failed to create voltdbroot directory \"" + voltDbRoot.getAbsolutePath() + "\"");
        }
    }
    validateDirectory("volt root", voltDbRoot);
    return voltDbRoot;
}
java
/**
 * Copy user definitions from the deployment into the catalog, hashing each
 * password (SHA-1 and SHA-256 variants, bcrypt-wrapped) and linking users to
 * their roles.
 *
 * @throws RuntimeException when a masked password is malformed.
 */
private static void setUsersInfo(Catalog catalog, UsersType users) throws RuntimeException {
    if (users == null) {
        return;
    }
    // The database name is not available in deployment.xml (it is defined
    // in project.xml). However, it must always be named "database", so
    // it is temporarily hardcoded here until a more robust solution is
    // available.
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    SecureRandom sr = new SecureRandom();
    for (UsersType.User user : users.getUser()) {
        Set<String> roles = extractUserRoles(user);
        String sha1hex = user.getPassword();
        String sha256hex = user.getPassword();
        if (user.isPlaintext()) {
            // Plaintext password: derive both digests ourselves.
            sha1hex = extractPassword(user.getPassword(), ClientAuthScheme.HASH_SHA1);
            sha256hex = extractPassword(user.getPassword(), ClientAuthScheme.HASH_SHA256);
        } else if (user.getPassword().length() == 104) {
            // Masked password: SHA-1 hex (40 chars) concatenated with
            // SHA-256 hex (64 chars) = 104 characters total.
            int sha1len = ClientAuthScheme.getHexencodedDigestLength(ClientAuthScheme.HASH_SHA1);
            sha1hex = sha1hex.substring(0, sha1len);
            sha256hex = sha256hex.substring(sha1len);
        } else {
            // if one user has an invalid password, give a warning...
            hostLog.warn("User \"" + user.getName() + "\" has invalid masked password in deployment file.");
            // ...then throw to disable the user with the invalid masked password
            throw new RuntimeException("User \"" + user.getName() + "\" has invalid masked password in deployment file");
        }
        org.voltdb.catalog.User catUser = db.getUsers().get(user.getName());
        if (catUser == null) {
            catUser = db.getUsers().add(user.getName());
        }
        // generate salt only once for sha1 and sha256
        String saltGen = BCrypt.gensalt(BCrypt.GENSALT_DEFAULT_LOG2_ROUNDS,sr);
        String hashedPW = BCrypt.hashpw( sha1hex, saltGen);
        String hashedPW256 = BCrypt.hashpw( sha256hex, saltGen);
        catUser.setShadowpassword(hashedPW);
        catUser.setSha256shadowpassword(hashedPW256);
        // use fixed seed for comparison
        catUser.setPassword( BCrypt.hashpw(sha256hex, "$2a$10$pWO/a/OQkFyQWQDpchZdEe"));
        // process the @groups and @roles comma separated list
        for (final String role : roles) {
            final Group catalogGroup = db.getGroups().get(role);
            // if the role doesn't exist, ignore it (with a warning).
            if (catalogGroup != null) {
                GroupRef groupRef = catUser.getGroups().get(role);
                if (groupRef == null) {
                    groupRef = catUser.getGroups().add(role);
                }
                groupRef.setGroup(catalogGroup);
            } else {
                hostLog.warn("User \"" + user.getName() + "\" is assigned to non-existent role \"" + role + "\" " + "and may not have the expected database permissions.");
            }
        }
    }
}
java
/**
 * Return the user's roles as a normalized set: trimmed, lower-cased,
 * deduplicated and sorted. Empty for a null user or no roles.
 */
private static Set<String> extractUserRoles(final UsersType.User user) {
    Set<String> roles = new TreeSet<>();
    if (user == null || user.getRoles() == null || user.getRoles().trim().isEmpty()) {
        return roles;
    }
    for (String role : user.getRoles().trim().split(",")) {
        // Skip blank entries produced by stray commas.
        if (role == null || role.trim().isEmpty()) {
            continue;
        }
        roles.add(role.trim().toLowerCase());
    }
    return roles;
}
java
public static byte[] makeDeploymentHash(byte[] inbytes) { MessageDigest md = null; try { md = MessageDigest.getInstance("SHA-1"); } catch (NoSuchAlgorithmException e) { VoltDB.crashLocalVoltDB("Bad JVM has no SHA-1 hash.", true, e); } md.update(inbytes); byte[] hash = md.digest(); assert(hash.length == 20); // sha-1 length return hash; }
java
/**
 * Rebuild a catalog from the jar and gather snapshotable table names for
 * both the replicated and partitioned table groups. Returns (all names,
 * names whose presence in a snapshot is optional).
 */
public static Pair<Set<String>, Set<String>> getSnapshotableTableNamesFromInMemoryJar(InMemoryJarfile jarfile) {
    Catalog catalog = new Catalog();
    catalog.execute(getSerializedCatalogStringFromJar(jarfile));
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    Set<String> fullTableNames = new HashSet<>();
    Set<String> optionalTableNames = new HashSet<>();
    // One pass for replicated tables, one for partitioned tables.
    for (boolean replicated : new boolean[] { true, false }) {
        Pair<List<Table>, Set<String>> ret = getSnapshotableTables(db, replicated);
        ret.getFirst().forEach(table -> fullTableNames.add(table.getTypeName()));
        optionalTableNames.addAll(ret.getSecond());
    }
    return new Pair<Set<String>, Set<String>>(fullTableNames, optionalTableNames);
}
java
public static Pair<List<Table>, Set<String>> getSnapshotableTables(Database catalog, boolean isReplicated) { List<Table> tables = new ArrayList<>(); Set<String> optionalTableNames = new HashSet<>(); for (Table table : catalog.getTables()) { if (table.getIsreplicated() != isReplicated) { // We handle replicated tables and partitioned tables separately. continue; } if (isTableExportOnly(catalog, table)) { // Streamed tables are not considered as "normal" tables here. continue; } if (table.getMaterializer() != null) { if (isSnapshotablePersistentTableView(catalog, table)) { // Some persistent table views are added to the snapshot starting from // V8.2, they are since then considered as "normal" tables, too. // But their presence in the snapshot is not compulsory for backward // compatibility reasons. optionalTableNames.add(table.getTypeName()); } else if (! isSnapshotableStreamedTableView(catalog, table)) { continue; } } tables.add(table); } return new Pair<List<Table>, Set<String>>(tables, optionalTableNames); }
java
public static List<Table> getNormalTables(Database catalog, boolean isReplicated) { List<Table> tables = new ArrayList<>(); for (Table table : catalog.getTables()) { if ((table.getIsreplicated() == isReplicated) && table.getMaterializer() == null && !CatalogUtil.isTableExportOnly(catalog, table)) { tables.add(table); continue; } //Handle views which are on STREAM only partitioned STREAM allow view and must have partition //column as part of view. if ((table.getMaterializer() != null) && !isReplicated && (CatalogUtil.isTableExportOnly(catalog, table.getMaterializer()))) { //Non partitioned export table are not allowed so it should not get here. Column bpc = table.getMaterializer().getPartitioncolumn(); if (bpc != null) { String bPartName = bpc.getName(); Column pc = table.getColumns().get(bPartName); if (pc != null) { tables.add(table); } } } } return tables; }
java
/**
 * True when the procedure is durable (command-logged). Procedures absent
 * from the system catalog listing are considered durable; known system
 * procedures report their own flag.
 */
public static boolean isDurableProc(String procName) {
    SystemProcedureCatalog.Config sysProc = SystemProcedureCatalog.listing.get(procName);
    if (sysProc == null) {
        return true;
    }
    return sysProc.isDurable();
}
java
/**
 * Compile an empty catalog into a throwaway (delete-on-exit) jar file.
 * Returns null when compilation fails.
 */
public static File createTemporaryEmptyCatalogJarFile(boolean isXDCR) throws IOException {
    File emptyJarFile = File.createTempFile("catalog-empty", ".jar");
    emptyJarFile.deleteOnExit();
    VoltCompiler compiler = new VoltCompiler(isXDCR);
    return compiler.compileEmptyCatalog(emptyJarFile.getAbsolutePath()) ? emptyJarFile : null;
}
java
/**
 * Build a table signature: name, separator, then one signature character per
 * column in column-index order.
 */
public static String getSignatureForTable(String name, SortedMap<Integer, VoltType> schema) {
    StringBuilder signature = new StringBuilder(name).append(SIGNATURE_TABLE_NAME_SEPARATOR);
    for (VoltType columnType : schema.values()) {
        signature.append(columnType.getSignatureChar());
    }
    return signature.toString();
}
java
/**
 * CRC and concatenated signature string over the DR-enabled snapshot tables,
 * iterated in deterministic (sorted) order.
 */
public static Pair<Long, String> calculateDrTableSignatureAndCrc(Database catalog) {
    SortedSet<Table> tables = Sets.newTreeSet();
    tables.addAll(getSnapshotableTables(catalog, true).getFirst());
    tables.addAll(getSnapshotableTables(catalog, false).getFirst());
    final PureJavaCrc32 crc = new PureJavaCrc32();
    final StringBuilder sb = new StringBuilder();
    String delimiter = "";
    for (Table t : tables) {
        // Only DR-enabled tables contribute.
        if (!t.getIsdred()) {
            continue;
        }
        crc.update(t.getSignature().getBytes(Charsets.UTF_8));
        sb.append(delimiter).append(t.getSignature());
        delimiter = SIGNATURE_DELIMITER;
    }
    return Pair.of(crc.getValue(), sb.toString());
}
java
/**
 * Inverse of the signature serializer: split
 * "name&lt;sep&gt;sig&lt;delim&gt;name&lt;sep&gt;sig..." into a name-to-signature map.
 */
public static Map<String, String> deserializeCatalogSignature(String signature) {
    Map<String, String> tableSignatures = Maps.newHashMap();
    for (String entry : signature.split(Pattern.quote(SIGNATURE_DELIMITER))) {
        if (entry.isEmpty()) {
            continue;
        }
        // Split on the first separator only: signatures may contain it.
        String[] parts = entry.split(Pattern.quote(SIGNATURE_TABLE_NAME_SEPARATOR), 2);
        tableSignatures.put(parts[0], parts[1]);
    }
    return tableSignatures;
}
java
/**
 * SQL text of the table's LIMIT PARTITION ROWS delete statement, or null
 * when none is defined. At most one such statement exists per table.
 */
public static String getLimitPartitionRowsDeleteStmt(Table table) {
    CatalogMap<Statement> stmts = table.getTuplelimitdeletestmt();
    if (stmts.isEmpty()) {
        return null;
    }
    assert (stmts.size() == 1);
    return stmts.iterator().next().getSqltext();
}
java
public static ExportType addExportConfigToDRConflictsTable(ExportType export) { if (export == null) { export = new ExportType(); } boolean userDefineStream = false; for (ExportConfigurationType exportConfiguration : export.getConfiguration()) { if (exportConfiguration.getTarget().equals(DR_CONFLICTS_TABLE_EXPORT_GROUP)) { userDefineStream = true; } } if (!userDefineStream) { ExportConfigurationType defaultConfiguration = new ExportConfigurationType(); defaultConfiguration.setEnabled(true); defaultConfiguration.setTarget(DR_CONFLICTS_TABLE_EXPORT_GROUP); defaultConfiguration.setType(ServerExportEnum.FILE); // type PropertyType type = new PropertyType(); type.setName("type"); type.setValue(DEFAULT_DR_CONFLICTS_EXPORT_TYPE); defaultConfiguration.getProperty().add(type); // nonce PropertyType nonce = new PropertyType(); nonce.setName("nonce"); nonce.setValue(DEFAULT_DR_CONFLICTS_NONCE); defaultConfiguration.getProperty().add(nonce); // outdir PropertyType outdir = new PropertyType(); outdir.setName("outdir"); outdir.setValue(DEFAULT_DR_CONFLICTS_DIR); defaultConfiguration.getProperty().add(outdir); // k-safe file export PropertyType ksafe = new PropertyType(); ksafe.setName("replicated"); ksafe.setValue("true"); defaultConfiguration.getProperty().add(ksafe); // skip internal export columns PropertyType skipinternal = new PropertyType(); skipinternal.setName("skipinternals"); skipinternal.setValue("true"); defaultConfiguration.getProperty().add(skipinternal); export.getConfiguration().add(defaultConfiguration); } return export; }
java
/**
 * Print throughput, average latency, a table of latency percentiles, and
 * the full latency histogram for the completed run.
 */
public synchronized void printResults() throws Exception {
    ClientStats stats = fullStatsContext.fetch().getStats();
    System.out.printf("\nA total of %d login requests were received...\n", stats.getInvocationsCompleted());
    System.out.printf("Average throughput: %,9d txns/sec\n", stats.getTxnThroughput());
    System.out.printf("Average latency: %,9.2f ms\n", stats.getAverageLatency());
    // Latency percentiles from the 10th through the 99.9th.
    String[] labels = { "10th", "25th", "50th", "75th", "90th", "95th", "99th", "99.5th", "99.9th" };
    double[] fractions = { .1, .25, .5, .75, .9, .95, .99, .995, .999 };
    for (int i = 0; i < fractions.length; i++) {
        System.out.printf("%s percentile latency: %,9.2f ms\n", labels[i], stats.kPercentileLatencyAsDouble(fractions[i]));
    }
    System.out.println("\n\n" + stats.latencyHistoReport());
}
java
private void doLogin(LoginGenerator.LoginRecord login) { // Synchronously call the "Login" procedure passing in a json string containing // login-specific structure/data. try { ClientResponse response = client.callProcedure("Login", login.username, login.password, login.json); long resultCode = response.getResults()[0].asScalarLong(); if (resultCode == LOGIN_SUCCESSFUL) acceptedLogins.incrementAndGet(); else badLogins.incrementAndGet(); } catch (Exception e) { badLogins.incrementAndGet(); e.printStackTrace(); } }
java
public void loadDatabase() throws Exception { // create/start the requested number of threads int thread_count = 10; Thread[] loginThreads = new Thread[thread_count]; for (int i = 0; i < thread_count; ++i) { loginThreads[i] = new Thread(new LoginThread()); loginThreads[i].start(); } // Initialize the statistics fullStatsContext.fetchAndResetBaseline(); // Run the data loading for 10 seconds System.out.println("\nLoading database..."); Thread.sleep(1000l * 10); // stop the threads loadComplete.set(true); // block until all outstanding txns return client.drain(); // join on the threads for (Thread t : loginThreads) { t.join(); } // print the summary statistics of the data load printResults(); // Create entries that we can query on. createUniqueData(); }
java
public static void main(String[] args) throws Exception { JSONClient app = new JSONClient(); // Initialize connections app.initialize(); // load data, measuring the throughput. app.loadDatabase(); // run sample JSON queries app.runQueries(); // Disconnect app.shutdown(); }
java
@Override public synchronized void reportForeignHostFailed(int hostId) { long initiatorSiteId = CoreUtils.getHSIdFromHostAndSite(hostId, AGREEMENT_SITE_ID); m_agreementSite.reportFault(initiatorSiteId); if (!m_shuttingDown) { // should be the single console message a user sees when another node fails networkLog.warn(String.format("Host %d failed. Cluster remains operational.", hostId)); } }
java
/**
 * Bootstraps the messaging layer. If this node wins the leader election via
 * m_joiner, it starts the network, hosts a single-node agreement site backing
 * the local ZooKeeper, registers itself as host id 0 in ZK, and publishes the
 * cluster instance ID plus its own HostInfo. In all cases it releases
 * zkInitBarrier at the end so joiners can proceed.
 */
public void start() throws Exception {
    /*
     * SocketJoiner uses this barrier if this node becomes the leader to know
     * when ZooKeeper has finished bootstrapping.
     */
    CountDownLatch zkInitBarrier = new CountDownLatch(1);

    /*
     * If start returns true then this node is the leader; it bound to the coordinator address.
     * It needs to bootstrap its agreement site so that other nodes can join.
     */
    if(m_joiner.start(zkInitBarrier)) {
        m_network.start();

        /*
         * m_localHostId is 0 of course.
         */
        long agreementHSId = getHSIdForLocalSite(AGREEMENT_SITE_ID);

        /*
         * A set containing just the leader (this node)
         */
        HashSet<Long> agreementSites = new HashSet<Long>();
        agreementSites.add(agreementHSId);

        /*
         * A basic site mailbox for the agreement site
         */
        SiteMailbox sm = new SiteMailbox(this, agreementHSId);
        createMailbox(agreementHSId, sm);

        /*
         * Construct the site with just this node. The ZK interface string is
         * "host:port"; split it into the address/port the site binds.
         */
        m_agreementSite =
            new AgreementSite(
                    agreementHSId,
                    agreementSites,
                    0,
                    sm,
                    new InetSocketAddress(
                            m_config.zkInterface.split(":")[0],
                            Integer.parseInt(m_config.zkInterface.split(":")[1])),
                    m_config.backwardsTimeForgivenessWindow,
                    m_failedHostsCallback);
        m_agreementSite.start();
        m_agreementSite.waitForRecovery();
        // 60-second connect timeout to the local ZooKeeper instance.
        m_zk = org.voltcore.zk.ZKUtil.getClient(
                m_config.zkInterface, 60 * 1000, VERBOTEN_THREADS);
        if (m_zk == null) {
            throw new Exception("Timed out trying to connect local ZooKeeper instance");
        }

        CoreZK.createHierarchy(m_zk);

        /*
         * This creates the ephemeral sequential node with host id 0 which
         * this node already used for itself. Just recording that fact.
         */
        final int selectedHostId = selectNewHostId(m_config.coordinatorIp.toString());
        if (selectedHostId != 0) {
            // Leader must always be host 0; anything else is a fatal inconsistency.
            org.voltdb.VoltDB.crashLocalVoltDB("Selected host id for coordinator was not 0, " + selectedHostId, false, null);
        }

        /*
         * Seed the leader host criteria, as the leader is always host id 0.
         */
        m_acceptor.accrue(selectedHostId, m_acceptor.decorate(new JSONObject(), Optional.empty()));

        // Store the components of the instance ID (coordinator IP packed as an
        // int, plus creation timestamp) in ZK.
        JSONObject instance_id = new JSONObject();
        instance_id.put("coord",
                ByteBuffer.wrap(m_config.coordinatorIp.getAddress().getAddress()).getInt());
        instance_id.put("timestamp", System.currentTimeMillis());
        hostLog.debug("Cluster will have instance ID:\n" + instance_id.toString(4));
        byte[] payload = instance_id.toString(4).getBytes("UTF-8");
        m_zk.create(CoreZK.instance_id, payload, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);

        /*
         * Store all the hosts and host ids here so that waitForGroupJoin
         * knows the size of the mesh. This part only registers this host.
         */
        final HostInfo hostInfo = new HostInfo(m_config.coordinatorIp.toString(),
                m_config.group, m_config.localSitesCount, m_config.recoveredPartitions);
        m_zk.create(CoreZK.hosts_host + selectedHostId, hostInfo.toBytes(),
                Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }
    // Always release the barrier, leader or not, so start() callers unblock.
    zkInitBarrier.countDown();
}
java
public InstanceId getInstanceId() {
    // Lazily fetch the instance ID from ZooKeeper and cache it.
    if (m_instanceId != null) {
        return m_instanceId;
    }
    try {
        byte[] data = m_zk.getData(CoreZK.instance_id, false, null);
        JSONObject json = new JSONObject(new String(data, "UTF-8"));
        m_instanceId = new InstanceId(json.getInt("coord"), json.getLong("timestamp"));
    } catch (Exception e) {
        String msg = "Unable to get instance ID info from " + CoreZK.instance_id;
        hostLog.error(msg);
        throw new RuntimeException(msg, e);
    }
    return m_instanceId;
}
java
/**
 * Handles a join request from a new node: allocates it a host id, advertises
 * the current topology, waits (with a timeout) for the joiner to confirm it
 * meshed with every advertised node, wires it into the mailbox system, and
 * finally waits for it to join the agreement site / ZooKeeper. On any error
 * the partially-joined host is torn down and its id marked failed.
 */
@Override
public void requestJoin(SocketChannel socket, SSLEngine sslEngine,
                        MessagingChannel messagingChannel, InetSocketAddress listeningAddress,
                        JSONObject jo) throws Exception {
    /*
     * Generate the host id via creating an ephemeral sequential node
     */
    Integer hostId = selectNewHostId(socket.socket().getInetAddress().getHostAddress());
    prepSocketChannel(socket);
    ForeignHost fhost = null;
    try {
        try {
            JoinAcceptor.PleaDecision decision = m_acceptor.considerMeshPlea(m_zk, hostId, jo);

            /*
             * Write the response that advertises the cluster topology
             */
            writeRequestJoinResponse(hostId, decision, socket, messagingChannel);
            if (!decision.accepted) {
                // Plea rejected: close the connection and abandon the join.
                socket.close();
                return;
            }

            /*
             * Wait for a response from the joining node saying that it connected
             * to all the nodes we just advertised. Use a timeout (120s) so that
             * the cluster can't be stuck on failed joins.
             */
            ByteBuffer finishedJoining = ByteBuffer.allocate(1);
            socket.configureBlocking(false);
            long start = System.currentTimeMillis();
            while (finishedJoining.hasRemaining() && System.currentTimeMillis() - start < 120000) {
                // This is just one byte to indicate that it finished joining.
                // No need to encrypt because the value of it doesn't matter
                int read = socket.read(finishedJoining);
                if (read == -1) {
                    // Peer closed the socket before meshing completed.
                    networkLog.info("New connection was unable to establish mesh");
                    socket.close();
                    return;
                } else if (read < 1) {
                    // Nothing available yet on the non-blocking socket; poll.
                    Thread.sleep(5);
                }
            }

            /*
             * Now add the host to the mailbox system
             */
            PicoNetwork picoNetwork = createPicoNetwork(sslEngine, socket, false);
            fhost = new ForeignHost(this, hostId, socket, m_config.deadHostTimeout,
                    listeningAddress, picoNetwork);
            putForeignHost(hostId, fhost);
            fhost.enableRead(VERBOTEN_THREADS);

            m_acceptor.accrue(hostId, jo);
        } catch (Exception e) {
            // Undo the partial join: record the failure, drop the foreign host
            // entry and the acceptor's knowledge of it, then drop the socket.
            networkLog.error("Error joining new node", e);
            addFailedHost(hostId);
            synchronized(HostMessenger.this) {
                removeForeignHost(hostId);
            }
            m_acceptor.detract(m_zk, hostId);
            socket.close();
            return;
        }

        /*
         * And the last step is to wait for the new node to join ZooKeeper.
         * This node is the one to create the txn that will add the new host to the list of hosts
         * with agreement sites across the cluster.
         */
        long hsId = CoreUtils.getHSIdFromHostAndSite(hostId, AGREEMENT_SITE_ID);
        if (!m_agreementSite.requestJoin(hsId).await(60, TimeUnit.SECONDS)) {
            // Joiner never reached the agreement site within 60s; declare it failed.
            reportForeignHostFailed(hostId);
        }
    } catch (Throwable e) {
        org.voltdb.VoltDB.crashLocalVoltDB("", true, e);
    }
}
java
@Override public void notifyOfConnection( int hostId, SocketChannel socket, SSLEngine sslEngine, InetSocketAddress listeningAddress) throws Exception { networkLog.info("Host " + getHostId() + " receives a new connection from host " + hostId); prepSocketChannel(socket); // Auxiliary connection never time out ForeignHost fhost = new ForeignHost(this, hostId, socket, Integer.MAX_VALUE, listeningAddress, createPicoNetwork(sslEngine, socket, true)); putForeignHost(hostId, fhost); fhost.enableRead(VERBOTEN_THREADS); // Do all peers have enough secondary connections? for (int hId : m_peers) { if (m_foreignHosts.get(hId).size() != (m_secondaryConnections + 1)) { return; } } // Now it's time to use secondary pico network, see comments in presend() to know why we can't // do this earlier. m_hasAllSecondaryConnectionCreated = true; }
java
public Map<Integer, HostInfo> waitForGroupJoin(int expectedHosts) {
    Map<Integer, HostInfo> hostInfos = Maps.newTreeMap();
    try {
        while (true) {
            ZKUtil.FutureWatcher watcher = new ZKUtil.FutureWatcher();
            List<String> children = m_zk.getChildren(CoreZK.hosts, watcher);
            for (String child : children) {
                byte[] raw = m_zk.getData(ZKUtil.joinZKPath(CoreZK.hosts, child), false, null);
                hostInfos.put(parseHostId(child), HostInfo.fromBytes(raw));
            }
            int count = children.size();

            // The mesh is complete once the expected number of hosts registered.
            if (count == expectedHosts) {
                break;
            }

            /*
             * Extra registrations mean too many Volt procs were started.
             * Kill this node based on the assumption that we are the extra one.
             * In most cases this is correct and fine, and in the worst case the
             * cluster will hang coming up because two or more hosts killed themselves.
             */
            if (count > expectedHosts) {
                org.voltdb.VoltDB.crashLocalVoltDB("Expected to find " + expectedHosts +
                        " hosts in cluster at startup but found " + count +
                        ". Terminating this host.", false, null);
            }

            // Block until the set of registered hosts changes, then re-check.
            watcher.get();
        }
    } catch (Exception e) {
        org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e);
    }
    assert hostInfos.size() == expectedHosts;
    return hostInfos;
}
java
/**
 * Resolves a host id to a hostname: the local host's own name, the hostname
 * reported by any live ForeignHost connection, the recorded name of a known
 * failed host, or "UNKNOWN".
 */
@Override
public String getHostnameForHostID(int hostId) {
    if (hostId == m_localHostId) {
        return CoreUtils.getHostnameOrAddress();
    }
    // Any live connection to the host knows its hostname.
    Iterator<ForeignHost> it = m_foreignHosts.get(hostId).iterator();
    if (it.hasNext()) {
        return it.next().hostname();
    }
    // Read the failed-hosts map exactly once. The original code called get()
    // twice; a concurrent removal between the calls could make this method
    // return null instead of the hostname or "UNKNOWN".
    String failedHostname = m_knownFailedHosts.get(hostId);
    return failedHostname != null ? failedHostname : "UNKNOWN";
}
java
public void removeMailbox(long hsId) {
    // Replace the immutable mailbox map with a copy that omits hsId.
    synchronized (m_mapLock) {
        ImmutableMap.Builder<Long, Mailbox> rebuilt = ImmutableMap.builder();
        for (Map.Entry<Long, Mailbox> entry : m_siteMailboxes.entrySet()) {
            if (entry.getKey().longValue() != hsId) {
                rebuilt.put(entry.getKey(), entry.getValue());
            }
        }
        m_siteMailboxes = rebuilt.build();
    }
}
java
public void waitForAllHostsToBeReady(int expectedHosts) {
    try {
        // Announce this host as ready via an ephemeral sequential node.
        m_zk.create(CoreZK.readyhosts_host, null, Ids.OPEN_ACL_UNSAFE,
                CreateMode.EPHEMERAL_SEQUENTIAL);
        while (true) {
            ZKUtil.FutureWatcher watcher = new ZKUtil.FutureWatcher();
            int readyCount = m_zk.getChildren(CoreZK.readyhosts, watcher).size();
            if (readyCount == expectedHosts) {
                return;
            }
            // Block until the set of ready hosts changes, then re-check.
            watcher.get();
        }
    } catch (KeeperException | InterruptedException e) {
        org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e);
    }
}
java
public void waitForJoiningHostsToBeReady(int expectedHosts, int localHostId) { try { //register this host as joining. The host registration will be deleted after joining is completed. m_zk.create(ZKUtil.joinZKPath(CoreZK.readyjoininghosts, Integer.toString(localHostId)) , null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); while (true) { ZKUtil.FutureWatcher fw = new ZKUtil.FutureWatcher(); int readyHosts = m_zk.getChildren(CoreZK.readyjoininghosts, fw).size(); if ( readyHosts == expectedHosts) { break; } fw.get(); } } catch (KeeperException | InterruptedException e) { org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e); } }
java
public int countForeignHosts() {
    // Count only connections that exist and report themselves as live.
    int liveCount = 0;
    for (ForeignHost host : m_foreignHosts.values()) {
        if (host != null && host.isUp()) {
            liveCount++;
        }
    }
    return liveCount;
}
java