code
stringlengths
73
34.1k
label
stringclasses
1 value
@Override
public String describe(Session session) {
    // Delegate the real work to describeImpl(). Any failure is reported as
    // the exception's string form instead of propagating; the stack trace is
    // dumped to stderr for debugging (preserved from the original behavior).
    try {
        return describeImpl(session);
    } catch (Exception e) {
        e.printStackTrace();
        return e.toString();
    }
}
java
// Returns true when the given output-column index is one of the GROUP BY
// columns of the query; always false for non-grouped queries.
static boolean isGroupByColumn(QuerySpecification select, int index) {
    if (!select.isGrouped) {
        return false;
    }
    // Scan the group index for a column at the requested position.
    final int[] groupColumns = select.groupIndex.getColumns();
    final int columnCount = select.groupIndex.getColumnCount();
    for (int i = 0; i < columnCount; i++) {
        if (groupColumns[i] == index) {
            return true;
        }
    }
    return false;
}
java
private static List<Expression> getDisplayColumnsForSetOp(QueryExpression queryExpr) { assert(queryExpr != null); if (queryExpr.getLeftQueryExpression() == null) { // end of recursion. This is a QuerySpecification assert(queryExpr instanceof QuerySpecification); QuerySpecification select = (QuerySpecification) queryExpr; return select.displayCols; } else { // recurse return getDisplayColumnsForSetOp(queryExpr.getLeftQueryExpression()); } }
java
// Converts the LIMIT/OFFSET portion of a SortAndSlice into VoltXML elements.
// Returns an empty list when there is no slicing. An OFFSET is emitted either
// as a literal child value (only when > 0) or as an "offset_paramid" attribute
// when it is a parameter; LIMIT likewise. The parser guarantees two child
// nodes (offset, limit) on limitCondition, checked explicitly below.
// NOTE(review): `(Integer)offsetExpr.getValue(session)` is auto-unboxed for
// the `> 0` comparison — assumes getValue never returns null here; confirm.
// NOTE(review): HsqlException is swallowed after printStackTrace (the
// "XXX really?" marker is original) — the caller then sees a partial result.
protected static List<VoltXMLElement> voltGetLimitOffsetXMLFromSortAndSlice(Session session, SortAndSlice sortAndSlice) throws HSQLParseException { List<VoltXMLElement> result = new ArrayList<>(); if (sortAndSlice == null || sortAndSlice == SortAndSlice.noSort) { return result; } if (sortAndSlice.limitCondition != null) { Expression limitCondition = sortAndSlice.limitCondition; if (limitCondition.nodes.length != 2) { throw new HSQLParseException( "Parser did not create limit and offset expression for LIMIT."); } try { // read offset. it may be a parameter token. VoltXMLElement offset = new VoltXMLElement("offset"); Expression offsetExpr = limitCondition.getLeftNode(); if (offsetExpr.isParam == false) { Integer offsetValue = (Integer)offsetExpr.getValue(session); if (offsetValue > 0) { Expression expr = new ExpressionValue(offsetValue, org.hsqldb_voltpatches.types.Type.SQL_BIGINT); offset.children.add(expr.voltGetXML(session)); offset.attributes.put("offset", offsetValue.toString()); } } else { offset.attributes.put("offset_paramid", offsetExpr.getUniqueId(session)); } result.add(offset); // Limit may be null (offset with no limit), or // it may be a parameter Expression limitExpr = limitCondition.getRightNode(); if (limitExpr != null) { VoltXMLElement limit = new VoltXMLElement("limit"); if (limitExpr.isParam == false) { Integer limitValue = (Integer)limitExpr.getValue(session); Expression expr = new ExpressionValue(limitValue, org.hsqldb_voltpatches.types.Type.SQL_BIGINT); limit.children.add(expr.voltGetXML(session)); limit.attributes.put("limit", limitValue.toString()); } else { limit.attributes.put("limit_paramid", limitExpr.getUniqueId(session)); } result.add(limit); } } catch (HsqlException ex) { // XXX really? ex.printStackTrace(); } } return result; }
java
// Parses a string literal into the runtime Java object for the given VoltType.
// The TINYINT/SMALLINT/INTEGER fall-throughs into BIGINT are deliberate: all
// runtime integer parameters are Longs (see the commented-out per-size code).
// DECIMAL/VARBINARY accept only NULL defaults. TIMESTAMP accepts either a
// long (microseconds since epoch) or a date string in the pattern
// "EEE MMM dd HH:mm:ss zzz yyyy"; the parsed millis are scaled to micros.
// NOTE(review): the SimpleDateFormat is created per call (it is not
// thread-safe, so it must not be hoisted to a shared static without care).
// Unknown types log severe and return null.
public static Object getObjectFromString(VoltType type, String value) throws ParseException { Object ret = null; switch (type) { // NOTE: All runtime integer parameters are actually Longs,so we will have problems // if we actually try to convert the object to one of the smaller numeric sizes // -------------------------------- // INTEGERS // -------------------------------- case TINYINT: //ret = Byte.valueOf(value); //break; case SMALLINT: //ret = Short.valueOf(value); //break; case INTEGER: //ret = Integer.valueOf(value); //break; case BIGINT: ret = Long.valueOf(value); break; // -------------------------------- // FLOATS // -------------------------------- case FLOAT: ret = Double.valueOf(value); break; // -------------------------------- // STRINGS // -------------------------------- case STRING: ret = value; break; case DECIMAL: case VARBINARY: if (value != null) { throw new RuntimeException("Only NULL default values for DECIMAL " + "and VARBINARY columns are supported right now"); } break; // -------------------------------- // TIMESTAMP // -------------------------------- case TIMESTAMP: { // Support either long values (microseconds since epoch) or timestamp strings. try { // Try to parse it as a long first. ret = new TimestampType(Long.parseLong(value)); } catch (NumberFormatException e) { // It failed to parse as a long - parse it as a timestamp string. Date date = new SimpleDateFormat("EEE MMM dd HH:mm:ss zzz yyyy").parse(value); ret = new TimestampType(date.getTime() * 1000); } break; } // -------------------------------- // INVALID // -------------------------------- default: LOG.severe("ERROR: Unable to get object from string for invalid ValueType '" + type + "'"); } return (ret); }
java
public static VoltType getNumericLiteralType(VoltType vt, String value) { try { Long.parseLong(value); } catch (NumberFormatException e) { // Our DECIMAL may not be bigger/smaller enough to store the constant value return VoltType.DECIMAL; } return vt; }
java
// Encodes a power-of-two length as a single byte. The assert verifies length
// is a power of two (or zero): x == lowest-set-bit(x).
// NOTE(review): the written value is the bit length (floor(log2)+1 for
// nonzero input, e.g. 8 -> 4, 1 -> 1), not log2 as the comment below implies;
// presumably the reader decodes symmetrically — confirm against the decoder.
private static void writeLength(ByteBuffer buf, int length) { assert(length >= 0); assert(length == (length & (~length + 1))); // check if power of two // shockingly fast log_2 uses intrinsics in JDK >= 1.7 byte log2size = (byte) (32 - Integer.numberOfLeadingZeros(length)); buf.put(log2size); }
java
// Looks up the agent registered for the given selector. Every selector is
// expected to have an agent; a miss is a programming error (assert).
public OpsAgent getAgent(OpsSelector selector) {
    final OpsAgent result = m_agents.get(selector);
    assert (result != null);
    return result;
}
java
// Shuts down every registered agent, then clears the registry.
// Fix: the original silently swallowed InterruptedException; we now restore
// the thread's interrupt flag so callers can observe the interruption, while
// still continuing to shut down the remaining agents.
public void shutdown() {
    for (Entry<OpsSelector, OpsAgent> entry : m_agents.entrySet()) {
        try {
            entry.getValue().shutdown();
        } catch (InterruptedException e) {
            // Preserve interrupt status for the caller instead of dropping it.
            Thread.currentThread().interrupt();
        }
    }
    m_agents.clear();
}
java
// Registers a procedure descriptor under its derived short name and returns
// that short name; duplicate short names are a compile error.
public String add(ProcedureDescriptor descriptor) throws VoltCompilerException {
    assert descriptor != null;
    final String className = descriptor.m_className;
    assert className != null && ! className.trim().isEmpty();
    final String shortName = deriveShortProcedureName(className);
    if (m_procedureMap.containsKey(shortName)) {
        throw m_compiler.new VoltCompilerException(String.format(
                "Procedure \"%s\" is already defined", className));
    }
    m_procedureMap.put(shortName, descriptor);
    return shortName;
}
java
// Removes a procedure by name. When the procedure is absent, this is a no-op
// if ifExists is set (DROP ... IF EXISTS), otherwise a compile error.
public void removeProcedure(String procName, boolean ifExists) throws VoltCompilerException {
    assert procName != null && ! procName.trim().isEmpty();
    final String shortName = deriveShortProcedureName(procName);
    if (m_procedureMap.containsKey(shortName)) {
        m_procedureMap.remove(shortName);
        return;
    }
    if (!ifExists) {
        throw m_compiler.new VoltCompilerException(String.format(
                "Dropped Procedure \"%s\" is not defined", procName));
    }
}
java
// Attaches partitioning data to an already-registered procedure. Because the
// descriptor's fields are final, a brand-new descriptor is constructed and
// swapped into the map: the short constructor form for class-based procedures
// (m_stmtLiterals == null — the longer constructor asserts on that case), the
// long form for single-statement procedures, carrying over auth groups,
// statement text, and join order. Referencing an unknown procedure is a
// compile error.
public void addProcedurePartitionInfoTo(String procedureName, ProcedurePartitionData data) throws VoltCompilerException { ProcedureDescriptor descriptor = m_procedureMap.get(procedureName); if( descriptor == null) { throw m_compiler.new VoltCompilerException(String.format( "Partition references an undefined procedure \"%s\"", procedureName)); } // need to re-instantiate as descriptor fields are final if( descriptor.m_stmtLiterals == null) { // the longer form constructor asserts on singleStatement descriptor = m_compiler.new ProcedureDescriptor( descriptor.m_authGroups, descriptor.m_class, data); } else { descriptor = m_compiler.new ProcedureDescriptor( descriptor.m_authGroups, descriptor.m_className, descriptor.m_stmtLiterals, descriptor.m_joinOrder, data, false, descriptor.m_class); } m_procedureMap.put(procedureName, descriptor); }
java
void addExportedTable(String tableName, String targetName, boolean isStream) { assert tableName != null && ! tableName.trim().isEmpty(); assert targetName != null && ! targetName.trim().isEmpty(); // store uppercase in the catalog as typename targetName = targetName.toUpperCase(); if (isStream) { // insert the table's name into the export group NavigableSet<String> tableGroup = m_exportsByTargetName.get(targetName); if (tableGroup == null) { tableGroup = new TreeSet<>(); m_exportsByTargetName.put(targetName, tableGroup); } tableGroup.add(tableName); return; } m_persistentTableTargetMap.put(tableName, targetName); }
java
// Builds an ImmutableList view over the first `length` slots of `elements`,
// choosing the cheapest representation: the shared empty list, a singleton,
// or a RegularImmutableList. The array is trimmed first when it is longer
// than `length` so the regular list never retains unused slots. The unchecked
// cast is safe by the caller's contract that the array holds only Es.
static <E> ImmutableList<E> asImmutableList(Object[] elements, int length) { switch (length) { case 0: return of(); case 1: @SuppressWarnings("unchecked") // collection had only Es in it ImmutableList<E> list = new SingletonImmutableList<E>((E) elements[0]); return list; default: if (length < elements.length) { elements = arraysCopyOf(elements, length); } return new RegularImmutableList<E>(elements); } }
java
// Returns the minimum element of the iterable under this ordering by
// delegating to the iterator-based overload.
@CanIgnoreReturnValue // TODO(kak): Consider removing this public <E extends T> E min(Iterable<E> iterable) { return min(iterable.iterator()); }
java
static long calculateAverage(long currAvg, long currInvoc, long rowAvg, long rowInvoc) { long currTtl = currAvg * currInvoc; long rowTtl = rowAvg * rowInvoc; // If both are 0, then currTtl, rowTtl are also 0. if ((currInvoc + rowInvoc) == 0L) { return 0L; } else { return (currTtl + rowTtl) / (currInvoc + rowInvoc); } }
java
// Registers the new setting under its name (replacing any previous entry)
// and persists the whole table to the recent-settings file.
static void addToRecentConnectionSettings(Hashtable settings, ConnectionSetting newSetting) throws IOException { settings.put(newSetting.getName(), newSetting); ConnectionDialogCommon.storeRecentConnectionSettings(settings); }
java
private static void storeRecentConnectionSettings(Hashtable settings) { try { if (recentSettings == null) { setHomeDir(); if (homedir == null) { return; } recentSettings = new File(homedir, fileName); if (!recentSettings.exists()) { // recentSettings.createNewFile(); } } if (settings == null || settings.size() == 0) { return; } // setup a stream to a physical file on the filesystem FileOutputStream out = new FileOutputStream(recentSettings); ObjectOutputStream objStream = new ObjectOutputStream(out); Enumeration en = settings.elements(); while (en.hasMoreElements()) { objStream.writeObject(en.nextElement()); } objStream.flush(); objStream.close(); out.close(); } catch (Throwable t) {} }
java
// Best-effort removal of the persisted recent-settings file. Resolves the
// file lazily if needed, deletes it when present, and always ends with the
// cached reference cleared. All failures are deliberately ignored.
static void deleteRecentConnectionSettings() {
    try {
        if (recentSettings == null) {
            setHomeDir();
            if (homedir == null) {
                return;
            }
            recentSettings = new File(homedir, fileName);
        }
        if (recentSettings.exists()) {
            recentSettings.delete();
        }
        recentSettings = null;
    } catch (Throwable t) {}
}
java
// Returns per-partition affinity stats as the delta against the baseline
// snapshot; partitions with no baseline entry are returned as plain copies.
public Map<Integer, ClientAffinityStats> getAffinityStats() {
    final Map<Integer, ClientAffinityStats> result =
            new TreeMap<Integer, ClientAffinityStats>();
    for (Entry<Integer, ClientAffinityStats> entry : m_currentAffinity.entrySet()) {
        final Integer partition = entry.getKey();
        final ClientAffinityStats current = entry.getValue();
        if (m_baselineAffinity.containsKey(partition)) {
            result.put(partition,
                    ClientAffinityStats.diff(current, m_baselineAffinity.get(partition)));
        } else {
            result.put(partition, (ClientAffinityStats) current.clone());
        }
    }
    return result;
}
java
// Rolls the per-partition affinity stats up into one aggregate entry keyed
// by Integer.MAX_VALUE (the "all partitions" sentinel).
public ClientAffinityStats getAggregateAffinityStats() {
    long totalAffinityWrites = 0;
    long totalAffinityReads = 0;
    long totalRrWrites = 0;
    long totalRrReads = 0;
    for (Entry<Integer, ClientAffinityStats> entry : getAffinityStats().entrySet()) {
        final ClientAffinityStats stats = entry.getValue();
        totalAffinityWrites += stats.getAffinityWrites();
        totalAffinityReads += stats.getAffinityReads();
        totalRrWrites += stats.getRrWrites();
        totalRrReads += stats.getRrReads();
    }
    return new ClientAffinityStats(Integer.MAX_VALUE, totalAffinityWrites,
            totalRrWrites, totalAffinityReads, totalRrReads);
}
java
// Pre-parses a DDL statement with regexes (after stripping comments) to
// extract the verb (CREATE/ALTER/DROP...), object noun (TABLE/STREAM...),
// object name (lowercased), and optional secondary name. For non-CREATE
// statements it additionally probes for CASCADE / IF EXISTS clauses, treating
// both null and empty capture groups as "clause absent". Returns null
// whenever any required piece fails to match — callers treat that as
// "not DDL we preprocess".
public static HSQLDDLInfo preprocessHSQLDDL(String ddl) { ddl = SQLLexer.stripComments(ddl); Matcher matcher = HSQL_DDL_PREPROCESSOR.matcher(ddl); if (matcher.find()) { String verbString = matcher.group("verb"); HSQLDDLInfo.Verb verb = HSQLDDLInfo.Verb.get(verbString); if (verb == null) { return null; } String nounString = matcher.group("object"); HSQLDDLInfo.Noun noun = HSQLDDLInfo.Noun.get(nounString); if (noun == null) { return null; } boolean createStream = verb.equals(HSQLDDLInfo.Verb.CREATE) && noun.equals(HSQLDDLInfo.Noun.STREAM); String name = matcher.group("name"); if (name == null) { return null; } String secondName = matcher.group("subject"); if (secondName != null) { secondName = secondName.toLowerCase(); } // cascade/if exists are interesting on alters and drops boolean cascade = false; boolean ifexists = false; if (verb != HSQLDDLInfo.Verb.CREATE) { matcher = DDL_IFEXISTS_OR_CASCADE_CHECK.matcher(ddl); if (matcher.matches()) { // Don't be too sensitive to regex specifics by assuming null always // indicates a missing clause. Look for empty too. String existsClause = matcher.group("exists"); String cascadeClause = matcher.group("cascade"); ifexists = existsClause != null && !existsClause.isEmpty(); cascade = cascadeClause != null && !cascadeClause.isEmpty(); } } return new HSQLDDLInfo(verb, noun, name.toLowerCase(), secondName, cascade, ifexists, createStream); } return null; }
java
// Publishes the restart masters atomically, using defensive copies so later
// mutation of the caller's collections cannot leak into the published state.
public void doRestart(List<Long> masters, Map<Integer, Long> partitionMasters) {
    final List<Long> mastersCopy = new ArrayList<Long>(masters);
    m_restartMasters.set(mastersCopy);
    m_restartMastersMap.set(Maps.newHashMap(partitionMasters));
}
java
// Activates a table stream of the given type for this table in the EE.
// Returns false (after logging) when activation fails, true otherwise.
// Fix: the debug message was missing spaces ("...type SNAPSHOTfor table X and
// failed") — corrected to a readable sentence.
public boolean activate(SystemProcedureExecutionContext context, boolean undo, byte[] predicates) {
    if (!context.activateTableStream(m_tableId, m_type, undo, predicates)) {
        String tableName = CatalogUtil.getTableNameFromId(context.getDatabase(), m_tableId);
        log.debug("Attempted to activate a table stream of type " + m_type
                + " for table " + tableName + " and failed");
        return false;
    }
    return true;
}
java
// Serializes the next chunk of the table stream into the supplied buffers.
// Returns (writeFuture, moreToCome): the future covers the async writes to
// the snapshot targets (null when nothing was written), and the boolean is
// true while the EE reports more tuples remaining. On a serialization error
// every buffer is discarded, all table tasks are told about the failure, and
// (null, false) is returned to stop streaming. When zero bytes were produced
// the buffers are likewise returned to the pool unwritten. The optional
// one-slot rowCountAccumulator is bumped by the rows in this chunk.
public Pair<ListenableFuture<?>, Boolean> streamMore(SystemProcedureExecutionContext context, List<DBBPool.BBContainer> outputBuffers, int[] rowCountAccumulator) { ListenableFuture<?> writeFuture = null; prepareBuffers(outputBuffers); Pair<Long, int[]> serializeResult = context.tableStreamSerializeMore(m_tableId, m_type, outputBuffers); if (serializeResult.getFirst() == SERIALIZATION_ERROR) { // Cancel the snapshot here for (DBBPool.BBContainer container : outputBuffers) { container.discard(); } SnapshotSerializationException ex = new SnapshotSerializationException("Snapshot of table " + m_tableId + " failed to complete."); for (SnapshotTableTask task : m_tableTasks) { task.m_target.reportSerializationFailure(ex); } return Pair.of(null, false); } if (serializeResult.getSecond()[0] > 0) { if (rowCountAccumulator != null && rowCountAccumulator.length == 1) { rowCountAccumulator[0] += getTupleDataRowCount(outputBuffers); } writeFuture = writeBlocksToTargets(outputBuffers, serializeResult.getSecond()); } else { // Return all allocated snapshot output buffers for (DBBPool.BBContainer container : outputBuffers) { container.discard(); } } return Pair.of(writeFuture, serializeResult.getFirst() > 0); }
java
// Resets each output buffer and reserves its paired target's header space,
// so serialization starts writing just past the header. Buffers and table
// tasks are positionally paired, hence the size precondition.
private void prepareBuffers(List<DBBPool.BBContainer> buffers) {
    Preconditions.checkArgument(buffers.size() == m_tableTasks.size());
    final UnmodifiableIterator<SnapshotTableTask> taskIter = m_tableTasks.iterator();
    for (DBBPool.BBContainer container : buffers) {
        final int headerSize = taskIter.next().m_target.getHeaderSize();
        final ByteBuffer buffer = container.b();
        buffer.clear();
        buffer.position(headerSize);
    }
}
java
// Hands each serialized buffer to its table task's data target. Containers,
// tasks, and serialized byte counts are positionally aligned (checked by the
// preconditions), so the three sequences are walked in lockstep. Each buffer
// is finalized to [0, headerSize + serializedBytes), optionally run through
// the task's filter chain, then written asynchronously; all non-null write
// futures are combined into a single allAsList future for the caller to wait
// on.
private ListenableFuture<?> writeBlocksToTargets(Collection<DBBPool.BBContainer> outputBuffers, int[] serialized) { Preconditions.checkArgument(m_tableTasks.size() == serialized.length); Preconditions.checkArgument(outputBuffers.size() == serialized.length); final List<ListenableFuture<?>> writeFutures = new ArrayList<ListenableFuture<?>>(outputBuffers.size()); // The containers, the data targets, and the serialized byte counts should all line up Iterator<DBBPool.BBContainer> containerIter = outputBuffers.iterator(); int serializedIndex = 0; for (SnapshotTableTask task : m_tableTasks) { final DBBPool.BBContainer container = containerIter.next(); /* * Finalize the buffer by setting position to 0 and limit to the last used byte */ final ByteBuffer buf = container.b(); buf.limit(serialized[serializedIndex++] + task.m_target.getHeaderSize()); buf.position(0); Callable<DBBPool.BBContainer> valueForTarget = Callables.returning(container); if (task.m_filters != null) { for (SnapshotDataFilter filter : task.m_filters) { valueForTarget = filter.filter(valueForTarget); } } ListenableFuture<?> writeFuture = task.m_target.write(valueForTarget, m_tableId); if (writeFuture != null) { writeFutures.add(writeFuture); } } // Wraps all write futures in one future return Futures.allAsList(writeFutures); }
java
// Records a value into the active histogram inside the recorder's writer
// critical section, so a concurrent histogram swap can never observe a
// half-recorded value. The exit call must always run, hence try/finally.
public void recordValue(final long value) throws ArrayIndexOutOfBoundsException {
    final long enterToken = recordingPhaser.writerCriticalSectionEnter();
    try {
        activeHistogram.recordValue(value);
    } finally {
        recordingPhaser.writerCriticalSectionExit(enterToken);
    }
}
java
// Static factory: a new empty HashBiMap pre-sized to hold expectedSize
// mappings without rehashing.
public static <K, V> HashBiMap<K, V> create(int expectedSize) {
    return new HashBiMap<K, V>(expectedSize);
}
java
// Replays the unioned repair log to every surviving replica that is missing
// entries. For each log response, the set of replicas whose max-seen handle
// is behind it is collected and repaired with one multicast per log entry.
// Must be called under the caller's deliver lock (same lock as cancel());
// if the promotion was already cancelled, no corrections are sent. Finishes
// by completing m_promotionResult with the max seen txn id.
public void repairSurvivors() { // cancel() and repair() must be synchronized by the caller (the deliver lock, // currently). If cancelled and the last repair message arrives, don't send // out corrections! if (this.m_promotionResult.isCancelled()) { repairLogger.debug(m_whoami + "Skipping repair message creation for cancelled Term."); return; } int queued = 0; if (repairLogger.isDebugEnabled()) { repairLogger.debug(m_whoami + "received all repair logs and is repairing surviving replicas."); } for (Iv2RepairLogResponseMessage li : m_repairLogUnion) { if (repairLogger.isDebugEnabled()) { repairLogger.debug(m_whoami + "RepairResponse:\n" + li); } List<Long> needsRepair = new ArrayList<Long>(5); for (Entry<Long, ReplicaRepairStruct> entry : m_replicaRepairStructs.entrySet()) { if (entry.getValue().needs(li.getHandle())) { ++queued; if (repairLogger.isDebugEnabled()) { repairLogger.debug(m_whoami + "repairing " + CoreUtils.hsIdToString(entry.getKey()) + ". Max seen " + TxnEgo.txnIdToString(entry.getValue().m_maxSpHandleSeen) + ". Repairing with " + TxnEgo.txnIdToString(li.getHandle())); } needsRepair.add(entry.getKey()); } } if (!needsRepair.isEmpty()) { if (repairLogger.isDebugEnabled()) { repairLogger.debug(m_whoami + "repairing: " + CoreUtils.hsIdCollectionToString(needsRepair) + " with message: " + li.getPayload()); } m_mailbox.repairReplicasWith(needsRepair, li.getPayload()); } } if (repairLogger.isDebugEnabled()) { repairLogger.debug(m_whoami + "finished queuing " + queued + " replica repair messages."); } m_promotionResult.set(new RepairResult(m_maxSeenTxnId)); }
java
void init(ResultMetaData meta, HsqlProperties props) throws SQLException { resultMetaData = meta; columnCount = resultMetaData.getColumnCount(); // fredt - props is null for internal connections, so always use the // default behaviour in this case // JDBCDriver.getPropertyInfo says // default is true useColumnName = (props == null) ? true : props.isPropertyTrue( "get_column_name", true); }
java
// Convenience overload: reconfigure with a fresh, empty set for collecting
// unknown faulted sites.
Map<Long, Long> reconfigureOnFault(Set<Long> hsIds, FaultMessage fm) {
    final Set<Long> unknownFaultedSites = new HashSet<Long>();
    return reconfigureOnFault(hsIds, fm, unknownFaultedSites);
}
java
// Core fault-resolution protocol. Drains every queued FaultMessage first:
// messages that may not be ignored are recorded in m_inTrouble (value =
// witnessed-or-decided); Unknown discards are collected into the caller's
// unknownFaultedSites set. If nothing new was recorded, returns an empty map.
// Otherwise it re-seeds the seeker with the still-alive sites, exchanges
// global fault data with them, and extracts the agreed last-txn-id per failed
// site. Witnessed-but-undecided sites are logged as a warning. notifyOnKill
// may veto a round (continue to re-receive), otherwise the failed sites are
// added to history, their in-trouble entries cleared, and the map returned.
public Map<Long, Long> reconfigureOnFault(Set<Long> hsIds, FaultMessage fm, Set<Long> unknownFaultedSites) { boolean proceed = false; do { Discard ignoreIt = mayIgnore(hsIds, fm); if (Discard.DoNot == ignoreIt) { m_inTrouble.put(fm.failedSite, fm.witnessed || fm.decided); m_recoveryLog.info("Agreement, Processing " + fm); proceed = true; } else { ignoreIt.log(fm); } if (Discard.Unknown == ignoreIt) { unknownFaultedSites.add(fm.failedSite); } fm = (FaultMessage) m_mailbox.recv(justFailures); } while (fm != null); if (!proceed) { return ImmutableMap.of(); } m_inTroubleCount = m_inTrouble.size(); // we are here if failed site was not previously recorded // or it was previously recorded but it became witnessed from unwitnessed m_seeker.startSeekingFor(Sets.difference(hsIds, m_failedSites), m_inTrouble); if (m_recoveryLog.isDebugEnabled()) { m_recoveryLog.debug(String.format("\n %s\n %s\n %s\n %s\n %s", m_seeker.dumpAlive(), m_seeker.dumpDead(), m_seeker.dumpReported(), m_seeker.dumpSurvivors(), dumpInTrouble())); } discoverGlobalFaultData_send(hsIds); while (discoverGlobalFaultData_rcv(hsIds)) { Map<Long, Long> lastTxnIdByFailedSite = extractGlobalFaultData(hsIds); if (lastTxnIdByFailedSite.isEmpty()) { return ImmutableMap.of(); } Set<Long> witnessed = Maps.filterValues(m_inTrouble, equalTo(Boolean.TRUE)).keySet(); Set<Long> notClosed = Sets.difference(witnessed, lastTxnIdByFailedSite.keySet()); if ( !notClosed.isEmpty()) { m_recoveryLog.warn("Agreement, witnessed but not decided: [" + CoreUtils.hsIdCollectionToString(notClosed) + "] seeker: " + m_seeker); } if (!notifyOnKill(hsIds, lastTxnIdByFailedSite)) { continue; } m_failedSites.addAll( lastTxnIdByFailedSite.keySet()); m_failedSitesCount = m_failedSites.size(); m_recoveryLog.info( "Agreement, Adding " + CoreUtils.hsIdCollectionToString(lastTxnIdByFailedSite.keySet()) + " to failed sites history"); clearInTrouble(lastTxnIdByFailedSite.keySet()); m_seeker.clear(); return lastTxnIdByFailedSite; } return 
ImmutableMap.of(); }
java
// Builds a hash-range predicate keyed on the table's partition column.
public static AbstractExpression createIndexExpressionForTable(Table table, Map<Integer, Integer> ranges) {
    final HashRangeExpression rangePredicate = new HashRangeExpression();
    rangePredicate.setRanges(ranges);
    rangePredicate.setHashColumnIndex(table.getPartitioncolumn().getIndex());
    return rangePredicate;
}
java
// Benchmark data loader. Inserts config.custcount customers (progress
// printed every 10k), giving each a random number (0-4) of accounts whose
// ids and states are remembered in the `accounts` / `acct_states` arrays for
// later workload generation, then inserts config.vendorcount vendor offers.
// All inserts go through async callProcedure with a BenchmarkCallback, so
// this method returns before the inserts are necessarily acknowledged.
public void initialize() throws Exception { List<Long> acctList = new ArrayList<Long>(config.custcount*2); List<String> stList = new ArrayList<String>(config.custcount*2); // generate customers System.out.println("generating " + config.custcount + " customers..."); for (int c=0; c<config.custcount; c++) { if (c % 10000 == 0) { System.out.println(" "+c); } PersonGenerator.Person p = gen.newPerson(); //int ac = rand.nextInt(areaCodes.length); client.callProcedure(new BenchmarkCallback("CUSTOMER.insert"), "CUSTOMER.insert", c, p.firstname, p.lastname, "Anytown", p.state, p.phonenumber, p.dob, p.sex ); int accts = rand.nextInt(5); for (int a=0; a<accts; a++) { int acct_no = (c*100)+a; client.callProcedure(new BenchmarkCallback("ACCOUNT.insert"), "ACCOUNT.insert", acct_no, c, rand.nextInt(10000), rand.nextInt(10000), new Date(), "Y" ); acctList.add(Long.valueOf(acct_no)); stList.add(p.state); } } accounts = acctList.toArray(new Long[acctList.size()]); acct_states = stList.toArray(new String[stList.size()]); // generate vendor offers System.out.println("generating " + config.vendorcount + " vendors..."); for (int v = 0; v < config.vendorcount; v++) { if (v % 10000 == 0) { System.out.println(" " + v); } client.callProcedure(new BenchmarkCallback("VENDOR_OFFERS.insert"), "VENDOR_OFFERS.insert", v, rand.nextInt(5) + 1, 0, rand.nextInt(5) + 1, (double) rand.nextInt(100), 0, offers[rand.nextInt(offers.length)] ); } }
java
// Creates the whole CoreZK node hierarchy. The creates are issued
// asynchronously and then each callback is awaited; any failure crashes the
// local VoltDB instance (the hierarchy is required for startup).
public static void createHierarchy(ZooKeeper zk) {
    final LinkedList<ZKUtil.StringCallback> pending =
            new LinkedList<ZKUtil.StringCallback>();
    for (String node : CoreZK.ZK_HIERARCHY) {
        final ZKUtil.StringCallback callback = new ZKUtil.StringCallback();
        pending.add(callback);
        zk.create(node, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
    }
    try {
        for (ZKUtil.StringCallback callback : pending) {
            callback.get();
        }
    } catch (Exception e) {
        org.voltdb.VoltDB.crashLocalVoltDB(e.getMessage(), false, e);
    }
}
java
// Attempts to claim the rejoin blocker node for this host. On success
// returns -1. If the node already exists (another host is rejoining), reads
// and returns that host's id; a NONODE race while reading is tolerated (the
// other rejoin finished) and also yields -1. Any other ZK failure crashes
// the local instance. The ignored InterruptedException on the inner read is
// original behavior — it falls through to the -1 return.
public static int createRejoinNodeIndicator(ZooKeeper zk, int hostId) { try { zk.create(rejoin_node_blocker, ByteBuffer.allocate(4).putInt(hostId).array(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } catch (KeeperException e) { if (e.code() == KeeperException.Code.NODEEXISTS) { try { return ByteBuffer.wrap(zk.getData(rejoin_node_blocker, false, null)).getInt(); } catch (KeeperException e1) { if (e1.code() != KeeperException.Code.NONODE) { org.voltdb.VoltDB.crashLocalVoltDB("Unable to get the current rejoining node indicator"); } } catch (InterruptedException e1) {} } else { org.voltdb.VoltDB.crashLocalVoltDB("Unable to create rejoin node Indicator", true, e); } } catch (InterruptedException e) { org.voltdb.VoltDB.crashLocalVoltDB("Unable to create rejoin node Indicator", true, e); } return -1; }
java
public static boolean removeRejoinNodeIndicatorForHost(ZooKeeper zk, int hostId) { try { Stat stat = new Stat(); final int rejoiningHost = ByteBuffer.wrap(zk.getData(rejoin_node_blocker, false, stat)).getInt(); if (hostId == rejoiningHost) { zk.delete(rejoin_node_blocker, stat.getVersion()); return true; } } catch (KeeperException e) { if (e.code() == KeeperException.Code.NONODE || e.code() == KeeperException.Code.BADVERSION) { // Okay if the rejoin blocker for the given hostId is already gone. return true; } } catch (InterruptedException e) { return false; } return false; }
java
public static boolean removeJoinNodeIndicatorForHost(ZooKeeper zk, int hostId) { try { Stat stat = new Stat(); String path = ZKUtil.joinZKPath(readyjoininghosts, Integer.toString(hostId)); zk.getData(path, false, stat); zk.delete(path, stat.getVersion()); return true; } catch (KeeperException e) { if (e.code() == KeeperException.Code.NONODE || e.code() == KeeperException.Code.BADVERSION) { // Okay if the join indicator for the given hostId is already gone. return true; } } catch (InterruptedException e) { return false; } return false; }
java
// Reports whether any initiator node currently has no children — the
// signature of a partition cleanup in flight. Child listings are requested
// asynchronously for all initiators before any result is awaited.
public static boolean isPartitionCleanupInProgress(ZooKeeper zk)
        throws KeeperException, InterruptedException {
    final List<String> initiators = zk.getChildren(VoltZK.leaders_initiators, null);
    final List<ZKUtil.ChildrenCallback> pending = Lists.newArrayList();
    for (String initiator : initiators) {
        final ZKUtil.ChildrenCallback callback = new ZKUtil.ChildrenCallback();
        zk.getChildren(ZKUtil.joinZKPath(VoltZK.leaders_initiators, initiator),
                false, callback, null);
        pending.add(callback);
    }
    for (ZKUtil.ChildrenCallback callback : pending) {
        if (callback.get().isEmpty()) {
            return true;
        }
    }
    return false;
}
java
// A column is nullable when its base definition allows it; domain-typed
// columns additionally defer the final decision to the domain definition.
public boolean isNullable() {
    final boolean nullable = super.isNullable();
    if (nullable && dataType.isDomainType()) {
        return dataType.userTypeModifier.isNullable();
    }
    return nullable;
}
java
// Evaluates the column's DEFAULT expression, or returns null when there is
// no default defined.
Object getDefaultValue(Session session) {
    if (defaultExpression == null) {
        return null;
    }
    return defaultExpression.getValue(session, dataType);
}
java
// Evaluates the column's generating expression (GENERATED ... AS), or
// returns null when the column is not generated.
Object getGeneratedValue(Session session) {
    if (generatingExpression == null) {
        return null;
    }
    return generatingExpression.getValue(session, dataType);
}
java
// Returns the SQL text of the column's DEFAULT clause, or null when the
// column has no default.
// Fix: removed the dead `String ddl = null;` initialization and the
// redundant local — the value is returned directly.
public String getDefaultSQL() {
    return defaultExpression == null ? null : defaultExpression.getSQL();
}
java
// Returns the column's own DEFAULT expression when one is set; otherwise,
// for domain-typed columns, falls back to the domain's default clause.
// Returns null when neither exists.
Expression getDefaultExpression() {
    if (defaultExpression != null) {
        return defaultExpression;
    }
    if (dataType.isDomainType()) {
        return dataType.userTypeModifier.getDefaultClause();
    }
    return null;
}
java
// Asynchronously creates the ZK completion node for a snapshot, keyed by
// transaction id under VoltZK.completed_snapshots, and returns the creation
// callback for the caller to await. The node payload is pretty-printed JSON
// holding the snapshot identity (path, pathType, nonce, txnId), flags, and a
// hostCount that starts at -1 and is incremented later by participants. A
// non-positive txnId is a fatal precondition violation; JSON serialization
// failure crashes the local instance.
// Fix: corrected the typo in the crash message ("greather" -> "greater").
public static ZKUtil.StringCallback createSnapshotCompletionNode(String path, String pathType,
        String nonce, long txnId, boolean isTruncation, String truncReqId) {
    if (!(txnId > 0)) {
        VoltDB.crashGlobalVoltDB("Txnid must be greater than 0", true, null);
    }
    byte nodeBytes[] = null;
    try {
        JSONStringer stringer = new JSONStringer();
        stringer.object();
        stringer.keySymbolValuePair("txnId", txnId);
        stringer.keySymbolValuePair("isTruncation", isTruncation);
        stringer.keySymbolValuePair("didSucceed", true);
        // Starts at -1; participants add themselves plus one later.
        stringer.keySymbolValuePair("hostCount", -1);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_PATH, path);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_PATH_TYPE, pathType);
        stringer.keySymbolValuePair(SnapshotUtil.JSON_NONCE, nonce);
        stringer.keySymbolValuePair("truncReqId", truncReqId);
        stringer.key("exportSequenceNumbers").object().endObject();
        stringer.endObject();
        JSONObject jsonObj = new JSONObject(stringer.toString());
        nodeBytes = jsonObj.toString(4).getBytes(Charsets.UTF_8);
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Error serializing snapshot completion node JSON", true, e);
    }
    // Fire-and-return: the caller waits on the callback.
    ZKUtil.StringCallback cb = new ZKUtil.StringCallback();
    final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId;
    VoltDB.instance().getHostMessenger().getZK().create(
            snapshotPath, nodeBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, cb, null);
    return cb;
}
java
// Adds this caller's participant count to the hostCount field of the
// snapshot completion node, using an optimistic versioned read-modify-write:
// on BadVersionException the loop retries with a fresh read. A missing node
// means snapshot creation failed and the update is silently skipped. The
// "+ 1" compensates for hostCount being initialized to -1 at creation time.
// Any other ZK/JSON failure crashes the local instance.
public static void logParticipatingHostCount(long txnId, int participantCount) { ZooKeeper zk = VoltDB.instance().getHostMessenger().getZK(); final String snapshotPath = VoltZK.completed_snapshots + "/" + txnId; boolean success = false; while (!success) { Stat stat = new Stat(); byte data[] = null; try { data = zk.getData(snapshotPath, false, stat); } catch (KeeperException e) { if (e.code() == KeeperException.Code.NONODE) { // If snapshot creation failed for some reason, the node won't exist. ignore return; } VoltDB.crashLocalVoltDB("Failed to get snapshot completion node", true, e); } catch (InterruptedException e) { VoltDB.crashLocalVoltDB("Interrupted getting snapshot completion node", true, e); } if (data == null) { VoltDB.crashLocalVoltDB("Data should not be null if the node exists", false, null); } try { JSONObject jsonObj = new JSONObject(new String(data, Charsets.UTF_8)); if (jsonObj.getLong("txnId") != txnId) { VoltDB.crashLocalVoltDB("TxnId should match", false, null); } int hostCount = jsonObj.getInt("hostCount"); // +1 because hostCount was initialized to -1 jsonObj.put("hostCount", hostCount + participantCount + 1); zk.setData(snapshotPath, jsonObj.toString(4).getBytes(Charsets.UTF_8), stat.getVersion()); } catch (KeeperException.BadVersionException e) { continue; } catch (Exception e) { VoltDB.crashLocalVoltDB("This ZK call should never fail", true, e); } success = true; } }
java
// Marks the pool closed and physically closes every idle connection.
// Connections currently handed out to clients are untouched (see
// closeImmediatedly for the forced variant).
public synchronized void close() {
    this.isPoolClosed = true;
    while (this.connectionsInactive.size() > 0) {
        final PooledConnection idle = dequeueFirstIfAny();
        if (idle != null) {
            closePhysically(
                idle,
                "closing inactive connection when connection pool was closed.");
        }
    }
}
java
// Closes the pool and then force-closes every connection still handed out
// to clients via its session wrapper. (The misspelled name "Immediatedly"
// is kept for API compatibility.)
public synchronized void closeImmediatedly() {
    close();
    final Iterator iterator = this.connectionsInUse.iterator();
    while (iterator.hasNext()) {
        final PooledConnection inUse = (PooledConnection) iterator.next();
        final SessionConnectionWrapper wrapper =
            (SessionConnectionWrapper) this.sessionConnectionWrappers.get(inUse);
        closeSessionWrapper(
            wrapper,
            "Error closing session wrapper. Connection pool was shutdown immediatedly.");
    }
}
java
// This data source is hard-wired to the HSQLDB driver; any attempt to set a
// different driver class is rejected.
public void setDriverClassName(String driverClassName) {
    if (!driverClassName.equals(JDBCConnectionPoolDataSource.driver)) {
        /** @todo: Use a HSQLDB RuntimeException subclass */
        throw new RuntimeException("This class only supports JDBC driver '"
                + JDBCConnectionPoolDataSource.driver + "'");
    }
}
java
// Serializes this windowed-aggregate plan node: after the superclass fields,
// emits an AGGREGATE_COLUMNS array (one object per aggregate with its type,
// output column, and expressions), then the PARTITION BY expressions, and
// finally the ORDER BY sort list. Mirrors loadFromJSONObject below.
@Override public void toJSONString(JSONStringer stringer) throws JSONException { super.toJSONString(stringer); stringer.key("AGGREGATE_COLUMNS") .array(); for (int ii = 0; ii < m_aggregateTypes.size(); ii++) { stringer.object(); stringer.keySymbolValuePair(Members.AGGREGATE_TYPE.name(), m_aggregateTypes.get(ii).name()); stringer.keySymbolValuePair(Members.AGGREGATE_OUTPUT_COLUMN.name(), m_aggregateOutputColumns.get(ii)); AbstractExpression.toJSONArray(stringer, Members.AGGREGATE_EXPRESSIONS.name(), m_aggregateExpressions.get(ii)); stringer.endObject(); } stringer.endArray(); AbstractExpression.toJSONArray(stringer, Members.PARTITIONBY_EXPRESSIONS.name(), m_partitionByExpressions); AbstractExpression.toJSONArrayFromSortList(stringer, m_orderByExpressions, null); }
java
// Deserializes the windowed-aggregate plan node written by toJSONString:
// reads the AGGREGATE_COLUMNS array (currently expected to hold exactly one
// entry — see the assert), then the PARTITION BY expressions and the ORDER BY
// sort list.
@Override public void loadFromJSONObject(JSONObject jobj, Database db) throws JSONException { helpLoadFromJSONObject(jobj, db); JSONArray jarray = jobj.getJSONArray( Members.AGGREGATE_COLUMNS.name() ); int size = jarray.length(); for (int i = 0; i < size; i++) { // We only expect one of these for now. assert(i == 0); JSONObject tempObj = jarray.getJSONObject( i ); m_aggregateTypes.add( ExpressionType.get( tempObj.getString( Members.AGGREGATE_TYPE.name() ))); m_aggregateOutputColumns.add( tempObj.getInt( Members.AGGREGATE_OUTPUT_COLUMN.name() )); m_aggregateExpressions.add( AbstractExpression.loadFromJSONArrayChild(null, tempObj, Members.AGGREGATE_EXPRESSIONS.name(), null)); } m_partitionByExpressions = AbstractExpression.loadFromJSONArrayChild(null, jobj, Members.PARTITIONBY_EXPRESSIONS.name(), null); m_orderByExpressions = new ArrayList<>(); AbstractExpression.loadSortListFromJSONArray(m_orderByExpressions, null, jobj); }
java
@Override
public void run() {
    Thread.currentThread().setName("Latency Watchdog");
    LOG.info(String.format("Latency Watchdog enabled -- threshold:%d(ms) " +
            "wakeup_interval:%d(ms) min_log_interval:%d(ms)\n",
            WATCHDOG_THRESHOLD, WAKEUP_INTERVAL, MIN_LOG_INTERVAL));
    // Poll forever: any registered, still-live thread whose last-reported
    // timestamp is older than WATCHDOG_THRESHOLD gets its current stack
    // trace logged (rate-limited below).
    while (true) {
        for (Entry<Thread, AtomicLong> entry : sLatencyMap.entrySet()) {
            Thread t = entry.getKey();
            long timestamp = entry.getValue().get();
            long now = System.currentTimeMillis();
            if ((now - timestamp > WATCHDOG_THRESHOLD)
                    && t.getState() != Thread.State.TERMINATED) {
                StringBuilder sb = new StringBuilder();
                String format = t.getName() + " has been delayed for more than "
                        + WATCHDOG_THRESHOLD + " milliseconds\n %s";
                for (StackTraceElement ste : t.getStackTrace()) {
                    sb.append(ste);
                    sb.append("\n");
                }
                // At most one log entry per MIN_LOG_INTERVAL for this message.
                RateLimitedLogger.tryLogForMessage(now, MIN_LOG_INTERVAL,
                        TimeUnit.MILLISECONDS, LOG, Level.INFO, format, sb.toString());
            }
        }
        try {
            Thread.sleep(WAKEUP_INTERVAL);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
java
public static ProcedurePartitionData fromPartitionInfoString(String partitionInfoString) { if (partitionInfoString == null || partitionInfoString.trim().isEmpty()) { return new ProcedurePartitionData(); } String[] partitionInfoParts = new String[0]; partitionInfoParts = partitionInfoString.split(","); assert(partitionInfoParts.length <= 2); if (partitionInfoParts.length == 2) { ProcedurePartitionData partitionInfo = fromPartitionInfoString(partitionInfoParts[0]); ProcedurePartitionData partitionInfo2 = fromPartitionInfoString(partitionInfoParts[1]); partitionInfo.addSecondPartitionInfo(partitionInfo2); return partitionInfo; } String subClause = partitionInfoParts[0]; // split on the colon String[] parts = subClause.split(":"); assert(parts.length == 2); // relabel the parts for code readability String columnInfo = parts[0].trim(); String paramIndex = parts[1].trim(); // split the columninfo parts = columnInfo.split("\\."); assert(parts.length == 2); // relabel the parts for code readability String tableName = parts[0].trim(); String columnName = parts[1].trim(); return new ProcedurePartitionData(tableName, columnName, paramIndex); }
java
@Override
public void runDDL(String ddl) {
    // Rewrite the incoming DDL, show the before/after for debugging,
    // then hand the transformed text to the superclass to execute.
    String modifiedDdl = transformDDL(ddl);
    printTransformedSql(ddl, modifiedDdl);
    // NOTE(review): second arg presumably suppresses re-transformation in
    // the superclass — confirm against the super.runDDL signature.
    super.runDDL(modifiedDdl, false);
}
java
/**
 * Records one host's response to an all-host NT procedure invocation.
 * When the last outstanding host answers, completes the all-host future
 * with the full host-id → response map.
 */
public void allHostNTProcedureCallback(ClientResponse clientResponse) {
    synchronized(m_allHostCallbackLock) {
        // The responding host id is smuggled back in the app status string
        // (see processAnyCallbacksFromFailedHosts, which relies on this too).
        int hostId = Integer.parseInt(clientResponse.getAppStatusString());
        boolean removed = m_outstandingAllHostProcedureHostIds.remove(hostId);
        // log this for now... I don't expect it to ever happen, but will be interesting to see...
        if (!removed) {
            tmLog.error(String.format(
                    "ProcedureRunnerNT.allHostNTProcedureCallback for procedure %s received late or unexepected response from hostID %d.",
                    m_procedureName, hostId));
            return;
        }
        m_allHostResponses.put(hostId, clientResponse);
        // Last response in: clear the in-flight flag and complete the future.
        if (m_outstandingAllHostProcedureHostIds.size() == 0) {
            m_outstandingAllHostProc.set(false);
            m_allHostFut.complete(m_allHostResponses);
        }
    }
}
java
/**
 * Invokes the named procedure on every live node's client interface and
 * returns a future that completes with a map of host id → response once
 * all hosts have answered (see allHostNTProcedureCallback).
 *
 * @throws VoltAbortException if an all-host call is already in flight
 */
protected CompletableFuture<Map<Integer,ClientResponse>> callAllNodeNTProcedure(String procName, Object... params) {
    // only one of these at a time
    if (!m_outstandingAllHostProc.compareAndSet(false, true)) {
        throw new VoltAbortException(new IllegalStateException("Only one AllNodeNTProcedure operation can be running at a time."));
    }
    StoredProcedureInvocation invocation = new StoredProcedureInvocation();
    invocation.setProcName(procName);
    invocation.setParams(params);
    invocation.setClientHandle(m_id);
    final Iv2InitiateTaskMessage workRequest =
            new Iv2InitiateTaskMessage(m_mailbox.getHSId(), m_mailbox.getHSId(),
                    TransactionInfoBaseMessage.UNUSED_TRUNC_HANDLE,
                    m_id, m_id, true, false, invocation, m_id,
                    ClientInterface.NT_REMOTE_PROC_CID, false);
    m_allHostFut = new CompletableFuture<>();
    m_allHostResponses = new HashMap<>();
    // hold this lock while getting the count of live nodes
    // also held by the response callback, which mutates the outstanding set
    long[] hsids;
    synchronized(m_allHostCallbackLock) {
        // collect the set of live client interface mailbox ids
        m_outstandingAllHostProcedureHostIds = VoltDB.instance().getHostMessenger().getLiveHostIds();
        // convert host ids to hsids
        hsids = m_outstandingAllHostProcedureHostIds.stream()
                .mapToLong(hostId -> CoreUtils.getHSIdFromHostAndSite(hostId, HostMessenger.CLIENT_INTERFACE_SITE_ID))
                .toArray();
    }
    // send the invocation to all live nodes
    // n.b. can't combine this step with above because sometimes the callbacks comeback so fast
    // you get a concurrent modification exception
    for (long hsid : hsids) {
        m_mailbox.send(hsid, workRequest);
    }
    return m_allHostFut;
}
java
/**
 * Finalizes one NT procedure call: records stats, delivers the response to
 * the client-interface mailbox, and notifies the NT procedure service.
 */
private void completeCall(ClientResponseImpl response) {
    // if we're keeping track, calculate result size
    if (m_perCallStats.samplingProcedure()) {
        m_perCallStats.setResultSize(response.getResults());
    }
    // First flag: user abort. Second flag: any other non-success outcome.
    m_statsCollector.endProcedure(response.getStatus() == ClientResponse.USER_ABORT,
            (response.getStatus() != ClientResponse.USER_ABORT) &&
            (response.getStatus() != ClientResponse.SUCCESS),
            m_perCallStats);
    // allow the GC to collect per-call stats if this proc isn't called for a while
    m_perCallStats = null;
    // send the response to the caller
    // must be done as IRM to CI mailbox for backpressure accounting
    response.setClientHandle(m_clientHandle);
    InitiateResponseMessage irm =
            InitiateResponseMessage.messageForNTProcResponse(m_ciHandle,
                    m_ccxn.connectionId(), response);
    m_mailbox.deliver(irm);
    // tell the service this runner is done
    m_ntProcService.handleNTProcEnd(ProcedureRunnerNT.this);
}
java
public void processAnyCallbacksFromFailedHosts(Set<Integer> failedHosts) { synchronized(m_allHostCallbackLock) { failedHosts.stream() .forEach(i -> { if (m_outstandingAllHostProcedureHostIds.contains(i)) { ClientResponseImpl cri = new ClientResponseImpl( ClientResponse.CONNECTION_LOST, new VoltTable[0], "Host " + i + " failed, connection lost"); // embed the hostid as a string in app status string // because the recipient expects this hack cri.setAppStatusString(String.valueOf(i)); allHostNTProcedureCallback(cri); } }); } }
java
/**
 * Routes a statement to the logger at the given index at the requested
 * severity level.
 *
 * @param logger index into the loggers array
 * @param level one of the level constants (trace/debug/error/fatal/info/warn)
 * @param statement text to log
 * @throws RuntimeException on an unknown level or out-of-range logger index
 */
public final static void log(int logger, int level, String statement) {
    // Reject indexes past the end up front; the switch handles the rest.
    if (logger >= loggers.length) {
        throw new RuntimeException("Attempted to log to logger index " + logger + " which doesn't exist");
    }
    switch (level) {
        case trace:
            loggers[logger].trace(statement);
            break;
        case debug:
            loggers[logger].debug(statement);
            break;
        case error:
            loggers[logger].error(statement);
            break;
        case fatal:
            loggers[logger].fatal(statement);
            break;
        case info:
            loggers[logger].info(statement);
            break;
        case warn:
            loggers[logger].warn(statement);
            break;
        default:
            throw new RuntimeException("Unhandled log level " + level);
    }
}
java
/**
 * Restores a data file from a backup stream of (size, position, bytes)
 * records: each chunk is written back at its recorded position in the
 * destination file, then the stored file length is reapplied.
 *
 * @param sourceName path of the backup file to read
 * @param destName path of the data file to patch
 * @throws IOException on any read/write failure
 */
public static void restoreFile(String sourceName, String destName) throws IOException {
    // FIX: try-with-resources so both files are closed even on failure
    // (the originals leaked on any exception mid-copy).
    try (RandomAccessFile source = new RandomAccessFile(sourceName, "r");
            RandomAccessFile dest = new RandomAccessFile(destName, "rw")) {
        while (source.getFilePointer() != source.length()) {
            int size = source.readInt();
            long position = source.readLong();
            byte[] buffer = new byte[size];
            // FIX: readFully — plain read() may return fewer than size bytes,
            // silently corrupting the restore.
            source.readFully(buffer);
            dest.seek(position);
            dest.write(buffer);
        }
        // The authoritative file length is stored at a fixed offset in dest.
        dest.seek(DataFileCache.LONG_FREE_POS_POS);
        long length = dest.readLong();
        JavaSystem.setRAFileLength(dest, length);
    }
}
java
/**
 * Renders the admin page by substituting #KEY# placeholders in the cached
 * HTML template with the supplied values (null values render as "NULL").
 * Returns a static error page if anything goes wrong.
 */
String getHTMLForAdminPage(Map<String,String> params) {
    try {
        String template = m_htmlTemplates.get("admintemplate.html");
        for (Entry<String, String> e : params.entrySet()) {
            String key = e.getKey();
            // FIX: test for a null key BEFORE dereferencing it; the original
            // called e.getKey().toUpperCase() first, so a null key threw NPE
            // and the guard below it was unreachable.
            if (key == null) continue;
            String value = e.getValue();
            if (value == null) value = "NULL";
            template = template.replace("#" + key.toUpperCase() + "#", value);
        }
        return template;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return "<html><body>An unrecoverable error was encountered while generating this page.</body></html>";
}
java
/**
 * Submits the partition-change handler to the executor and blocks until it
 * finishes.
 *
 * @throws InterruptedException if interrupted while waiting
 * @throws ExecutionException if the handler threw
 */
public void start() throws InterruptedException, ExecutionException {
    Future<?> task = es.submit(handlePartitionChange);
    // Block until the handler completes; its failure surfaces here.
    task.get();
}
java
/**
 * Advances the cached estimated time to {@code now}. Returns the tardiness
 * (now - previous estimate, in ms) when the updater fell behind by more
 * than ESTIMATED_TIME_WARN_INTERVAL and a report is due; otherwise null.
 */
public static Long update(final long now) {
    final long estNow = EstTime.m_now;
    if (estNow == now) {
        // No change — nothing to publish, nothing to report.
        return null;
    }
    EstTime.m_now = now;
    /*
     * Check if updating the estimated time was especially tardy.
     * I am concerned that the thread responsible for updating the estimated
     * time might be blocking on something and want to be able to log if
     * that happens
     */
    if (now - estNow > ESTIMATED_TIME_WARN_INTERVAL) {
        /*
         * Only report the error every 60 seconds to cut down on log spam
         */
        if (lastErrorReport > now) {
            //Time moves backwards on occasion, check and reset
            lastErrorReport = now;
        }
        if (now - lastErrorReport > maxErrorReportInterval) {
            lastErrorReport = now;
            // Non-null return signals the caller to log the lag.
            return now - estNow;
        }
    }
    return null;
}
java
/**
 * Compiles a VoltDB MIGRATE FROM table WHERE ... statement. A WHERE clause
 * is mandatory (X_47000 otherwise) and must be boolean-typed (X_42568).
 */
StatementDMQL compileMigrateStatement(RangeVariable[] outerRanges) {
    final Expression condition;
    assert token.tokenType == Tokens.MIGRATE;
    read();
    readThis(Tokens.FROM);
    RangeVariable[] rangeVariables = {
        readSimpleRangeVariable(StatementTypes.MIGRATE_WHERE) };
    Table table = rangeVariables[0].getTable();
    if (token.tokenType == Tokens.WHERE) {
        read();
        condition = XreadBooleanValueExpression();
        // Resolve column references against outer ranges, then the target table.
        HsqlList unresolved = condition.resolveColumnReferences(outerRanges, null);
        unresolved = Expression.resolveColumnSet(rangeVariables, unresolved, null);
        ExpressionColumn.checkColumnsResolved(unresolved);
        condition.resolveTypes(session, null);
        if (condition.isParam()) {
            // A bare parameter in boolean position is typed as boolean.
            condition.dataType = Type.SQL_BOOLEAN;
        }
        if (condition.getDataType() != Type.SQL_BOOLEAN) {
            throw Error.error(ErrorCode.X_42568);
        }
    } else {
        // MIGRATE without a WHERE clause is rejected.
        throw Error.error(ErrorCode.X_47000);
    }
    // check WHERE condition
    RangeVariableResolver resolver =
        new RangeVariableResolver(rangeVariables, condition, compileContext);
    resolver.processConditions();
    rangeVariables = resolver.rangeVariables;
    return new StatementDML(session, table, rangeVariables, compileContext);
}
java
/**
 * Compiles DELETE FROM ... [WHERE ...] and TRUNCATE TABLE ... statements,
 * including VoltDB's ORDER BY / LIMIT extension for DELETE.
 */
StatementDMQL compileDeleteStatement(RangeVariable[] outerRanges) {

    Expression condition       = null;
    boolean    truncate        = false;
    boolean    restartIdentity = false;

    // Distinguish TRUNCATE TABLE from DELETE FROM by the leading token.
    switch (token.tokenType) {
        case Tokens.TRUNCATE : {
            read();
            readThis(Tokens.TABLE);
            truncate = true;
            break;
        }
        case Tokens.DELETE : {
            read();
            readThis(Tokens.FROM);
            break;
        }
    }

    RangeVariable[] rangeVariables = {
        readSimpleRangeVariable(StatementTypes.DELETE_WHERE) };
    Table table     = rangeVariables[0].getTable();
    Table baseTable = table.getBaseTable();

    /* A VoltDB Extension.
     * Views from Streams are now updatable.
     * Comment out this guard and check if it is a view
     * from Stream or PersistentTable in planner.
    if (!table.isUpdatable()) {
        throw Error.error(ErrorCode.X_42000);
    }
    A VoltDB Extension */

    if (truncate) {
        // Optional CONTINUE/RESTART IDENTITY clause.
        switch (token.tokenType) {
            case Tokens.CONTINUE : {
                read();
                readThis(Tokens.IDENTITY);
                break;
            }
            case Tokens.RESTART : {
                read();
                readThis(Tokens.IDENTITY);
                restartIdentity = true;
                break;
            }
        }
        // TRUNCATE is refused on tables referenced by FK constraints.
        for (int i = 0; i < table.constraintList.length; i++) {
            if (table.constraintList[i].getConstraintType() == Constraint.MAIN) {
                throw Error.error(ErrorCode.X_23501);
            }
        }
    }

    // TRUNCATE is only valid directly on a base table.
    if (truncate && table != baseTable) {
        throw Error.error(ErrorCode.X_42000);
    }

    if (!truncate && token.tokenType == Tokens.WHERE) {
        read();
        condition = XreadBooleanValueExpression();
        HsqlList unresolved = condition.resolveColumnReferences(outerRanges, null);
        unresolved = Expression.resolveColumnSet(rangeVariables, unresolved, null);
        ExpressionColumn.checkColumnsResolved(unresolved);
        condition.resolveTypes(session, null);
        if (condition.isParam()) {
            condition.dataType = Type.SQL_BOOLEAN;
        }
        if (condition.getDataType() != Type.SQL_BOOLEAN) {
            throw Error.error(ErrorCode.X_42568);
        }
    }

    // VoltDB Extension:
    // baseTable could be null for stream views.
    if (baseTable != null && table != baseTable) {
        // DELETE through a view: rewrite the condition against the base
        // table's columns and fold in the view's own WHERE condition.
        QuerySpecification select =
            ((TableDerived) table).getQueryExpression().getMainSelect();
        if (condition != null) {
            condition = condition.replaceColumnReferences(rangeVariables[0],
                    select.exprColumns);
        }
        rangeVariables[0] = new RangeVariable(select.rangeVariables[0]);
        condition = ExpressionLogical.andExpressions(select.queryCondition,
                condition);
    }

    if (condition != null) {
        RangeVariableResolver resolver =
            new RangeVariableResolver(rangeVariables, condition, compileContext);
        resolver.processConditions();
        rangeVariables = resolver.rangeVariables;
    }

    // VoltDB Extension:
    // This needs to be done before building the compiled statement
    // so that parameters in LIMIT or OFFSET are retrieved from
    // the compileContext
    SortAndSlice sas = voltGetSortAndSliceForDelete(rangeVariables);

    StatementDMQL cs = new StatementDML(session, table, rangeVariables,
            compileContext, restartIdentity);

    // VoltDB Extension:
    voltAppendDeleteSortAndSlice((StatementDML)cs, sas);
    return cs;
}
java
/**
 * Compiles UPDATE table SET col = expr [, ...] [WHERE ...], including the
 * rewrite required when the target is an updatable view over a base table.
 */
StatementDMQL compileUpdateStatement(RangeVariable[] outerRanges) {

    read();

    Expression[]   updateExpressions;
    int[]          columnMap;
    boolean[]      columnCheckList;
    OrderedHashSet colNames = new OrderedHashSet();
    HsqlArrayList  exprList = new HsqlArrayList();
    RangeVariable[] rangeVariables = {
        readSimpleRangeVariable(StatementTypes.UPDATE_WHERE) };
    Table table     = rangeVariables[0].rangeTable;
    Table baseTable = table.getBaseTable();

    readThis(Tokens.SET);
    // Fills colNames (targets) and exprList (values) in lockstep.
    readSetClauseList(rangeVariables, colNames, exprList);

    columnMap         = table.getColumnIndexes(colNames);
    columnCheckList   = table.getColumnCheckList(columnMap);
    updateExpressions = new Expression[exprList.size()];

    exprList.toArray(updateExpressions);

    Expression condition = null;

    if (token.tokenType == Tokens.WHERE) {
        read();
        condition = XreadBooleanValueExpression();
        HsqlList unresolved = condition.resolveColumnReferences(outerRanges, null);
        unresolved = Expression.resolveColumnSet(rangeVariables, unresolved, null);
        ExpressionColumn.checkColumnsResolved(unresolved);
        condition.resolveTypes(session, null);
        if (condition.isParam()) {
            condition.dataType = Type.SQL_BOOLEAN;
        } else if (condition.getDataType() != Type.SQL_BOOLEAN) {
            throw Error.error(ErrorCode.X_42568);
        }
    }

    resolveUpdateExpressions(table, rangeVariables, columnMap,
                             updateExpressions, outerRanges);

    if (baseTable != null && table != baseTable) {
        // UPDATE through a view: rewrite the condition against the base
        // table and fold in the view's own WHERE condition.
        QuerySpecification select =
            ((TableDerived) table).getQueryExpression().getMainSelect();
        if (condition != null) {
            condition = condition.replaceColumnReferences(rangeVariables[0],
                    select.exprColumns);
        }
        rangeVariables[0] = new RangeVariable(select.rangeVariables[0]);
        condition = ExpressionLogical.andExpressions(select.queryCondition,
                condition);
    }

    if (condition != null) {
        RangeVariableResolver resolver =
            new RangeVariableResolver(rangeVariables, condition, compileContext);
        resolver.processConditions();
        rangeVariables = resolver.rangeVariables;
    }

    if (baseTable != null && table != baseTable) {
        // Remap the view's column indexes onto the base table's columns.
        int[] baseColumnMap = table.getBaseTableColumnMap();
        int[] newColumnMap  = new int[columnMap.length];
        ArrayUtil.projectRow(baseColumnMap, columnMap, newColumnMap);
        columnMap = newColumnMap;
    }

    StatementDMQL cs = new StatementDML(session, table, rangeVariables,
            columnMap, updateExpressions, columnCheckList, compileContext);
    return cs;
}
java
/**
 * Parses one WHEN [NOT] MATCHED clause of a MERGE statement, accumulating
 * the UPDATE SET assignments or the INSERT column list and VALUES row.
 * Recurses to consume any further WHEN clauses. At most one MATCHED and
 * one NOT MATCHED clause are allowed (X_42547 / X_42548 otherwise).
 */
private void readMergeWhen(OrderedHashSet insertColumnNames,
                           OrderedHashSet updateColumnNames,
                           HsqlArrayList insertExpressions,
                           HsqlArrayList updateExpressions,
                           RangeVariable[] targetRangeVars,
                           RangeVariable sourceRangeVar) {

    Table table       = targetRangeVars[0].rangeTable;
    int   columnCount = table.getColumnCount();

    readThis(Tokens.WHEN);

    if (token.tokenType == Tokens.MATCHED) {
        // WHEN MATCHED THEN UPDATE SET ...
        if (updateExpressions.size() != 0) {
            throw Error.error(ErrorCode.X_42547);
        }
        read();
        readThis(Tokens.THEN);
        readThis(Tokens.UPDATE);
        readThis(Tokens.SET);
        readSetClauseList(targetRangeVars, updateColumnNames,
                          updateExpressions);
    } else if (token.tokenType == Tokens.NOT) {
        // WHEN NOT MATCHED THEN INSERT ...
        if (insertExpressions.size() != 0) {
            throw Error.error(ErrorCode.X_42548);
        }
        read();
        readThis(Tokens.MATCHED);
        readThis(Tokens.THEN);
        readThis(Tokens.INSERT);

        // parse INSERT statement
        // optional column list
        int brackets = readOpenBrackets();
        if (brackets == 1) {
            readSimpleColumnNames(insertColumnNames, targetRangeVars[0]);
            readThis(Tokens.CLOSEBRACKET);
            brackets = 0;
        }
        readThis(Tokens.VALUES);
        Expression e = XreadContextuallyTypedTable(columnCount);
        // Exactly one VALUES row is allowed per NOT MATCHED clause.
        if (e.nodes.length != 1) {
            throw Error.error(ErrorCode.X_21000);
        }
        insertExpressions.add(e);
    } else {
        throw unexpectedToken();
    }

    // Consume any subsequent WHEN clause.
    if (token.tokenType == Tokens.WHEN) {
        readMergeWhen(insertColumnNames, updateColumnNames,
                      insertExpressions, updateExpressions, targetRangeVars,
                      sourceRangeVar);
    }
}
java
/**
 * Compiles a CALL statement. If the identifier names a stored procedure,
 * parses its argument list and builds a procedure invocation; otherwise
 * (unless isStrictlyProcedure) falls back to treating the text as a bare
 * value expression to evaluate.
 */
StatementDMQL compileCallStatement(RangeVariable[] outerRanges,
                                   boolean isStrictlyProcedure) {

    read();

    if (isIdentifier()) {
        checkValidCatalogName(token.namePrePrefix);

        RoutineSchema routineSchema =
            (RoutineSchema) database.schemaManager.findSchemaObject(
                token.tokenString,
                session.getSchemaName(token.namePrefix),
                SchemaObject.PROCEDURE);

        if (routineSchema != null) {
            read();

            // Parse the (possibly empty) parenthesized argument list.
            HsqlArrayList list = new HsqlArrayList();
            readThis(Tokens.OPENBRACKET);
            if (token.tokenType == Tokens.CLOSEBRACKET) {
                read();
            } else {
                while (true) {
                    Expression e = XreadValueExpression();
                    list.add(e);
                    if (token.tokenType == Tokens.COMMA) {
                        read();
                    } else {
                        readThis(Tokens.CLOSEBRACKET);
                        break;
                    }
                }
            }

            Expression[] arguments = new Expression[list.size()];
            list.toArray(arguments);

            // Overload resolution is by argument count.
            Routine routine =
                routineSchema.getSpecificRoutine(arguments.length);
            HsqlList unresolved = null;

            for (int i = 0; i < arguments.length; i++) {
                Expression e = arguments[i];
                if (e.isParam()) {
                    // Dynamic parameter: adopt the routine parameter's
                    // type/writability attributes.
                    e.setAttributesAsColumn(
                        routine.getParameter(i),
                        routine.getParameter(i).isWriteable());
                } else {
                    int paramMode =
                        routine.getParameter(i).getParameterMode();
                    unresolved =
                        arguments[i].resolveColumnReferences(outerRanges,
                                                             unresolved);
                    // OUT/INOUT arguments must be variables.
                    if (paramMode != SchemaObject.ParameterModes.PARAM_IN) {
                        if (e.getType() != OpTypes.VARIABLE) {
                            throw Error.error(ErrorCode.X_42603);
                        }
                    }
                }
            }
            ExpressionColumn.checkColumnsResolved(unresolved);
            for (int i = 0; i < arguments.length; i++) {
                arguments[i].resolveTypes(session, null);
            }

            StatementDMQL cs = new StatementProcedure(session, routine,
                arguments, compileContext);
            return cs;
        }
    }

    // Not a known procedure name.
    if (isStrictlyProcedure) {
        throw Error.error(ErrorCode.X_42501, token.tokenString);
    }

    // Fallback: CALL <value expression>.
    Expression expression = this.XreadValueExpression();
    HsqlList unresolved =
        expression.resolveColumnReferences(outerRanges, null);

    ExpressionColumn.checkColumnsResolved(unresolved);
    expression.resolveTypes(session, null);

    // expression.paramMode = PARAM_OUT;
    StatementDMQL cs = new StatementProcedure(session, expression,
        compileContext);
    return cs;
}
java
/**
 * VoltDB extension: parses an optional ORDER BY (with LIMIT/OFFSET) after
 * a DELETE's WHERE clause and resolves its column references against the
 * DELETE's range variables. Returns SortAndSlice.noSort when absent.
 */
private SortAndSlice voltGetSortAndSliceForDelete(RangeVariable[] rangeVariables) {
    SortAndSlice sas = XreadOrderByExpression();
    if (sas == null || sas == SortAndSlice.noSort)
        return SortAndSlice.noSort;
    // Resolve columns in the ORDER BY clause. This code modified
    // from how compileDelete resolves columns in its WHERE clause
    for (int i = 0; i < sas.exprList.size(); ++i) {
        Expression e = (Expression)sas.exprList.get(i);
        HsqlList unresolved =
            e.resolveColumnReferences(RangeVariable.emptyArray, null);
        unresolved = Expression.resolveColumnSet(rangeVariables, unresolved, null);
        ExpressionColumn.checkColumnsResolved(unresolved);
        e.resolveTypes(session, null);
    }
    return sas;
}
java
public ClassNameMatchStatus addPattern(String classNamePattern) { boolean matchFound = false; if (m_classList == null) { m_classList = getClasspathClassFileNames(); } String preppedName = classNamePattern.trim(); // include only full classes // for nested classes, include the parent pattern int indexOfDollarSign = classNamePattern.indexOf('$'); if (indexOfDollarSign >= 0) { classNamePattern = classNamePattern.substring(0, indexOfDollarSign); } // Substitution order is critical. // Keep track of whether or not this is a wildcard expression. // '.' is specifically not a wildcard. String regExPreppedName = preppedName.replace(".", "[.]"); boolean isWildcard = regExPreppedName.contains("*"); if (isWildcard) { regExPreppedName = regExPreppedName.replace("**", "[\\w.\\$]+"); regExPreppedName = regExPreppedName.replace("*", "[\\w\\$]*"); } String regex = "^" + // (line start) regExPreppedName + "$"; // (line end) Pattern pattern = Pattern.compile(regex, Pattern.MULTILINE); Matcher matcher = pattern.matcher(m_classList); while (matcher.find()) { String match = matcher.group(); // skip nested classes; the base class will include them if (match.contains("$")) { continue; } matchFound = true; m_classNameMatches.add(match); } if (matchFound) { return ClassNameMatchStatus.MATCH_FOUND; } else { if (isWildcard) { return ClassNameMatchStatus.NO_WILDCARD_MATCH; } else { return ClassNameMatchStatus.NO_EXACT_MATCH; } } }
java
private static void processPathPart(String path, Set<String> classes) { File rootFile = new File(path); if (rootFile.isDirectory() == false) { return; } File[] files = rootFile.listFiles(); for (File f : files) { // classes in the anonymous package if (f.getName().endsWith(".class")) { String className = f.getName(); // trim the trailing .class from the end className = className.substring(0, className.length() - ".class".length()); classes.add(className); } if (f.isDirectory()) { Package p = new Package(null, f); p.process(classes); } } }
java
/**
 * Scans every entry of java.class.path and returns the sorted set of
 * discovered class names, one per line (each line newline-terminated).
 */
static String getClasspathClassFileNames() {
    String classpath = System.getProperty("java.class.path");
    Set<String> classes = new TreeSet<String>();
    // Visit each classpath entry; only directories contribute (see processPathPart).
    for (String entry : classpath.split(File.pathSeparator)) {
        processPathPart(entry, classes);
    }
    // TreeSet iteration yields sorted order; keep the trailing newline.
    StringBuilder listing = new StringBuilder();
    for (String className : classes) {
        listing.append(className).append('\n');
    }
    return listing.toString();
}
java
double getMemoryLimitSize(String sizeStr) { if (sizeStr==null || sizeStr.length()==0) { return 0; } try { if (sizeStr.charAt(sizeStr.length()-1)=='%') { // size as a percentage of total available memory int perc = Integer.parseInt(sizeStr.substring(0, sizeStr.length()-1)); if (perc<0 || perc > 99) { throw new IllegalArgumentException("Invalid memory limit percentage: " + sizeStr); } return PlatformProperties.getPlatformProperties().ramInMegabytes*1048576L*perc/100.0; } else { // size in GB double size = Double.parseDouble(sizeStr)*1073741824L; if (size<0) { throw new IllegalArgumentException("Invalid memory limit value: " + sizeStr); } return size; } } catch(NumberFormatException e) { throw new IllegalArgumentException("Invalid memory limit value " + sizeStr + ". Memory limit must be configued as a percentage of total available memory or as GB value"); } }
java
public static void validatePath(String path) throws IllegalArgumentException { if (path == null) { throw new IllegalArgumentException("Path cannot be null"); } if (path.length() == 0) { throw new IllegalArgumentException("Path length must be > 0"); } if (path.charAt(0) != '/') { throw new IllegalArgumentException( "Path \"" + path + "\" must start with / character"); } if (path.length() == 1) { // done checking - it's the root return; } if (path.charAt(path.length() - 1) == '/') { throw new IllegalArgumentException( "Path must not end with / character"); } String reason = null; char lastc = '/'; char chars[] = path.toCharArray(); char c; for (int i = 1; i < chars.length; lastc = chars[i], i++) { c = chars[i]; if (c == 0) { reason = "null character not allowed @" + i; break; } else if (c == '/' && lastc == '/') { reason = "empty node name specified @" + i; break; } else if (c == '.' && lastc == '.') { if (chars[i - 2] == '/' && ((i + 1 == chars.length) || chars[i + 1] == '/')) { reason = "relative paths not allowed @" + i; break; } } else if (c == '.') { if (chars[i - 1] == '/' && ((i + 1 == chars.length) || chars[i + 1] == '/')) { reason = "relative paths not allowed @" + i; break; } } else if (c > '\u0000' && c < '\u001f' || c > '\u007f' && c < '\u009F' || c > '\ud800' && c < '\uf8ff' || c > '\ufff0' && c < '\uffff') { reason = "invalid charater @" + i; break; } } if (reason != null) { throw new IllegalArgumentException("Invalid path string \"" + path + "\" caused by " + reason); } }
java
/**
 * Copies this node's schema/estimate bookkeeping onto a freshly created,
 * unconnected copy. Inline nodes are not supported here.
 */
protected void produceCopyForTransformation(AbstractPlanNode copy) {
    copy.m_outputSchema = m_outputSchema;
    copy.m_hasSignificantOutputSchema = m_hasSignificantOutputSchema;
    copy.m_outputColumnHints = m_outputColumnHints;
    copy.m_estimatedOutputTupleCount = m_estimatedOutputTupleCount;
    copy.m_estimatedProcessedTupleCount = m_estimatedProcessedTupleCount;

    // clone is not yet implemented for every node.
    assert(m_inlineNodes.size() == 0);
    assert(m_isInline == false);

    // the api requires the copy is not (yet) connected
    assert (copy.m_parents.size() == 0);
    assert (copy.m_children.size() == 0);
}
java
/**
 * Default single-child behavior: recursively generate the child's output
 * schema, then adopt a TVE-substituted copy of it as this node's
 * (non-significant, cached) output schema.
 */
public void generateOutputSchema(Database db) {
    // default behavior: just copy the input schema
    // to the output schema
    assert(m_children.size() == 1);
    AbstractPlanNode childNode = m_children.get(0);
    childNode.generateOutputSchema(db);
    // Replace the expressions in our children's columns with TVEs. When
    // we resolve the indexes in these TVEs they will point back at the
    // correct input column, which we are assuming that the child node
    // has filled in with whatever expression was here before the replacement.
    // Output schemas defined using this standard algorithm
    // are just cached "fillers" that satisfy the legacy
    // resolveColumnIndexes/generateOutputSchema/getOutputSchema protocol
    // until it can be fixed up -- see the FIXME comment on generateOutputSchema.
    m_hasSignificantOutputSchema = false;
    m_outputSchema = childNode.getOutputSchema().copyAndReplaceWithTVE();
}
java
/**
 * Accumulates the tables read and indexes used by this node's inline nodes,
 * its child subtree, and any subqueries hanging off its expressions.
 *
 * @param tablesRead out-param: table name -> scan descriptor
 * @param indexes out-param: index names encountered
 */
public void getTablesAndIndexes(Map<String, StmtTargetTableScan> tablesRead, Collection<String> indexes) {
    // Inline nodes first, then the child subtree, then subquery plans.
    for (AbstractPlanNode inlined : m_inlineNodes.values()) {
        inlined.getTablesAndIndexes(tablesRead, indexes);
    }
    for (AbstractPlanNode child : m_children) {
        child.getTablesAndIndexes(tablesRead, indexes);
    }
    getTablesAndIndexesFromSubqueries(tablesRead, indexes);
}
java
/**
 * Recurses into the plan tree of every subquery subexpression found under
 * this node, accumulating its tables and indexes.
 */
protected void getTablesAndIndexesFromSubqueries(Map<String, StmtTargetTableScan> tablesRead, Collection<String> indexes) {
    // findAllSubquerySubexpressions is expected to return only subquery
    // expressions; the assert documents that contract.
    for(AbstractExpression expr : findAllSubquerySubexpressions()) {
        assert(expr instanceof AbstractSubqueryExpression);
        AbstractSubqueryExpression subquery = (AbstractSubqueryExpression) expr;
        AbstractPlanNode subqueryNode = subquery.getSubqueryNode();
        assert(subqueryNode != null);
        subqueryNode.getTablesAndIndexes(tablesRead, indexes);
    }
}
java
/**
 * Default ordering test: a node inherits its ordering only through a
 * single-child chain; with zero or multiple children, no ordering is claimed.
 * Subclasses that establish an order override this.
 */
public boolean isOutputOrdered (List<AbstractExpression> sortExpressions, List<SortDirectionType> sortDirections) {
    assert(sortExpressions.size() == sortDirections.size());
    // Short-circuits to false unless there is exactly one child.
    return m_children.size() == 1
            && m_children.get(0).isOutputOrdered(sortExpressions, sortDirections);
}
java
/**
 * Walks down the first-child chain looking for the first node that
 * actually defines an output schema (either a significant schema or one
 * supplied by an inline projection / inline insert). Optionally walks back
 * up, stamping that schema onto every ancestor that lacked one.
 *
 * @param resetBack when true, propagate the found schema back up the chain
 * @return the schema found
 * @throws PlanningErrorException if no node in the chain defines a schema
 */
public final NodeSchema getTrueOutputSchema(boolean resetBack) throws PlanningErrorException {
    AbstractPlanNode child;
    NodeSchema answer = null;
    //
    // Note: This code is translated from the C++ code in
    // AbstractPlanNode::getOutputSchema. It's considerably
    // different there, but I think this has the corner
    // cases covered correctly.
    for (child = this;
         child != null;
         child = (child.getChildCount() == 0) ? null : child.getChild(0)) {
        NodeSchema childSchema;
        if (child.m_hasSignificantOutputSchema) {
            childSchema = child.getOutputSchema();
            assert(childSchema != null);
            answer = childSchema;
            break;
        }
        AbstractPlanNode childProj = child.getInlinePlanNode(PlanNodeType.PROJECTION);
        if (childProj != null) {
            // An inline insert (if present) supersedes the projection as
            // the schema source.
            AbstractPlanNode schemaSrc = null;
            AbstractPlanNode inlineInsertNode
                = childProj.getInlinePlanNode(PlanNodeType.INSERT);
            if (inlineInsertNode != null) {
                schemaSrc = inlineInsertNode;
            } else {
                schemaSrc = childProj;
            }
            childSchema = schemaSrc.getOutputSchema();
            if (childSchema != null) {
                answer = childSchema;
                break;
            }
        }
    }
    if (child == null) {
        // We've gone to the end of the plan. This is a
        // failure in the EE.
        assert(false);
        throw new PlanningErrorException("AbstractPlanNode with no true output schema. Please notify VoltDB Support.");
    }
    // Trace back the chain of parents and reset the
    // output schemas of the parent. These will all be
    // exactly the same. Note that the source of the
    // schema may be an inline plan node. So we need
    // to set the child's output schema to be the answer.
    // If the schema source is the child node itself, this will
    // set the the output schema to itself, so no harm
    // will be done.
    if (resetBack) {
        do {
            if (child instanceof AbstractJoinPlanNode) {
                // In joins with inlined aggregation, the inlined
                // aggregate node is the one that determines the schema.
                // (However, the enclosing join node still has its
                // "m_hasSignificantOutputSchema" bit set.)
                //
                // The method resolveColumnIndexes will overwrite
                // a join node's schema if there is aggregation. In order
                // to avoid undoing the work we've done here, we must
                // also update the inlined aggregate node.
                AggregatePlanNode aggNode
                    = AggregatePlanNode.getInlineAggregationNode(child);
                if (aggNode != null) {
                    aggNode.setOutputSchema(answer);
                }
            }
            if (! child.m_hasSignificantOutputSchema) {
                child.setOutputSchema(answer);
            }
            // Climb to the (sole) parent, stopping at the root.
            child = (child.getParentCount() == 0) ? null : child.getParent(0);
        } while (child != null);
    }
    return answer;
}
java
/**
 * Appends child to this node's children and back-links this node as one of
 * the child's parents.
 */
public void addAndLinkChild(AbstractPlanNode child) {
    assert(child != null);
    m_children.add(child);
    child.m_parents.add(this);
}
java
/**
 * Replaces the child at the given index and back-links this node as the new
 * child's parent. NOTE(review): the displaced child's parent link is not
 * cleared here — callers (e.g. replaceChild) handle that themselves.
 */
public void setAndLinkChild(int index, AbstractPlanNode child) {
    assert(child != null);
    m_children.set(index, child);
    child.m_parents.add(this);
}
java
/**
 * Removes the parent/child link in both directions. Either remove() is a
 * silent no-op if the corresponding link is absent.
 */
public void unlinkChild(AbstractPlanNode child) {
    assert(child != null);
    m_children.remove(child);
    child.m_parents.remove(this);
}
java
/**
 * Swaps newChild in for the first child equal to oldChild, detaching
 * oldChild from all of its parents.
 *
 * @return true if a replacement happened, false if oldChild was not found
 */
public boolean replaceChild(AbstractPlanNode oldChild, AbstractPlanNode newChild) {
    assert(oldChild != null);
    assert(newChild != null);
    for (int position = 0; position < m_children.size(); ++position) {
        if (!m_children.get(position).equals(oldChild)) {
            continue;
        }
        // Fully detach the old child, then splice in the new one in place.
        oldChild.m_parents.clear();
        setAndLinkChild(position, newChild);
        return true;
    }
    return false;
}
java
/**
 * Splices node between this node and all of its current children: every
 * existing child is re-parented under node, then node becomes this node's
 * only child. Each transferred child must have had exactly one parent.
 */
public void addIntermediary(AbstractPlanNode node) {
    // transfer this node's children to node
    Iterator<AbstractPlanNode> it = m_children.iterator();
    while (it.hasNext()) {
        AbstractPlanNode child = it.next();
        it.remove();                          // remove this.child from m_children
        assert child.getParentCount() == 1;
        child.clearParents();                 // and reset child's parents list
        node.addAndLinkChild(child);          // set node.child and child.parent
    }
    // and add node to this node's children
    assert(m_children.size() == 0);
    addAndLinkChild(node);
}
java
/**
 * Recursively asks each child whether it (or a descendant) carries an
 * inlined index scan of the named table. Subclasses that actually inline
 * an index scan are expected to override this.
 *
 * @return true if any child subtree reports such a scan
 */
public boolean hasInlinedIndexScanOfTable(String tableName) {
    final int childCount = getChildCount();
    for (int idx = 0; idx < childCount; ++idx) {
        if (getChild(idx).hasInlinedIndexScanOfTable(tableName)) {
            return true;
        }
    }
    return false;
}
java
/**
 * Collects every subexpression of the given class reachable from this
 * node: from its inline plan nodes (recursively) and from its output
 * schema's column expressions.
 */
protected void findAllExpressionsOfClass(Class< ? extends AbstractExpression> aeClass, Set<AbstractExpression> collected) {
    // Check the inlined plan nodes
    for (AbstractPlanNode inlineNode: getInlinePlanNodes().values()) {
        // For inline node we MUST go recursive to its children!!!!!
        inlineNode.findAllExpressionsOfClass(aeClass, collected);
    }

    // and the output column expressions if there were no projection
    NodeSchema schema = getOutputSchema();
    if (schema != null) {
        schema.addAllSubexpressionsOfClassFromNodeSchema(collected, aeClass);
    }
}
java
/**
 * Emits this node and its edges in Graphviz DOT syntax: a label line for
 * this node, edges to (and recursive bodies of) its inline nodes, and
 * edges to its children (children render themselves elsewhere).
 */
public String toDOTString() {
    StringBuilder sb = new StringBuilder();

    // id [label=id: value-type <value-type-attributes>];
    // id -> child_id;
    // id -> child_id;
    sb.append(m_id).append(" [label=\"").append(m_id).append(": ").append(getPlanNodeType()).append("\" ");
    sb.append(getValueTypeDotString());
    sb.append("];\n");
    for (AbstractPlanNode node : m_inlineNodes.values()) {
        sb.append(m_id).append(" -> ").append(node.getPlanNodeId().intValue()).append(";\n");
        sb.append(node.toDOTString());
    }
    for (AbstractPlanNode node : m_children) {
       sb.append(m_id).append(" -> ").append(node.getPlanNodeId().intValue()).append(";\n");
    }

    return sb.toString();
}
java
/**
 * DOT styling attributes for this node: inline nodes render red, network
 * nodes (send/receive) render black, everything else uses the default.
 */
private String getValueTypeDotString() {
    if (isInline()) {
        return "fontcolor=\"white\" style=\"filled\" fillcolor=\"red\"";
    }
    switch (getPlanNodeType()) {
        case SEND:
        case RECEIVE:
        case MERGERECEIVE:
            return "fontcolor=\"white\" style=\"filled\" fillcolor=\"black\"";
        default:
            return "";
    }
}
java
/**
 * Depth-first traversal used to gather scan nodes. This base implementation
 * only recurses (children first, then inlined nodes); scan node subclasses
 * add themselves to {@code collected}. {@code visited} guards against
 * cycles, which are never expected in a plan graph.
 */
public void getScanNodeList_recurse(ArrayList<AbstractScanPlanNode> collected,
        HashSet<AbstractPlanNode> visited) {
    // HashSet.add returns false when the element was already present.
    if ( ! visited.add(this)) {
        assert(false): "do not expect loops in plangraph.";
        return;
    }
    for (AbstractPlanNode child : m_children) {
        child.getScanNodeList_recurse(collected, visited);
    }
    for (AbstractPlanNode inlined : m_inlineNodes.values()) {
        inlined.getScanNodeList_recurse(collected, visited);
    }
}
java
/**
 * Post-order depth-first collection of the plan graph: every node is
 * appended to {@code collected} only after all of its children.
 * {@code visited} guards against cycles, which are never expected here.
 */
public void getPlanNodeList_recurse(ArrayList<AbstractPlanNode> collected,
        HashSet<AbstractPlanNode> visited) {
    // HashSet.add returns false when the element was already present.
    if ( ! visited.add(this)) {
        assert(false): "do not expect loops in plangraph.";
        return;
    }
    for (AbstractPlanNode child : m_children) {
        child.getPlanNodeList_recurse(collected, visited);
    }
    // Post-order: this node follows its entire subtree.
    collected.add(this);
}
java
private static Object nullValueForType(final Class<?> expectedClz) { if (expectedClz == long.class) { return VoltType.NULL_BIGINT; } else if (expectedClz == int.class) { return VoltType.NULL_INTEGER; } else if (expectedClz == short.class) { return VoltType.NULL_SMALLINT; } else if (expectedClz == byte.class) { return VoltType.NULL_TINYINT; } else if (expectedClz == double.class) { return VoltType.NULL_FLOAT; } // all non-primitive types can handle null return null; }
java
/**
 * Parses a string into the numeric primitive (or wrapper, via autoboxing)
 * matching {@code expectedClz}.
 *
 * Handling order: CSV NULL token first (maps to the type's NULL sentinel),
 * then a decimal parse with thousands-separator commas stripped, then — for
 * integer targets only — a fallback parse of X'...' hex literals.
 *
 * @param value       the input text; trimmed before any matching
 * @param expectedClz target primitive type (long/int/short/byte/double)
 * @return the parsed value boxed as an Object, or a NULL sentinel for CSV null
 * @throws VoltTypeException if the string cannot be converted
 */
private static Object convertStringToPrimitiveOrPrimitiveWrapper(String value, final Class<?> expectedClz)
throws VoltTypeException
{
    value = value.trim();
    // detect CSV null
    if (value.equals(Constants.CSV_NULL)) return nullValueForType(expectedClz);
    // Remove commas. Doing this seems kind of dubious since it lets strings like
    //     ,,,3.1,4,,e,+,,16
    // be parsed as a valid double value (for example).
    String commaFreeValue = thousandSeparator.matcher(value).replaceAll("");
    try {
        // autoboxing converts to boxed types since this method returns a java Object
        if (isLongClass(expectedClz)) {
            return Long.parseLong(commaFreeValue);
        }
        if (isIntClass(expectedClz)) {
            return Integer.parseInt(commaFreeValue);
        }
        if (isShortClass(expectedClz)) {
            return Short.parseShort(commaFreeValue);
        }
        if (isByteClass(expectedClz)) {
            return Byte.parseByte(commaFreeValue);
        }
        if (isDoubleClass(expectedClz)) {
            return Double.parseDouble(commaFreeValue);
        }
    }
    // ignore the exception and fail through below
    catch (NumberFormatException nfe) {
        // If we failed to parse the string in decimal form it could still
        // be a numeric value specified as X'....'
        //
        // Do this only after trying to parse a decimal literal, which is the
        // most common case.
        if (expectedClz != double.class) {
            String hexDigits = SQLParser.getDigitsFromHexLiteral(value);
            if (hexDigits != null) {
                try {
                    return SQLParser.hexDigitsToLong(hexDigits);
                }
                catch (SQLParser.Exception spe) {
                    // Not a valid hex literal either; fall through to the
                    // VoltTypeException below.
                }
            }
        }
    }
    throw new VoltTypeException(
            "Unable to convert string "
            + value + " to "
            + expectedClz.getName()
            + " value for target parameter.");
}
java
/**
 * Attempts to coerce an array parameter whose component type differs from
 * the expected component type.
 *
 * Supported conversions: exact component match (pass-through), empty array
 * of any component type (replaced with an empty array of the expected type),
 * byte[][] -> String[] (UTF-8 decode), String[] -> byte[][] and
 * String[] -> Byte[][] (hex decode). Anything else is rejected rather than
 * converted element-by-element.
 *
 * @param expectedComponentClz component type the procedure expects
 * @param inputComponentClz    component type of the supplied array
 * @param param                the array object itself
 * @return an array assignable to the expected component type
 * @throws VoltTypeException if no supported conversion applies
 */
private static Object tryToMakeCompatibleArray(
        final Class<?> expectedComponentClz,
        final Class<?> inputComponentClz,
        Object param) throws VoltTypeException
{
    int inputLength = Array.getLength(param);
    if (inputComponentClz == expectedComponentClz) {
        return param;
    }
    // if it's an empty array, let it through
    // this is a bit ugly as it might hide passing
    // arrays of the wrong type, but it "does the right thing"
    // more often that not I guess...
    else if (inputLength == 0) {
        return Array.newInstance(expectedComponentClz, 0);
    }
    // hack to make strings work with input as bytes
    else if (isByteArrayClass(inputComponentClz) && (expectedComponentClz == String.class)) {
        String[] values = new String[inputLength];
        for (int i = 0; i < inputLength; i++) {
            try {
                values[i] = new String((byte[]) Array.get(param, i), "UTF-8");
            }
            catch (UnsupportedEncodingException ex) {
                throw new VoltTypeException(
                        "tryScalarMakeCompatible: Unsupported encoding:"
                        + expectedComponentClz.getName() + " to provided "
                        + inputComponentClz.getName());
            }
        }
        return values;
    }
    // hack to make varbinary work with input as hex string
    else if ((inputComponentClz == String.class) && (expectedComponentClz == byte[].class)) {
        byte[][] values = new byte[inputLength][];
        for (int i = 0; i < inputLength; i++) {
            values[i] = Encoder.hexDecode((String) Array.get(param, i));
        }
        return values;
    }
    // same hex-string hack, but for the boxed Byte[] component type
    else if ((inputComponentClz == String.class) && (expectedComponentClz == Byte[].class)) {
        Byte[][] boxvalues = new Byte[inputLength][];
        for (int i = 0; i < inputLength; i++) {
            boxvalues[i] = ArrayUtils.toObject(
                    Encoder.hexDecode((String) Array.get(param, i))
            );
        }
        return boxvalues;
    }
    else {
        /*
         * Arrays can be quite large so it doesn't make sense to silently do the conversion
         * and incur the performance hit. The client should serialize the correct invocation
         * parameters
         */
        throw new VoltTypeException(
                "tryScalarMakeCompatible: Unable to match parameter array:"
                + expectedComponentClz.getName() + " to provided "
                + inputComponentClz.getName());
    }
}
java
final static public VoltTable[] getResultsFromRawResults(String procedureName, Object result) throws InvocationTargetException { if (result == null) { return new VoltTable[0]; } if (result instanceof VoltTable[]) { VoltTable[] retval = (VoltTable[]) result; for (VoltTable table : retval) { if (table == null) { Exception e = new RuntimeException("VoltTable arrays with non-zero length cannot contain null values."); throw new InvocationTargetException(e); } // Make sure this table does not use an ee cache buffer table.convertToHeapBuffer(); } return retval; } if (result instanceof VoltTable) { VoltTable vt = (VoltTable) result; // Make sure this table does not use an ee cache buffer vt.convertToHeapBuffer(); return new VoltTable[] { vt }; } if (result instanceof Long) { VoltTable t = new VoltTable(new VoltTable.ColumnInfo("", VoltType.BIGINT)); t.addRow(result); return new VoltTable[] { t }; } throw new RuntimeException(String.format("Procedure %s unsupported procedure return type %s.", procedureName, result.getClass().getSimpleName())); }
java
/**
 * Command-line entry point that prints version information.
 * No argument or {@code --full}: full version string; {@code --short}:
 * short version; {@code --revision}: revision; anything else: usage.
 */
public static void main(String[] args) {
    if (args.length > 1) {
        printUsage();
        // BUG FIX: the original fell through here and went on to interpret
        // args[0] anyway, printing both the usage text and a version string
        // when extra arguments were supplied. Stop explicitly instead.
        // (Return rather than System.exit so behavior is unchanged if
        // printUsage() already exits on its own — TODO confirm.)
        return;
    }
    // At this point args.length is 0 or 1.
    if (args.length == 0 || args[0].equals("--full")) {
        System.out.println(getFullVersion());
        System.exit(0);
    }
    if (args[0].equals("--short")) {
        System.out.println(getVersion());
    }
    else if (args[0].equals("--revision")) {
        System.out.println(getVersionRevision());
    }
    else {
        printUsage();
    }
    System.exit(0);
}
java
/**
 * Updates the "final" marker on this segment's backing file when it differs
 * from the cached {@code m_isFinal} state.
 *
 * When clearing the final flag succeeds, file metadata is forced to disk;
 * when the attribute change fails but the file still reports final while we
 * asked to clear it, an IOException is raised. The cached flag is updated
 * in either case (see the invariant comment below).
 *
 * @param isFinal desired final state for this segment
 * @throws IOException if the final attribute could not be removed
 */
void setFinal(boolean isFinal) throws IOException {
    if (isFinal != m_isFinal) {
        if (PBDSegment.setFinal(m_file, isFinal)) {
            if (!isFinal) {
                // It is dangerous to leave final on a segment so make sure the metadata is flushed
                m_fc.force(true);
            }
        } else if (PBDSegment.isFinal(m_file) && !isFinal) {
            // Asked to clear the flag, the attribute update failed, and the
            // file still reports final: surface the failure to the caller.
            throw new IOException("Could not remove the final attribute from " + m_file.getName());
        }
        // It is OK for m_isFinal to be true when isFinal(File) returns false but not the other way
        m_isFinal = isFinal;
    }
}
java
public static String createParticipantNode(ZooKeeper zk, String dir, String prefix, byte[] data) throws KeeperException, InterruptedException { createRootIfNotExist(zk, dir); String node = zk.create(ZKUtil.joinZKPath(dir, prefix + "_"), data, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL); // Unlock the dir as initialized zk.setData(dir, new byte[] {INITIALIZED}, -1); return node; }
java
/**
 * Shuts this component down: sets the shutdown flag (presumably observed by
 * in-flight tasks — TODO confirm against the tasks submitted to {@code es}),
 * stops the executor from accepting new work, and blocks effectively without
 * bound (up to 365 days) until queued tasks drain.
 *
 * @throws InterruptedException if interrupted while awaiting termination
 * @throws KeeperException declared in the signature; not thrown by this body
 */
synchronized public void shutdown() throws InterruptedException, KeeperException {
    m_shutdown = true;
    es.shutdown();
    es.awaitTermination(365, TimeUnit.DAYS);
}
java