code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * Tests whether pred matches the expression itself, its immediate left/right
 * children, or any of its argument expressions (one level deep — this does
 * not recurse).
 *
 * Fix: the original called pred.test(expr) and then, on a null expr, called
 * pred.test(null) a second time — a redundant double invocation (and a
 * double side effect for impure predicates). The null case is now handled
 * first with a single call.
 *
 * @param expr expression to inspect; may be null
 * @param pred predicate applied to expr and its immediate children
 * @return true if pred matches expr or any immediate child
 */
public static boolean reduce(AbstractExpression expr, Predicate<AbstractExpression> pred) {
    if (expr == null) {
        return pred.test(null);
    }
    if (pred.test(expr)) {
        return true;
    }
    return pred.test(expr.getLeft())
            || pred.test(expr.getRight())
            || (expr.getArgs() != null && expr.getArgs().stream().anyMatch(pred));
}
java
public static Collection<AbstractExpression> uncombineAny(AbstractExpression expr) { ArrayDeque<AbstractExpression> out = new ArrayDeque<AbstractExpression>(); if (expr != null) { ArrayDeque<AbstractExpression> in = new ArrayDeque<AbstractExpression>(); // this chunk of code breaks the code into a list of expression that // all have to be true for the where clause to be true in.add(expr); AbstractExpression inExpr = null; while ((inExpr = in.poll()) != null) { if (inExpr.getExpressionType() == ExpressionType.CONJUNCTION_AND) { in.add(inExpr.getLeft()); in.add(inExpr.getRight()); } else { out.add(inExpr); } } } return out; }
java
/**
 * Recursively collects every TupleValueExpression in the expression tree
 * rooted at input, in depth-first (left, right, args) order.
 *
 * @param input root of the tree; may be null
 * @return list of all TVEs found; never null
 */
public static List<TupleValueExpression> getTupleValueExpressions(AbstractExpression input) {
    ArrayList<TupleValueExpression> tves = new ArrayList<TupleValueExpression>();
    // recursive stopping steps
    if (input == null) {
        return tves;
    } else if (input instanceof TupleValueExpression) {
        // A TVE is a leaf; no need to descend further.
        tves.add((TupleValueExpression) input);
        return tves;
    }
    // recursive calls
    tves.addAll(getTupleValueExpressions(input.m_left));
    tves.addAll(getTupleValueExpressions(input.m_right));
    if (input.m_args != null) {
        for (AbstractExpression argument : input.m_args) {
            tves.addAll(getTupleValueExpressions(argument));
        }
    }
    return tves;
}
java
private static boolean subqueryRequiresScalarValueExpressionFromContext(AbstractExpression parentExpr) { if (parentExpr == null) { // No context: we are a top-level expression. E.g, an item on the // select list. In this case, assume the expression must be scalar. return true; } // Exists and comparison operators can handle non-scalar subqueries. if (parentExpr.getExpressionType() == ExpressionType.OPERATOR_EXISTS || parentExpr instanceof ComparisonExpression) { return false; } // There is already a ScalarValueExpression above the subquery. if (parentExpr instanceof ScalarValueExpression) { return false; } // By default, assume that the subquery must produce a single value. return true; }
java
/**
 * Wraps a single-column subquery expression in a ScalarValueExpression,
 * propagating the subquery's value type and size to the wrapper.
 *
 * @param expr the subquery expression to scalarize
 * @return the wrapping ScalarValueExpression
 * @throws PlanningErrorException when the subquery projects more than one column
 */
private static AbstractExpression addScalarValueExpression(SelectSubqueryExpression expr) {
    // A scalar subquery must project exactly one column.
    if (expr.getSubqueryScan().getOutputSchema().size() != 1) {
        throw new PlanningErrorException("Scalar subquery can have only one output column");
    }
    expr.changeToScalarExprType();
    final AbstractExpression wrapper = new ScalarValueExpression();
    wrapper.setLeft(expr);
    wrapper.setValueType(expr.getValueType());
    wrapper.setValueSize(expr.getValueSize());
    return wrapper;
}
java
/**
 * Runs the invocation through every permission policy. An explicit ALLOW
 * from any policy grants access; otherwise the error response of the first
 * denying policy is returned.
 *
 * @return null when the invocation is accepted (or auth is disabled),
 *         otherwise the denial response
 */
public ClientResponseImpl shouldAccept(String name, AuthSystem.AuthUser user, final StoredProcedureInvocation task, final Procedure catProc) {
    if (user.isAuthEnabled()) {
        InvocationPermissionPolicy deniedPolicy = null;
        InvocationPermissionPolicy.PolicyResult res = InvocationPermissionPolicy.PolicyResult.DENY;
        for (InvocationPermissionPolicy policy : m_permissionpolicies) {
            res = policy.shouldAccept(user, task, catProc);
            if (res == InvocationPermissionPolicy.PolicyResult.ALLOW) {
                // An explicit ALLOW wins over any earlier DENY.
                deniedPolicy = null;
                break;
            }
            if (res == InvocationPermissionPolicy.PolicyResult.DENY) {
                if (deniedPolicy == null) {
                    //Take first denied response only.
                    deniedPolicy = policy;
                }
            }
        }
        if (deniedPolicy != null) {
            return deniedPolicy.getErrorResponse(user, task, catProc);
        }
        // We must have an explicit ALLOW: one of the policies must grant access.
        // NOTE(review): with an empty policy list res stays DENY and this
        // assert fires — confirm m_permissionpolicies is never empty.
        assert(res == InvocationPermissionPolicy.PolicyResult.ALLOW);
        return null;
    }
    //User authentication is disabled. (auth disabled user)
    return null;
}
java
/**
 * System-procedure body: applies a new log4j configuration on every site.
 * The lowest-site-id site snapshots the old levels, all sites rendezvous on
 * a barrier, each site applies the update, and the lowest site logs the
 * change if the HOST logger just became able to emit INFO.
 *
 * @return a single-row status table (STATUS_OK)
 */
@SuppressWarnings("deprecation")
public VoltTable[] run(SystemProcedureExecutionContext ctx, String username, String remoteHost, String xmlConfig) {
    long oldLevels = 0;
    if (ctx.isLowestSiteId()) {
        // Logger level is a global property, pick the site with lowest id to do it.
        hostLog.info(String.format("%s from %s changed the log4j settings", username, remoteHost));
        hostLog.info(xmlConfig);
        oldLevels = hostLog.getLogLevels(loggers);
    }
    try {
        // Mimic the multi-fragment semantics as scatter-gather pattern is an overkill for this simple task.
        // There are chances that some sites being interrupted and update the logging before old logger level
        // being read, but the reasons we don't care because 1) it is rare and 2) it only effects when HOST
        // logger being changed from higher than INFO level to INFO or lower level.
        barrier.await();
    } catch (InterruptedException | BrokenBarrierException dontcare) {
        // Deliberately ignored: proceed with the update even if the rendezvous failed.
    }
    VoltDB.instance().logUpdate(xmlConfig, DeprecatedProcedureAPIAccess.getVoltPrivateRealTransactionId(this), ctx.getPaths().getVoltDBRoot());
    ctx.updateBackendLogLevels();
    if (ctx.isLowestSiteId()) {
        long newLevels = hostLog.getLogLevels(loggers);
        if (newLevels != oldLevels) {
            // If HOST logger wasn't able to log before and now it can, logs the setting change event.
            // NOTE(review): this assumes levels are packed 3 bits per logger
            // with HOST at bits 3-5 — confirm against getLogLevels' encoding.
            int index = (int)((oldLevels >> 3) & 7);
            Level before = Level.values()[index];
            index = (int)((newLevels >> 3) & 7);
            Level after = Level.values()[index];
            if (before.ordinal() > Level.INFO.ordinal() && after.ordinal() <= Level.INFO.ordinal()) {
                hostLog.info(String.format("%s from %s changed the log4j settings", username, remoteHost));
                hostLog.info(xmlConfig);
            }
        }
        barrier.reset();
    }
    VoltTable t = new VoltTable(VoltSystemProcedure.STATUS_SCHEMA);
    t.addRow(VoltSystemProcedure.STATUS_OK);
    return (new VoltTable[] {t});
}
java
/**
 * Adds a choice entry: the displayed item is truncated to at most
 * MaxLenInZChoice characters, while the full value is stored alongside.
 */
public void add(String item, String value) {
    // Truncate the display text; Math.min covers items shorter than the cap.
    super.add(item.substring(0, Math.min(item.length(), MaxLenInZChoice)));
    values.addElement(value);
}
java
private int findValue(String s) { for (int i = 0; i < values.size(); i++) { if (s.equals(values.elementAt(i))) { return i; } // end of if (s.equals(values.elementAt(i))) } // end of for (int i=0; i<values.size(); i++) return -1; }
java
public static ByteBuffer getNextChunk(byte[] schemaBytes, ByteBuffer buf, CachedByteBufferAllocator resultBufferAllocator) { buf.position(buf.position() + 4);//skip partition id int length = schemaBytes.length + buf.remaining(); ByteBuffer outputBuffer = resultBufferAllocator.allocate(length); outputBuffer.put(schemaBytes); outputBuffer.put(buf); outputBuffer.flip(); return outputBuffer; }
java
/**
 * Dispatches one decoded stream-snapshot message by type and converts it
 * into restore work where applicable. The message container is always
 * discarded and the block acked in the finally clause, even on crash paths.
 *
 * @param msg decoded message; null is a no-op
 * @return restore work for HASHINATOR/DATA messages, otherwise null
 */
private RestoreWork processMessage(DecodedContainer msg, CachedByteBufferAllocator resultBufferAllocator) {
    if (msg == null) {
        return null;
    }
    RestoreWork restoreWork = null;
    try {
        if (msg.m_msgType == StreamSnapshotMessageType.FAILURE) {
            // Source reported failure: take the whole node down.
            VoltDB.crashLocalVoltDB("Rejoin source sent failure message.", false, null);

            // for test code only
            if (m_expectedEOFs.decrementAndGet() == 0) {
                m_EOF = true;
            }
        } else if (msg.m_msgType == StreamSnapshotMessageType.END) {
            if (rejoinLog.isTraceEnabled()) {
                rejoinLog.trace("Got END message " + msg.m_blockIndex + " from " + CoreUtils.hsIdToString(msg.m_srcHSId) + " (TargetId " + msg.m_dataTargetId + ")");
            }
            // Only mark EOF once every expected stream has ended.
            if (m_expectedEOFs.decrementAndGet() == 0) {
                m_EOF = true;
            }
        } else if (msg.m_msgType == StreamSnapshotMessageType.SCHEMA) {
            rejoinLog.trace("Got SCHEMA message " + msg.m_blockIndex + " from " + CoreUtils.hsIdToString(msg.m_srcHSId) + " (TargetId " + msg.m_dataTargetId + ")");
            // Cache the schema bytes so later DATA blocks for this table can be prefixed with them.
            ByteBuffer block = msg.m_container.b();
            block.position(StreamSnapshotDataTarget.contentOffset);
            byte[] schemaBytes = new byte[block.remaining()];
            block.get(schemaBytes);
            m_schemas.put(msg.m_tableId, schemaBytes);
        } else if (msg.m_msgType == StreamSnapshotMessageType.HASHINATOR) {
            ByteBuffer block = msg.m_container.b();
            block.position(StreamSnapshotDataTarget.contentOffset);
            long version = block.getLong();
            byte[] hashinatorConfig = new byte[block.remaining()];
            block.get(hashinatorConfig);
            restoreWork = new HashinatorRestoreWork(version, hashinatorConfig);
        } else {
            // It's normal snapshot data afterwards
            rejoinLog.trace("Got DATA message " + msg.m_blockIndex + " from " + CoreUtils.hsIdToString(msg.m_srcHSId) + " (TargetId " + msg.m_dataTargetId + ")");
            ByteBuffer block = msg.m_container.b();
            // Data for an unknown table is unrecoverable: crash.
            if (!m_schemas.containsKey(msg.m_tableId)) {
                VoltDB.crashLocalVoltDB("No schema for table with ID " + msg.m_tableId, false, null);
            }
            // Get the byte buffer ready to be consumed
            block.position(StreamSnapshotDataTarget.contentOffset);
            ByteBuffer nextChunk = getNextChunk(m_schemas.get(msg.m_tableId), block, resultBufferAllocator);
            m_bytesReceived += nextChunk.remaining();
            restoreWork = new TableRestoreWork(msg.m_tableId, nextChunk);
        }
        return restoreWork;
    } finally {
        msg.m_container.discard();

        // Queue ack to this block (after the container has been discarded)
        m_ack.ack(msg.m_srcHSId, msg.m_msgType == StreamSnapshotMessageType.END, msg.m_dataTargetId, msg.m_blockIndex);
    }
}
java
/**
 * Copies the file at fromPath to toPath, delegating to the bundled Guava
 * Files.copy implementation.
 */
public static void copyFile(String fromPath, String toPath) throws Exception {
    final File source = new File(fromPath);
    final File destination = new File(toPath);
    com.google_voltpatches.common.io.Files.copy(source, destination);
}
java
public static String parseRevisionString(String fullBuildString) { String build = ""; // Test for SVN revision string - example: https://svn.voltdb.com/eng/trunk?revision=2352 String[] splitted = fullBuildString.split("=", 2); if (splitted.length == 2) { build = splitted[1].trim(); if (build.length() == 0) { return null; } return build; } // Test for git build string - example: 2.0 voltdb-2.0-70-gb39f43e-dirty Pattern p = Pattern.compile("-(\\d*-\\w{8}(?:-.*)?)"); Matcher m = p.matcher(fullBuildString); if (! m.find()) { return null; } build = m.group(1).trim(); if (build.length() == 0) { return null; } return build; }
java
public static Object[] parseVersionString(String versionString) { if (versionString == null) { return null; } // check for whitespace if (versionString.matches("\\s")) { return null; } // split on the dots String[] split = versionString.split("\\."); if (split.length == 0) { return null; } Object[] v = new Object[split.length]; int i = 0; for (String s : split) { try { v[i] = Integer.parseInt(s); } catch (NumberFormatException e) { v[i] = s; } i++; } // check for a numeric beginning if (v[0] instanceof Integer) { return v; } else { return null; } }
java
public static int compareVersions(Object[] left, Object[] right) { if (left == null || right == null) { throw new IllegalArgumentException("Invalid versions"); } for (int i = 0; i < left.length; i++) { // right is shorter than left and share the same prefix => left must be larger if (right.length == i) { return 1; } if (left[i] instanceof Integer) { if (right[i] instanceof Integer) { // compare two numbers if (((Integer) left[i]) > ((Integer) right[i])) { return 1; } else if (((Integer) left[i]) < ((Integer) right[i])) { return -1; } else { continue; } } else { // numbers always greater than alphanumeric tags return 1; } } else if (right[i] instanceof Integer) { // alphanumeric tags always less than numbers return -1; } else { // compare two alphanumeric tags lexicographically int cmp = ((String) left[i]).compareTo((String) right[i]); if (cmp != 0) { return cmp; } else { // two alphanumeric tags are the same... so keep comparing continue; } } } // left is shorter than right and share the same prefix, must be less if (left.length < right.length) { return -1; } // samesies return 0; }
java
/**
 * Lazily determines whether this build exposes pro (enterprise) features,
 * caching the result in m_isPro after the first call.
 * NOTE(review): the lazy init is unsynchronized — a race can probe twice;
 * confirm callers tolerate that.
 */
public static boolean isPro() {
    if (m_isPro == null) {
        //Allow running pro kit as community.
        if (!Boolean.parseBoolean(System.getProperty("community", "false"))) {
            // Probe for a pro-only class; HANDLER_IGNORE suppresses load failures.
            m_isPro = ProClass.load("org.voltdb.CommandLogImpl", "Command logging", ProClass.HANDLER_IGNORE).hasProClass();
        } else {
            m_isPro = false;
        }
    }
    return m_isPro.booleanValue();
}
java
/**
 * Sums every byte from the start of the buffer up to (but excluding) the
 * current position. The buffer's position is restored before returning.
 *
 * @param buffer buffer to checksum; array-backed buffers are read directly
 * @return the byte sum (a cheap, non-cryptographic checksum)
 */
public static final long cheesyBufferCheckSum(ByteBuffer buffer) {
    final int savedPosition = buffer.position();
    buffer.position(0);
    long sum = 0;
    if (buffer.hasArray()) {
        // Fast path: walk the backing array without per-byte get() calls.
        final byte[] raw = buffer.array();
        final int start = buffer.arrayOffset();
        for (int i = start, end = start + savedPosition; i < end; i++) {
            sum += raw[i];
        }
    } else {
        for (int i = 0; i < savedPosition; i++) {
            sum += buffer.get();
        }
    }
    buffer.position(savedPosition);
    return sum;
}
java
/**
 * Concatenates every array in arrayList into a single array whose runtime
 * type is taken from the (empty) prototype array.
 *
 * @param empty zero-length prototype supplying the component type
 * @param arrayList arrays to concatenate, iterated twice (size pass + copy pass)
 * @return the merged array, or the prototype itself when there is nothing to merge
 */
public static <T> T[] concatAll(final T[] empty, Iterable<T[]> arrayList) {
    assert(empty.length == 0);
    if (!arrayList.iterator().hasNext()) {
        return empty;
    }
    // First pass: compute the total length.
    int total = 0;
    for (T[] chunk : arrayList) {
        total += chunk.length;
    }
    // Second pass: copy each chunk into place.
    final T[] merged = Arrays.copyOf(empty, total);
    int offset = 0;
    for (T[] chunk : arrayList) {
        System.arraycopy(chunk, 0, merged, offset, chunk.length);
        offset += chunk.length;
    }
    return merged;
}
java
/**
 * Queries @Statistics MEMORY and returns the largest per-row RSS value,
 * converted from the reported units to MB.
 * NOTE(review): the /1024 presumes RSS is reported in KB — confirm units.
 * NOTE(review): any failure prints the stack trace and System.exit(-1)s the
 * whole process instead of propagating — test-utility behavior; confirm.
 */
public static long getMBRss(Client client) {
    assert(client != null);
    long rssMax = 0;
    try {
        ClientResponse r = client.callProcedure("@Statistics", "MEMORY", 0);
        VoltTable stats = r.getResults()[0];
        stats.resetRowPosition();
        while (stats.advanceRow()) {
            long rss = stats.getLong("RSS") / 1024;
            if (rss > rssMax) {
                rssMax = rss;
            }
        }
        return rssMax;
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
        return 0;
    }
}
java
/**
 * Pairs keys with values positionally into a multimap. Surplus values (when
 * values outnumber keys) are all attached to the first key; surplus keys are
 * simply dropped.
 *
 * @return the multimap, or null when either list is empty
 *         (NOTE(review): a null return is easy to misuse — confirm callers check)
 */
public static <K, V> Multimap<K, V> zipToMap(List<K> keys, List<V> values) {
    if (keys.isEmpty() || values.isEmpty()) {
        return null;
    }
    Iterator<K> keyIter = keys.iterator();
    Iterator<V> valueIter = values.iterator();
    ArrayListMultimap<K, V> result = ArrayListMultimap.create();
    while (keyIter.hasNext() && valueIter.hasNext()) {
        result.put(keyIter.next(), valueIter.next());
    }
    // In case there are more values than keys, assign the rest of the
    // values to the first key
    K firstKey = keys.get(0);
    while (valueIter.hasNext()) {
        result.put(firstKey, valueIter.next());
    }
    return result;
}
java
/**
 * Round-robin merge: repeatedly polls one element from each deque in turn
 * until all are drained. Drained deques are removed from `stuff` as they are
 * encountered — the input collection is mutated.
 *
 * @param stuff deques to interleave; must support Iterator.remove
 * @return the interleaved elements
 */
public static <K> List<K> zip(Collection<Deque<K>> stuff) {
    final List<K> result = Lists.newArrayList();
    // merge the results
    Iterator<Deque<K>> iter = stuff.iterator();
    while (iter.hasNext()) {
        final K next = iter.next().poll();
        if (next != null) {
            result.add(next);
        } else {
            // poll() returned null => this deque is empty; drop it.
            iter.remove();
        }
        // End of a pass: wrap around for the next round-robin sweep.
        if (!iter.hasNext()) {
            iter = stuff.iterator();
        }
    }
    return result;
}
java
/**
 * Builds a ListMultimap whose keys iterate in natural (TreeMap) order and
 * whose per-key values live in ArrayLists.
 */
public static <K extends Comparable<?>, V> ListMultimap<K, V> sortedArrayListMultimap() {
    final Map<K, Collection<V>> backing = Maps.newTreeMap();
    return Multimaps.newListMultimap(backing, Lists::newArrayList);
}
java
/**
 * Normalizes an invocation for command logging: if its params are not yet
 * serialized, serialize and re-deserialize it so the copy carries flattened
 * params; otherwise return the invocation untouched.
 */
public static StoredProcedureInvocation roundTripForCL(StoredProcedureInvocation invocation) throws IOException {
    // Already flattened: nothing to do.
    if (invocation.getSerializedParams() != null) {
        return invocation;
    }
    final ByteBuffer wire = ByteBuffer.allocate(invocation.getSerializedSize());
    invocation.flattenToBuffer(wire);
    wire.flip();
    final StoredProcedureInvocation roundTripped = new StoredProcedureInvocation();
    roundTripped.initFromBuffer(wire);
    return roundTripped;
}
java
/**
 * Builds a map of partition id -> serialized partition key bytes.
 *
 * @param hashinator specific hashinator to query, or null for the current one
 * @return the map, or null when the key table could not be obtained
 */
public static Map<Integer, byte[]> getBinaryPartitionKeys(TheHashinator hashinator) {
    Map<Integer, byte[]> partitionMap = new HashMap<>();
    VoltTable partitionKeys = null;
    if (hashinator == null) {
        partitionKeys = TheHashinator.getPartitionKeys(VoltType.VARBINARY);
    } else {
        partitionKeys = TheHashinator.getPartitionKeys(hashinator, VoltType.VARBINARY);
    }
    if (partitionKeys == null) {
        return null;
    } else {
        // This is a shared resource so make a copy of the table to protect the cache copy in TheHashinator
        ByteBuffer buf = ByteBuffer.allocate(partitionKeys.getSerializedSize());
        partitionKeys.flattenToBuffer(buf);
        buf.flip();
        VoltTable keyCopy = PrivateVoltTableFactory.createVoltTableFromSharedBuffer(buf);
        while (keyCopy.advanceRow()) {
            // Column 0: partition id; column 1: opaque key bytes.
            partitionMap.put((int) keyCopy.getLong(0), keyCopy.getVarbinary(1));
        }
    }
    return partitionMap;
}
java
/**
 * Loads a java.util.Properties object from a credentials file.
 *
 * Fixes: the original never closed its FileReader (resource leak on every
 * call) and discarded the underlying IOException. The reader is now managed
 * with try-with-resources and the cause is preserved.
 *
 * @param credentials path to a readable properties file
 * @return the loaded properties
 * @throws IllegalArgumentException when the file is missing/unreadable or
 *         loading fails (with the IOException as cause)
 */
public static Properties readPropertiesFromCredentials(String credentials) {
    Properties props = new Properties();
    File propFD = new File(credentials);
    if (!propFD.exists() || !propFD.isFile() || !propFD.canRead()) {
        throw new IllegalArgumentException("Credentials file " + credentials + " is not a read accessible file");
    }
    try (FileReader fr = new FileReader(propFD)) {
        props.load(fr);
    } catch (IOException e) {
        // Keep the original message but preserve the cause for diagnosis.
        throw new IllegalArgumentException("Credential file not found or permission denied.", e);
    }
    return props;
}
java
/**
 * Serializes ds into mbuf and returns the number of bytes written.
 * NOTE(review): ds.cancel() runs in the finally block even after a
 * successful serialize — confirm cancel() is pure cleanup in that case.
 *
 * @return bytes written into mbuf by the serialization
 */
public static int writeDeferredSerialization(ByteBuffer mbuf, DeferredSerialization ds) throws IOException {
    int written = 0;
    try {
        final int objStartPosition = mbuf.position();
        ds.serialize(mbuf);
        // Written size = position delta across the serialize call.
        written = mbuf.position() - objStartPosition;
    } finally {
        ds.cancel();
    }
    return written;
}
java
/**
 * Returns the node `index` links down the singly linked chain starting at
 * the primary node.
 */
public NodeAVL getNode(int index) {
    NodeAVL node = nPrimaryNode;
    for (int remaining = index; remaining > 0; remaining--) {
        node = node.nNext;
    }
    return node;
}
java
/**
 * Iteration helper: a null argument starts the walk at the primary node;
 * otherwise the successor of n is returned.
 */
NodeAVL getNextNode(NodeAVL n) {
    return (n == null) ? nPrimaryNode : n.nNext;
}
java
/**
 * Order-sensitive equality of two ACL lists: same length and pairwise-equal
 * elements.
 */
private boolean listACLEquals(List<ACL> lista, List<ACL> listb) {
    final int size = lista.size();
    if (size != listb.size()) {
        return false;
    }
    for (int i = 0; i < size; i++) {
        if (!lista.get(i).equals(listb.get(i))) {
            return false;
        }
    }
    return true;
}
java
/**
 * Interns an ACL list and returns its stable long key. A null list maps to
 * the reserved key -1 (decoded as OPEN_ACL_UNSAFE by convertLong).
 */
public synchronized Long convertAcls(List<ACL> acls) {
    if (acls == null)
        return -1L;

    // Already interned: reuse the existing key.
    Long ret = aclKeyMap.get(acls);
    if (ret != null)
        return ret;

    // First occurrence: assign the next index and record both directions.
    long val = incrementIndex();
    longKeyMap.put(val, acls);
    aclKeyMap.put(acls, val);
    return val;
}
java
/**
 * Resolves an interned ACL key back to its ACL list. The reserved key -1
 * decodes to the world-open default ACL.
 *
 * @throws RuntimeException when the key was never interned
 */
public synchronized List<ACL> convertLong(Long longVal) {
    if (longVal == null) {
        return null;
    }
    // Reserved sentinel for the open/unsafe default ACL.
    if (longVal == -1L) {
        return Ids.OPEN_ACL_UNSAFE;
    }
    final List<ACL> acls = longKeyMap.get(longVal);
    if (acls != null) {
        return acls;
    }
    LOG.error("ERROR: ACL not available for long " + longVal);
    throw new RuntimeException("Failed to fetch acls for " + longVal);
}
java
/**
 * Rough memory footprint of the tree: the sum of every path's length plus
 * every node's payload size. Each node is read under its own lock.
 */
public long approximateDataSize() {
    long total = 0;
    for (Map.Entry<String, DataNode> entry : nodes.entrySet()) {
        final DataNode dataNode = entry.getValue();
        synchronized (dataNode) {
            total += entry.getKey().length()
                    + (dataNode.data == null ? 0 : dataNode.data.length);
        }
    }
    return total;
}
java
/**
 * True for the reserved paths: the root, the /zookeeper subtree root, and
 * the quota subtree root.
 */
boolean isSpecialPath(String path) {
    return rootZookeeper.equals(path)
            || procZookeeper.equals(path)
            || quotaZookeeper.equals(path);
}
java
/**
 * Adjusts the cached count statistic for the quota rooted at lastPrefix by
 * diff, then warns if the new count exceeds the configured quota limit.
 */
public void updateCount(String lastPrefix, int diff) {
    String statNode = Quotas.statPath(lastPrefix);
    DataNode node = nodes.get(statNode);
    StatsTrack updatedStat = null;
    if (node == null) {
        // should not happen
        LOG.error("Missing count node for stat " + statNode);
        return;
    }
    // Read-modify-write of the stat payload under the node lock.
    synchronized (node) {
        updatedStat = new StatsTrack(new String(node.data));
        updatedStat.setCount(updatedStat.getCount() + diff);
        node.data = updatedStat.toString().getBytes();
    }
    // now check if the counts match the quota
    String quotaNode = Quotas.quotaPath(lastPrefix);
    node = nodes.get(quotaNode);
    StatsTrack thisStats = null;
    if (node == null) {
        // should not happen
        // NOTE(review): message says "count node" but this is the quota
        // (limit) node — misleading wording; confirm before changing.
        LOG.error("Missing count node for quota " + quotaNode);
        return;
    }
    synchronized (node) {
        thisStats = new StatsTrack(new String(node.data));
    }
    // Quota's count field holds the limit; warn when the live count exceeds it.
    if (thisStats.getCount() < updatedStat.getCount()) {
        LOG.warn("Quota exceeded: " + lastPrefix + " count=" + updatedStat.getCount() + " limit=" + thisStats.getCount());
    }
}
java
/**
 * Removes a node from the tree: detaches it from its parent, bumps the
 * parent's cversion/pzxid, drops any ephemeral bookkeeping, updates the
 * quota trie and counts, and finally fires the delete/child watches.
 *
 * @throws KeeperException.NoNodeException when the node or its parent is missing
 */
public void deleteNode(String path, long zxid) throws KeeperException.NoNodeException {
    int lastSlash = path.lastIndexOf('/');
    String parentName = path.substring(0, lastSlash);
    String childName = path.substring(lastSlash + 1);
    DataNode node = nodes.get(path);
    if (node == null) {
        throw new KeeperException.NoNodeException();
    }
    nodes.remove(path);
    DataNode parent = nodes.get(parentName);
    if (parent == null) {
        throw new KeeperException.NoNodeException();
    }
    synchronized (parent) {
        parent.removeChild(childName);
        parent.stat.setCversion(parent.stat.getCversion() + 1);
        parent.stat.setPzxid(zxid);
        // If the node was ephemeral, drop it from its owner session's set.
        long eowner = node.stat.getEphemeralOwner();
        if (eowner != 0) {
            HashSet<String> nodes = ephemerals.get(eowner);
            if (nodes != null) {
                synchronized (nodes) {
                    nodes.remove(path);
                }
            }
        }
        node.parent = null;
    }
    if (parentName.startsWith(procZookeeper)) {
        // delete the node in the trie.
        if (Quotas.limitNode.equals(childName)) {
            // we need to update the trie
            // as well
            pTrie.deletePath(parentName.substring(quotaZookeeper.length()));
        }
    }
    // also check to update the quotas for this node
    String lastPrefix = pTrie.findMaxPrefix(path);
    if (!rootZookeeper.equals(lastPrefix) && !("".equals(lastPrefix))) {
        // ok we have some match and need to update
        updateCount(lastPrefix, -1);
        int bytes = 0;
        synchronized (node) {
            // Negative delta: the node's payload is being removed.
            bytes = (node.data == null ? 0 : -(node.data.length));
        }
        updateBytes(lastPrefix, bytes);
    }
    if (LOG.isTraceEnabled()) {
        ZooTrace.logTraceMessage(LOG, ZooTrace.EVENT_DELIVERY_TRACE_MASK, "dataWatches.triggerWatch " + path);
        ZooTrace.logTraceMessage(LOG, ZooTrace.EVENT_DELIVERY_TRACE_MASK, "childWatches.triggerWatch " + parentName);
    }
    // Fire watches: node-deleted on the path (data + child watchers, without
    // double-notifying), then children-changed on the parent.
    Set<Watcher> processed = dataWatches.triggerWatch(path, EventType.NodeDeleted);
    childWatches.triggerWatch(path, EventType.NodeDeleted, processed);
    childWatches.triggerWatch(parentName.equals("") ? "/" : parentName, EventType.NodeChildrenChanged);
}
java
/**
 * Recursively accumulates the node count and data-byte total of the subtree
 * rooted at path into counts. Missing nodes contribute nothing.
 */
private void getCounts(String path, Counts counts) {
    DataNode node = getNode(path);
    if (node == null) {
        return;
    }
    String[] children = null;
    int len = 0;
    // Snapshot the child list and payload size under the node lock.
    synchronized (node) {
        Set<String> childs = node.getChildren();
        if (childs != null) {
            children = childs.toArray(new String[childs.size()]);
        }
        len = (node.data == null ? 0 : node.data.length);
    }
    // add itself
    counts.count += 1;
    counts.bytes += len;
    if (children == null || children.length == 0) {
        return;
    }
    for (String child : children) {
        getCounts(path + "/" + child, counts);
    }
}
java
private void updateQuotaForPath(String path) { Counts c = new Counts(); getCounts(path, c); StatsTrack strack = new StatsTrack(); strack.setBytes(c.bytes); strack.setCount(c.count); String statPath = Quotas.quotaZookeeper + path + "/" + Quotas.statNode; DataNode node = getNode(statPath); // it should exist if (node == null) { LOG.warn("Missing quota stat node " + statPath); return; } synchronized (node) { node.data = strack.toString().getBytes(); } }
java
/**
 * Walks the quota subtree; at each leaf that is a limit node it recomputes
 * the stats for the corresponding real path and registers that path in the
 * prefix trie.
 */
private void traverseNode(String path) {
    DataNode node = getNode(path);
    String children[] = null;
    synchronized (node) {
        Set<String> childs = node.getChildren();
        if (childs != null) {
            children = childs.toArray(new String[childs.size()]);
        }
    }
    if (children != null) {
        if (children.length == 0) {
            // this node does not have a child
            // is the leaf node
            // check if its the leaf node
            String endString = "/" + Quotas.limitNode;
            if (path.endsWith(endString)) {
                // ok this is the limit node
                // get the real node and update
                // the count and the bytes
                // NOTE(review): uses indexOf(endString), not lastIndexOf —
                // a path segment repeating the limit-node name earlier would
                // truncate at the wrong spot; confirm this cannot occur.
                String realPath = path.substring(Quotas.quotaZookeeper.length(), path.indexOf(endString));
                updateQuotaForPath(realPath);
                this.pTrie.addPath(realPath);
            }
            return;
        }
        for (String child : children) {
            traverseNode(path + "/" + child);
        }
    }
}
java
/**
 * Rebuilds quota stats and the prefix trie by traversing the quota subtree,
 * if one exists.
 */
private void setupQuota() {
    // No quota subtree => nothing to set up.
    if (getNode(Quotas.quotaZookeeper) == null) {
        return;
    }
    traverseNode(Quotas.quotaZookeeper);
}
java
/**
 * Writes a human-readable dump of every session's ephemeral paths to the
 * given writer. Each path set is read under its own lock.
 */
public void dumpEphemerals(PrintWriter pwriter) {
    final Set<Long> sessionIds = ephemerals.keySet();
    pwriter.println("Sessions with Ephemerals (" + sessionIds.size() + "):");
    for (long sessionId : sessionIds) {
        pwriter.print("0x" + Long.toHexString(sessionId));
        pwriter.println(":");
        final HashSet<String> paths = ephemerals.get(sessionId);
        synchronized (paths) {
            for (String path : paths) {
                pwriter.println("\t" + path);
            }
        }
    }
}
java
/**
 * Reads the latch node's payload and decodes it as a single big-endian int
 * holding the current count.
 */
public int getCount() throws InterruptedException, KeeperException {
    return ByteBuffer.wrap(m_zk.getData(m_path, false, null)).getInt();
}
java
/**
 * Checks whether the distributed latch has reached zero. The result is
 * latched locally: once observed at zero, later calls skip the ZK read.
 */
public boolean isCountedDown() throws InterruptedException, KeeperException {
    if (countedDown) {
        return true;
    }
    final int remaining = ByteBuffer.wrap(m_zk.getData(m_path, false, null)).getInt();
    if (remaining > 0) {
        return false;
    }
    countedDown = true;
    return true;
}
java
/**
 * Clones the shared scan's output schema into this scan, rebinding each
 * copied column to this scan's own table alias.
 */
private void copyTableSchemaFromShared() {
    for (SchemaColumn scol : m_sharedScan.getOutputSchema()) {
        // Same column identity and expression, but under this scan's alias.
        SchemaColumn copy = new SchemaColumn(scol.getTableName(), getTableAlias(), scol.getColumnName(), scol.getColumnAlias(), scol.getExpression(), scol.getDifferentiator());
        addOutputColumn(copy);
    }
}
java
/**
 * Makes the output schemas of this node, the best base plan, and (if any)
 * the best recursive plan mutually compatible: first widen the current
 * schema to cover both, then widen the base/recursive schemas to match, and
 * finally recompute dependent output schemas for whichever plans changed.
 */
public void harmonizeOutputSchema() {
    boolean changedCurrent;
    boolean changedBase;
    boolean changedRecursive = false;
    NodeSchema currentSchema = getOutputSchema();
    NodeSchema baseSchema = getBestCostBasePlan().rootPlanGraph.getTrueOutputSchema(false);
    NodeSchema recursiveSchema = (getBestCostRecursivePlan() == null) ? null : getBestCostRecursivePlan().rootPlanGraph.getTrueOutputSchema(true);
    // First, make the current schema
    // the widest.
    changedCurrent = currentSchema.harmonize(baseSchema, "Base Query");
    if (recursiveSchema != null) {
        // Widen the current schema to the recursive
        // schema if necessary as well.
        boolean changedRec = currentSchema.harmonize(recursiveSchema, "Recursive Query");
        changedCurrent = changedCurrent || changedRec;
    }
    // Then change the base and current
    // schemas.
    // NOTE(review): changedCurrent is computed but never consulted below —
    // confirm whether the current schema also needs a dependent refresh.
    changedBase = baseSchema.harmonize(currentSchema, "Base Query");
    if (recursiveSchema != null) {
        changedRecursive = recursiveSchema.harmonize(currentSchema, "Recursive Query");
    }
    // If we changed something, update the output schemas
    // which depend on the one we changed.
    if (changedBase) {
        getBestCostBasePlan().rootPlanGraph.getTrueOutputSchema(true);
    }
    if (changedRecursive) {
        getBestCostRecursivePlan().rootPlanGraph.getTrueOutputSchema(true);
    }
}
java
/**
 * Completion trampoline: releases waiters, runs afterDone(), and executes
 * listeners iteratively. Chained SetFuture listeners are unwound into the
 * outer loop instead of recursing, to avoid StackOverflowErrors on long
 * transform chains.
 */
private static void complete(AbstractFuture<?> future) {
    boolean maskExecutorExceptions = future.maskExecutorExceptions;
    Listener next = null;
    outer: while (true) {
        future.releaseWaiters();
        // We call this before the listeners in order to avoid needing to manage a separate stack data
        // structure for them.
        // afterDone() should be generally fast and only used for cleanup work... but in theory can
        // also be recursive and create StackOverflowErrors
        future.afterDone();
        // push the current set of listeners onto next
        next = future.clearListeners(next);
        future = null;
        while (next != null) {
            Listener curr = next;
            next = next.next;
            Runnable task = curr.task;
            if (task instanceof AbstractFuture.SetFuture) {
                AbstractFuture.SetFuture<?> setFuture = (AbstractFuture.SetFuture) task;
                // We unwind setFuture specifically to avoid StackOverflowErrors in the case of long
                // chains of SetFutures
                // Handling this special case is important because there is no way to pass an executor to
                // setFuture, so a user couldn't break the chain by doing this themselves. It is also
                // potentially common if someone writes a recursive Futures.transformAsync transformer.
                future = setFuture.owner;
                if (future.value == setFuture) {
                    Object valueToSet = getFutureValue(setFuture.future);
                    if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) {
                        // CAS succeeded: restart the outer loop to complete the owner.
                        continue outer;
                    }
                }
                // other wise the future we were trying to set is already done.
            } else {
                executeListener(task, curr.executor, maskExecutorExceptions);
            }
        }
        break;
    }
}
java
/**
 * Inserts a slash-separated path into the trie, creating intermediate nodes
 * as needed, and flags the terminal node as a stored path.
 *
 * @throws IllegalArgumentException for paths with fewer than two components
 */
public void addPath(String path) {
    if (path == null) {
        return;
    }
    final String[] parts = path.split("/");
    if (parts.length <= 1) {
        throw new IllegalArgumentException("Invalid path " + path);
    }
    // parts[0] is the empty string before the leading '/': skip it.
    TrieNode current = rootNode;
    for (int i = 1; i < parts.length; i++) {
        final String component = parts[i];
        if (current.getChild(component) == null) {
            current.addChild(component, new TrieNode(current));
        }
        current = current.getChild(component);
    }
    current.setProperty(true);
}
java
/**
 * Removes the terminal node of path from the trie. If any intermediate
 * component is missing the call is a silent no-op.
 * NOTE(review): the terminal node is removed even if it still has children,
 * and LOG.info(parent) logs the TrieNode object at every step — confirm
 * both are intended.
 *
 * @throws IllegalArgumentException for paths with fewer than two components
 */
public void deletePath(String path) {
    if (path == null) {
        return;
    }
    String[] pathComponents = path.split("/");
    TrieNode parent = rootNode;
    String part = null;
    if (pathComponents.length <= 1) {
        throw new IllegalArgumentException("Invalid path " + path);
    }
    for (int i=1; i<pathComponents.length; i++) {
        part = pathComponents[i];
        if (parent.getChild(part) == null) {
            //the path does not exist
            return;
        }
        parent = parent.getChild(part);
        LOG.info(parent);
    }
    // `parent` now points at the terminal node; detach it from its parent.
    TrieNode realParent = parent.getParent();
    realParent.deleteChild(part);
}
java
/**
 * Returns the longest prefix of path whose terminal trie node carries the
 * property flag (i.e. was registered via addPath). Returns "" when no
 * flagged prefix exists, and "/" for the root path itself.
 */
public String findMaxPrefix(String path) {
    if (path == null) {
        return null;
    }
    if ("/".equals(path)) {
        return path;
    }
    String[] pathComponents = path.split("/");
    TrieNode parent = rootNode;
    List<String> components = new ArrayList<String>();
    if (pathComponents.length <= 1) {
        throw new IllegalArgumentException("Invalid path " + path);
    }
    int i = 1;
    String part = null;
    StringBuilder sb = new StringBuilder();
    // lastindex: 0-based index into `components` of the deepest flagged
    // node seen so far (i-1 because components collects from pathComponents[1]).
    int lastindex = -1;
    while((i < pathComponents.length)) {
        if (parent.getChild(pathComponents[i]) != null) {
            part = pathComponents[i];
            parent = parent.getChild(part);
            components.add(part);
            if (parent.getProperty()) {
                lastindex = i-1;
            }
        } else {
            // Path diverges from the trie: stop descending.
            break;
        }
        i++;
    }
    // Rebuild the prefix up to and including the deepest flagged component.
    for (int j=0; j< (lastindex+1); j++) {
        sb.append("/" + components.get(j));
    }
    return sb.toString();
}
java
/**
 * Builds a VoltTable from a shorthand schema description. The shorthand
 * supplies an optional name, a required column list, an optional pkey
 * column list (by index or name), and an optional partition column (by
 * index or name).
 * NOTE(review): the exact shorthand grammar lives in m_namePattern /
 * m_columnsPattern / m_pkeyPattern / m_partitionPattern, which are outside
 * this block — confirm details there.
 */
public static VoltTable tableFromShorthand(String schema) {
    String name = "T";
    VoltTable.ColumnInfo[] columns = null;
    // get a name
    Matcher nameMatcher = m_namePattern.matcher(schema);
    if (nameMatcher.find()) {
        name = nameMatcher.group().trim();
    }
    // get the column schema
    Matcher columnDataMatcher = m_columnsPattern.matcher(schema);
    if (!columnDataMatcher.find()) {
        throw new IllegalArgumentException("No column data found in shorthand");
    }
    String[] columnData = columnDataMatcher.group().trim().split("\\s*,\\s*");
    int columnCount = columnData.length;
    columns = new VoltTable.ColumnInfo[columnCount];
    for (int i = 0; i < columnCount; i++) {
        columns[i] = parseColumnShorthand(columnData[i], i);
    }
    // get the pkey
    Matcher pkeyMatcher = m_pkeyPattern.matcher(schema);
    int[] pkeyIndexes = new int[0]; // default no pkey
    if (pkeyMatcher.find()) {
        String[] pkeyColData = pkeyMatcher.group().trim().split("\\s*,\\s*");
        pkeyIndexes = new int[pkeyColData.length];
        for (int pkeyIndex = 0; pkeyIndex < pkeyColData.length; pkeyIndex++) {
            String pkeyCol = pkeyColData[pkeyIndex];
            // numeric means index of column
            if (Character.isDigit(pkeyCol.charAt(0))) {
                int colIndex = Integer.parseInt(pkeyCol);
                pkeyIndexes[pkeyIndex] = colIndex;
            } else {
                // Otherwise resolve the pkey column by name.
                for (int colIndex = 0; colIndex < columnCount; colIndex++) {
                    if (columns[colIndex].name.equals(pkeyCol)) {
                        pkeyIndexes[pkeyIndex] = colIndex;
                        break;
                    }
                }
            }
        }
    }
    // get any partitioning
    Matcher partitionMatcher = m_partitionPattern.matcher(schema);
    int partitionColumnIndex = -1; // default to replicated
    if (partitionMatcher.find()) {
        String partitionColStr = partitionMatcher.group().trim();
        // numeric means index of column
        if (Character.isDigit(partitionColStr.charAt(0))) {
            partitionColumnIndex = Integer.parseInt(partitionColStr);
        } else {
            for (int colIndex = 0; colIndex < columnCount; colIndex++) {
                if (columns[colIndex].name.equals(partitionColStr)) {
                    partitionColumnIndex = colIndex;
                    break;
                }
            }
        }
        assert(partitionColumnIndex != -1) : "Regex match here means there is a partitioning column";
    }
    VoltTable table = new VoltTable(
            new VoltTable.ExtraMetadata(name, partitionColumnIndex, pkeyIndexes, columns),
            columns, columns.length);
    return table;
}
java
/**
 * Exchanges the elements at indices a and b of w in place.
 */
private static void swap(Object[] w, int a, int b) {
    final Object held = w[a];
    w[a] = w[b];
    w[b] = held;
}
java
/**
 * Queues a row for this partition. When the queue size first hits the batch
 * trigger threshold, a drain task is scheduled that keeps loading batches
 * while the queue remains at or above the threshold.
 */
synchronized void insertRowInTable(final VoltBulkLoaderRow nextRow) throws InterruptedException {
    m_partitionRowQueue.put(nextRow);
    // == (not >=): the drain task is only scheduled at the exact moment the
    // queue crosses the threshold; the task itself loops while it stays full.
    if (m_partitionRowQueue.size() == m_minBatchTriggerSize) {
        m_es.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    while (m_partitionRowQueue.size() >= m_minBatchTriggerSize) {
                        loadTable(buildTable(), m_table);
                    }
                } catch (Exception e) {
                    loaderLog.error("Failed to load batch", e);
                }
            }
        });
    }
}
java
/**
 * Collects the declared fields of startClass and all of its superclasses
 * (subclass fields first, then ancestors up to and including Object).
 *
 * Fix: removed the redundant {@code (List<Field>)} cast on the recursive
 * call — getFields already returns List&lt;Field&gt; — and seeded the list
 * directly from the declared-fields array.
 *
 * @param startClass class whose field hierarchy to walk; must not be null
 * @return all declared fields in subclass-to-superclass order
 */
public static List<Field> getFields(Class<?> startClass) {
    List<Field> fields = new ArrayList<Field>(Arrays.asList(startClass.getDeclaredFields()));
    Class<?> parentClass = startClass.getSuperclass();
    if (parentClass != null) {
        fields.addAll(getFields(parentClass));
    }
    return fields;
}
java
/**
 * Creates the singleton ImportManager for this host, registers its importer
 * stats source with the stats agent, and starts importers from the current
 * catalog.
 */
public static synchronized void initialize(int myHostId, CatalogContext catalogContext, HostMessenger messenger) throws BundleException, IOException {
    ImporterStatsCollector statsCollector = new ImporterStatsCollector(myHostId);
    ImportManager em = new ImportManager(myHostId, messenger, statsCollector);
    VoltDB.instance().getStatsAgent().registerStatsSource(StatsSelector.IMPORTER, myHostId, statsCollector);
    // Publish the singleton before starting importers.
    m_self = em;
    em.create(catalogContext);
}
java
/**
 * Loads the importer configuration and bundles for the given catalog and
 * (re)starts the importers; any failure crashes the local VoltDB node.
 */
private synchronized void create(CatalogContext catalogContext) {
    try {
        restartImporters(loadNewConfigAndBundles(catalogContext));
    } catch (final Exception e) {
        VoltDB.crashLocalVoltDB("Error creating import processor", true, e);
    }
}
java
/**
 * Builds the importer configuration map from the deployment's import
 * section, pruning entries whose procedures are missing from the catalog
 * or whose importer bundle cannot be loaded, and (re)resolves the
 * formatter factories used by the surviving configurations.
 *
 * @param catalogContext catalog supplying the deployment's import element
 * @return the validated importer configurations, keyed by config name
 */
private Map<String, ImportConfiguration> loadNewConfigAndBundles(CatalogContext catalogContext) {
    Map<String, ImportConfiguration> newProcessorConfig;
    ImportType importElement = catalogContext.getDeployment().getImport();
    if (importElement == null || importElement.getConfiguration().isEmpty()) {
        newProcessorConfig = new HashMap<>();
    } else {
        newProcessorConfig = CatalogUtil.getImportProcessorConfig(importElement);
    }
    // validate each configuration, dropping unusable ones via the iterator
    Iterator<Map.Entry<String, ImportConfiguration>> iter = newProcessorConfig.entrySet().iterator();
    while (iter.hasNext()) {
        String configName = iter.next().getKey();
        ImportConfiguration importConfig = newProcessorConfig.get(configName);
        Properties properties = importConfig.getmoduleProperties();
        String importBundleJar = properties.getProperty(ImportDataProcessor.IMPORT_MODULE);
        Preconditions.checkNotNull(importBundleJar,
                "Import source is undefined or custom import plugin class missing.");
        // drop configs whose target procedures are not in the catalog
        if (!importConfig.checkProcedures(catalogContext, importLog, configName)) {
            iter.remove();
            continue;
        }
        // NOTE: if bundle is already loaded, loadImporterBundle does nothing and returns true
        boolean bundlePresent = loadImporterBundle(properties);
        if (!bundlePresent) {
            iter.remove();
        }
    }
    // rebuild the formatter-factory cache for the surviving configs;
    // factories are resolved lazily, once per formatter module URI
    m_formatterFactories.clear();
    for (ImportConfiguration config : newProcessorConfig.values()) {
        Map<String, FormatterBuilder> formatters = config.getFormatterBuilders();
        if (formatters != null) {
            try {
                for (FormatterBuilder builder : formatters.values()) {
                    String module = builder.getFormatterProperties()
                            .getProperty(ImportDataProcessor.IMPORT_FORMATTER);
                    AbstractFormatterFactory formatterFactory = m_formatterFactories.get(module);
                    if (formatterFactory == null) {
                        URI moduleURI = URI.create(module);
                        formatterFactory = m_moduleManager.getService(moduleURI, AbstractFormatterFactory.class);
                        if (formatterFactory == null) {
                            // unresolvable formatter module is fatal for the node
                            VoltDB.crashLocalVoltDB("Failed to initialize formatter from: " + module);
                        }
                        m_formatterFactories.put(module, formatterFactory);
                    }
                    builder.setFormatterFactory(formatterFactory);
                }
            } catch (Throwable t) {
                VoltDB.crashLocalVoltDB("Failed to initialize formatter.");
            }
        }
    }
    importLog.info("Final importer count:" + newProcessorConfig.size());
    return newProcessorConfig;
}
java
/**
 * Ensures the importer factory for the module named in the properties is
 * loaded and registered, caching it both by bundle jar and by importer
 * type name. The module property has the form "type|bundleJarOrClass".
 *
 * @return true when the factory is available (already cached or loaded
 *         now); false when an OSGi bundle or class could not be resolved
 */
private boolean loadImporterBundle(Properties moduleProperties){
    String importModuleName = moduleProperties.getProperty(ImportDataProcessor.IMPORT_MODULE);
    // module name format: "<type>|<bundle jar or class name>"
    String attrs[] = importModuleName.split("\\|");
    String bundleJar = attrs[1];
    String moduleType = attrs[0];
    try {
        AbstractImporterFactory importerFactory = m_loadedBundles.get(bundleJar);
        if (importerFactory == null) {
            if (moduleType.equalsIgnoreCase("osgi")) {
                // OSGi bundle: resolve the factory as a module service
                URI bundleURI = URI.create(bundleJar);
                importerFactory = m_moduleManager.getService(bundleURI, AbstractImporterFactory.class);
                if (importerFactory == null) {
                    importLog.error("Failed to initialize importer from: " + bundleJar);
                    return false;
                }
            } else {
                // class based importer.
                Class<?> reference = this.getClass().getClassLoader().loadClass(bundleJar);
                if (reference == null) {
                    importLog.error("Failed to initialize importer from: " + bundleJar);
                    return false;
                }
                importerFactory = (AbstractImporterFactory)reference.newInstance();
            }
            // importers must self-identify with a unique, non-blank type name
            String importerType = importerFactory.getTypeName();
            if (importerType == null || importerType.trim().isEmpty()) {
                throw new RuntimeException("Importer must implement and return a valid unique name.");
            }
            Preconditions.checkState(!m_importersByType.containsKey(importerType),
                    "Importer must implement and return a valid unique name: " + importerType);
            m_importersByType.put(importerType, importerFactory);
            m_loadedBundles.put(bundleJar, importerFactory);
        }
    } catch(Throwable t) {
        // log and rethrow (propagate wraps checked throwables as unchecked)
        importLog.error("Failed to configure import handler for " + bundleJar, t);
        Throwables.propagate(t);
    }
    return true;
}
java
/**
 * Prints an exception message to stdout, but only up to
 * MAX_CAUGHT_EXCEPTION_MESSAGES times; on reaching the limit a final
 * notice is printed and further messages are suppressed.
 */
protected static void printCaughtException(String exceptionMessage) {
    countCaughtExceptions++;
    if (countCaughtExceptions <= MAX_CAUGHT_EXCEPTION_MESSAGES) {
        System.out.println(exceptionMessage);
    }
    if (countCaughtExceptions == MAX_CAUGHT_EXCEPTION_MESSAGES) {
        System.out.println("In NonVoltDBBackend, reached limit of " + MAX_CAUGHT_EXCEPTION_MESSAGES
                + " exception messages to be printed.");
    }
}
java
protected List<String> getAllColumns(String tableName) { List<String> columns = new ArrayList<String>(); try { // Lower-case table names are required for PostgreSQL; we might need to // alter this if we use another comparison database (besides HSQL) someday ResultSet rs = dbconn.getMetaData().getColumns(null, null, tableName.toLowerCase(), null); while (rs.next()) { columns.add(rs.getString(4)); } } catch (SQLException e) { printCaughtException("In NonVoltDBBackend.getAllColumns, caught SQLException: " + e); } return columns; }
java
protected List<String> getPrimaryKeys(String tableName) { List<String> pkCols = new ArrayList<String>(); try { // Lower-case table names are required for PostgreSQL; we might need to // alter this if we use another comparison database (besides HSQL) someday ResultSet rs = dbconn.getMetaData().getPrimaryKeys(null, null, tableName.toLowerCase()); while (rs.next()) { pkCols.add(rs.getString(4)); } } catch (SQLException e) { printCaughtException("In NonVoltDBBackend.getPrimaryKeys, caught SQLException: " + e); } return pkCols; }
java
/**
 * Returns the columns of {@code tableName} that are not part of its
 * primary key.
 */
protected List<String> getNonPrimaryKeyColumns(String tableName) {
    final List<String> nonPkColumns = getAllColumns(tableName);
    final List<String> pkColumns = getPrimaryKeys(tableName);
    nonPkColumns.removeAll(pkColumns);
    return nonPkColumns;
}
java
/** Applies each QueryTransformer to the query in turn, left to right. */
protected String transformQuery(String query, QueryTransformer ... qts) {
    String transformed = query;
    for (QueryTransformer transformer : qts) {
        transformed = transformQuery(transformed, transformer);
    }
    return transformed;
}
java
/**
 * Records an original/modified SQL pair in the transformed-SQL file, when
 * that file is open and the transformation actually changed the statement.
 */
static protected void printTransformedSql(String originalSql, String modifiedSql) {
    if (transformedSqlFileWriter == null || originalSql.equals(modifiedSql)) {
        return;  // nothing to record
    }
    try {
        transformedSqlFileWriter.write("original SQL: " + originalSql + "\n");
        transformedSqlFileWriter.write("modified SQL: " + modifiedSql + "\n");
    } catch (IOException e) {
        printCaughtException("Caught IOException:\n " + e + "\noriginal SQL: " + originalSql
                + "\nmodified SQL: " + modifiedSql);
    }
}
java
private static SQLPatternPart makeGroup(boolean capture, String captureLabel, SQLPatternPart part) { // Need an outer part if capturing something that's already a group (capturing or not) boolean alreadyGroup = (part.m_flags & (SQLPatternFactory.GROUP | SQLPatternFactory.CAPTURE)) != 0; SQLPatternPart retPart = alreadyGroup ? new SQLPatternPartElement(part) : part; if (capture) { retPart.m_flags |= SQLPatternFactory.CAPTURE; retPart.setCaptureLabel(captureLabel); } else { retPart.m_flags |= SQLPatternFactory.GROUP; } return retPart; }
java
/**
 * Boots an in-memory HSQL session configured for VoltDB use (UTC default
 * timezone, case-insensitive identifiers) and wraps it in an
 * HSQLInterface. Each call creates a uniquely named database instance.
 *
 * @throws HsqlException (rethrown after logging) if session setup fails
 */
public static HSQLInterface loadHsqldb(ParameterStateManager psMgr) {
    // Specifically set the timezone to UTC to avoid the default usage local timezone in HSQL.
    // This ensures that all VoltDB data paths use the same timezone for representing time.
    TimeZone.setDefault(TimeZone.getTimeZone("GMT+0"));
    // unique per-call instance name: counter + wall-clock millis
    String name = "hsqldbinstance-" + String.valueOf(instanceId) + "-" + String.valueOf(System.currentTimeMillis());
    instanceId++;
    HsqlProperties props = new HsqlProperties();
    try {
        Session sessionProxy = DatabaseManager.newSession(DatabaseURL.S_MEM, name, "SA", "", props, 0);
        // make HSQL case insensitive
        sessionProxy.executeDirectStatement("SET IGNORECASE TRUE;");
        sessionProxy.setParameterStateManager(psMgr);
        return new HSQLInterface(sessionProxy);
    } catch (HsqlException caught) {
        m_logger.warn("Unexpected error initializing the SQL parser", caught);
        caught.printStackTrace();
        throw caught;
    }
}
java
/**
 * Runs a DDL statement and returns the VoltXMLDiff between the affected
 * table's XML before and after, using the pre-parsed HSQLDDLInfo to work
 * out which table (or which index's table) is affected and whether a
 * CASCADE may have dropped dependent views.
 *
 * @throws HSQLParseException on HSQL failure, or when pre-processing
 *         already determined the statement cannot be planned
 */
public VoltXMLDiff runDDLCommandAndDiff(HSQLDDLInfo stmtInfo, String ddl) throws HSQLParseException {
    // name of the table we're going to have to diff (if any)
    String expectedTableAffected = null;
    // If we fail to pre-process a statement, then we want to fail, but we're
    // still going to run the statement through HSQL to get its error message.
    // This variable helps us make sure we don't fail to preprocess and then
    // succeed at running the statement through HSQL.
    boolean expectFailure = false;
    // If cascade, we're going to need to look for any views that might have
    // gotten deleted. So get a list of all tables and views that existed before
    // we run the ddl, then we'll do a comparison later.
    Set<String> existingTableNames = null;
    if (stmtInfo != null) {
        if (stmtInfo.cascade) {
            existingTableNames = getTableNames();
        }
        // we either have an index name or a table/view name, but not both
        if (stmtInfo.noun == HSQLDDLInfo.Noun.INDEX) {
            if (stmtInfo.verb == HSQLDDLInfo.Verb.CREATE) {
                expectedTableAffected = stmtInfo.secondName;
            } else {
                expectedTableAffected = tableNameForIndexName(stmtInfo.name);
            }
        } else {
            expectedTableAffected = stmtInfo.name;
        }
        // Note that we're assuming ifexists can't happen with "create"
        expectFailure = (expectedTableAffected == null) && !stmtInfo.ifexists;
    } else {
        expectFailure = true;
    }

    runDDLCommand(ddl);

    // If we expect to fail, but the statement above didn't bail...
    // (Shouldn't get here ever I think)
    if (expectFailure) {
        throw new HSQLParseException("Unable to plan statement due to VoltDB DDL pre-processing error");
    }
    // sanity checks for non-failure
    assert(stmtInfo != null);

    // get old and new XML representations for the affected table
    VoltXMLElement tableXMLNew = null, tableXMLOld = null;
    if (expectedTableAffected != null) {
        tableXMLNew = getXMLForTable(expectedTableAffected);
        tableXMLOld = lastSchema.get(expectedTableAffected);
    }
    // valid reasons for tableXMLNew to be null are DROP IF EXISTS and not much else
    if (tableXMLNew == null) {
        tableXMLNew = emptySchema;
    }
    // the old table can be null for CREATE TABLE or for IF EXISTS stuff
    if (tableXMLOld == null) {
        tableXMLOld = emptySchema;
    }

    VoltXMLDiff diff = VoltXMLElement.computeDiff(tableXMLOld, tableXMLNew);

    // now find any views that might be missing and make sure the diff reflects that
    // they're gone
    if (stmtInfo.cascade) {
        Set<String> finalTableNames = getTableNames();
        for (String tableName : existingTableNames) {
            if (!finalTableNames.contains(tableName)) {
                tableName = tableName.toLowerCase();
                tableXMLOld = lastSchema.get(tableName).children.get(0);
                lastSchema.remove(tableName);
                // the primary affected table is already covered by the diff above
                if (tableName.equals(expectedTableAffected)) {
                    continue;
                }
                diff.m_removedElements.add(tableXMLOld);
            }
        }
    }

    // this is a hack to allow the diff-apply-er to accept a diff that has no order
    diff.m_elementOrder.clear();

    // remember the current schema
    if (expectedTableAffected != null) {
        lastSchema.put(expectedTableAffected, tableXMLNew.duplicate());
    }

    return diff;
}
java
/**
 * Executes a DDL statement against the HSQL session.
 *
 * @throws HSQLParseException wrapping HSQL's main error string on failure
 */
public void runDDLCommand(String ddl) throws HSQLParseException {
    sessionProxy.clearLocalTables();
    final Result result = sessionProxy.executeDirectStatement(ddl);
    if (result.hasError()) {
        throw new HSQLParseException(result.getMainString());
    }
}
java
private void fixupInStatementExpressions(VoltXMLElement expr) throws HSQLParseException { if (doesExpressionReallyMeanIn(expr)) { inFixup(expr); // can't return because in with subquery can be nested } // recursive hunt for (VoltXMLElement child : expr.children) { fixupInStatementExpressions(child); } }
java
/**
 * Rewrites an XML node known to represent an IN predicate into canonical
 * form: an "in" operation whose two children are the left-hand row and the
 * in-list. The in-list is a "vector" built from a VALUES table's rows, or
 * the table subquery, or a single value element.
 */
private void inFixup(VoltXMLElement inElement) {
    // make this an in expression
    inElement.name = "operation";
    inElement.attributes.put("optype", "in");

    // classify the existing children
    VoltXMLElement rowElem = null;
    VoltXMLElement tableElem = null;
    VoltXMLElement subqueryElem = null;
    VoltXMLElement valueElem = null;
    for (VoltXMLElement child : inElement.children) {
        if (child.name.equals("row")) {
            rowElem = child;
        } else if (child.name.equals("table")) {
            tableElem = child;
        } else if (child.name.equals("tablesubquery")) {
            subqueryElem = child;
        } else if (child.name.equals("value")) {
            valueElem = child;
        }
    }

    VoltXMLElement inlist;
    if (tableElem != null) {
        // make the table expression an in-list
        inlist = new VoltXMLElement("vector");
        for (VoltXMLElement child : tableElem.children) {
            // each VALUES row must carry exactly one expression
            assert(child.name.equals("row"));
            assert(child.children.size() == 1);
            inlist.children.addAll(child.children);
        }
    } else if (subqueryElem != null) {
        inlist = subqueryElem;
    } else {
        assert valueElem != null;
        inlist = valueElem;
    }
    assert(rowElem != null);
    assert(inlist != null);

    // rebuild children as exactly [row, inlist] — order matters downstream
    inElement.children.clear();
    // add the row
    inElement.children.add(rowElem);
    // add the inlist
    inElement.children.add(inlist);
}
java
@SuppressWarnings("unused") private void printTables() { try { String schemaName = sessionProxy.getSchemaName(null); System.out.println("*** Tables For Schema: " + schemaName + " ***"); } catch (HsqlException caught) { caught.printStackTrace(); } // load all the tables HashMappedList hsqlTables = getHSQLTables(); for (int i = 0; i < hsqlTables.size(); i++) { Table table = (Table) hsqlTables.get(i); System.out.println(table.getName().name); } }
java
public VoltXMLElement getXMLForTable(String tableName) throws HSQLParseException { VoltXMLElement xml = emptySchema.duplicate(); // search all the tables XXX probably could do this non-linearly, // but i don't know about case-insensitivity yet HashMappedList hsqlTables = getHSQLTables(); for (int i = 0; i < hsqlTables.size(); i++) { Table table = (Table) hsqlTables.get(i); String candidateTableName = table.getName().name; // found the table of interest if (candidateTableName.equalsIgnoreCase(tableName)) { VoltXMLElement vxmle = table.voltGetTableXML(sessionProxy); assert(vxmle != null); xml.children.add(vxmle); return xml; } } return null; }
java
/**
 * Ensures every assigned TopicPartition has a CommitTracker, a
 * last-committed-offset entry, a pause offset and a work tracker. The
 * tracker and offset maps are rebuilt as copies and swapped in only when
 * at least one new partition was seen (copy-on-write publish).
 */
private void calculateTrackers(Collection<TopicPartition> partitions) {
    // copy-on-write snapshots of the current maps
    Map<TopicPartition, CommitTracker> trackers = new HashMap<>();
    trackers.putAll(m_trackerMap.get());
    Map<TopicPartition, AtomicLong> lastCommittedOffSets = new HashMap<>();
    lastCommittedOffSets.putAll(m_lastCommittedOffSets.get());
    boolean newTopicPartition = false;
    for (TopicPartition partition : partitions) {
        if (m_trackerMap.get().get(partition) != null) {
            continue;  // partition already tracked
        }
        newTopicPartition = true;
        long startOffset = -1L;
        CommitTracker commitTracker = null;
        // time-based commit policy uses the simple tracker; otherwise track
        // durably per topic/partition/group
        if (m_config.getCommitPolicy() == KafkaCommitPolicy.TIME && m_config.getTriggerValue() > 0) {
            commitTracker = new SimpleTracker();
        } else {
            commitTracker = new DurableTracker(KafkaConstants.IMPORT_GAP_LEAD, partition.topic(),
                    partition.partition(), m_config.getGroupId());
        }
        trackers.put(partition, commitTracker);
        try {
            // resume from the group's committed offset when one exists
            OffsetAndMetadata offsetAndMetaData = m_consumer.committed(partition);
            startOffset = offsetAndMetaData != null ? offsetAndMetaData.offset() : -1L;
            if (startOffset > -1L) {
                commitTracker.resetTo(startOffset);
            }
        } catch (KafkaException e) {
            LOGGER.error("Failed to read committed offsets for group " + m_config.getGroupId()
                    + partition + " " + e.getMessage());
        }
        lastCommittedOffSets.put(partition, new AtomicLong(startOffset));
        m_pauseOffsets.put(partition, new AtomicLong(-1));
        m_workTrackers.put(partition, new PendingWorkTracker());
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Starting offset for group:" + m_config.getGroupId() + ":" + startOffset
                    + " partition:" + partition);
        }
    }
    if (newTopicPartition) {
        // publish the updated snapshots atomically
        m_trackerMap.set(trackers);
        m_lastCommittedOffSets.set(lastCommittedOffSets);
    }
}
java
private void seek(List<TopicPartition> seekList) { for (TopicPartition tp : seekList) { AtomicLong lastCommittedOffset = m_lastCommittedOffSets.get().get(tp); if (lastCommittedOffset != null && lastCommittedOffset.get() > -1L) { AtomicLong lastSeeked = m_lastSeekedOffSets.get(tp); //eliminate duplicate seek if (lastSeeked != null && lastSeeked.get() == lastCommittedOffset.get()) { continue; } m_consumer.seek(tp, lastCommittedOffset.longValue()); m_lastSeekedOffSets.put(tp, new AtomicLong(lastCommittedOffset.get())); if (LOGGER.isDebugEnabled()) { LOGGER.debug("Moves offset for group " + m_config.getGroupId() + " -" + tp + " to " + lastCommittedOffset); } } } }
java
/**
 * Formats {@code |value|} as a decimal string left-padded with zeros to
 * {@code precision} digits. When the digit string is longer than
 * {@code precision}, the first {@code precision} digits are dropped and
 * the remainder kept. The result is truncated to {@code maxSize}
 * characters when maxSize &lt; precision.
 */
public static String toZeroPaddedString(long value, int precision, int maxSize) {
    final StringBuffer sb = new StringBuffer();
    final long magnitude = value < 0 ? -value : value;
    String digits = Long.toString(magnitude);
    if (digits.length() > precision) {
        digits = digits.substring(precision);
    }
    int zeros = precision - digits.length();
    while (zeros-- > 0) {
        sb.append('0');
    }
    sb.append(digits);
    if (maxSize < precision) {
        sb.setLength(maxSize);
    }
    return sb.toString();
}
java
/**
 * Lower-cases {@code source}, replacing every non-letter-or-digit
 * character — and a leading digit — with {@code substitute}, yielding a
 * string safe for identifier-like use.
 */
public static String toLowerSubset(String source, char substitute) {
    final int len = source.length();
    final StringBuffer sb = new StringBuffer(len);
    for (int i = 0; i < len; i++) {
        final char ch = source.charAt(i);
        if (!Character.isLetterOrDigit(ch)) {
            sb.append(substitute);
        } else if (i == 0 && Character.isDigit(ch)) {
            // first character may not be a digit
            sb.append(substitute);
        } else {
            sb.append(Character.toLowerCase(ch));
        }
    }
    return sb.toString();
}
java
/**
 * Renders any Java array (primitive or reference) as "{e1,e2,...}" using
 * reflection, so int[] and Object[] are handled uniformly.
 */
public static String arrayToString(Object array) {
    final int len = Array.getLength(array);
    final StringBuffer sb = new StringBuffer(2 * (len + 1));
    sb.append('{');
    for (int i = 0; i < len; i++) {
        if (i > 0) {
            sb.append(',');
        }
        sb.append(Array.get(array, i));
    }
    return sb.append('}').toString();
}
java
/** Appends "s1 separator s2 terminator" to the buffer, in that order. */
public static void appendPair(StringBuffer b, String s1, String s2, String separator, String terminator) {
    b.append(s1).append(separator).append(s2).append(terminator);
}
java
/**
 * Returns the length {@code s} would have with trailing spaces removed
 * (only ' ' counts; other whitespace is preserved).
 */
public static int rightTrimSize(String s) {
    int end = s.length();
    while (end > 0 && s.charAt(end - 1) == ' ') {
        end--;
    }
    return end;
}
java
/**
 * Returns the index of the first non-space character at or after
 * {@code start}, or {@code s.length()} if only spaces remain.
 */
public static int skipSpaces(String s, int start) {
    final int limit = s.length();
    int i = start;
    while (i < limit && s.charAt(i) == ' ') {
        i++;
    }
    return i;
}
java
/**
 * Splits {@code s} on every occurrence of {@code separator} (a literal,
 * not a regex). Unlike String.split, trailing empty fields are preserved:
 * split("a,", ",") returns ["a", ""].
 *
 * @return the fields, never null; a string with no separator yields itself
 */
public static String[] split(String s, String separator) {
    // java.util.ArrayList (fully qualified, no new import) replaces the
    // hand-rolled HsqlArrayList: the list is purely local, so the stdlib
    // type is a drop-in replacement
    java.util.List<String> parts = new java.util.ArrayList<String>();
    int from = 0;
    while (true) {
        int next = s.indexOf(separator, from);
        if (next == -1) {
            parts.add(s.substring(from));
            break;
        }
        parts.add(s.substring(from, next));
        from = next + separator.length();
    }
    return parts.toArray(new String[parts.size()]);
}
java
/**
 * Appends this table's export-statistics columns after the generic columns
 * supplied by the superclass; the order of the adds defines the stats row
 * layout.
 */
@Override
protected void populateColumnSchema(ArrayList<ColumnInfo> columns) {
    super.populateColumnSchema(columns);
    // identity of the export source
    columns.add(new ColumnInfo(VoltSystemProcedure.CNAME_SITE_ID, VoltSystemProcedure.CTYPE_ID));
    columns.add(new ColumnInfo(Columns.PARTITION_ID, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.SOURCE_NAME, VoltType.STRING));
    columns.add(new ColumnInfo(Columns.EXPORT_TARGET, VoltType.STRING));
    columns.add(new ColumnInfo(Columns.ACTIVE, VoltType.STRING));
    // tuple counters and timestamps
    columns.add(new ColumnInfo(Columns.TUPLE_COUNT, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.TUPLE_PENDING, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.LAST_QUEUED_TIMESTAMP, VoltType.TIMESTAMP));
    columns.add(new ColumnInfo(Columns.LAST_ACKED_TIMESTAMP, VoltType.TIMESTAMP));
    // latency, gap and status
    columns.add(new ColumnInfo(Columns.AVERAGE_LATENCY, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.MAX_LATENCY, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.QUEUE_GAP, VoltType.BIGINT));
    columns.add(new ColumnInfo(Columns.STATUS, VoltType.STRING));
}
java
// Buffers the read item and immediately attempts to release any buffered
// reads back to the mailbox for the given handle.
private void offerInternal(Mailbox mailbox, Item item, long handle) {
    m_bufferedReads.add(item);
    releaseBufferedReads(mailbox, handle);
}
java
/**
 * Total bytes queued in this stream: the in-memory blocks' total sizes
 * plus the on-disk deque size, minus one header per persisted object so
 * the book-keeping matches what was originally pushed in.
 */
public long sizeInBytes() throws IOException {
    long memoryBlockUsage = 0;
    for (StreamBlock b : m_memoryDeque) {
        // Use only total size, but throw in the USO
        // to make book keeping consistent when flushed to disk
        // Also dont count persisted blocks.
        memoryBlockUsage += b.totalSize();
    }
    // Subtract USO from on disk size
    return memoryBlockUsage + m_reader.sizeInBytes() - (StreamBlock.HEADER_SIZE * m_reader.getNumObjects());
}
java
public void truncateToSequenceNumber(final long truncationSeqNo) throws IOException { assert(m_memoryDeque.isEmpty()); m_persistentDeque.parseAndTruncate(new BinaryDequeTruncator() { @Override public TruncatorResponse parse(BBContainer bbc) { ByteBuffer b = bbc.b(); ByteOrder endianness = b.order(); b.order(ByteOrder.LITTLE_ENDIAN); try { final long startSequenceNumber = b.getLong(); // If after the truncation point is the first row in the block, the entire block is to be discarded if (startSequenceNumber > truncationSeqNo) { return PersistentBinaryDeque.fullTruncateResponse(); } final long committedSequenceNumber = b.getLong(); // committedSequenceNumber final int tupleCountPos = b.position(); final int tupleCount = b.getInt(); // There is nothing to do with this buffer final long lastSequenceNumber = startSequenceNumber + tupleCount - 1; if (lastSequenceNumber <= truncationSeqNo) { return null; } b.getLong(); // uniqueId // Partial truncation int offset = 0; while (b.hasRemaining()) { if (startSequenceNumber + offset > truncationSeqNo) { // The sequence number of this row is the greater than the truncation sequence number. // Don't want this row, but want to preserve all rows before it. // Move back before the row length prefix, txnId and header // Return everything in the block before the truncation point. // Indicate this is the end of the interesting data. b.limit(b.position()); // update tuple count in the header b.putInt(tupleCountPos, offset - 1); b.position(0); return new ByteBufferTruncatorResponse(b); } offset++; // Not the row we are looking to truncate at. Skip past it (row length + row length field). 
final int rowLength = b.getInt(); b.position(b.position() + rowLength); } return null; } finally { b.order(endianness); } } }); // close reopen reader m_persistentDeque.close(); CatalogContext catalogContext = VoltDB.instance().getCatalogContext(); Table streamTable = VoltDB.instance().getCatalogContext().database.getTables().get(m_streamName); StreamTableSchemaSerializer ds = new StreamTableSchemaSerializer( streamTable, m_streamName, catalogContext.m_genId); m_persistentDeque = new PersistentBinaryDeque(m_nonce, ds, new VoltFile(m_path), exportLog, !DISABLE_COMPRESSION); m_reader = m_persistentDeque.openForRead(m_nonce); // temporary debug stmt exportLog.info("After truncate, PBD size is " + (m_reader.sizeInBytes() - (8 * m_reader.getNumObjects()))); }
java
/**
 * Sets the bit at {@code pos}, growing the capacity and the logical limit
 * as needed.
 *
 * @return the previous value of that bit (0 or 1)
 */
public int set(int pos) {
    while (pos >= capacity) {
        doubleCapacity();
    }
    if (pos >= limitPos) {
        limitPos = pos + 1;  // extend the logical size
    }
    final int windex = pos >> 5;                    // word holding the bit
    final int mask = 0x80000000 >>> (pos & 0x1F);   // bit within the word, MSB-first
    final int word = map[windex];
    map[windex] = word | mask;
    return (word & mask) == 0 ? 0 : 1;
}
java
/**
 * ANDs bits of {@code source} into the bit map at bit position {@code pos}:
 * the targeted window of bits is cleared and then set to (old AND source).
 * Bits spilling past the first byte are applied to the following byte.
 * Positions past the end of the map are ignored.
 */
public static void and(byte[] map, int pos, byte source, int count) {
    int shift = pos & 0x07;                 // bit offset within the first byte
    int mask = (source & 0xff) >>> shift;   // source bits aligned to the target byte
    int innermask = 0xff >> shift;          // window of affected bits in the first byte
    int index = pos / 8;
    if (count < 8) {
        // narrow the window to count bits
        // NOTE(review): for shift > 0 this keeps the window anchored at the
        // byte's high end rather than at pos — confirm callers only use
        // count < 8 with shift == 0
        innermask = innermask >>> (8 - count);
        innermask = innermask << (8 - count);
    }
    mask &= innermask;
    innermask = ~innermask;                 // bits outside the window, preserved
    if (index >= map.length) {
        return;
    }
    byte b = map[index];
    map[index] = (byte) (b & innermask);    // clear the window
    b = (byte) (b & mask);                  // old bits AND source, inside the window
    map[index] = (byte) (map[index] | b);
    if (shift == 0) {
        return;                             // byte-aligned: nothing spills over
    }
    shift = 8 - shift;
    if (count > shift) {
        // same clear-then-AND for the bits spilling into the next byte
        mask = ((source & 0xff) << 8) >>> shift;
        innermask = 0xff00 >>> shift;
        innermask = ~innermask;
        b = map[index + 1];
        map[index + 1] = (byte) (b & innermask);
        b = (byte) (b & mask);
        map[index + 1] = (byte) (map[index + 1] | b);
    }
}
java
/**
 * ORs the high {@code count} bits of {@code source} into the bit map at
 * bit position {@code pos}; bits spilling past the first byte are ORed
 * into the following byte. Positions past the end of the map are ignored.
 */
public static void or(byte[] map, int pos, byte source, int count) {
    final int bitOffset = pos & 0x07;
    final int index = pos / 8;
    if (index >= map.length) {
        return;  // silently ignore writes past the end of the map
    }
    map[index] = (byte) (map[index] | ((source & 0xff) >>> bitOffset));
    // a byte-aligned write cannot spill into the next byte
    if (bitOffset == 0) {
        return;
    }
    final int bitsInFirstByte = 8 - bitOffset;
    if (count > bitsInFirstByte) {
        // OR the spilled low bits into the next byte
        map[index + 1] = (byte) (map[index + 1] | (((source & 0xff) << 8) >>> bitsInFirstByte));
    }
}
java
/**
 * Appends a key/value pair without maintaining sort order, clearing the
 * sorted flag if the new entry breaks the current ordering on the sort
 * column.
 *
 * @return false when the structure is full and fixed-size
 */
public synchronized boolean addUnsorted(int key, int value) {
    if (count == capacity) {
        if (fixedSize) {
            return false;
        }
        doubleCapacity();
    }
    if (sorted && count != 0) {
        // compare the new entry against the current last on the sort column
        final int last = sortOnValues ? values[count - 1] : keys[count - 1];
        final int candidate = sortOnValues ? value : key;
        if (candidate < last) {
            sorted = false;
        }
    }
    hasChanged = true;
    keys[count] = key;
    values[count] = value;
    count++;
    return true;
}
java
/**
 * Inserts a key/value pair keeping the arrays sorted; refuses entries that
 * duplicate an existing value on the sort column.
 *
 * @return false when full and fixed-size, or when the sort-column value
 *         already exists
 */
public synchronized boolean addUnique(int key, int value) {
    if (count == capacity) {
        if (fixedSize) {
            return false;
        }
        doubleCapacity();
    }
    if (!sorted) {
        fastQuickSort();
    }
    targetSearchValue = sortOnValues ? value : key;
    final int slot = binaryEmptySlotSearch();
    if (slot == -1) {
        return false;  // duplicate on the sort column
    }
    hasChanged = true;
    if (count != slot) {
        moveRows(slot, slot + 1, count - slot);  // shift the tail right by one
    }
    keys[slot] = key;
    values[slot] = value;
    count++;
    return true;
}
java
/**
 * Finds the lowest index whose sort-column entry equals
 * {@code targetSearchValue}.
 *
 * @return that index, or -1 when the value is absent
 */
private int binaryFirstSearch() {
    int low = 0;
    int high = count;
    int found = count;
    while (low < high) {
        // unsigned shift avoids the (low + high) / 2 overflow pitfall
        final int mid = (low + high) >>> 1;
        final int compare = compare(mid);
        if (compare < 0) {
            high = mid;
        } else if (compare > 0) {
            low = mid + 1;
        } else {
            // remember the match but keep scanning to its left
            high = mid;
            found = mid;
        }
    }
    return found == count ? -1 : found;
}
java
/**
 * Finds the lowest index whose sort-column entry is strictly greater than
 * {@code targetSearchValue}.
 *
 * @return that index, or -1 when no entry is greater
 */
private int binaryGreaterSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // unsigned shift avoids the (low + high) / 2 overflow pitfall
        final int mid = (low + high) >>> 1;
        if (compare(mid) < 0) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    return low == count ? -1 : low;
}
java
/**
 * Finds the insertion slot for {@code targetSearchValue}: the lowest index
 * whose entry compares greater than or equal to it ({@code count} when all
 * entries are smaller).
 */
private int binarySlotSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // unsigned shift avoids the (low + high) / 2 overflow pitfall
        final int mid = (low + high) >>> 1;
        if (compare(mid) <= 0) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }
    return low;
}
java
/**
 * Finds the insertion slot for {@code targetSearchValue}, requiring that
 * the value is not already present.
 *
 * @return the slot index, or -1 when an equal entry exists
 */
private int binaryEmptySlotSearch() {
    int low = 0;
    int high = count;
    while (low < high) {
        // unsigned shift avoids the (low + high) / 2 overflow pitfall
        final int mid = (low + high) >>> 1;
        final int compare = compare(mid);
        if (compare < 0) {
            high = mid;
        } else if (compare > 0) {
            low = mid + 1;
        } else {
            return -1;  // duplicate found
        }
    }
    return low;
}
java
/**
 * Three-way comparison of {@code targetSearchValue} against the sort
 * column (values or keys) at index {@code i}.
 *
 * @return 1, -1 or 0 as the target is greater, smaller or equal
 */
private int compare(int i) {
    final int entry = sortOnValues ? values[i] : keys[i];
    if (targetSearchValue > entry) {
        return 1;
    }
    if (targetSearchValue < entry) {
        return -1;
    }
    return 0;
}
java
/** Orders rows {@code i} and {@code j} on the configured sort column. */
private boolean lessThan(int i, int j) {
    if (sortOnValues) {
        return values[i] < values[j];
    }
    return keys[i] < keys[j];
}
java
public static void setFontSize(String inFontSize) { // weconsultants@users 20050215 - Changed for Compatbilty fix for JDK 1.3 // Convert Strng to float for deriveFont() call Float stageFloat = new Float(inFontSize); float fontSize = stageFloat.floatValue(); Font fonttTree = fOwner.tTree.getFont().deriveFont(fontSize); fOwner.tTree.setFont(fonttTree); Font fontTxtCommand = fOwner.txtCommand.getFont().deriveFont(fontSize); fOwner.txtCommand.setFont(fontTxtCommand); Font fontTxtResult = fOwner.txtResult.getFont().deriveFont(fontSize); fOwner.txtResult.setFont(fontTxtResult); }
java
// Resolves SSH config settings for hostName: starts from the exact-name
// cache entry (or a fresh Host when absent), then overlays settings from
// every wildcard-pattern entry that matches the name. The merge mutates
// the cached Host in place and marks it with patternsApplied so patterns
// are applied at most once per instance.
public Host lookup(final String hostName) {
    final Map<String, Host> cache = this.refresh();
    Host h = cache.get(hostName);
    if (h == null) {
        h = new Host();
    }
    if (h.patternsApplied) {
        // already merged during a previous lookup
        return h;
    }
    for (final Map.Entry<String, Host> e : cache.entrySet()) {
        if (!isHostPattern(e.getKey())) {
            continue;
        }
        if (!isHostMatch(e.getKey(), hostName)) {
            continue;
        }
        //log.debug("Found host match in SSH config:" + e.getValue());
        h.copyFrom(e.getValue());
    }
    if (h.port == 0) {
        h.port = -1;  // 0 means "unset"; normalize to -1
    }
    h.patternsApplied = true;
    return h;
}
java
/**
 * Builds a single-thread executor whose worker dies after {@code keepAlive}
 * ms of idleness and is recreated on demand, wrapped as a
 * ListeningExecutorService.
 */
public static ListeningExecutorService getCachedSingleThreadExecutor(String name, long keepAlive) {
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(
            0, 1,
            keepAlive, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            CoreUtils.getThreadFactory(null, name, SMALL_STACK_SIZE, false, null));
    return MoreExecutors.listeningDecorator(executor);
}
java
/**
 * Builds a single-thread executor backed by a bounded work queue of the
 * given capacity, wrapped as a ListeningExecutorService.
 */
public static ListeningExecutorService getBoundedSingleThreadExecutor(String name, int capacity) {
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(
            1, 1,
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(capacity),
            CoreUtils.getThreadFactory(name));
    return MoreExecutors.listeningDecorator(executor);
}
java
/**
 * Builds a cached-style pool: no core threads, up to {@code maxPoolSize}
 * workers, direct task hand-off through a SynchronousQueue.
 */
public static ThreadPoolExecutor getBoundedThreadPoolExecutor(int maxPoolSize, long keepAliveTime,
        TimeUnit unit, ThreadFactory tFactory) {
    final SynchronousQueue<Runnable> handoff = new SynchronousQueue<Runnable>();
    return new ThreadPoolExecutor(0, maxPoolSize, keepAliveTime, unit, handoff, tFactory);
}
java
/**
 * Wraps a caller-supplied queue as an ExecutorService: tasks are only
 * enqueued, never run here — the owner of the queue is responsible for
 * draining and executing them. Lifecycle methods (shutdown/shutdownNow)
 * and invokeAll/invokeAny are unsupported.
 *
 * Fix: a failed Queue.offer used to drop the task silently; it now throws
 * RejectedExecutionException, per the Executor contract.
 */
public static ExecutorService getQueueingExecutorService(final Queue<Runnable> taskQueue) {
    return new ExecutorService() {

        /** Enqueues the task or rejects it loudly — never drops it silently. */
        private void enqueue(Runnable task) {
            if (!taskQueue.offer(task)) {
                throw new java.util.concurrent.RejectedExecutionException(
                        "Task queue rejected the submitted task");
            }
        }

        @Override
        public void execute(Runnable command) {
            enqueue(command);
        }

        @Override
        public void shutdown() {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<Runnable> shutdownNow() {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean isShutdown() {
            return false;
        }

        @Override
        public boolean isTerminated() {
            return false;
        }

        @Override
        public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
            return true;
        }

        @Override
        public <T> Future<T> submit(Callable<T> task) {
            Preconditions.checkNotNull(task);
            FutureTask<T> retval = new FutureTask<T>(task);
            enqueue(retval);
            return retval;
        }

        @Override
        public <T> Future<T> submit(Runnable task, T result) {
            Preconditions.checkNotNull(task);
            FutureTask<T> retval = new FutureTask<T>(task, result);
            enqueue(retval);
            return retval;
        }

        @Override
        public Future<?> submit(Runnable task) {
            Preconditions.checkNotNull(task);
            // listenable so callers can attach completion callbacks
            ListenableFutureTask<Object> retval = ListenableFutureTask.create(task, null);
            enqueue(retval);
            return retval;
        }

        @Override
        public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
                throws InterruptedException {
            throw new UnsupportedOperationException();
        }

        @Override
        public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
                long timeout, TimeUnit unit) throws InterruptedException {
            throw new UnsupportedOperationException();
        }

        @Override
        public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
                throws InterruptedException, ExecutionException {
            throw new UnsupportedOperationException();
        }

        @Override
        public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
                throws InterruptedException, ExecutionException, TimeoutException {
            throw new UnsupportedOperationException();
        }
    };
}
java