code
stringlengths
73
34.1k
label
stringclasses
1 value
// Enables the given trace categories, lazily starting the tracer on first use.
// NOTE(review): the read-modify-write of m_enabledCategories is not atomic;
// concurrent callers could lose updates — confirm external synchronization.
public static void enableCategories(Category... categories) throws IOException {
    // Lazily start the tracer the first time a category is enabled.
    if (s_tracer == null) {
        start();
    }
    final VoltTrace tracer = s_tracer;
    assert tracer != null;
    // Union the already-enabled categories with the new ones into an
    // immutable snapshot, then publish it in a single reference write.
    final ImmutableSet.Builder<Category> builder = ImmutableSet.builder();
    builder.addAll(tracer.m_enabledCategories);
    builder.addAll(Arrays.asList(categories));
    tracer.m_enabledCategories = builder.build();
}
java
// Disables the given trace categories. If no categories remain enabled the
// whole tracer is shut down; shutdown errors are deliberately ignored
// (best-effort cleanup).
public static void disableCategories(Category... categories) {
    final VoltTrace tracer = s_tracer;
    if (tracer == null) {
        return; // tracing never started, nothing to disable
    }
    final List<Category> toDisable = Arrays.asList(categories);
    // Rebuild the enabled set without the categories being disabled.
    final ImmutableSet.Builder<Category> builder = ImmutableSet.builder();
    for (Category enabledCategory : tracer.m_enabledCategories) {
        if (!toDisable.contains(enabledCategory)) {
            builder.add(enabledCategory);
        }
    }
    final ImmutableSet<Category> enabledCategories = builder.build();
    if (enabledCategories.isEmpty()) {
        // All categories disabled, shutdown tracer
        try {
            closeAllAndShutdown(null, 0);
        } catch (IOException e) {} // best-effort shutdown; failure intentionally ignored
    } else {
        tracer.m_enabledCategories = enabledCategories;
    }
}
java
/** Creates a thread via the installed delegate factory, or directly when no delegate is set. */
public Thread newThread(Runnable r) {
    if (factory == this) {
        // No delegate installed; fall back to a plain thread.
        return new Thread(r);
    }
    return factory.newThread(r);
}
java
/** Swaps in a new delegate factory (null restores the default) and returns the previous one. */
public synchronized ThreadFactory setImpl(ThreadFactory f) {
    final ThreadFactory previous = factory;
    if (f == null) {
        factory = this; // self means "use the built-in new Thread(r) path"
    } else {
        factory = f;
    }
    return previous;
}
java
// Returns the raw serialized bytes of the given column, including the 4-byte
// length prefix for variable-length types. The buffer position is saved and
// restored so the read has no side effect on the cursor.
final byte[] getRaw(int columnIndex) {
    byte[] retval;
    int pos = m_buffer.position();
    int offset = getOffset(columnIndex);
    VoltType type = getColumnType(columnIndex);
    switch(type) {
    case TINYINT:
    case SMALLINT:
    case INTEGER:
    case BIGINT:
    case TIMESTAMP:
    case FLOAT:
    case DECIMAL:
    case GEOGRAPHY_POINT: {
        // all of these types are fixed length, so easy to get raw type
        int length = type.getLengthInBytesForFixedTypesWithoutCheck();
        retval = new byte[length];
        m_buffer.position(offset);
        m_buffer.get(retval);
        m_buffer.position(pos);
        return retval;
    }
    case STRING:
    case VARBINARY:
    case GEOGRAPHY: {
        // all of these types are variable length with a prefix
        int length = m_buffer.getInt(offset);
        if (length == VoltTable.NULL_STRING_INDICATOR) {
            // null value: only the 4-byte prefix is copied
            length = 0;
        }
        length += 4; // include the length prefix itself
        retval = new byte[length];
        m_buffer.position(offset);
        m_buffer.get(retval);
        m_buffer.position(pos);
        return retval;
    }
    default:
        throw new RuntimeException("Unknown type");
    }
}
java
/**
 * Verifies the row cursor is positioned and that the column at
 * {@code columnIndex} has one of the expected {@code types}.
 *
 * @throws RuntimeException if the row cursor has not been advanced
 * @throws IndexOutOfBoundsException if the column index is out of range
 * @throws IllegalArgumentException if the column type matches none of the expected types
 */
final void validateColumnType(int columnIndex, VoltType... types) {
    if (m_position < 0) {
        throw new RuntimeException("VoltTableRow is in an invalid state. Consider calling advanceRow().");
    }
    if ((columnIndex >= getColumnCount()) || (columnIndex < 0)) {
        // Fixed: the old message claimed the index was "greater than the number
        // of columns" even when it was negative; report the valid range instead.
        throw new IndexOutOfBoundsException("Column index " + columnIndex
                + " is out of range [0, " + getColumnCount() + ")");
    }
    final VoltType columnType = getColumnType(columnIndex);
    for (VoltType type : types) {
        if (columnType == type) {
            return;
        }
    }
    throw new IllegalArgumentException("Column index " + columnIndex + " is type " + columnType);
}
java
// Reads a length-prefixed string at the given buffer offset, validating both
// the prefix and the payload against the buffer bounds. Returns null for the
// null-string indicator. Restores the buffer position before returning.
final String readString(int position, Charset encoding) {
    // Sanity check the string size int position. Note that the eventual
    // m_buffer.get() does check for underflow, getInt() does not.
    if (STRING_LEN_SIZE > m_buffer.limit() - position) {
        throw new RuntimeException(String.format(
                "VoltTableRow::readString: Can't read string size as %d byte integer " +
                "from buffer with %d bytes remaining.",
                STRING_LEN_SIZE, m_buffer.limit() - position));
    }
    final int len = m_buffer.getInt(position);
    //System.out.println(len);

    // check for null string
    if (len == VoltTable.NULL_STRING_INDICATOR)
        return null;
    if (len < 0) {
        throw new RuntimeException("Invalid object length.");
    }
    // Sanity check the size against the remaining buffer size.
    if (position + STRING_LEN_SIZE + len > m_buffer.limit()) {
        throw new RuntimeException(String.format(
                "VoltTableRow::readString: Can't read %d byte string " +
                "from buffer with %d bytes remaining.",
                len, m_buffer.limit() - position - STRING_LEN_SIZE));
    }
    // this is a bit slower than directly getting the array (see below)
    // but that caused bugs
    byte[] stringData = new byte[len];
    int oldPos = m_buffer.position();
    m_buffer.position(position + STRING_LEN_SIZE);
    m_buffer.get(stringData);
    m_buffer.position(oldPos);
    return new String(stringData, encoding);
}
java
// Appends one task entry (4-byte length, 8-byte source HSId, flattened
// message) to the buffer. Returns the space left for another task, or 0
// after compiling the buffer because no further task would fit.
public int appendTask(long sourceHSId, TransactionInfoBaseMessage task) throws IOException {
    Preconditions.checkState(compiledSize == 0, "buffer is already compiled");
    final int msgSerializedSize = task.getSerializedSize();
    ensureCapacity(taskHeaderSize() + msgSerializedSize);
    ByteBuffer bb = m_container.b();
    // Entry header: message length then the source HSId.
    bb.putInt(msgSerializedSize);
    bb.putLong(sourceHSId);
    // Flatten the message into a slice bounded to exactly its serialized
    // size, then advance the parent buffer past it.
    int limit = bb.limit();
    bb.limit(bb.position() + msgSerializedSize);
    task.flattenToBuffer(bb.slice());
    bb.limit(limit);
    bb.position(bb.position() + msgSerializedSize);
    // Don't allow any further expansion to the underlying buffer
    if (bb.position() + taskHeaderSize() > DEFAULT_BUFFER_SIZE) {
        compile();
        return 0;
    } else {
        return DEFAULT_BUFFER_SIZE - (bb.position() + taskHeaderSize());
    }
}
java
// Deserializes and returns the next task entry, or null when none remain.
// Entry layout matches appendTask: 4-byte length, 8-byte source HSId, then
// the flattened message bytes.
public TransactionInfoBaseMessage nextTask() throws IOException {
    if (!hasMoreEntries()) {
        return null;
    }
    ByteBuffer bb = m_container.b();
    int position = bb.position();
    int length = bb.getInt();
    long sourceHSId = bb.getLong();
    VoltDbMessageFactory factory = new VoltDbMessageFactory();
    /*
     * create a new buffer that just contains the message, deserialization
     * of the message may assert on the capacity of the buffer
     */
    final int oldLimit = bb.limit();
    bb.limit(bb.position() + length);
    ByteBuffer slice = bb.slice();
    bb.limit(oldLimit);
    VoltMessage msg = factory.createMessageFromBuffer(slice, sourceHSId);
    // createMessageFromBuffer() doesn't move the position pointer, set it here
    bb.position(position + length + 8 + 4); // sourceHSId + buf len
    return (TransactionInfoBaseMessage) msg;
}
java
// Finalizes the buffer for reading: records the compiled size, flips the
// buffer, and registers the size with the allocator. Idempotent — only the
// first call flips. Optionally trace-dumps the bytes.
public void compile() {
    if (compiledSize == 0) {
        ByteBuffer bb = m_container.b();
        compiledSize = bb.position();
        bb.flip();
        m_allocator.track(compiledSize);
    }
    if (log.isTraceEnabled()) {
        // Dump bytes from a read-only duplicate so the real cursor is untouched.
        StringBuilder sb = new StringBuilder("Compiling buffer: ");
        ByteBuffer dup = m_container.bDR();
        while (dup.hasRemaining()) {
            sb.append(" ").append(dup.get());
        }
        log.trace(sb.toString());
    }
}
java
void updateCatalog(String diffCmds, CatalogContext context) { if (m_shuttingDown) { return; } m_catalogContext = context; // Wipe out all the idle sites with stale catalogs. // Non-idle sites will get killed and replaced when they finish // whatever they started before the catalog update Iterator<MpRoSiteContext> siterator = m_idleSites.iterator(); while (siterator.hasNext()) { MpRoSiteContext site = siterator.next(); if (site.getCatalogCRC() != m_catalogContext.getCatalogCRC() || site.getCatalogVersion() != m_catalogContext.catalogVersion) { site.shutdown(); m_idleSites.remove(site); m_allSites.remove(site); } } }
java
// Offers a task to a site for txnId. Reuses the site already bound to the
// transaction (repair case) or takes one from the idle pool, growing the
// pool by one when it is empty. Returns false when work cannot be accepted.
boolean doWork(long txnId, TransactionTask task) {
    boolean retval = canAcceptWork();
    if (!retval) {
        return false;
    }
    MpRoSiteContext site;
    // Repair case
    if (m_busySites.containsKey(txnId)) {
        site = m_busySites.get(txnId);
    } else {
        if (m_idleSites.isEmpty()) {
            // Grow the pool on demand with a site built against the current catalog.
            MpRoSiteContext newSite = new MpRoSiteContext(m_siteId, m_backend,
                    m_catalogContext, m_partitionId, m_initiatorMailbox,
                    m_poolThreadFactory);
            m_idleSites.push(newSite);
            m_allSites.add(newSite);
        }
        site = m_idleSites.pop();
        m_busySites.put(txnId, site);
    }
    site.offer(task);
    return true;
}
java
void completeWork(long txnId) { if (m_shuttingDown) { return; } MpRoSiteContext site = m_busySites.remove(txnId); if (site == null) { throw new RuntimeException("No busy site for txnID: " + txnId + " found, shouldn't happen."); } // check the catalog versions, only push back onto idle if the catalog hasn't changed // otherwise, just let it get garbage collected and let doWork() construct new ones for the // pool with the updated catalog. if (site.getCatalogCRC() == m_catalogContext.getCatalogCRC() && site.getCatalogVersion() == m_catalogContext.catalogVersion) { m_idleSites.push(site); } else { site.shutdown(); m_allSites.remove(site); } }
java
// Executes the quiesce system procedure plan across the sites.
// NOTE(review): exceptions are only printed and null is returned to the
// caller — confirm callers tolerate a null result on failure.
public VoltTable[] run(SystemProcedureExecutionContext ctx) {
    VoltTable[] result = null;
    try {
        result = createAndExecuteSysProcPlan(SysProcFragmentId.PF_quiesce_sites,
                SysProcFragmentId.PF_quiesce_processed_sites);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return result;
}
java
public static void writeFile(final String dir, final String filename, String content, boolean debug) { // skip debug files when not in debug mode if (debug && !VoltCompiler.DEBUG_MODE) { return; } // cache the root of the folder for the debugoutput and the statement-plans folder if (m_debugRoot == null) { if (System.getenv("TEST_DIR") != null) { m_debugRoot = System.getenv("TEST_DIR") + File.separator + debugRootPrefix; } else { m_debugRoot = debugRootPrefix; } } if (m_userRoot == null) { if (System.getenv("TEST_DIR") != null) { m_userRoot = System.getenv("TEST_DIR") + File.separator + userRootPrefix; } else { m_userRoot = userRootPrefix; } } // pic a place for the file based on debugness of the file in question String root = debug ? m_debugRoot : m_userRoot; // don't call mkdirs more than once per subdir, so keep a cache String subFolderPath = root; if (dir != null) { subFolderPath += File.separator + dir; } if (!m_seenPaths.contains(subFolderPath)) { File f = new File(subFolderPath); f.mkdirs(); m_seenPaths.add(subFolderPath); } String filepath = subFolderPath + File.separator + filename; File f = new File(filepath); PrintStream streamOut = null; try { streamOut = new PrintStream(f); } catch (FileNotFoundException e) { e.printStackTrace(); return; } streamOut.println(content); streamOut.close(); }
java
// Collects every join and where expression in the tree (breadth-first),
// splits AND conjunctions into individual conjuncts, and recombines the
// flat list into a single predicate.
public AbstractExpression getAllFilters() {
    ArrayDeque<JoinNode> joinNodes = new ArrayDeque<>();
    ArrayDeque<AbstractExpression> in = new ArrayDeque<>();
    ArrayDeque<AbstractExpression> out = new ArrayDeque<>();
    // Iterate over the join nodes to collect their join and where expressions
    joinNodes.add(this);
    while (!joinNodes.isEmpty()) {
        JoinNode joinNode = joinNodes.poll();
        if (joinNode.m_joinExpr != null) {
            in.add(joinNode.m_joinExpr);
        }
        if (joinNode.m_whereExpr != null) {
            in.add(joinNode.m_whereExpr);
        }
        joinNode.queueChildren(joinNodes);
    }
    // this chunk of code breaks the code into a list of expression that
    // all have to be true for the where clause to be true
    AbstractExpression inExpr = null;
    while ((inExpr = in.poll()) != null) {
        if (inExpr.getExpressionType() == ExpressionType.CONJUNCTION_AND) {
            // Split the AND; both halves are re-queued for further splitting.
            in.add(inExpr.getLeft());
            in.add(inExpr.getRight());
        } else {
            out.add(inExpr);
        }
    }
    return ExpressionUtil.combinePredicates(out);
}
java
/**
 * Returns this node's filter: the combination of the where and join
 * expressions when both exist, otherwise whichever is present (or null).
 */
public AbstractExpression getSimpleFilterExpression() {
    if (m_whereExpr == null) {
        return m_joinExpr;
    }
    if (m_joinExpr == null) {
        return m_whereExpr;
    }
    return ExpressionUtil.combine(m_whereExpr, m_joinExpr);
}
java
/** Returns every node of this join tree, listed in join order. */
public List<JoinNode> generateAllNodesJoinOrder() {
    final ArrayList<JoinNode> orderedNodes = new ArrayList<>();
    listNodesJoinOrderRecursive(orderedNodes, true);
    return orderedNodes;
}
java
// Splits this join tree into a list of sub-trees: the tree rooted here
// first, then (recursively) the sub-trees hanging off the leaf nodes that
// extractSubTree identified.
public List<JoinNode> extractSubTrees() {
    List<JoinNode> subTrees = new ArrayList<>();
    // Extract the first sub-tree starting at the root
    subTrees.add(this);
    List<JoinNode> leafNodes = new ArrayList<>();
    extractSubTree(leafNodes);
    // Continue with the leafs
    for (JoinNode leaf : leafNodes) {
        subTrees.addAll(leaf.extractSubTrees());
    }
    return subTrees;
}
java
// Builds a left-deep join tree over filter-free clones of the given table
// nodes, joining each new node onto the tree built so far.
public static JoinNode reconstructJoinTreeFromTableNodes(List<JoinNode> tableNodes, JoinType joinType) {
    JoinNode root = null;
    for (JoinNode leafNode : tableNodes) {
        JoinNode node = leafNode.cloneWithoutFilters();
        if (root == null) {
            root = node;
        } else {
            // We only care about the root node id to be able to reconnect the sub-trees
            // The intermediate node id can be anything. For the final root node its id
            // will be set later to the original tree's root id
            root = new BranchNode(-node.m_id, joinType, root, node);
        }
    }
    return root;
}
java
public static JoinNode reconstructJoinTreeFromSubTrees(List<JoinNode> subTrees) { if (subTrees == null || subTrees.isEmpty()) { return null; } // Reconstruct the tree. The first element is the first sub-tree and so on JoinNode joinNode = subTrees.get(0); for (int i = 1; i < subTrees.size(); ++i) { JoinNode nextNode = subTrees.get(i); boolean replaced = joinNode.replaceChild(nextNode); // There must be a node in the current tree to be replaced assert(replaced); } return joinNode; }
java
// Derives extra single-table filters implied by transitive equivalence
// (via the two-argument overload) and appends them to the per-table lists.
// Both derivations must run BEFORE either list is mutated, so the order of
// the four statements below matters.
protected static void applyTransitiveEquivalence(List<AbstractExpression> outerTableExprs,
        List<AbstractExpression> innerTableExprs,
        List<AbstractExpression> innerOuterTableExprs) {
    List<AbstractExpression> simplifiedOuterExprs = applyTransitiveEquivalence(innerTableExprs, innerOuterTableExprs);
    List<AbstractExpression> simplifiedInnerExprs = applyTransitiveEquivalence(outerTableExprs, innerOuterTableExprs);
    outerTableExprs.addAll(simplifiedOuterExprs);
    innerTableExprs.addAll(simplifiedInnerExprs);
}
java
protected static void classifyJoinExpressions(Collection<AbstractExpression> exprList, Collection<String> outerTables, Collection<String> innerTables, List<AbstractExpression> outerList, List<AbstractExpression> innerList, List<AbstractExpression> innerOuterList, List<AbstractExpression> noneList) { HashSet<String> tableAliasSet = new HashSet<>(); HashSet<String> outerSet = new HashSet<>(outerTables); HashSet<String> innerSet = new HashSet<>(innerTables); for (AbstractExpression expr : exprList) { tableAliasSet.clear(); getTablesForExpression(expr, tableAliasSet); String tableAliases[] = tableAliasSet.toArray(new String[0]); if (tableAliasSet.isEmpty()) { noneList.add(expr); } else { boolean outer = false; boolean inner = false; for (String alias : tableAliases) { outer = outer || outerSet.contains(alias); inner = inner || innerSet.contains(alias); } if (outer && inner) { innerOuterList.add(expr); } else if (outer) { outerList.add(expr); } else if (inner) { innerList.add(expr); } else { // can not be, right? assert(false); } } } }
java
// Bridges HSQL's parameter-state hooks to ParameterizationInfo's static
// parameter index counter.
public static HSQLInterface.ParameterStateManager getParamStateManager() {
    return new ParameterStateManager() {
        @Override
        public int getNextParamIndex() {
            return ParameterizationInfo.getNextParamIndex();
        }

        @Override
        public void resetCurrentParamIndex() {
            ParameterizationInfo.resetCurrentParamIndex();
        }
    };
}
java
/**
 * Decodes the next column value from the buffer according to its declared
 * VoltType.
 *
 * @throws IOException on an unrecognized column type
 */
private static Object decodeNextColumn(ByteBuffer bb, VoltType columnType) throws IOException {
    switch (columnType) {
        case TINYINT:         return decodeTinyInt(bb);
        case SMALLINT:        return decodeSmallInt(bb);
        case INTEGER:         return decodeInteger(bb);
        case BIGINT:          return decodeBigInt(bb);
        case FLOAT:           return decodeFloat(bb);
        case TIMESTAMP:       return decodeTimestamp(bb);
        case STRING:          return decodeString(bb);
        case VARBINARY:       return decodeVarbinary(bb);
        case DECIMAL:         return decodeDecimal(bb);
        case GEOGRAPHY_POINT: return decodeGeographyPoint(bb);
        case GEOGRAPHY:       return decodeGeography(bb);
        default:
            throw new IOException("Invalid column type: " + columnType);
    }
}
java
/**
 * Reads a serialized decimal: one byte of scale, one byte giving the length
 * of the two's-complement unscaled value, then that many bytes.
 */
static public BigDecimal decodeDecimal(final ByteBuffer bb) {
    final int scale = bb.get();
    final int unscaledByteCount = bb.get();
    final byte[] unscaledBytes = new byte[unscaledByteCount];
    bb.get(unscaledBytes);
    final BigInteger unscaled = new BigInteger(unscaledBytes);
    return new BigDecimal(unscaled, scale);
}
java
/** Reads a length-prefixed (4-byte int) binary value from the buffer. */
static public Object decodeVarbinary(final ByteBuffer bb) {
    final int byteCount = bb.getInt();
    final byte[] value = new byte[byteCount];
    bb.get(value);
    return value;
}
java
// Reads a length-prefixed geography value. The 4-byte prefix is the
// serialized length; the assert cross-checks that unflattenFromBuffer
// consumed exactly that many bytes.
static public GeographyValue decodeGeography(final ByteBuffer bb) {
    final int strLength = bb.getInt();
    final int startPosition = bb.position();
    GeographyValue gv = GeographyValue.unflattenFromBuffer(bb);
    assert(bb.position() - startPosition == strLength);
    return gv;
}
java
// Returns a lazy iterable over the (up to two) children of root: the left
// child first if present, then the right. Each fresh iterator re-queries
// the children on demand.
@Override
public final Iterable<T> children(final T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
        @Override
        public Iterator<T> iterator() {
            return new AbstractIterator<T>() {
                // Flags mark which side has already been produced (or skipped).
                boolean doneLeft;
                boolean doneRight;

                @Override
                protected T computeNext() {
                    if (!doneLeft) {
                        doneLeft = true;
                        Optional<T> left = leftChild(root);
                        if (left.isPresent()) {
                            return left.get();
                        }
                    }
                    if (!doneRight) {
                        doneRight = true;
                        Optional<T> right = rightChild(root);
                        if (right.isPresent()) {
                            return right.get();
                        }
                    }
                    return endOfData();
                }
            };
        }
    };
}
java
/** Consumes a TRUE or FALSE token and returns its boolean value; any other token is an error. */
private boolean processTrueOrFalse() {
    final boolean isTrue = token.tokenType == Tokens.TRUE;
    if (!isTrue && token.tokenType != Tokens.FALSE) {
        throw unexpectedToken();
    }
    read(); // consume the boolean literal
    return isTrue;
}
java
public VoltTable sortByAverage(String tableName) { List<ProcProfRow> sorted = new ArrayList<ProcProfRow>(m_table); Collections.sort(sorted, new Comparator<ProcProfRow>() { @Override public int compare(ProcProfRow lhs, ProcProfRow rhs) { return compareByAvg(rhs, lhs); // sort desc } }); long sumOfAverage = 0L; for (ProcProfRow row : sorted) { sumOfAverage += (row.avg * row.invocations); } VoltTable result = TableShorthand.tableFromShorthand( tableName + "(TIMESTAMP:BIGINT, PROCEDURE:VARCHAR, WEIGHTED_PERC:BIGINT, INVOCATIONS:BIGINT," + "AVG:BIGINT, MIN:BIGINT, MAX:BIGINT, ABORTS:BIGINT, FAILURES:BIGINT)"); for (ProcProfRow row : sorted ) { result.addRow(row.timestamp, row.procedure, calculatePercent(row.avg * row.invocations, sumOfAverage), row.invocations, row.avg, row.min, row.max, row.aborts, row.failures); } return result; }
java
/**
 * Orders rows by total weighted time (avg * invocations), ascending.
 * Improved: Long.compare replaces the hand-rolled three-way comparison; it
 * returns the same -1/0/1 values as the original branches.
 */
public int compareByAvg(ProcProfRow lhs, ProcProfRow rhs) {
    return Long.compare(lhs.avg * lhs.invocations, rhs.avg * rhs.invocations);
}
java
// Handles the INITIATION message from the rejoin coordinator: sets up the
// snapshot sink (only when the schema has persistent tables), picks the
// stream source, replies with our sink HSId, and starts waiting for data.
void doInitiation(RejoinMessage message) {
    m_coordinatorHsId = message.m_sourceHSId;
    m_hasPersistentTables = message.schemaHasPersistentTables();
    if (m_hasPersistentTables) {
        m_streamSnapshotMb = VoltDB.instance().getHostMessenger().createMailbox();
        m_rejoinSiteProcessor = new StreamSnapshotSink(m_streamSnapshotMb);
        // Start the watchdog so if we never get data it will notice
        kickWatchdog(TimerCallback.initialTimer());
    } else {
        // Empty schema: nothing to stream.
        m_streamSnapshotMb = null;
        m_rejoinSiteProcessor = null;
    }

    // MUST choose the leader as the source.
    long sourceSite = m_mailbox.getMasterHsId(m_partitionId);
    // The lowest partition has a single source for all messages whereas all other partitions have a real
    // data source and a dummy data source for replicated tables that are used to sync up replicated table changes.
    boolean haveTwoSources = VoltDB.instance().getLowestPartitionId() != m_partitionId;
    // Provide a valid sink host id unless it is an empty database.
    long hsId = (m_rejoinSiteProcessor != null
            ? m_rejoinSiteProcessor.initialize(haveTwoSources?2:1,
                    message.getSnapshotDataBufferPool(),
                    message.getSnapshotCompressedDataBufferPool())
            : Long.MIN_VALUE);

    REJOINLOG.debug(m_whoami + "received INITIATION message. Doing rejoin" +
            ". Source site is: " + CoreUtils.hsIdToString(sourceSite) +
            " and destination rejoin processor is: " + CoreUtils.hsIdToString(hsId) +
            " and snapshot nonce is: " + message.getSnapshotNonce());

    registerSnapshotMonitor(message.getSnapshotNonce());
    // Tell the RejoinCoordinator everything it will need to know to get us our snapshot stream.
    RejoinMessage initResp = new RejoinMessage(m_mailbox.getHSId(), sourceSite, hsId);
    m_mailbox.send(m_coordinatorHsId, initResp);

    // Start waiting for snapshot data
    m_taskQueue.offer(this);
}
java
// Re-applies saved index roots to each CACHED table. NOTE(review):
// rootsList is indexed by the table's position in getAllTables(), which
// assumes rootsList was built from the same ordered list — confirm against
// the code that populates rootsList.
void updateTableIndexRoots() {
    HsqlArrayList allTables = database.schemaManager.getAllTables();
    for (int i = 0, size = allTables.size(); i < size; i++) {
        Table t = (Table) allTables.get(i);
        if (t.getTableType() == TableBase.CACHED_TABLE) {
            int[] rootsArray = rootsList[i];
            t.setIndexRoots(rootsArray);
        }
    }
}
java
/** Routes a sentinel for txnId to the current single-partition master of the given partition. */
public final void sendSentinel(long txnId, int partitionId) {
    // Resolve the partition master's HSId, then delegate with the default
    // values for the remaining sentinel parameters.
    final long masterHSId = m_cartographer.getHSIdForSinglePartitionMaster(partitionId);
    sendSentinel(txnId, masterHSId, -1, -1, true);
}
java
// Dispatches @LoadSinglepartitionTable: hashes parameter 0 with the target
// table's partition-column type to pick a partition, then creates the
// transaction. Returns an error response on failure, null on success.
private final ClientResponseImpl dispatchLoadSinglepartitionTable(Procedure catProc,
        StoredProcedureInvocation task, InvocationClientHandler handler, Connection ccxn) {
    int partition = -1;
    try {
        CatalogMap<Table> tables = m_catalogContext.get().database.getTables();
        int partitionParamType = getLoadSinglePartitionTablePartitionParamType(tables, task);
        byte[] valueToHash = (byte[])task.getParameterAtIndex(0);
        partition = TheHashinator.getPartitionForParameter(partitionParamType, valueToHash);
    } catch (Exception e) {
        // Any lookup/hashing failure is reported back to the client.
        authLog.warn(e.getMessage());
        return new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
                new VoltTable[0], e.getMessage(), task.clientHandle);
    }
    assert(partition != -1);
    createTransaction(handler.connectionId(), task, catProc.getReadonly(),
            catProc.getSinglepartition(), catProc.getEverysite(),
            new int[] { partition }, task.getSerializedSize(), System.nanoTime());
    return null;
}
java
/** Delivers one host's response for a run-everywhere NT procedure to its outstanding runner, if any. */
public void handleAllHostNTProcedureResponse(ClientResponseImpl clientResponseData) {
    final long clientHandle = clientResponseData.getClientHandle();
    final ProcedureRunnerNT runner = m_NTProcedureService.m_outstanding.get(clientHandle);
    // A missing runner means the procedure already completed (likely timed out).
    if (runner == null) {
        hostLog.info("Run everywhere NTProcedure early returned, probably gets timed out.");
        return;
    }
    runner.allHostNTProcedureCallback(clientResponseData);
}
java
/**
 * True when one expression is a parameter and the other a constant, and the
 * two compare equal when both are viewed as constant values.
 */
private static boolean valueConstantsMatch(AbstractExpression e1, AbstractExpression e2) {
    final boolean paramVsConstant =
            e1 instanceof ParameterValueExpression && e2 instanceof ConstantValueExpression;
    final boolean constantVsParam =
            e1 instanceof ConstantValueExpression && e2 instanceof ParameterValueExpression;
    return (paramVsConstant || constantVsParam) && equalsAsCVE(e1, e2);
}
java
/** Compares two expressions as constants; a null constant view only equals another null view. */
private static boolean equalsAsCVE(AbstractExpression e1, AbstractExpression e2) {
    final ConstantValueExpression c1 = asCVE(e1);
    final ConstantValueExpression c2 = asCVE(e2);
    if (c1 == null || c2 == null) {
        return c1 == c2;
    }
    return c1.equals(c2);
}
java
/** Views an expression as a constant: a CVE directly, or a parameter's original constant value. */
private static ConstantValueExpression asCVE(AbstractExpression expr) {
    if (expr instanceof ConstantValueExpression) {
        return (ConstantValueExpression) expr;
    }
    return ((ParameterValueExpression) expr).getOriginalValue();
}
java
/** Wraps expr in a new correlation parameter, recording its index and appending the PVE to pves. */
protected void addCorrelationParameterValueExpression(AbstractExpression expr,
        List<AbstractExpression> pves) {
    final int parameterIndex = ParameterizationInfo.getNextParamIndex();
    m_parameterIdxList.add(parameterIndex);
    pves.add(new ParameterValueExpression(parameterIndex, expr));
}
java
// Looks up the cached object at file position pos, refreshing its access
// stamp on a hit. Returns null on a miss. Access counts are reset before
// they can overflow Integer.MAX_VALUE.
public synchronized CachedObject get(int pos) {
    if (accessCount == Integer.MAX_VALUE) {
        resetAccessCount();
    }
    int lookup = getLookup(pos);
    if (lookup == -1) {
        return null;
    }
    accessTable[lookup] = accessCount++;
    return (CachedObject) objectValueTable[lookup];
}
java
// Inserts a row into the cache, first evicting (cleanUp) when either the
// entry count or the byte budget would be exceeded, and keeping the running
// byte total in sync.
synchronized void put(int key, CachedObject row) {
    int storageSize = row.getStorageSize();
    if (size() >= capacity || storageSize + cacheBytesLength > bytesCapacity) {
        cleanUp();
    }
    if (accessCount == Integer.MAX_VALUE) {
        super.resetAccessCount();
    }
    super.addOrRemove(key, row, false);
    row.setInMemory(true);
    cacheBytesLength += storageSize;
}
java
/** Removes the row stored under key i, updating the byte total; returns null if absent. */
synchronized CachedObject release(int i) {
    final CachedObject removed = (CachedObject) super.addOrRemove(i, null, true);
    if (removed != null) {
        cacheBytesLength -= removed.getStorageSize();
        removed.setInMemory(false);
    }
    return removed;
}
java
// Writes all changed (dirty) cached rows to disk in one pass, then prints
// cumulative save/load/sort timing statistics.
synchronized void saveAll() {
    Iterator it = new BaseHashIterator();
    int savecount = 0;
    // Gather the changed rows into rowTable, counting them as we go.
    for (; it.hasNext(); ) {
        CachedObject r = (CachedObject) it.next();
        if (r.hasChanged()) {
            rowTable[savecount++] = r;
        }
    }
    saveRows(savecount);
    Error.printSystemOut(
        saveAllTimer.elapsedTimeToMessage(
            "Cache.saveRow() total row save time"));
    Error.printSystemOut("Cache.saveRow() total row save count = " + saveRowCount);
    Error.printSystemOut(
        makeRowTimer.elapsedTimeToMessage(
            "Cache.makeRow() total row load time"));
    Error.printSystemOut("Cache.makeRow() total row load count = " + makeRowCount);
    Error.printSystemOut(
        sortTimer.elapsedTimeToMessage("Cache.sort() total time"));
}
java
/**
 * Registers one aggregate: its type, distinctness (stored as 1/0), output
 * column, and a clone of its input expression (null for nullary aggregates).
 */
public void addAggregate(ExpressionType aggType, boolean isDistinct,
        Integer aggOutputColumn, AbstractExpression aggInputExpr) {
    m_aggregateTypes.add(aggType);
    m_aggregateDistinct.add(isDistinct ? 1 : 0);
    m_aggregateOutputColumns.add(aggOutputColumn);
    if (aggType.isNullary()) {
        // e.g. COUNT(*): no input expression expected.
        assert(aggInputExpr == null);
        m_aggregateExpressions.add(null);
    } else {
        assert(aggInputExpr != null);
        m_aggregateExpressions.add(aggInputExpr.clone());
    }
}
java
/** Rebuilds a hash aggregate as an equivalent serial aggregate node. */
public static AggregatePlanNode convertToSerialAggregatePlanNode(HashAggregatePlanNode hashAggregateNode) {
    return setAggregatePlanNode(hashAggregateNode, new AggregatePlanNode());
}
java
/** Rebuilds a hash aggregate as a partial aggregate grouped on the given column indexes. */
public static AggregatePlanNode convertToPartialAggregatePlanNode(HashAggregatePlanNode hashAggregateNode,
        List<Integer> aggrColumnIdxs) {
    final AggregatePlanNode partialAggregate =
            setAggregatePlanNode(hashAggregateNode, new PartialAggregatePlanNode());
    partialAggregate.m_partialGroupByColumns = aggrColumnIdxs;
    return partialAggregate;
}
java
/**
 * Decodes the raw SQLSTATE bytes as a UTF-8 string.
 * Improved: StandardCharsets.UTF_8 removes the impossible
 * UnsupportedEncodingException path (UTF-8 is always present) and the
 * dead catch/rethrow that went with it.
 */
public String getSQLState() {
    return new String(m_sqlState, java.nio.charset.StandardCharsets.UTF_8);
}
java
/**
 * Collapses one host's check-result row into [resultText, warnings]: failed
 * checks are concatenated and SUCCESS is reported only when all pass.
 */
private static String[] aggregatePerHostResults(VoltTable vtable) {
    final String[] aggregated = new String[2];
    vtable.advanceRow();
    final String kitCheckResult = vtable.getString("KIT_CHECK_RESULT");
    final String rootCheckResult = vtable.getString("ROOT_CHECK_RESULT");
    final String xdcrCheckResult = vtable.getString("XDCR_CHECK_RESULT");
    final StringBuilder combined = new StringBuilder();
    if (!kitCheckResult.equals(SUCCESS)) {
        combined.append(kitCheckResult).append("\n");
    }
    if (!rootCheckResult.equals(SUCCESS)) {
        combined.append(rootCheckResult).append("\n");
    }
    if (!xdcrCheckResult.equals(SUCCESS)) {
        combined.append(xdcrCheckResult);
    }
    if (combined.length() == 0) {
        combined.append(SUCCESS); // every check passed
    }
    aggregated[0] = combined.toString();
    final String warnings = vtable.getString("WARNINGS");
    if (warnings != null) {
        aggregated[1] = warnings;
    }
    return aggregated;
}
java
public static long millisFromJDBCformat(String param) { java.sql.Timestamp sqlTS = java.sql.Timestamp.valueOf(param); final long fractionalSecondsInNanos = sqlTS.getNanos(); // Fractional milliseconds would get truncated so flag them as an error. if ((fractionalSecondsInNanos % 1000000) != 0) { throw new IllegalArgumentException("Can't convert from String to Date with fractional milliseconds"); } return sqlTS.getTime(); }
java
/**
 * Orders timestamps by date, breaking ties on the microsecond component.
 * Improved: Integer.compare avoids relying on subtraction for the tiebreak
 * (robust against any widening of the usec range; callers only depend on
 * the sign per the compareTo contract).
 */
@Override
public int compareTo(TimestampType dateval) {
    final int dateOrder = m_date.compareTo(dateval.m_date);
    if (dateOrder != 0) {
        return dateOrder;
    }
    return Integer.compare(m_usecs, dateval.m_usecs);
}
java
// Converts to java.sql.Date only when the conversion is exact: any
// microsecond component would be lost, so it is rejected instead.
public java.sql.Date asExactJavaSqlDate() {
    if (m_usecs != 0) {
        throw new RuntimeException("Can't convert to sql Date from TimestampType with fractional milliseconds");
    }
    return new java.sql.Date(m_date.getTime());
}
java
// Converts to java.sql.Timestamp, folding the extra microseconds into the
// nanosecond field on top of the millisecond-derived nanos.
public java.sql.Timestamp asJavaTimestamp() {
    java.sql.Timestamp result = new java.sql.Timestamp(m_date.getTime());
    result.setNanos(result.getNanos() + m_usecs * 1000);
    return result;
}
java
// Loads the database properties file for file-based databases. Returns
// false when the file does not exist, true otherwise. Refuses to open a
// database written by a later (future) version.
public boolean load() {
    boolean exists;
    // Non-file databases have nothing to load from disk.
    if (!DatabaseURL.isFileBasedDatabaseType(database.getType())) {
        return true;
    }
    try {
        exists = super.load();
    } catch (Exception e) {
        throw Error.error(ErrorCode.FILE_IO_ERROR,
                          ErrorCode.M_LOAD_SAVE_PROPERTIES,
                          new Object[] { fileName, e });
    }
    if (!exists) {
        return false;
    }
    filterLoadedProperties();
    String version = getProperty(hsqldb_compatible_version);
    // do not open if the database belongs to a later (future) version
    int check = version.substring(0, 5).compareTo(THIS_VERSION);
    if (check > 0) {
        throw Error.error(ErrorCode.WRONG_DATABASE_FILE_VERSION);
    }
    version = getProperty(db_version);
    // A '6' in position 2 means a 1.6.x database; presumably this pins the
    // legacy cache format version — TODO confirm against cache readers.
    if (version.charAt(2) == '6') {
        setProperty(hsqldb_cache_version, "1.6.0");
    }
    JavaSystem.gcFrequency = getIntegerProperty(runtime_gc_interval, 0);
    return true;
}
java
// Applies loaded property values to the live database state. The
// sql_compare_in_locale flag is one-shot: it is consumed (removed) after
// switching the collation to the locale.
public void setDatabaseVariables() {
    if (isPropertyTrue(db_readonly)) {
        database.setReadOnly();
    }
    if (isPropertyTrue(hsqldb_files_readonly)) {
        database.setFilesReadOnly();
    }
    database.sqlEnforceStrictSize = isPropertyTrue(sql_enforce_strict_size);
    if (isPropertyTrue(sql_compare_in_locale)) {
        stringProps.remove(sql_compare_in_locale);
        database.collation.setCollationAsLocale();
    }
    database.setMetaDirty(false);
}
java
// Copies settable properties from the connection-URL property set into this
// one. Only properties known to meta and which are either db_readonly or
// flagged SET_PROPERTY are accepted.
public void setURLProperties(HsqlProperties p) {
    if (p != null) {
        for (Enumeration e = p.propertyNames(); e.hasMoreElements(); ) {
            String propertyName = (String) e.nextElement();
            Object[] row = (Object[]) meta.get(propertyName);
            if (row != null
                    && (db_readonly.equals(propertyName)
                        || ((Integer) row[indexType]).intValue() == SET_PROPERTY)) {
                // can add error checking with defaults
                setProperty(propertyName, p.getProperty(propertyName));
            }
        }
    }
}
java
/**
 * Drains and runs queued submissions. When block is true, waits for at
 * least one task before draining; otherwise runs only what is already
 * queued.
 */
private void runSubmissions(boolean block) throws InterruptedException {
    Runnable task;
    if (block) {
        // Block for the first task, then fall through to drain the rest.
        task = m_submissionQueue.take();
        task.run();
    }
    while ((task = m_submissionQueue.poll()) != null) {
        task.run();
    }
}
java
// Enqueues a notification fan-out: for each connection that wants it, the
// notification is recorded in m_clientsPendingNotification, deduplicated by
// identity. Runs later on the submission queue's thread.
public void queueNotification(
        final Collection<ClientInterfaceHandleManager> connections,
        final Supplier<DeferredSerialization> notification,
        final Predicate<ClientInterfaceHandleManager> wantsNotificationPredicate) {
    m_submissionQueue.offer(new Runnable() {
        @Override
        public void run() {
            for (ClientInterfaceHandleManager cihm : connections) {
                if (!wantsNotificationPredicate.apply(cihm)) continue;
                final Connection c = cihm.connection;
                /*
                 * To avoid extra allocations and promotion we initially store a single event
                 * as just the event. Once we have two or more events we create a linked list
                 * and walk the list to dedupe events by identity
                 */
                Object pendingNotifications = m_clientsPendingNotification.get(c);
                try {
                    if (pendingNotifications == null) {
                        // First event for this connection: store it bare.
                        m_clientsPendingNotification.put(c, notification);
                    } else if (pendingNotifications instanceof Supplier) {
                        //Identity duplicate check
                        if (pendingNotifications == notification) return;
                        //Convert to a two node linked list
                        @SuppressWarnings("unchecked")
                        Node n1 = new Node((Supplier<DeferredSerialization>)pendingNotifications, null);
                        n1 = m_cachedNodes.get(n1, n1);
                        Node n2 = new Node(notification, n1);
                        n2 = m_cachedNodes.get(n2, n2);
                        m_clientsPendingNotification.put(c, n2);
                    } else {
                        //Walk the list and check if the notification is a duplicate
                        Node head = (Node)pendingNotifications;
                        boolean dup = false;
                        while (head != null) {
                            if (head.notification == notification) {
                                dup = true;
                                break;
                            }
                            head = head.next;
                        }
                        //If it's a dupe, no new work
                        if (dup) continue;
                        //Otherwise replace the head of the list which is the value in the map
                        Node replacement = new Node(notification, (Node)pendingNotifications);
                        replacement = m_cachedNodes.get(replacement, replacement);
                        m_clientsPendingNotification.put(c, replacement);
                    }
                } catch (ExecutionException e) {
                    VoltDB.crashLocalVoltDB(
                            "Unexpected exception pushing client notifications", true,
                            Throwables.getRootCause(e));
                }
            }
        }
    });
}
java
public PerfCounter get(String counter) { // Admited: could get a little race condition at the very beginning, but all that'll happen is that we'll lose a handful of tracking event, a loss far outweighed by overall reduced contention. if (!this.Counters.containsKey(counter)) this.Counters.put(counter, new PerfCounter(false)); return this.Counters.get(counter); }
java
/** Records one execution's duration and outcome on the named counter. */
public void update(String counter, long executionDuration, boolean success) {
    final PerfCounter target = this.get(counter);
    target.update(executionDuration, success);
}
java
/** Renders each counter as one line: name, delimiter, then the counter's raw delimited fields. */
public String toRawString(char delimiter) {
    final StringBuilder out = new StringBuilder();
    for (Entry<String, PerfCounter> entry : Counters.entrySet()) {
        out.append(entry.getKey());
        out.append(delimiter);
        out.append(entry.getValue().toRawString(delimiter));
        out.append('\n');
    }
    return out.toString();
}
java
/**
 * Compete for the snapshot-truncation-master role via an ephemeral ZK node.
 * If another node already holds the role, the armed watcher re-runs this
 * election (on m_es) when the holder's ephemeral node disappears.
 * Any unexpected ZK failure crashes the local VoltDB.
 */
private void leaderElection() {
    loggingLog.info("Starting leader election for snapshot truncation daemon");
    try {
        while (true) {
            // Check for an existing leader and arm a watch on its node
            Stat stat = m_zk.exists(VoltZK.snapshot_truncation_master, new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    switch(event.getType()) {
                    case NodeDeleted:
                        loggingLog.info("Detected the snapshot truncation leader's ephemeral node deletion");
                        // Re-run the election on the daemon's executor thread
                        m_es.execute(new Runnable() {
                            @Override
                            public void run() {
                                leaderElection();
                            }
                        });
                        break;
                    default:
                        break;
                    }
                }
            });

            if (stat == null) {
                // No leader yet: try to claim the role ourselves
                try {
                    m_zk.create(VoltZK.snapshot_truncation_master, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
                    m_isAutoSnapshotLeader = true;
                    if (m_lastKnownSchedule != null) {
                        makeActivePrivate(m_lastKnownSchedule);
                    }
                    electedTruncationLeader();
                    return;
                } catch (NodeExistsException e) {
                    // Lost the race; loop and re-check who leads now
                }
            } else {
                loggingLog.info("Leader election concluded, a leader already exists");
                break;
            }
        }
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Exception in snapshot daemon electing master via ZK", true, e);
    }
}
java
/**
 * Asynchronously (re)apply the given snapshot schedule on the daemon's
 * executor; the returned future completes once the schedule has been
 * processed.
 */
public ListenableFuture<Void> mayGoActiveOrInactive(final SnapshotSchedule schedule) {
    final Callable<Void> applySchedule = new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            makeActivePrivate(schedule);
            return null;
        }
    };
    return m_es.submit(applySchedule);
}
java
/**
 * Periodic driver for the auto-snapshot state machine. No-ops until a
 * schedule is known and auto-snapshots are enabled; otherwise dispatches on
 * the current state.
 *
 * @param now current wall-clock time in milliseconds
 */
private void doPeriodicWork(final long now) {
    if (m_lastKnownSchedule == null) {
        // No schedule yet: stay in (or fall back to) STARTUP
        setState(State.STARTUP);
        return;
    }

    if (m_frequencyUnit == null) {
        // Auto-snapshots disabled
        return;
    }

    switch (m_state) {
    case STARTUP:
        initiateSnapshotScan();
        break;
    case SCANNING:
        RateLimitedLogger.tryLogForMessage(System.currentTimeMillis(), 5, TimeUnit.MINUTES, SNAP_LOG, Level.INFO, "Blocked in scanning");
        break;
    case WAITING:
        processWaitingPeriodicWork(now);
        break;
    case SNAPSHOTTING:
    case DELETING:
        // Work already in flight; wait for its callback
        break;
    default:
        break;
    }
}
java
private void processWaitingPeriodicWork(long now) { if (now - m_lastSysprocInvocation < m_minTimeBetweenSysprocs) { return; } if (m_snapshots.size() > m_retain) { //Quick hack to make sure we don't delete while the snapshot is running. //Deletes work really badly during a snapshot because the FS is occupied if (!SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.isEmpty()) { m_lastSysprocInvocation = System.currentTimeMillis() + 3000; return; } deleteExtraSnapshots(); return; } if (m_nextSnapshotTime < now) { initiateNextSnapshot(now); return; } }
java
/**
 * Route a client response produced by {@code response} to the procedure
 * callback registered under its client handle. Runs asynchronously on m_es.
 *
 * NOTE(review): if no callback is registered for the handle,
 * m_procedureCallbacks.remove() returns null and the resulting NPE is
 * swallowed by the catch below -- confirm that is acceptable.
 */
public Future<Void> processClientResponse(final Callable<ClientResponseImpl> response) {
    return m_es.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            try {
                ClientResponseImpl resp = response.call();
                long handle = resp.getClientHandle();
                // Callbacks are one-shot: remove on delivery
                m_procedureCallbacks.remove(handle).clientCallback(resp);
            } catch (Exception e) {
                SNAP_LOG.warn("Error when SnapshotDaemon invoked callback for a procedure invocation", e);
                /*
                 * Don't think it is productive to propagate any exceptions here, Ideally
                 * they should be handled by the procedure callbacks
                 */
            }
            return null;
        }
    });
}
java
/**
 * Handle the response of an auto-snapshot initiation: schedule the next
 * snapshot time and, if the snapshot failed (either at the response level
 * or per-host in the result table), drop the snapshot record that was
 * optimistically appended to m_snapshots.
 */
private void processSnapshotResponse(ClientResponse response) {
    setState(State.WAITING);
    final long now = System.currentTimeMillis();
    m_nextSnapshotTime += m_frequencyInMillis;
    // If we fell behind schedule, make the next snapshot due immediately
    if (m_nextSnapshotTime < now) {
        m_nextSnapshotTime = now - 1;
    }

    if (response.getStatus() != ClientResponse.SUCCESS){
        logFailureResponse("Snapshot failed", response);
        return;
    }

    final VoltTable results[] = response.getResults();
    final VoltTable result = results[0];

    final String err = SnapshotUtil.didSnapshotRequestFailWithErr(results);
    if (err != null) {
        SNAP_LOG.warn("Snapshot failed with failure response: " + err);
        // Undo the optimistic bookkeeping for this snapshot
        m_snapshots.removeLast();
        return;
    }

    //assert(result.getColumnName(1).equals("TABLE"));
    boolean success = true;
    while (result.advanceRow()) {
        // Any single host/table failure marks the whole snapshot as failed
        if (!result.getString("RESULT").equals("SUCCESS")) {
            success = false;
            SNAP_LOG.warn("Snapshot save feasibility test failed for host "
                    + result.getLong("HOST_ID") + " table " + result.getString("TABLE") +
                    " with error message " + result.getString("ERR_MSG"));
        }
    }
    if (!success) {
        m_snapshots.removeLast();
    }
}
java
private void processDeleteResponse(ClientResponse response) { //Continue snapshotting even if a delete fails. setState(State.WAITING); if (response.getStatus() != ClientResponse.SUCCESS){ logFailureResponse("Delete of snapshots failed", response); return; } final VoltTable results[] = response.getResults(); final String err = SnapshotUtil.didSnapshotRequestFailWithErr(results); if (err != null) { SNAP_LOG.warn("Snapshot delete failed with failure response: " + err); return; } }
java
/**
 * Handle the snapshot-scan response that seeds m_snapshots at startup.
 * A single-table result signals a scan failure; the normal three-table
 * result's first table is walked to collect auto-snapshots whose path and
 * nonce prefix match this daemon's configuration. Ends by pruning any
 * snapshots beyond the retention limit.
 */
private void processScanResponse(ClientResponse response) {
    setState(State.WAITING);
    if (response.getStatus() != ClientResponse.SUCCESS) {
        logFailureResponse("Initial snapshot scan failed", response);
        return;
    }

    final VoltTable results[] = response.getResults();
    if (results.length == 1) {
        // One table back means the scan itself failed; it carries an ERR_MSG
        final VoltTable result = results[0];
        boolean advanced = result.advanceRow();
        assert(advanced);
        assert(result.getColumnCount() == 1);
        assert(result.getColumnType(0) == VoltType.STRING);
        SNAP_LOG.warn("Initial snapshot scan failed with failure response: " + result.getString("ERR_MSG"));
        return;
    }
    assert(results.length == 3);

    final VoltTable snapshots = results[0];
    assert(snapshots.getColumnCount() == 10);

    final File myPath = new File(m_path);
    while (snapshots.advanceRow()) {
        final String path = snapshots.getString("PATH");
        final File pathFile = new File(path);
        if (pathFile.equals(myPath)) {
            final String nonce = snapshots.getString("NONCE");
            // Only snapshots created by this daemon (matching prefix) count
            if (nonce.startsWith(m_prefixAndSeparator)) {
                final Long txnId = snapshots.getLong("TXNID");
                m_snapshots.add(new Snapshot(path, SnapshotPathType.SNAP_AUTO, nonce, txnId));
            }
        }
    }

    // Sort oldest-first so retention trimming removes from the head
    java.util.Collections.sort(m_snapshots);
    deleteExtraSnapshots();
}
java
/**
 * Delete the oldest snapshots beyond the retention count (m_retain) by
 * issuing an async @SnapshotDelete; transitions to DELETING while the
 * sysproc is in flight, or back to WAITING when nothing needs deleting.
 */
private void deleteExtraSnapshots() {
    if (m_snapshots.size() <= m_retain) {
        setState(State.WAITING);
    } else {
        m_lastSysprocInvocation = System.currentTimeMillis();
        setState(State.DELETING);

        final int numberToDelete = m_snapshots.size() - m_retain;
        String pathsToDelete[] = new String[numberToDelete];
        String noncesToDelete[] = new String[numberToDelete];
        for (int ii = 0; ii < numberToDelete; ii++) {
            // poll() removes from the head: m_snapshots is kept sorted oldest-first
            final Snapshot s = m_snapshots.poll();
            pathsToDelete[ii] = s.path;
            noncesToDelete[ii] = s.nonce;
            SNAP_LOG.info("Snapshot daemon deleting " + s.nonce);
        }

        Object params[] = new Object[] {
                pathsToDelete,
                noncesToDelete,
                SnapshotPathType.SNAP_AUTO.toString()
        };

        long handle = m_nextCallbackHandle++;

        m_procedureCallbacks.put(handle, new ProcedureCallback() {
            @Override
            public void clientCallback(final ClientResponse clientResponse)
                    throws Exception {
                processClientResponsePrivate(clientResponse);
            }
        });

        m_initiator.initiateSnapshotDaemonWork("@SnapshotDelete", handle, params);
    }
}
java
/**
 * Create the ZK request node for a user snapshot and arrange for the client
 * to hear the outcome.
 *
 * For a normal user snapshot a ZK watch relays the eventual result to the
 * client; for a truncation request a synthetic SUCCESS response is written
 * immediately (the truncation is merely queued at this point).
 *
 * @throws ForwardClientException if a user snapshot request already exists
 */
public void createAndWatchRequestNode(final long clientHandle,
                                      final Connection c,
                                      SnapshotInitiationInfo snapInfo,
                                      boolean notifyChanges) throws ForwardClientException {
    boolean requestExists = false;
    final String requestId = createRequestNode(snapInfo);
    if (requestId == null) {
        // createRequestNode() returns null iff the request node already exists
        requestExists = true;
    } else {
        if (!snapInfo.isTruncationRequest()) {
            try {
                registerUserSnapshotResponseWatch(requestId, clientHandle, c, notifyChanges);
            } catch (Exception e) {
                VoltDB.crashLocalVoltDB("Failed to register ZK watch on snapshot response", true, e);
            }
        }
        else {
            // need to construct a success response of some sort here to indicate the truncation attempt
            // was successfully attempted
            VoltTable result = SnapshotUtil.constructNodeResultsTable();
            result.addRow(-1, CoreUtils.getHostnameOrAddress(), "", "SUCCESS", "SNAPSHOT REQUEST QUEUED");
            final ClientResponseImpl resp =
                new ClientResponseImpl(ClientResponseImpl.SUCCESS,
                        new VoltTable[] {result},
                        "User-requested truncation snapshot successfully queued for execution.",
                        clientHandle);
            // Length-prefixed wire format: 4-byte size then the flattened response
            ByteBuffer buf = ByteBuffer.allocate(resp.getSerializedSize() + 4);
            buf.putInt(buf.capacity() - 4);
            resp.flattenToBuffer(buf).flip();
            c.writeStream().enqueue(buf);
        }
    }

    if (requestExists) {
        VoltTable result = SnapshotUtil.constructNodeResultsTable();
        result.addRow(-1, CoreUtils.getHostnameOrAddress(), "", "FAILURE", "SNAPSHOT IN PROGRESS");
        throw new ForwardClientException("A request to perform a user snapshot already exists", result);
    }
}
java
/**
 * Create the ZK node representing a snapshot request.
 *
 * User snapshots use a single persistent node carrying the request JSON;
 * truncation requests use a persistent-sequential marker node with no data.
 *
 * @return a fresh request id, or null if the (user snapshot) node already
 *         exists, i.e. a request is in flight
 */
private String createRequestNode(SnapshotInitiationInfo snapInfo) {
    String requestId = null;

    try {
        requestId = java.util.UUID.randomUUID().toString();
        if (!snapInfo.isTruncationRequest()) {
            final JSONObject jsObj = snapInfo.getJSONObjectForZK();
            jsObj.put("requestId", requestId);
            String zkString = jsObj.toString(4);
            byte zkBytes[] = zkString.getBytes("UTF-8");

            m_zk.create(VoltZK.user_snapshot_request, zkBytes,
                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        }
        else {
            m_zk.create(VoltZK.request_truncation_snapshot_node, null,
                    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT_SEQUENTIAL);
        }
    } catch (KeeperException.NodeExistsException e) {
        // A request is already pending; signal via null
        return null;
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Exception while attempting to create user snapshot request in ZK", true, e);
    }

    return requestId;
}
java
/**
 * Deserialize one FastSerializable object of the given type from a raw
 * byte array.
 */
public final static <T extends FastSerializable> T deserialize(
        final byte[] data, final Class<T> expectedType) throws IOException {
    return new FastDeserializer(data).readObject(expectedType);
}
java
/**
 * Instantiate {@code expectedType} via its no-arg constructor and populate
 * it from this deserializer.
 *
 * NOTE(review): reflection failures (InstantiationException /
 * IllegalAccessException) are only printed, so this can return null;
 * callers must tolerate that -- confirm this best-effort contract.
 */
public <T extends FastSerializable> T readObject(final Class<T> expectedType) throws IOException {
    assert(expectedType != null);
    T obj = null;
    try {
        obj = expectedType.newInstance();
        obj.readExternal(this);
    } catch (final InstantiationException e) {
        e.printStackTrace();
    } catch (final IllegalAccessException e) {
        e.printStackTrace();
    }
    return obj;
}
java
/**
 * Populate an existing FastSerializable from the stream, reporting the
 * number of bytes consumed to the (optional) monitor.
 */
public FastSerializable readObject(final FastSerializable obj,
                                   final DeserializationMonitor monitor) throws IOException {
    final int before = buffer.position();
    obj.readExternal(this);
    if (monitor != null) {
        monitor.deserializedBytes(buffer.position() - before);
    }
    return obj;
}
java
public static String readString(ByteBuffer buffer) throws IOException { final int NULL_STRING_INDICATOR = -1; final int len = buffer.getInt(); // check for null string if (len == NULL_STRING_INDICATOR) return null; assert len >= 0; if (len > VoltType.MAX_VALUE_LENGTH) { throw new IOException("Serializable strings cannot be longer then " + VoltType.MAX_VALUE_LENGTH + " bytes"); } if (len < NULL_STRING_INDICATOR) { throw new IOException("String length is negative " + len); } // now assume not null final byte[] strbytes = new byte[len]; buffer.get(strbytes); String retval = null; try { retval = new String(strbytes, "UTF-8"); } catch (final UnsupportedEncodingException e) { e.printStackTrace(); } return retval; }
java
public String readString() throws IOException { final int len = readInt(); // check for null string if (len == VoltType.NULL_STRING_LENGTH) { return null; } if (len < VoltType.NULL_STRING_LENGTH) { throw new IOException("String length is negative " + len); } if (len > buffer.remaining()) { throw new IOException("String length is bigger than total buffer " + len); } // now assume not null final byte[] strbytes = new byte[len]; readFully(strbytes); return new String(strbytes, Constants.UTF8ENCODING); }
java
/**
 * Copy the next byteLen bytes out of the stream and wrap them in a fresh,
 * independent ByteBuffer.
 */
public ByteBuffer readBuffer(final int byteLen) {
    final byte[] copy = new byte[byteLen];
    buffer.get(copy);
    return ByteBuffer.wrap(copy);
}
java
/**
 * Drop the XML definition of the named user-defined function from the
 * schema, recording the drop with the tracker.
 *
 * @return true when a matching definition was found and removed
 */
private boolean removeUDFInSchema(String functionName) {
    for (int idx = 0; idx < m_schema.children.size(); idx++) {
        VoltXMLElement candidate = m_schema.children.get(idx);
        if (!"ud_function".equals(candidate.name)) {
            continue;
        }
        String declaredName = candidate.attributes.get("name");
        if (declaredName == null || !functionName.equals(declaredName)) {
            continue;
        }
        m_schema.children.remove(idx);
        m_tracker.addDroppedFunction(functionName);
        m_logger.debug(String.format("Removed XML for" + " function named %s", functionName));
        return true;
    }
    return false;
}
java
/**
 * Serialize this element tree to a string, prefixed with the standard XML
 * declaration.
 */
public String toXML() {
    final StringBuilder out = new StringBuilder("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
    toXML(out, 0);
    return out.toString();
}
java
/**
 * Depth-first collection of every child and descendant whose element name
 * matches; each matching child precedes its own matching descendants.
 */
public List<VoltXMLElement> findChildrenRecursively(String name) {
    final List<VoltXMLElement> matches = new ArrayList<>();
    for (VoltXMLElement child : children) {
        if (name.equals(child.name)) {
            matches.add(child);
        }
        matches.addAll(child.findChildrenRecursively(name));
    }
    return matches;
}
java
/**
 * Collect the direct children whose element name matches.
 */
public List<VoltXMLElement> findChildren(String name) {
    final List<VoltXMLElement> matches = new ArrayList<>();
    for (VoltXMLElement child : children) {
        if (name.equals(child.name)) {
            matches.add(child);
        }
    }
    return matches;
}
java
/**
 * Find the direct child with the given unique name, or null if absent.
 */
public VoltXMLElement findChild(String uniqueName) {
    for (VoltXMLElement child : children) {
        if (uniqueName.equals(child.getUniqueName())) {
            return child;
        }
    }
    return null;
}
java
/**
 * Compute the structural difference between two VoltXMLElements with the
 * same unique name: attribute adds/removes/changes, child element
 * adds/removes, and recursive diffs for children present in both sides.
 *
 * @return the (possibly empty) diff, or null when the two roots do not
 *         share a unique name
 */
static public VoltXMLDiff computeDiff(VoltXMLElement before, VoltXMLElement after)
{
    // Top level call needs both names to match (I think this makes sense)
    if (!before.getUniqueName().equals(after.getUniqueName())) {
        // not sure this is best behavior, ponder as progress is made
        return null;
    }

    VoltXMLDiff result = new VoltXMLDiff(before.getUniqueName());

    // Short-circuit check for any differences first.  Can return early if there are no changes
    if (before.toMinString().equals(after.toMinString())) {
        return result;
    }

    // Store the final desired element order (unique name -> position in "after")
    for (int i = 0; i < after.children.size(); i++) {
        VoltXMLElement child = after.children.get(i);
        result.m_elementOrder.put(child.getUniqueName(), i);
    }

    // first, check the attributes
    Set<String> firstKeys = before.attributes.keySet();
    Set<String> secondKeys = new HashSet<>();
    secondKeys.addAll(after.attributes.keySet());
    // Do removed and changed attributes walking the first element's attributes
    for (String firstKey : firstKeys) {
        if (!secondKeys.contains(firstKey)) {
            result.m_removedAttributes.add(firstKey);
        }
        else if (!(after.attributes.get(firstKey).equals(before.attributes.get(firstKey)))) {
            result.m_changedAttributes.put(firstKey, after.attributes.get(firstKey));
        }
        // remove the firstKey from secondKeys to track things added
        secondKeys.remove(firstKey);
    }
    // everything in secondKeys should be something added
    for (String key : secondKeys) {
        result.m_addedAttributes.put(key, after.attributes.get(key));
    }

    // Now, need to check the children.  Each pair of children with the same names
    // need to be descended to look for changes
    // Probably more efficient ways to do this, but brute force it for now
    // Would be helpful if the underlying children objects were Maps rather than
    // Lists.
    Set<String> firstChildren = new HashSet<>();
    for (VoltXMLElement child : before.children) {
        firstChildren.add(child.getUniqueName());
    }
    Set<String> secondChildren = new HashSet<>();
    for (VoltXMLElement child : after.children) {
        secondChildren.add(child.getUniqueName());
    }

    Set<String> commonNames = new HashSet<>();
    for (VoltXMLElement firstChild : before.children) {
        if (!secondChildren.contains(firstChild.getUniqueName())) {
            // Need to duplicate the
            // NOTE(review): comment above is truncated in the original; the
            // element is added by reference, not copied -- confirm intent.
            result.m_removedElements.add(firstChild);
        }
        else {
            commonNames.add(firstChild.getUniqueName());
        }
    }
    for (VoltXMLElement secondChild : after.children) {
        if (!firstChildren.contains(secondChild.getUniqueName())) {
            result.m_addedElements.add(secondChild);
        }
        else {
            assert(commonNames.contains(secondChild.getUniqueName()));
        }
    }
    // This set contains uniquenames
    for (String name : commonNames) {
        // Recurse into children present on both sides; keep only non-empty diffs
        VoltXMLDiff childDiff = computeDiff(before.findChild(name), after.findChild(name));
        if (!childDiff.isEmpty()) {
            result.m_changedElements.put(name, childDiff);
        }
    }

    return result;
}
java
/**
 * Collect all matching sub-elements named elementName; when attrName is
 * non-null the element must also carry that attribute with value attrValue.
 * Delegates the traversal to the accumulating overload.
 */
public List<VoltXMLElement> extractSubElements(String elementName, String attrName, String attrValue) {
    assert(elementName != null);
    assert((elementName != null && attrValue != null) || attrName == null);
    final List<VoltXMLElement> found = new ArrayList<>();
    extractSubElements(elementName, attrName, attrValue, found);
    return found;
}
java
/**
 * Map one character to its hex digit value: 0-9 for '0'-'9', 10-15 for
 * 'a'-'f'/'A'-'F'. Characters falling just outside the letter ranges map
 * to the sentinel 16; everything below the ranges maps to -1 (preserving
 * the original function's exact mapping).
 */
static int getHexValue(int c) {
    if (c >= '0' && c <= '9') {
        return c - '0';
    }
    if (c > 'z') {
        return 16;
    }
    if (c >= 'a') {
        return c - ('a' - 10);
    }
    if (c > 'Z') {
        return 16;
    }
    if (c >= 'A') {
        return c - ('A' - 10);
    }
    return -1;
}
java
/**
 * Try to consume the given identifier at the current scan position,
 * matching each character either exactly or against the upper-cased source
 * character. Advances currentPosition only on a full match.
 *
 * @return true when the identifier was matched and consumed
 */
boolean scanSpecialIdentifier(String identifier) {
    final int length = identifier.length();

    if (limit - currentPosition < length) {
        return false;
    }

    for (int idx = 0; idx < length; idx++) {
        final int expected = identifier.charAt(idx);
        final char actual  = sqlString.charAt(currentPosition + idx);

        if (expected != actual && expected != Character.toUpperCase(actual)) {
            return false;
        }
    }

    currentPosition += length;

    return true;
}
java
/**
 * Scan an SQL interval qualifier of the form
 *   field [ (precision [, scale]) ] [ TO field [ (scale) ] ]
 * starting at the current token, and resolve it to an IntervalType.
 *
 * Throws X_22006 (invalid interval format) on any malformed qualifier.
 */
IntervalType scanIntervalType() {

    int precision = -1;
    int scale     = -1;
    int startToken;
    int endToken;
    final int errorCode = ErrorCode.X_22006;

    // The current token names the leading field; endToken defaults to it
    startToken = endToken = token.tokenType;

    scanNext(errorCode);

    // Optional "(precision[, scale])" -- a scale is only legal after SECOND
    if (token.tokenType == Tokens.OPENBRACKET) {
        scanNext(errorCode);

        if (token.dataType == null
                || token.dataType.typeCode != Types.SQL_INTEGER) {
            throw Error.error(errorCode);
        }

        precision = ((Number) this.token.tokenValue).intValue();

        scanNext(errorCode);

        if (token.tokenType == Tokens.COMMA) {
            if (startToken != Tokens.SECOND) {
                throw Error.error(errorCode);
            }

            scanNext(errorCode);

            if (token.dataType == null
                    || token.dataType.typeCode != Types.SQL_INTEGER) {
                throw Error.error(errorCode);
            }

            scale = ((Number) token.tokenValue).intValue();

            scanNext(errorCode);
        }

        if (token.tokenType != Tokens.CLOSEBRACKET) {
            throw Error.error(errorCode);
        }

        scanNext(errorCode);
    }

    // Optional "TO field" for range intervals like DAY TO SECOND
    if (token.tokenType == Tokens.TO) {
        scanNext(errorCode);

        endToken = token.tokenType;

        scanNext(errorCode);
    }

    // Optional trailing "(scale)" -- only valid when the end field is SECOND
    // and the interval is a true range (start != end)
    if (token.tokenType == Tokens.OPENBRACKET) {
        if (endToken != Tokens.SECOND || endToken == startToken) {
            throw Error.error(errorCode);
        }

        scanNext(errorCode);

        if (token.dataType == null
                || token.dataType.typeCode != Types.SQL_INTEGER) {
            throw Error.error(errorCode);
        }

        scale = ((Number) token.tokenValue).intValue();

        scanNext(errorCode);

        if (token.tokenType != Tokens.CLOSEBRACKET) {
            throw Error.error(errorCode);
        }

        scanNext(errorCode);
    }

    // Map the start/end field tokens onto interval field indices
    int startIndex = ArrayUtil.find(Tokens.SQL_INTERVAL_FIELD_CODES, startToken);
    int endIndex   = ArrayUtil.find(Tokens.SQL_INTERVAL_FIELD_CODES, endToken);

    return IntervalType.getIntervalType(startIndex, endIndex, precision, scale);
}
java
/**
 * Convert a string to a datetime or interval value of the given type.
 * Accepts either a bare literal or the SQL literal form with a leading
 * type keyword (DATE '...', TIME '...', TIMESTAMP '...', INTERVAL '...').
 *
 * Synchronized because this scanner instance is reused: reset()/resetState()
 * mutate shared scanner state.
 */
public synchronized Object convertToDatetimeInterval(String s, DTIType type) {

    Object value;
    IntervalType intervalType = null;
    int dateTimeToken         = -1;
    // Pick the SQLSTATE family up front: 22007 = datetime, 22006 = interval
    int errorCode = type.isDateTimeType() ? ErrorCode.X_22007
                                          : ErrorCode.X_22006;

    reset(s);
    resetState();
    scanToken();
    scanWhitespace();

    switch (token.tokenType) {

        case Tokens.INTERVAL :
        case Tokens.DATE :
        case Tokens.TIME :
        case Tokens.TIMESTAMP :
            // SQL literal form: remember the keyword and unwrap the quoted body
            dateTimeToken = token.tokenType;

            scanToken();

            if (token.tokenType != Tokens.X_VALUE
                    || token.dataType.typeCode != Types.SQL_CHAR) {

                // error datetime bad literal
                throw Error.error(errorCode);
            }

            s = token.tokenString;

            scanNext(ErrorCode.X_22007);

            if (type.isIntervalType()) {
                // INTERVAL literals may carry a trailing qualifier to validate
                intervalType = scanIntervalType();
            }

            if (token.tokenType != Tokens.X_ENDPARSE) {
                throw Error.error(errorCode);
            }

        // $FALL-THROUGH$
        default :
    }

    // The keyword (if any) must agree with the requested target type
    switch (type.typeCode) {

        case Types.SQL_DATE :
            if (dateTimeToken != -1 && dateTimeToken != Tokens.DATE) {
                throw Error.error(errorCode);
            }

            return newDate(s);

        case Types.SQL_TIME :
        case Types.SQL_TIME_WITH_TIME_ZONE : {
            if (dateTimeToken != -1 && dateTimeToken != Tokens.TIME) {
                throw Error.error(errorCode);
            }

            return newTime(s);
        }
        case Types.SQL_TIMESTAMP :
        case Types.SQL_TIMESTAMP_WITH_TIME_ZONE : {
            if (dateTimeToken != -1 && dateTimeToken != Tokens.TIMESTAMP) {
                throw Error.error(errorCode);
            }

            return newTimestamp(s);
        }
        default :
            if (dateTimeToken != -1 && dateTimeToken != Tokens.INTERVAL) {
                throw Error.error(errorCode);
            }

            if (type.isIntervalType()) {
                value = newInterval(s, (IntervalType) type);

                // A literal qualifier must match the target's field range
                if (intervalType != null) {
                    if (intervalType.startIntervalType != type
                            .startIntervalType || intervalType
                            .endIntervalType != type.endIntervalType) {
                        throw Error.error(errorCode);
                    }
                }

                return value;
            }

            // Not a datetime and not an interval: internal error
            throw Error.runtimeError(ErrorCode.U_S0500, "Scanner");
    }
}
java
/**
 * Write a full row; the column count is implied by the type array.
 */
public void writeData(Object[] data, Type[] types) {
    final int columnCount = types.length;
    writeData(columnCount, types, data, null, null);
}
java
void addWarning(SQLWarning w) { // PRE: w is never null synchronized (rootWarning_mutex) { if (rootWarning == null) { rootWarning = w; } else { rootWarning.setNextWarning(w); } } }
java
/**
 * Reset the underlying session to its initial state, translating engine
 * failures into SQLSTATE 08006 connection errors.
 */
public void reset() throws SQLException {
    try {
        sessionProxy.resetSession();
    } catch (HsqlException e) {
        throw Util.sqlException(ErrorCode.X_08006, e.getMessage(), e);
    }
}
java
/**
 * Strip a JDBC escape-sequence introducer in place so the statement reads
 * as native SQL. The caller has found a '{' at index i; the keyword that
 * follows ("fn", "oj", "ts", "d", "t", "call", "?= call", "escape") is
 * blanked out or skipped, and the index of the next character to process
 * is returned.
 *
 * @throws SQLException when the text after '{' is not a recognized escape
 *         keyword
 */
private int onStartEscapeSequence(String sql, StringBuffer sb,
                                  int i) throws SQLException {
    // Blank the '{' itself
    sb.setCharAt(i++, ' ');

    i = StringUtil.skipSpaces(sql, i);

    if (sql.regionMatches(true, i, "fn ", 0, 3)
            || sql.regionMatches(true, i, "oj ", 0, 3)
            || sql.regionMatches(true, i, "ts ", 0, 3)) {
        // Two-character keyword: blank both characters
        sb.setCharAt(i++, ' ');
        sb.setCharAt(i++, ' ');
    } else if (sql.regionMatches(true, i, "d ", 0, 2)
               || sql.regionMatches(true, i, "t ", 0, 2)) {
        sb.setCharAt(i++, ' ');
    } else if (sql.regionMatches(true, i, "call ", 0, 5)) {
        // "call" stays in the SQL text; just step past it
        i += 4;
    } else if (sql.regionMatches(true, i, "?= call ", 0, 8)) {
        // Blank the "?=" and step past "call"
        sb.setCharAt(i++, ' ');
        sb.setCharAt(i++, ' ');
        i += 5;
    } else if (sql.regionMatches(true, i, "escape ", 0, 7)) {
        i += 6;
    } else {
        // Rewind so the error message points at the offending keyword
        i--;

        throw Util.sqlException(
            Error.error(
                ErrorCode.JDBC_CONNECTION_NATIVE_SQL, sql.substring(i)));
    }

    return i;
}
java
/**
 * Advance the sequence in response to a user-supplied value: when the value
 * is at or beyond the current position (in the direction of travel), bump
 * currValue past it to the next multiple of the increment. Always echoes
 * the input value back.
 */
synchronized long userUpdate(long value) {
    if (value == currValue) {
        currValue += increment;
        return value;
    }

    final boolean valueIsAhead = (increment > 0) ? (value > currValue)
                                                 : (value < currValue);
    if (valueIsAhead) {
        // Jump to the first increment-aligned position past value
        currValue += ((value - currValue + increment) / increment) * increment;
    }

    return value;
}
java
/**
 * Advance the sequence in response to a system-supplied value: when the
 * value is at or beyond the current position (in the direction of travel),
 * restart counting one increment past it. Echoes the input value back.
 */
synchronized long systemUpdate(long value) {
    if (value == currValue) {
        currValue += increment;
        return value;
    }

    final boolean valueIsAhead = (increment > 0) ? (value > currValue)
                                                 : (value < currValue);
    if (valueIsAhead) {
        currValue = value + increment;
    }

    return value;
}
java
/**
 * Return the current sequence value and step to the next one. When the next
 * step would pass the max (ascending) or min (descending) bound, either
 * wrap (CYCLE) or mark the sequence exhausted so the following call throws
 * X_2200H.
 */
synchronized public long getValue() {

    if (limitReached) {
        throw Error.error(ErrorCode.X_2200H);
    }

    long nextValue;

    if (increment > 0) {
        // Ascending: would the next step overflow maxValue?
        if (currValue > maxValue - increment) {
            if (isCycle) {
                nextValue = minValue;
            } else {
                limitReached = true;
                nextValue    = minValue;
            }
        } else {
            nextValue = currValue + increment;
        }
    } else {
        // Descending: would the next step underflow minValue?
        if (currValue < minValue - increment) {
            if (isCycle) {
                nextValue = maxValue;
            } else {
                limitReached = true;
                // NOTE(review): minValue here, unlike the cycle case's
                // maxValue; the value is never served once limitReached is
                // set, so it appears inert -- confirm.
                nextValue    = minValue;
            }
        } else {
            nextValue = currValue + increment;
        }
    }

    long result = currValue;

    currValue = nextValue;

    return result;
}
java
/**
 * Restart the sequence at the given value.
 *
 * @throws org.hsqldb_voltpatches.HsqlException (X_42597) when the value
 *         lies outside [minValue, maxValue]
 */
synchronized public void reset(long value) {

    if (value < minValue || value > maxValue) {
        throw Error.error(ErrorCode.X_42597);
    }

    lastValue  = value;
    currValue  = value;
    startValue = value;
}
java
/**
 * Lexicographic comparison over the first 20 entries of hashBytes (the
 * SHA-1 digest length). A null argument sorts before this object.
 */
@Override
public int compareTo(Sha1Wrapper other) {
    if (other == null) {
        return 1;
    }
    for (int idx = 0; idx < 20; idx++) {
        final int diff = hashBytes[idx] - other.hashBytes[idx];
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}
java
/**
 * Append the given number of space characters to the builder.
 */
private static void appendSpaces(final StringBuilder sb, final int spaces) {
    int remaining = spaces;
    while (remaining > 0) {
        sb.append(SPACE);
        remaining--;
    }
}
java