code
stringlengths
73
34.1k
label
stringclasses
1 value
// Executes the database script file (schema + data statements) at startup.
// Any failure closes open caches and is rethrown as an HsqlException.
private void processScript() {

    ScriptReaderBase scr = null;

    try {
        if (database.isFilesInJar() || fa.isStreamElement(scriptFileName)) {
            scr = ScriptReaderBase.newScriptReader(database, scriptFileName,
                    scriptFormat);

            // the system session is used so the script runs with full privileges
            Session session =
                database.sessionManager.getSysSessionForScript(database);

            scr.readAll(session);
            scr.close();
        }
    } catch (Throwable e) {

        // clean up partially-opened resources before rethrowing
        if (scr != null) {
            scr.close();

            if (cache != null) {
                cache.close(false);
            }

            closeAllTextCaches(false);
        }

        database.logger.appLog.logContext(e, null);

        // translate to the narrowest matching error code
        if (e instanceof HsqlException) {
            throw (HsqlException) e;
        } else if (e instanceof IOException) {
            throw Error.error(ErrorCode.FILE_IO_ERROR, e.toString());
        } else if (e instanceof OutOfMemoryError) {
            throw Error.error(ErrorCode.OUT_OF_MEMORY);
        } else {
            throw Error.error(ErrorCode.GENERAL_ERROR, e.toString());
        }
    }
}
java
private void processDataFile() { // OOo related code if (database.isStoredFileAccess()) { return; } // OOo end if (cache == null || filesReadOnly || !fa.isStreamElement(logFileName)) { return; } File file = new File(logFileName); long logLength = file.length(); long dataLength = cache.getFileFreePos(); if (logLength + dataLength > cache.maxDataFileSize) { database.logger.needsCheckpoint = true; } }
java
/**
 * Replays the .log file (text-format redo log) against the database,
 * unless the database runs from a jar or the log file does not exist.
 */
private void processLog() {

    boolean logAvailable = !database.isFilesInJar()
                           && fa.isStreamElement(logFileName);

    if (logAvailable) {
        ScriptRunner.runScript(database, logFileName,
                               ScriptWriterBase.SCRIPT_TEXT_170);
    }
}
java
// Restores the .data file from the .backup archive (full-backup mode).
// Delegates to restoreBackupIncremental() when incremental backup is on.
private void restoreBackup() {

    if (incBackup) {
        restoreBackupIncremental();

        return;
    }

    // in case data file cannot be deleted, reset it
    DataFileCache.deleteOrResetFreePos(database, fileName + ".data");

    try {
        FileArchiver.unarchive(fileName + ".backup", fileName + ".data",
                               database.getFileAccess(),
                               FileArchiver.COMPRESSION_ZIP);
    } catch (Exception e) {

        // wrap any unarchive failure as FILE_IO_ERROR, reporting the path
        throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_Message_Pair,
                          new Object[] {
            fileName + ".backup", e.toString()
        });
    }
}
java
// Restores the .data file from an incremental shadow backup, then deletes
// the backup file. If no .backup exists the data file is assumed
// consistent (it was never changed or was fully written).
private void restoreBackupIncremental() {

    try {
        if (fa.isStreamElement(fileName + ".backup")) {
            RAShadowFile.restoreFile(fileName + ".backup",
                                     fileName + ".data");
        } else {

            /*
            // this is to ensure file has been written fully but it is not necessary
            // as semantics dictate that if a backup does not exist, the file
            // was never changed or was fully written to
            if (FileUtil.exists(cacheFileName)) {
                int flags = DataFileCache.getFlags(cacheFileName);

                if (!BitMap.isSet(flags, DataFileCache.FLAG_ISSAVED)) {
                    FileUtil.delete(cacheFileName);
                }
            }
            */
        }

        deleteBackup();
    } catch (IOException e) {

        // NOTE(review): the original cause is dropped; only the path is reported
        throw Error.error(ErrorCode.FILE_IO_ERROR, fileName + ".backup");
    }
}
java
// Applies a catalog update on the MP initiator's execution site and on
// the scheduler. The snapshot txn ids are passed as Long.MIN_VALUE
// because the MPI has no snapshot functionality.
public void updateCatalog(String diffCmds, CatalogContext context, boolean isReplay,
        boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration)
{
    // note this will never require snapshot isolation because the MPI has no snapshot funtionality
    m_executionSite.updateCatalog(diffCmds, context, false, true,
            Long.MIN_VALUE, Long.MIN_VALUE, Long.MIN_VALUE,
            isReplay, requireCatalogDiffCmdsApplyToEE, requiresNewExportGeneration);
    m_scheduler.updateCatalog(diffCmds, context);
}
java
/**
 * Builds a new topology with the given hosts added, assigning fresh
 * partition ids starting after the highest id already in use.
 *
 * @return the new topology paired with the list of newly created
 *         partition ids
 */
public static Pair<AbstractTopology, ImmutableList<Integer>> mutateAddNewHosts(
        AbstractTopology currentTopology, Map<Integer, HostInfo> newHostInfos) {
    int firstNewPartitionId = getNextFreePartitionId(currentTopology);

    TopologyBuilder builder = addPartitionsToHosts(newHostInfos,
            Collections.emptySet(), currentTopology.getReplicationFactor(),
            firstNewPartitionId);

    ImmutableList.Builder<Integer> addedIds = ImmutableList.builder();
    for (PartitionBuilder partition : builder.m_partitions) {
        addedIds.add(partition.m_id);
    }

    return Pair.of(new AbstractTopology(currentTopology, builder), addedIds.build());
}
java
/**
 * Builds a new topology with the given hosts, and every partition they
 * host, removed.
 *
 * @return the new topology paired with the set of removed partition ids
 */
public static Pair<AbstractTopology, Set<Integer>> mutateRemoveHosts(AbstractTopology currentTopology,
        Set<Integer> removalHosts) {
    final Set<Integer> removedPartitions =
            getPartitionIdsForHosts(currentTopology, removalHosts);
    AbstractTopology shrunkTopology =
            new AbstractTopology(currentTopology, removalHosts, removedPartitions);
    return Pair.of(shrunkTopology, removedPartitions);
}
java
/**
 * Returns the ids of every host that shares at least one partition with
 * the given host.
 */
public Set<Integer> getPartitionGroupPeers(int hostId) {
    final Set<Integer> groupPeers = Sets.newHashSet();
    for (Partition partition : hostsById.get(hostId).partitions) {
        groupPeers.addAll(partition.hostIds);
    }
    return groupPeers;
}
java
// Groups all other hosts by their HA-group distance from the given host.
// @return collections of host ids, ordered by descending distance
//         (farthest group first)
public static List<Collection<Integer>> sortHostIdByHGDistance(int hostId,
        Map<Integer, String> hostGroups) {
    String localHostGroup = hostGroups.get(hostId);
    Preconditions.checkArgument(localHostGroup != null);

    HAGroup localHaGroup = new HAGroup(localHostGroup);

    // Memorize the distance, map the distance to host ids.
    // Tree keys with a reversed comparator iterate the largest distance first.
    Multimap<Integer, Integer> distanceMap =
            MultimapBuilder.treeKeys(Comparator.<Integer>naturalOrder().reversed())
            .arrayListValues().build();
    for (Map.Entry<Integer, String> entry : hostGroups.entrySet()) {
        if (hostId == entry.getKey()) {
            continue;   // skip the local host itself
        }
        distanceMap.put(localHaGroup.getRelationshipTo(entry.getValue()).m_distance,
                entry.getKey());
    }
    return new ArrayList<>(distanceMap.asMap().values());
}
java
/** Counts one queued invocation for the importer/procedure pair. */
public void reportQueued(String importerName, String procName) {
    getStatsInfo(importerName, procName).m_pendingCount.incrementAndGet();
}
java
/**
 * Counts one failed invocation; optionally also removes it from the
 * pending count (when the failure ends a previously queued invocation).
 */
public void reportFailure(String importerName, String procName, boolean decrementPending) {
    StatsInfo stats = getStatsInfo(importerName, procName);
    if (decrementPending) {
        stats.m_pendingCount.decrementAndGet();
    }
    stats.m_failureCount.incrementAndGet();
}
java
/** Counts one successful invocation and removes it from the pending count. */
private void reportSuccess(String importerName, String procName) {
    StatsInfo stats = getStatsInfo(importerName, procName);
    stats.m_pendingCount.decrementAndGet();
    stats.m_successCount.incrementAndGet();
}
java
/** Counts one retried invocation for the importer/procedure pair. */
private void reportRetry(String importerName, String procName) {
    getStatsInfo(importerName, procName).m_retryCount.incrementAndGet();
}
java
/**
 * Writes one 512-byte tar block.
 *
 * @throws IllegalArgumentException if the buffer is not exactly 512 bytes
 * @throws IOException on write failure
 */
public void writeBlock(byte[] block) throws IOException {
    final int expectedLength = 512;
    if (block.length == expectedLength) {
        write(block, expectedLength);
    } else {
        throw new IllegalArgumentException(
                RB.singleton.getString(RB.BAD_BLOCK_WRITE_LEN, block.length));
    }
}
java
/** Writes the given number of zero-filled padding blocks. */
public void writePadBlocks(int blockCount) throws IOException {
    int remaining = blockCount;
    while (remaining-- > 0) {
        write(ZERO_BLOCK, ZERO_BLOCK.length);
    }
}
java
// Returns the names of all tables (type TABLE) that have a primary key,
// or null if there is no connection. SQL errors are silently swallowed
// and yield a possibly empty result.
private Vector getAllTables() {

    Vector result = new Vector(20);

    try {
        if (cConn == null) {
            return null;
        }

        dbmeta = cConn.getMetaData();

        String[]  tableTypes = { "TABLE" };
        ResultSet allTables  = dbmeta.getTables(null, null, null,
            tableTypes);

        while (allTables.next()) {
            String aktTable = allTables.getString("TABLE_NAME");
            ResultSet primKeys = dbmeta.getPrimaryKeys(null, null,
                aktTable);

            // take only table with a primary key
            if (primKeys.next()) {
                result.addElement(aktTable);
            }

            primKeys.close();
        }

        allTables.close();
    } catch (SQLException e) {

        // System.out.println("SQL Exception: " + e.getMessage());
    }

    return result;
}
java
// Returns the index of the form for the currently selected table,
// creating and registering a new ZaurusTableForm when none exists yet.
private int getChoosenTableIndex() {

    String tableName = cTables.getSelectedItem();

    // System.out.println("in getChoosenTableIndex, selected Item is "+tableName);
    int index = getTableIndex(tableName);

    if (index >= 0) {

        // System.out.println("table found, index: " + index);
        return index;
    }    // end of if (index >= 0)

    // no form yet: build one, add it to the card panel and remember it
    ZaurusTableForm tableForm = new ZaurusTableForm(tableName, cConn);

    pForm.add(tableName, tableForm);
    vHoldTableNames.addElement(tableName);
    vHoldForms.addElement(tableForm);

    // System.out.println("new tableform for table "+tableName+", index: " + index);
    return vHoldTableNames.size() - 1;
}
java
private int getTableIndex(String tableName) { int index; // System.out.println("begin searching for "+tableName); for (index = 0; index < vHoldTableNames.size(); index++) { // System.out.println("in getTableIndex searching for "+tableName+", index: "+index); if (tableName.equals((String) vHoldTableNames.elementAt(index))) { return index; } // end of if (tableName.equals(vHoldTableNames.elementAt(index))) } // end of for (index = 0; index < vHoldTableNames.size(); index ++) return -1; }
java
private String[] getWords() { StringTokenizer tokenizer = new StringTokenizer(fSearchWords.getText()); String[] result = new String[tokenizer.countTokens()]; int i = 0; while (tokenizer.hasMoreTokens()) { result[i++] = tokenizer.nextToken(); } // end of while ((tokenizer.hasMoreTokens())) return result; }
java
// Builds the three button panels used by the card layout — pSearchButs
// (search form), pEditButs (row editing), pInsertButs (row insertion) —
// and registers this object as ActionListener on every button.
private void initButtons() {

    // the buttons for the search form
    bSearchRow = new Button("Search Rows");
    bNewRow    = new Button("Insert New Row");

    bSearchRow.addActionListener(this);
    bNewRow.addActionListener(this);

    pSearchButs = new Panel();

    pSearchButs.setLayout(new GridLayout(1, 0, 4, 4));
    pSearchButs.add(bSearchRow);
    pSearchButs.add(bNewRow);

    // the buttons for editing a row
    bCancel1 = new Button("Cancel");
    bPrev    = new Button("Prev");
    bNext    = new Button("Next");
    bDelete  = new Button("Delete");
    lastButtonDelete = false;
    bNewSearch = new Button("Search");

    bCancel1.addActionListener(this);
    bPrev.addActionListener(this);
    bNext.addActionListener(this);
    bDelete.addActionListener(this);
    bNewSearch.addActionListener(this);

    pEditButs = new Panel();

    pEditButs.setLayout(new GridLayout(1, 0, 4, 4));
    pEditButs.add(bCancel1);
    pEditButs.add(bPrev);
    pEditButs.add(bNext);
    pEditButs.add(bDelete);
    pEditButs.add(bNewSearch);

    // the buttons for inserting a new row
    pInsertButs = new Panel();

    pInsertButs.setLayout(new GridLayout(1, 0, 4, 4));

    bCancel2    = new Button("Cancel Insert");
    bNewInsert  = new Button("New Insert");
    bNewSearch1 = new Button("Search");

    bCancel2.addActionListener(this);
    bNewInsert.addActionListener(this);
    bNewSearch1.addActionListener(this);
    pInsertButs.add(bCancel2);
    pInsertButs.add(bNewInsert);
    pInsertButs.add(bNewSearch1);
}
java
// Resets the UI to the search card: re-reads the table list from the
// database, rebuilds the drop-down, removes all cached table forms from
// the card panel and clears the form caches.
private void resetTableForms() {

    lForm.show(pForm, "search");
    lButton.show(pButton, "search");

    Vector vAllTables = getAllTables();

    // fill the drop down list again
    // get all table names and show a drop down list of them in cTables
    cTables.removeAll();

    for (Enumeration e = vAllTables.elements(); e.hasMoreElements(); ) {
        cTables.addItem((String) e.nextElement());
    }

    // remove all form panels from pForm
    for (Enumeration e = vHoldForms.elements(); e.hasMoreElements(); ) {
        pForm.remove((ZaurusTableForm) e.nextElement());
    }    // end of while (Enumeration e = vHoldForms.elements(); e.hasMoreElements();)

    // initialize a new list for the table names which have a form in pForm
    vHoldTableNames = new Vector(20);
    vHoldForms      = new Vector(20);
}
java
/**
 * Walks down the left side of the union tree and returns the first
 * (leftmost) SELECT statement.
 */
private ParsedSelectStmt getLeftmostSelectStmt() {
    assert (!m_children.isEmpty());
    AbstractParsedStmt leftmost = m_children.get(0);
    if (leftmost instanceof ParsedSelectStmt) {
        return (ParsedSelectStmt) leftmost;
    }
    // Not a SELECT: it must be a nested union, so recurse into it.
    assert(leftmost instanceof ParsedUnionStmt);
    return ((ParsedUnionStmt) leftmost).getLeftmostSelectStmt();
}
java
/**
 * Returns the first non-null content-determinism message found among the
 * children of this statement, or null when every child is deterministic.
 */
@Override
public String calculateContentDeterminismMessage() {
    for (AbstractParsedStmt child : m_children) {
        String message = child.getContentDeterminismMessage();
        if (message != null) {
            return message;
        }
    }
    return null;
}
java
// Flushes and detaches the current log stream so the next append() starts
// a new log file. The underlying FileOutputStream stays in streamsToFlush
// and is closed later by commit()/close().
public synchronized void rollLog() throws IOException {
    if (logStream != null) {
        this.logStream.flush();
        this.logStream = null;
        oa = null;
    }
}
java
// Closes the current log stream and every output stream still awaiting
// flush. NOTE(review): if an early close() throws, the remaining streams
// are left unclosed.
public synchronized void close() throws IOException {
    if (logStream != null) {
        logStream.close();
    }
    for (FileOutputStream log : streamsToFlush) {
        log.close();
    }
}
java
// Appends a transaction (header + serialized record) to the current log
// file, creating a new file named "log.<zxid-in-hex>" when none is open.
// Each entry is preceded by a CRC checksum for corruption detection.
// @return true if an entry was written, false when hdr is null
public synchronized boolean append(TxnHeader hdr, Record txn)
    throws IOException
{
    if (hdr != null) {
        // zxids are expected to arrive in increasing order; warn otherwise
        if (hdr.getZxid() <= lastZxidSeen) {
            LOG.warn("Current zxid " + hdr.getZxid()
                    + " is <= " + lastZxidSeen + " for "
                    + hdr.getType());
        }
        if (logStream==null) {
           if(LOG.isInfoEnabled()){
                LOG.info("Creating new log file: log." +
                        Long.toHexString(hdr.getZxid()));
           }

           // the new log file is named after the first zxid it contains
           logFileWrite = new File(logDir, ("log." +
                   Long.toHexString(hdr.getZxid())));
           fos = new FileOutputStream(logFileWrite);
           logStream=new BufferedOutputStream(fos);
           oa = BinaryOutputArchive.getArchive(logStream);
           FileHeader fhdr = new FileHeader(TXNLOG_MAGIC,VERSION, dbId);
           fhdr.serialize(oa, "fileheader");
           currentSize = fos.getChannel().position();
           streamsToFlush.add(fos);
        }
        // pre-allocate space so the append does not grow the file
        padFile(fos);
        byte[] buf = Util.marshallTxnEntry(hdr, txn);
        if (buf == null || buf.length == 0) {
            throw new IOException("Faulty serialization for header " +
                    "and txn");
        }
        // write checksum, then the serialized entry
        Checksum crc = makeChecksumAlgorithm();
        crc.update(buf, 0, buf.length);
        oa.writeLong(crc.getValue(), "txnEntryCRC");
        Util.writeTxnBytes(oa, buf);
        return true;
    }
    return false;
}
java
/** Pre-allocates space in the log file and records the resulting size. */
private void padFile(FileOutputStream out) throws IOException {
    this.currentSize = Util.padLogFile(out, currentSize, preAllocSize);
}
java
// Returns the log files needed to restore state from the given snapshot:
// the newest log file starting at or before snapshotZxid, plus every
// later log file.
public static File[] getLogFiles(File[] logDirList,long snapshotZxid) {
    List<File> files = Util.sortDataDir(logDirList, "log", true);
    long logZxid = 0;
    // Find the log file that starts before or at the same time as the
    // zxid of the snapshot
    for (File f : files) {
        long fzxid = Util.getZxidFromName(f.getName(), "log");
        if (fzxid > snapshotZxid) {
            continue;
        }
        // the files
        // are sorted with zxid's
        if (fzxid > logZxid) {
            logZxid = fzxid;
        }
    }
    List<File> v=new ArrayList<File>(5);
    // keep the chosen file and everything newer
    for (File f : files) {
        long fzxid = Util.getZxidFromName(f.getName(), "log");
        if (fzxid < logZxid) {
            continue;
        }
        v.add(f);
    }
    return v.toArray(new File[0]);
}
java
public long getLastLoggedZxid() { File[] files = getLogFiles(logDir.listFiles(), 0); long maxLog=files.length>0? Util.getZxidFromName(files[files.length-1].getName(),"log"):-1; // if a log file is more recent we must scan it to find // the highest zxid long zxid = maxLog; try { FileTxnLog txn = new FileTxnLog(logDir); TxnIterator itr = txn.read(maxLog); while (true) { if(!itr.next()) break; TxnHeader hdr = itr.getHeader(); zxid = hdr.getZxid(); } } catch (IOException e) { LOG.warn("Unexpected exception", e); } return zxid; }
java
// Flushes the current log stream and all pending output streams to disk
// (fsync'ing when forceSync is set), then closes all but the most recent
// stream, which stays open for further appends.
public synchronized void commit() throws IOException {
    if (logStream != null) {
        logStream.flush();
    }
    for (FileOutputStream log : streamsToFlush) {
        log.flush();
        if (forceSync) {
            log.getChannel().force(false);
        }
    }
    while (streamsToFlush.size() > 1) {
        streamsToFlush.removeFirst().close();
    }
}
java
public boolean truncate(long zxid) throws IOException { FileTxnIterator itr = new FileTxnIterator(this.logDir, zxid); PositionInputStream input = itr.inputStream; long pos = input.getPosition(); // now, truncate at the current position RandomAccessFile raf=new RandomAccessFile(itr.logFile,"rw"); raf.setLength(pos); raf.close(); while(itr.goToNextLog()) { if (!itr.logFile.delete()) { LOG.warn("Unable to truncate " + itr.logFile); } } return true; }
java
/**
 * Reads and deserializes the file header of the given transaction log
 * file. Failures while closing the stream are logged and ignored.
 */
private static FileHeader readHeader(File file) throws IOException {
    InputStream stream = null;
    try {
        stream = new BufferedInputStream(new FileInputStream(file));
        FileHeader header = new FileHeader();
        header.deserialize(BinaryInputArchive.getArchive(stream), "fileheader");
        return header;
    } finally {
        if (stream != null) {
            try {
                stream.close();
            } catch (IOException e) {
                LOG.warn("Ignoring exception during close", e);
            }
        }
    }
}
java
/**
 * Returns the database id recorded in the header of the first transaction
 * log file.
 *
 * @throws IOException if no valid header can be read
 */
public long getDbId() throws IOException {
    FileTxnIterator itr = new FileTxnIterator(logDir, 0);
    FileHeader fh;
    try {
        fh = readHeader(itr.logFile);
    } finally {
        // fix: the iterator leaked when readHeader threw
        itr.close();
    }
    if (fh == null) {
        throw new IOException("Unsupported Format.");
    }
    return fh.getDbid();
}
java
/**
 * Validates an HDFS endpoint template: it must contain the %t, %p and %g
 * conversions, and each conversion (plus %d when present) must land in
 * the path portion of the expanded URL.
 *
 * @throws IllegalArgumentException if the template is null, blank, or
 *         missing required conversions in the URL path
 */
public static void verifyForHdfsUse(String sb) throws IllegalArgumentException {
    Preconditions.checkArgument(
            sb != null && !sb.trim().isEmpty(),
            "null or empty hdfs endpoint"
            );
    int mask = conversionMaskFor(sb);
    boolean hasDateConversion = (mask & DATE) == DATE;
    Preconditions.checkArgument(
            (mask & HDFS_MASK) == HDFS_MASK,
            "hdfs endpoint \"" + sb
            + "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
            );
    // Expand the template with improbable sentinel values, then check that
    // each sentinel ended up inside the URL's path component.
    final String tn = "__IMPROBABLE_TABLE_NAME__";
    final int pn = Integer.MIN_VALUE;
    final long gn = Long.MIN_VALUE;
    final Date dt = new Date(0);
    final String fmtd = hasDateConversion ? new SimpleDateFormat(DATE_FORMAT).format(dt) : "";

    URI uri = URI.create(expand(sb, tn, pn, gn, dt));
    String path = uri.getPath();

    List<String> missing = new ArrayList<>();
    if (!path.contains(tn)) missing.add("%t");
    if (!path.contains(Integer.toString(pn))) missing.add("%p");
    // generations are rendered in base 36 (Character.MAX_RADIX)
    if (!path.contains(Long.toString(gn, Character.MAX_RADIX))) missing.add("%g");
    if (hasDateConversion && !path.contains(fmtd)) missing.add("%d");

    if (!missing.isEmpty()) {
        String notInPath = Joiner.on(", ").join(missing);
        // fix: "enpoint" -> "endpoint" in the user-facing error message
        throw new IllegalArgumentException(
                "hdfs endpoint \"" + sb + "\" does not contain conversion(s) "
                + notInPath + " in the path element of the URL");
    }
}
java
/**
 * Validates a batch-mode endpoint template: it must be non-blank and
 * contain the %t, %p and %g conversions.
 *
 * @throws IllegalArgumentException when validation fails
 */
public static void verifyForBatchUse(String sb) throws IllegalArgumentException {
    Preconditions.checkArgument(
            sb != null && !sb.trim().isEmpty(),
            "null or empty hdfs endpoint"
            );
    final int mask = conversionMaskFor(sb);
    final boolean hasAllRequired = (mask & HDFS_MASK) == HDFS_MASK;
    Preconditions.checkArgument(
            hasAllRequired,
            "batch mode endpoint \"" + sb
            + "\" must contain the (%t)able, the (%p)artition, and the (%g) generation conversions"
            );
}
java
/**
 * The system catalog agent never expects JSON messages; log a warning and
 * reply with an empty (null) result set.
 */
@Override
protected void handleJSONMessage(JSONObject obj) throws Exception {
    hostLog.warn("SystemCatalogAgent received a JSON message, which should be impossible.");
    VoltTable[] noResults = null;
    sendOpsResponse(noResults, obj);
}
java
/**
 * Wraps a map so the given constraint is checked on every entry that is
 * subsequently added.
 */
public static <K, V> Map<K, V> constrainedMap(
    Map<K, V> map, MapConstraint<? super K, ? super V> constraint) {
  return new ConstrainedMap<K, V>(map, constraint);
}
java
/**
 * Wraps a list multimap so the given constraint is checked on every entry
 * that is subsequently added.
 */
public static <K, V> ListMultimap<K, V> constrainedListMultimap(
    ListMultimap<K, V> multimap, MapConstraint<? super K, ? super V> constraint) {
  return new ConstrainedListMultimap<K, V>(multimap, constraint);
}
java
// Checks that this windowed aggregate's operator is supported and takes
// the right number of arguments: RANK/DENSE_RANK/ROW_NUMBER take none;
// COUNT/MIN/MAX/SUM are accepted as-is; anything else is rejected.
private void validateWindowedSyntax() {
    // Check that the aggregate is one of the supported ones, and
    // that the number of aggregate parameters is right.
    switch (opType) {
    case OpTypes.WINDOWED_RANK :
    case OpTypes.WINDOWED_DENSE_RANK :
    case OpTypes.WINDOWED_ROW_NUMBER :
        if (nodes.length != 0) {
            throw Error.error("Windowed Aggregate "
                              + OpTypes.aggregateName(opType)
                              + " expects no arguments.", "", 0);
        }
        break;
    case OpTypes.WINDOWED_COUNT :
    case OpTypes.WINDOWED_MIN :
    case OpTypes.WINDOWED_MAX :
    case OpTypes.WINDOWED_SUM :
        break;
    default :
        throw Error.error("Unsupported window function "
                          + OpTypes.aggregateName(opType), "", 0);
    }
}
java
// Executes the INSERT: builds the new-row navigator (from VALUES or a
// SELECT), applies the view's check condition per row where applicable,
// inserts each row, collects generated columns when requested, and fires
// AFTER INSERT triggers.
// @return an update-count result, chained with generated keys if any
Result getResult(Session session) {

    Table table = baseTable;
    Result resultOut = null;
    RowSetNavigator generatedNavigator = null;
    PersistentStore store = session.sessionData.getRowStore(baseTable);

    if (generatedIndexes != null) {
        resultOut = Result.newUpdateCountResult(generatedResultMetaData, 0);
        generatedNavigator = resultOut.getChainedResult().getNavigator();
    }

    RowSetNavigator newDataNavigator = queryExpression == null
                                       ? getInsertValuesNavigator(session)
                                       : getInsertSelectNavigator(session);
    Expression checkCondition = null;
    RangeIteratorBase checkIterator = null;

    // inserting through a view: fetch its check condition, if any
    if (targetTable != baseTable) {
        QuerySpecification select =
            ((TableDerived) targetTable).getQueryExpression().getMainSelect();

        checkCondition = select.checkQueryCondition;

        if (checkCondition != null) {
            checkIterator = select.rangeVariables[0].getIterator(session);
        }
    }

    while (newDataNavigator.hasNext()) {
        Object[] data = newDataNavigator.getNext();

        if (checkCondition != null) {
            checkIterator.currentData = data;

            boolean check = checkCondition.testCondition(session);

            if (!check) {
                // WITH CHECK OPTION violation
                throw Error.error(ErrorCode.X_44000);
            }
        }

        table.insertRow(session, store, data);

        if (generatedNavigator != null) {
            Object[] generatedValues = getGeneratedColumns(data);

            generatedNavigator.add(generatedValues);
        }
    }

    newDataNavigator.beforeFirst();
    table.fireAfterTriggers(session, Trigger.INSERT_AFTER, newDataNavigator);

    if (resultOut == null) {
        resultOut = Result.getUpdateCountResult(newDataNavigator.getSize());
    } else {
        resultOut.setUpdateCount(newDataNavigator.getSize());
    }

    return resultOut;
}
java
@Override public void resolveForTable(Table table) { assert(table != null); // It MAY be that for the case in which this function is called (expression indexes), the column's // table name is not specified (and not missed?). // It is possible to "correct" that here by cribbing it from the supplied table (base table for the index) // -- not bothering for now. Column column = table.getColumns().getExact(m_columnName); assert(column != null); m_tableName = table.getTypeName(); m_columnIndex = column.getIndex(); setTypeSizeAndInBytes(column); }
java
// Resolves this TVE's column index (and, for sub-query TVEs, its value
// type and size) from the given input schema.
// @return the resolved index, or a negative value when not found
public int setColumnIndexUsingSchema(NodeSchema inputSchema) {
    int index = inputSchema.getIndexOfTve(this);
    if (index < 0) {
        //* enable to debug*/ System.out.println("DEBUG: setColumnIndex miss: " + this);
        //* enable to debug*/ System.out.println("DEBUG: setColumnIndex candidates: " + inputSchema);
        return index;
    }
    setColumnIndex(index);
    if (getValueType() == null) {
        // In case of sub-queries the TVE may not have its
        // value type and size resolved yet. Try to resolve it now
        SchemaColumn inputColumn = inputSchema.getColumn(index);
        setTypeSizeAndInBytes(inputColumn);
    }
    return index;
}
java
/**
 * Returns the Java class name backing the given 1-based column, per the
 * JDBC type mapping.
 *
 * @throws SQLException if the column is out of bounds or has no mapping
 */
public String getColumnClassName(int column) throws SQLException {
    sourceResultSet.checkColumnBounds(column);
    VoltType columnType = sourceResultSet.table.getColumnType(column - 1);
    String className = columnType.getJdbcClass();
    if (className == null) {
        throw SQLError.get(SQLError.TRANSLATION_NOT_FOUND, columnType);
    }
    return className;
}
java
/**
 * Returns the precision of the given 1-based column, or 0 when the type
 * reports none.
 */
public int getPrecision(int column) throws SQLException {
    sourceResultSet.checkColumnBounds(column);
    VoltType columnType = sourceResultSet.table.getColumnType(column - 1);
    Integer precision = columnType.getTypePrecisionAndRadix()[0];
    return precision == null ? 0 : precision;
}
java
/**
 * Returns the maximum scale of the given 1-based column, or 0 when the
 * type reports none.
 */
public int getScale(int column) throws SQLException {
    sourceResultSet.checkColumnBounds(column);
    VoltType columnType = sourceResultSet.table.getColumnType(column - 1);
    Integer scale = columnType.getMaximumScale();
    return scale == null ? 0 : scale;
}
java
/** Reports whether values of the given 1-based column are case sensitive. */
public boolean isCaseSensitive(int column) throws SQLException {
    sourceResultSet.checkColumnBounds(column);
    return sourceResultSet.table.getColumnType(column - 1).isCaseSensitive();
}
java
public boolean isSigned(int column) throws SQLException { sourceResultSet.checkColumnBounds(column); VoltType type = sourceResultSet.table.getColumnType(column - 1); Boolean result = type.isUnsigned(); if (result == null) { // Null return value means 'not signed' as far as this interface goes return false; } return !result; }
java
// Converts the ORDER BY + RECEIVE pair below a window-function node into
// a single MergeReceive node (with the order-by inlined) when the
// distributed fragment already delivers rows in the window ordering via
// an index; otherwise returns the plan unchanged.
private AbstractPlanNode applyOptimization(WindowFunctionPlanNode plan) {
    assert(plan.getChildCount() == 1);
    assert(plan.getChild(0) != null);
    AbstractPlanNode child = plan.getChild(0);
    assert(child != null);

    // SP Plans which have an index which can provide
    // the window function ordering don't create
    // an order by node.
    if ( ! ( child instanceof OrderByPlanNode ) ) {
        return plan;
    }
    OrderByPlanNode onode = (OrderByPlanNode)child;
    child = onode.getChild(0);
    // The order by node needs a RECEIVE node child
    // for this optimization to work.
    if ( ! ( child instanceof ReceivePlanNode)) {
        return plan;
    }
    ReceivePlanNode receiveNode = (ReceivePlanNode)child;
    assert(receiveNode.getChildCount() == 1);
    child = receiveNode.getChild(0);
    // The Receive node needs a send node child.
    assert( child instanceof SendPlanNode );
    SendPlanNode sendNode = (SendPlanNode)child;
    child = sendNode.getChild(0);
    // If this window function does not use the
    // index then this optimization is not possible.
    // We've recorded a number of the window function
    // in the root of the subplan, which will be
    // the first child of the send node.
    //
    // Right now the only window function has number
    // 0, and we don't record that in the
    // WINDOWFUNCTION plan node. If there were
    // more than one window function we would need
    // to record a number in the plan node and
    // then check that child.getWindowFunctionUsesIndex()
    // returns the number in the plan node.
    if ( ! ( child instanceof IndexSortablePlanNode)) {
        return plan;
    }
    IndexSortablePlanNode indexed = (IndexSortablePlanNode)child;
    if (indexed.indexUse().getWindowFunctionUsesIndex() != 0) {
        return plan;
    }
    // Remove the Receive node and the Order by node
    // and replace them with a MergeReceive node. Leave
    // the order by node inline in the MergeReceive node,
    // since we need it to calculate the merge.
    plan.clearChildren();
    receiveNode.removeFromGraph();
    MergeReceivePlanNode mrnode = new MergeReceivePlanNode();
    mrnode.addInlinePlanNode(onode);
    mrnode.addAndLinkChild(sendNode);
    plan.addAndLinkChild(mrnode);
    return plan;
}
java
// Tries to downgrade a hash aggregation to a cheaper form by matching its
// GROUP BY expressions against the child ORDER BY expressions:
// - all GROUP BYs covered and no extra ORDER BYs -> serial aggregation
// - some covered and no extra ORDER BYs          -> partial aggregation
// - otherwise the node is returned unchanged.
AbstractPlanNode convertToSerialAggregation(AbstractPlanNode aggregateNode, OrderByPlanNode orderbyNode) {
    assert(aggregateNode instanceof HashAggregatePlanNode);
    HashAggregatePlanNode hashAggr = (HashAggregatePlanNode) aggregateNode;
    List<AbstractExpression> groupbys = new ArrayList<>(hashAggr.getGroupByExpressions());
    List<AbstractExpression> orderbys = new ArrayList<>(orderbyNode.getSortExpressions());
    Set<Integer> coveredGroupByColumns = new HashSet<>();

    Iterator<AbstractExpression> orderbyIt = orderbys.iterator();
    while (orderbyIt.hasNext()) {
        AbstractExpression orderby = orderbyIt.next();
        int idx = 0;
        // match each ORDER BY expression to the first uncovered GROUP BY
        for (AbstractExpression groupby : groupbys) {
            if (!coveredGroupByColumns.contains(idx)) {
                if (orderby.equals(groupby)) {
                    orderbyIt.remove();
                    coveredGroupByColumns.add(idx);
                    break;
                }
            }
            ++idx;
        }
    }
    if (orderbys.isEmpty() && groupbys.size() == coveredGroupByColumns.size()) {
        // All GROUP BY expressions are also ORDER BY - Serial aggregation
        return AggregatePlanNode.convertToSerialAggregatePlanNode(hashAggr);
    }
    if (orderbys.isEmpty() && !coveredGroupByColumns.isEmpty() ) {
        // Partial aggregation
        List<Integer> coveredGroupByColumnList = new ArrayList<>();
        coveredGroupByColumnList.addAll(coveredGroupByColumns);
        return AggregatePlanNode.convertToPartialAggregatePlanNode(hashAggr, coveredGroupByColumnList);
    }
    return aggregateNode;
}
java
/** Starts the periodic heartbeat task unless one is already running. */
private final void startHeartbeat() {

    boolean inactive = timerTask == null || HsqlTimer.isCancelled(timerTask);

    if (inactive) {
        timerTask = timer.schedulePeriodicallyAfter(0, HEARTBEAT_INTERVAL,
                new HeartbeatRunner(), true);
    }
}
java
/** Cancels the heartbeat task if one is currently running. */
private final void stopHeartbeat() {

    boolean active = timerTask != null && !HsqlTimer.isCancelled(timerTask);

    if (active) {
        HsqlTimer.cancel(timerTask);

        timerTask = null;
    }
}
java
/**
 * Reports whether a live lock appears to exist at the given path. Any
 * failure to create or verify the lock file is treated as "locked".
 */
public final static boolean isLocked(final String path) {

    try {
        LockFile lockFile = LockFile.newLockFile(path);

        lockFile.checkHeartbeat(false);

        return false;
    } catch (Exception e) {

        // deliberately best-effort: on any error assume the lock is held
        return true;
    }
}
java
// Parses a comma-separated host[:port] option into a sorted set of
// host:port strings, applying the default internal port where missing.
// A blank option yields the single default ":port" entry.
public static ImmutableSortedSet<String> hosts(String option) {
    checkArgument(option != null, "option is null");
    if (option.trim().isEmpty()) {
        return ImmutableSortedSet.of(
                HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
    }
    Splitter commaSplitter = Splitter.on(',').omitEmptyStrings().trimResults();
    ImmutableSortedSet.Builder<String> sbld = ImmutableSortedSet.naturalOrder();
    for (String h: commaSplitter.split(option)) {
        // each spec must parse as host[:port] before the default is applied
        checkArgument(isValidCoordinatorSpec(h), "%s is not a valid host spec", h);
        sbld.add(HostAndPort.fromString(h).withDefaultPort(Constants.DEFAULT_INTERNAL_PORT).toString());
    }
    return sbld.build();
}
java
/**
 * Builds a sorted set of ":port" specs for the given ports, or the single
 * default internal port spec when none are given.
 */
public static ImmutableSortedSet<String> hosts(int...ports) {
    if (ports.length == 0) {
        return ImmutableSortedSet.of(
                HostAndPort.fromParts("", Constants.DEFAULT_INTERNAL_PORT).toString());
    }
    ImmutableSortedSet.Builder<String> specs = ImmutableSortedSet.naturalOrder();
    for (int port : ports) {
        specs.add(HostAndPort.fromParts("", port).toString());
    }
    return specs.build();
}
java
// Entry point for running this stored procedure: records per-call
// statistics (parameter size, result size, abort/failure status) around
// the actual execution in coreCall().
public ClientResponseImpl call(Object... paramListIn) {
    m_perCallStats = m_statsCollector.beginProcedure();

    // if we're keeping track, calculate parameter size
    if (m_perCallStats != null) {
        StoredProcedureInvocation invoc =
                (m_txnState != null ? m_txnState.getInvocation() : null);
        ParameterSet params =
                (invoc != null ? invoc.getParams()
                               : ParameterSet.fromArrayNoCopy(paramListIn));
        m_perCallStats.setParameterSize(params.getSerializedSize());
    }

    ClientResponseImpl result = coreCall(paramListIn);

    // if we're keeping track, calculate result size
    if (m_perCallStats != null) {
        m_perCallStats.setResultSize(result.getResults());
    }
    m_statsCollector.endProcedure(
            result.getStatus() == ClientResponse.USER_ABORT,
            (result.getStatus() != ClientResponse.USER_ABORT)
                && (result.getStatus() != ClientResponse.SUCCESS),
            m_perCallStats);

    // allow the GC to collect per-call stats if this proc isn't called for a while
    m_perCallStats = null;

    return result;
}
java
// Verifies that this transaction was routed to a site that matches its
// partitioning: for SP txns, rehashes the partitioning parameter and
// compares against this site's partition; for MP txns, checks the txn
// did not land on an SP initiator.
// @return true when the routing is correct, false when the txn must be
//         restarted/re-routed
public boolean checkPartition(TransactionState txnState, TheHashinator hashinator) {
    if (m_isSinglePartition) {
        // can happen when a proc changes from multi-to-single after it's routed
        if (hashinator == null) {
            return false; // this will kick it back to CI for re-routing
        }
        if (m_site.getCorrespondingPartitionId() == MpInitiator.MP_INIT_PID) {
            // SP txn misrouted to MPI, possible to happen during catalog update
            throw new ExpectedProcedureException("Single-partition procedure routed to multi-partition initiator");
        }
        StoredProcedureInvocation invocation = txnState.getInvocation();
        VoltType parameterType;
        Object parameterAtIndex;

        // check if AdHoc_RO_SP or AdHoc_RW_SP
        if (m_procedure instanceof AdHocBase) {
            // ClientInterface should pre-validate this param is valid
            parameterAtIndex = invocation.getParameterAtIndex(0);
            parameterType = VoltType.get((Byte) invocation.getParameterAtIndex(1));
            if (parameterAtIndex == null && m_isReadOnly) {
                assert (m_procedure instanceof AdHoc_RO_SP);
                // Replicated table reads can run on any partition, skip check
                return true;
            }
        } else {
            parameterType = m_partitionColumnType;
            parameterAtIndex = invocation.getParameterAtIndex(m_partitionColumn);
        }

        // Note that @LoadSinglepartitionTable has problems if the parititoning param
        // uses integers as bytes and isn't padded to 8b or using the right byte order.
        // Since this is not exposed to users, we're ok for now. The right fix is to probably
        // accept the right partitioning type from the user, then rewrite the params internally
        // before we initiate the proc (like adhocs).
        try {
            int partition = hashinator.getHashedPartitionForParameter(parameterType, parameterAtIndex);
            if (partition == m_site.getCorrespondingPartitionId()) {
                return true;
            } else {
                // Wrong partition, should restart the txn
                if (HOST_TRACE_ENABLED) {
                    log.trace("Txn " + txnState.getInvocation().getProcName() +
                            " will be restarted");
                }
            }
        } catch (Exception e) {
            log.warn("Unable to check partitioning of transaction " + txnState.m_spHandle, e);
        }
        return false;
    } else {
        if (!m_catProc.getEverysite() &&
                m_site.getCorrespondingPartitionId() != MpInitiator.MP_INIT_PID) {
            // log detailed diagnostics before throwing: this is only expected
            // during a schema update
            log.warn("Detected MP transaction misrouted to SPI. This can happen during a schema update. " +
                    "Otherwise, it is unexpected behavior. " +
                    "Please report the following information to support@voltdb.com");
            log.warn("procedure name: " + m_catProc.getTypeName() +
                    ", site partition id: " + m_site.getCorrespondingPartitionId() +
                    ", site HSId: " + m_site.getCorrespondingHostId() + ":" + m_site.getCorrespondingSiteId() +
                    ", txnState initiatorHSId: " + CoreUtils.hsIdToString(txnState.initiatorHSId));
            if (txnState.getNotice() instanceof Iv2InitiateTaskMessage) {
                Iv2InitiateTaskMessage initiateTaskMessage = (Iv2InitiateTaskMessage) txnState.getNotice();
                log.warn("Iv2InitiateTaskMessage: sourceHSId: " +
                        CoreUtils.hsIdToString(initiateTaskMessage.m_sourceHSId) +
                        ", dump: " + initiateTaskMessage);
            }
            // MP txn misrouted to SPI, possible to happen during catalog update
            throw new ExpectedProcedureException("Multi-partition procedure routed to single-partition initiator");
        }
        // For n-partition transactions, we need to rehash the partitioning values and check
        // if they still hash to the assigned partitions.
        //
        // Note that when n-partition transaction runs, it's run on the MPI site, so calling
        // m_site.getCorrespondingPartitionId() will return the MPI's partition ID. We need
        // another way of getting what partitions were assigned to this transaction.
        return true;
    }
}
java
public static boolean isProcedureStackTraceElement(String procedureName, StackTraceElement stel) { int lastPeriodPos = stel.getClassName().lastIndexOf('.'); if (lastPeriodPos == -1) { lastPeriodPos = 0; } else { ++lastPeriodPos; } // Account for inner classes too. Inner classes names comprise of the parent // class path followed by a dollar sign String simpleName = stel.getClassName().substring(lastPeriodPos); return simpleName.equals(procedureName) || (simpleName.startsWith(procedureName) && simpleName.charAt(procedureName.length()) == '$'); }
java
/**
 * REST handler: replaces the running deployment with the JSON document posted
 * in the "deployment" request parameter, via @UpdateApplicationCatalog.
 * Responds 400 on missing/unparsable input, 500 on unexpected failure.
 */
public void handleUpdateDeployment(String jsonp, HttpServletRequest request,
                                   HttpServletResponse response, AuthenticationResult ar)
        throws IOException, ServletException {
    String deployment = request.getParameter("deployment");

    if (deployment == null || deployment.length() == 0) {
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to get deployment information."));
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        return;
    }
    try {
        DeploymentType newDeployment = m_mapper.readValue(deployment, DeploymentType.class);
        if (newDeployment == null) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to parse deployment information."));
            return;
        }

        DeploymentType currentDeployment = this.getDeployment();
        if (currentDeployment.getUsers() != null) {
            // Keep the currently configured users: this endpoint does not
            // change credentials, so the posted user list is ignored.
            newDeployment.setUsers(currentDeployment.getUsers());
        }
        // reset the host count so that it wont fail the deployment checks
        newDeployment.getCluster().setHostcount(currentDeployment.getCluster().getHostcount());

        String dep = CatalogUtil.getDeployment(newDeployment);
        if (dep == null || dep.trim().length() <= 0) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
            return;
        }
        // First param (catalog jar bytes) is null: only the deployment changes.
        Object[] params = new Object[]{null, dep};
        SyncCallback cb = new SyncCallback();
        httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
        cb.waitForResponse();
        ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
        if (r.getStatus() == ClientResponse.SUCCESS) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "Deployment Updated."));
        } else {
            response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
        }
    } catch (JsonParseException e) {
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Unparsable JSON"));
    } catch (Exception ex) {
        m_log.error("Failed to update deployment from API", ex);
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
    }
}
java
/**
 * REST handler: removes the user named in the URL path ("/.../users/&lt;name&gt;")
 * from the deployment and applies the change via @UpdateApplicationCatalog.
 * Responds 404 if the user is not found, 400 on failure.
 */
public void handleRemoveUser(String jsonp, String target, HttpServletRequest request,
                             HttpServletResponse response, AuthenticationResult ar)
        throws IOException, ServletException {
    try {
        DeploymentType newDeployment = CatalogUtil.getDeployment(new ByteArrayInputStream(getDeploymentBytes()));
        User user = null;
        String[] splitTarget = target.split("/");
        if (splitTarget.length == 3) {
            user = findUser(splitTarget[2], newDeployment);
        }
        if (user == null) {
            response.setStatus(HttpServletResponse.SC_NOT_FOUND);
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
            return;
        }
        if (newDeployment.getUsers().getUser().size() == 1) {
            // Removing the last user: drop the whole <users> element.
            newDeployment.setUsers(null);
        } else {
            newDeployment.getUsers().getUser().remove(user);
        }

        String dep = CatalogUtil.getDeployment(newDeployment);
        if (dep == null || dep.trim().length() <= 0) {
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Failed to build deployment information."));
            return;
        }
        Object[] params = new Object[]{null, dep};
        //Call sync as nothing else can happen when this is going on.
        SyncCallback cb = new SyncCallback();
        httpClientInterface.callProcedure(request.getRemoteHost(), ar, BatchTimeoutOverrideType.NO_TIMEOUT, cb, "@UpdateApplicationCatalog", params);
        cb.waitForResponse();
        ClientResponseImpl r = ClientResponseImpl.class.cast(cb.getResponse());
        // NOTE(review): SC_NO_CONTENT (204) is set yet a body is still printed
        // below; many clients discard bodies on 204 — confirm intended status.
        response.setStatus(HttpServletResponse.SC_NO_CONTENT);
        if (r.getStatus() == ClientResponse.SUCCESS) {
            response.getWriter().print(buildClientResponse(jsonp, ClientResponse.SUCCESS, "User Removed."));
        } else {
            response.getWriter().print(HTTPClientInterface.asJsonp(jsonp, r.toJSONString()));
        }
    } catch (Exception ex) {
        // Fixed log message: this handler removes a user, not a role.
        m_log.error("Failed to remove user from API", ex);
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, Throwables.getStackTraceAsString(ex)));
    }
}
java
/**
 * REST handler for GET on the users resource. Without a user name in the path
 * it writes the full user list as a JSON array ("[]" when no users exist);
 * with "/.../users/&lt;name&gt;" it writes that single user or responds 404.
 * Output is wrapped in the jsonp callback when one is supplied.
 */
public void handleGetUsers(String jsonp, String target, HttpServletRequest request, HttpServletResponse response)
        throws IOException, ServletException {
    ObjectMapper mapper = new ObjectMapper();
    User user = null;
    String[] splitTarget = target.split("/");
    if (splitTarget.length < 3 || splitTarget[2].isEmpty()) {
        // No specific user requested: list everybody.
        if (jsonp != null) {
            response.getWriter().write(jsonp + "(");
        }
        if (getDeployment().getUsers() != null) {
            List<IdUser> id = new ArrayList<>();
            for (UsersType.User u : getDeployment().getUsers().getUser()) {
                id.add(new IdUser(u, getHostHeader()));
            }
            mapper.writeValue(response.getWriter(), id);
        } else {
            response.getWriter().write("[]");
        }
        if (jsonp != null) {
            response.getWriter().write(")");
        }
        return;
    }
    user = findUser(splitTarget[2], getDeployment());
    if (user == null) {
        response.setStatus(HttpServletResponse.SC_NOT_FOUND);
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "User not found"));
        return;
    } else {
        if (jsonp != null) {
            response.getWriter().write(jsonp + "(");
        }
        mapper.writeValue(response.getWriter(), new IdUser(user, getHostHeader()));
        if (jsonp != null) {
            response.getWriter().write(")");
        }
    }
}
java
/**
 * REST handler: writes the set of supported export connector type names
 * (upper-cased) as {"types": [...]}, optionally wrapped in a jsonp callback.
 */
public void handleGetExportTypes(String jsonp, HttpServletResponse response)
        throws IOException, ServletException {
    if (jsonp != null) {
        response.getWriter().write(jsonp + "(");
    }
    JSONObject exportTypes = new JSONObject();
    HashSet<String> exportList = new HashSet<>();
    for (ServerExportEnum type : ServerExportEnum.values()) {
        exportList.add(type.value().toUpperCase());
    }
    try {
        exportTypes.put("types", exportList);
    } catch (JSONException e) {
        m_log.error("Failed to generate exportTypes JSON: ", e);
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        // NOTE(review): on this error path the opening jsonp "(" may already
        // have been written, leaving the callback unbalanced — confirm intended.
        response.getWriter().print(buildClientResponse(jsonp, ClientResponse.UNEXPECTED_FAILURE, "Type list failed to build"));
        return;
    }
    response.getWriter().write(exportTypes.toString());
    if (jsonp != null) {
        response.getWriter().write(")");
    }
}
java
/**
 * Creates a persistent ZooKeeper node at the given path, treating
 * "node already exists" as success. Any other failure is fatal to the cluster.
 */
void createZKDirectory(String path) {
    try {
        m_zk.create(path, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } catch (KeeperException ke) {
        // An existing node is fine; anything else brings the cluster down.
        if (ke.code() != Code.NODEEXISTS) {
            VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + ke.getMessage(), false, ke);
        }
    } catch (Exception e) {
        VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(), false, e);
    }
}
java
/**
 * Enters the restore phase, generates restore plans, and if a snapshot was
 * chosen returns the host id plus the path of that snapshot's catalog jar.
 *
 * @return (hostId, catalog jar path), or null when there is nothing to restore
 */
public Pair<Integer, String> findRestoreCatalog() {
    enterRestore();

    try {
        m_snapshotToRestore = generatePlans();
    } catch (Exception e) {
        // Plan generation failure is fatal for the whole cluster.
        VoltDB.crashGlobalVoltDB(e.getMessage(), true, e);
    }

    if (m_snapshotToRestore == null) {
        return null;
    }
    final File catalogJar = new File(m_snapshotToRestore.path, m_snapshotToRestore.nonce + ".jar");
    return Pair.of(m_snapshotToRestore.hostId, catalogJar.getPath());
}
java
/**
 * Sets up the ZooKeeper coordination nodes used during snapshot restore:
 * the restore directory, both barrier directories, and this host's ephemeral
 * sequential counter node under barrier2 (deleted again in exitRestore()).
 */
void enterRestore() {
    createZKDirectory(VoltZK.restore);
    createZKDirectory(VoltZK.restore_barrier);
    createZKDirectory(VoltZK.restore_barrier2);
    try {
        // Ephemeral so the barrier node disappears automatically if this host dies.
        m_generatedRestoreBarrier2 = m_zk.create(VoltZK.restore_barrier2 + "/counter", null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
    } catch (Exception e) {
        VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(), false, e);
    }
}
java
/**
 * Leaves the restore phase: deletes this host's barrier2 node, notifies the
 * restore-completion callback, then polls until every other host has removed
 * its barrier node before cleaning up the shared snapshot-id node.
 */
void exitRestore() {
    try {
        m_zk.delete(m_generatedRestoreBarrier2, -1); // -1 = ignore node version
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB("Unable to delete zk node " + m_generatedRestoreBarrier2, false, e);
    }

    if (m_callback != null) {
        m_callback.onSnapshotRestoreCompletion();
    }

    LOG.debug("Waiting for all hosts to complete restore");
    List<String> children = null;
    while (true) {
        try {
            children = m_zk.getChildren(VoltZK.restore_barrier2, false);
        } catch (KeeperException e2) {
            VoltDB.crashGlobalVoltDB(e2.getMessage(), false, e2);
        } catch (InterruptedException e2) {
            // Retry the read; do not abandon the barrier wait on interrupt.
            continue;
        }

        if (children.size() > 0) {
            // Other hosts still hold barrier nodes; back off and re-check.
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {}
        } else {
            break;
        }
    }

    // Clean up the ZK snapshot ID node so that we're good for next time.
    try {
        m_zk.delete(VoltZK.restore_snapshot_id, -1);
    } catch (Exception ignore) {}
}
java
static SnapshotInfo consolidateSnapshotInfos(Collection<SnapshotInfo> lastSnapshot) { SnapshotInfo chosen = null; if (lastSnapshot != null) { Iterator<SnapshotInfo> i = lastSnapshot.iterator(); while (i.hasNext()) { SnapshotInfo next = i.next(); if (chosen == null) { chosen = next; } else if (next.hostId < chosen.hostId) { next.partitionToTxnId.putAll(chosen.partitionToTxnId); chosen = next; } else { // create a full mapping of txn ids to partition ids. chosen.partitionToTxnId.putAll(next.partitionToTxnId); } } } return chosen; }
java
/**
 * Publishes the chosen restore snapshot (or "{}" when there is none) on the
 * ephemeral restore_snapshot_id ZK node so other hosts can pick it up.
 */
private void sendSnapshotTxnId(SnapshotInfo toRestore) {
    final boolean haveSnapshot = (toRestore != null);
    final long txnId = haveSnapshot ? toRestore.txnId : 0;
    final String jsonData = haveSnapshot ? toRestore.toJSONObject().toString() : "{}";

    LOG.debug("Sending snapshot ID " + txnId + " for restore to other nodes");
    try {
        m_zk.create(VoltZK.restore_snapshot_id, jsonData.getBytes(Constants.UTF8ENCODING),
                    Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    } catch (Exception e) {
        VoltDB.crashGlobalVoltDB("Failed to create Zookeeper node: " + e.getMessage(), false, e);
    }
}
java
/**
 * Publishes this host's restore information (max command-log txn id and local
 * snapshot fragments) on an ephemeral per-host ZK node under VoltZK.restore.
 */
private void sendLocalRestoreInformation(Long max, Set<SnapshotInfo> snapshots) {
    final String payload = serializeRestoreInformation(max, snapshots);
    final String zkNode = VoltZK.restore + "/" + m_hostId;
    try {
        m_zk.create(zkNode, payload.getBytes(StandardCharsets.UTF_8),
                    Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    } catch (Exception e) {
        throw new RuntimeException("Failed to create Zookeeper node: " + e.getMessage(), e);
    }
}
java
private Long deserializeRestoreInformation(List<String> children, Map<String, Set<SnapshotInfo>> snapshotFragments) throws Exception { try { int recover = m_action.ordinal(); Long clStartTxnId = null; for (String node : children) { //This might be created before we are done fetching the restore info if (node.equals("snapshot_id")) { continue; } byte[] data = null; data = m_zk.getData(VoltZK.restore + "/" + node, false, null); String jsonData = new String(data, "UTF8"); JSONObject json = new JSONObject(jsonData); long maxTxnId = json.optLong("max", Long.MIN_VALUE); if (maxTxnId != Long.MIN_VALUE) { if (clStartTxnId == null || maxTxnId > clStartTxnId) { clStartTxnId = maxTxnId; } } int remoteRecover = json.getInt("action"); if (remoteRecover != recover) { String msg = "Database actions are not consistent. Remote node action is not 'recover'. " + "Please enter the same database action on the command-line."; VoltDB.crashLocalVoltDB(msg, false, null); } JSONArray snapInfos = json.getJSONArray("snapInfos"); int snapInfoCnt = snapInfos.length(); for (int i=0; i < snapInfoCnt; i++) { JSONObject jsonInfo = snapInfos.getJSONObject(i); SnapshotInfo info = new SnapshotInfo(jsonInfo); Set<SnapshotInfo> fragments = snapshotFragments.get(info.nonce); if (fragments == null) { fragments = new HashSet<SnapshotInfo>(); snapshotFragments.put(info.nonce, fragments); } fragments.add(info); } } return clStartTxnId; } catch (JSONException je) { VoltDB.crashLocalVoltDB("Error exchanging snapshot information", true, je); } throw new RuntimeException("impossible"); }
java
/**
 * Advances the restore state machine: RESTORE -> REPLAY -> TRUNCATE.
 * RESTORE: fetch the agreed snapshot txn id, tear down the restore barriers,
 * register for snapshot-completion events and start command-log replay.
 * REPLAY: move to TRUNCATE.
 * TRUNCATE: stop listening, report replay completion, and on the leader resume
 * any pending elastic operation.
 */
private void changeState() {
    if (m_state == State.RESTORE) {
        fetchSnapshotTxnId();
        exitRestore();
        m_state = State.REPLAY;

        /*
         * Add the interest here so that we can use the barriers in replay
         * agent to synchronize.
         */
        m_snapshotMonitor.addInterest(this);
        m_replayAgent.replay();
    } else if (m_state == State.REPLAY) {
        m_state = State.TRUNCATE;
    } else if (m_state == State.TRUNCATE) {
        m_snapshotMonitor.removeInterest(this);
        if (m_callback != null) {
            m_callback.onReplayCompletion(m_truncationSnapshot, m_truncationSnapshotPerPartition);
        }
        // Call balance partitions after enabling transactions on the node to shorten the recovery time
        if (m_isLeader) {
            m_replayAgent.resumeElasticOperationIfNecessary();
        }
    }
}
java
/**
 * Scans the configured snapshot directories and returns all snapshots found,
 * keyed by name. The command-log snapshot path is only consulted on
 * enterprise builds; either path may be unset.
 */
private Map<String, Snapshot> getSnapshots() {
    /*
     * Use the individual snapshot directories instead of voltroot, because
     * they can be set individually
     */
    Map<String, SnapshotPathType> paths = new HashMap<String, SnapshotPathType>();
    if (VoltDB.instance().getConfig().m_isEnterprise) {
        if (m_clSnapshotPath != null) {
            paths.put(m_clSnapshotPath, SnapshotPathType.SNAP_CL);
        }
    }
    if (m_snapshotPath != null) {
        paths.put(m_snapshotPath, SnapshotPathType.SNAP_AUTO);
    }
    HashMap<String, Snapshot> snapshots = new HashMap<String, Snapshot>();
    FileFilter filter = new SnapshotUtil.SnapshotFilter();
    for (String path : paths.keySet()) {
        SnapshotUtil.retrieveSnapshotFiles(new File(path), snapshots, filter, false, paths.get(path), LOG);
    }
    return snapshots;
}
java
/**
 * Snapshot-completion callback used during command-log truncation. A failed or
 * non-truncation snapshot is fatal; on success the truncation txn ids are
 * recorded, the log segments are returned, and the state machine advances.
 *
 * @return an already-counted-down latch — the caller never has to wait
 */
@Override
public CountDownLatch snapshotCompleted(SnapshotCompletionEvent event) {
    if (!event.truncationSnapshot || !event.didSucceed) {
        VoltDB.crashGlobalVoltDB("Failed to truncate command logs by snapshot", false, null);
    } else {
        m_truncationSnapshot = event.multipartTxnId;
        m_truncationSnapshotPerPartition = event.partitionTxnIds;
        m_replayAgent.returnAllSegments();
        changeState();
    }
    return new CountDownLatch(0);
}
java
/**
 * Signals the network thread to stop and waits for it to exit.
 * Safe to call before the thread was ever started.
 */
void shutdown() throws InterruptedException {
    m_shouldStop = true;
    if (m_thread == null) {
        return;
    }
    // Kick the selector out of select() so the thread notices the stop flag.
    m_selector.wakeup();
    m_thread.join();
}
java
/**
 * Registers a socket channel with this network. The channel is switched to
 * non-blocking mode with keep-alive on the calling thread; VoltPort creation
 * and the actual selector registration are queued as a task for the network
 * thread, and this call blocks until that task completes.
 *
 * @param dns reverse-DNS policy; anything but NONE triggers hostname resolution
 * @return the registered Connection (a VoltPort)
 * @throws IOException if the registration task fails for any reason
 */
Connection registerChannel(
        final SocketChannel channel,
        final InputHandler handler,
        final int interestOps,
        final ReverseDNSPolicy dns,
        final CipherExecutor cipherService,
        final SSLEngine sslEngine) throws IOException {
    synchronized(channel.blockingLock()) {
        channel.configureBlocking (false);
        channel.socket().setKeepAlive(true);
    }

    Callable<Connection> registerTask = new Callable<Connection>() {
        @Override
        public Connection call() throws Exception {
            final VoltPort port = VoltPortFactory.createVoltPort(
                    channel,
                    VoltNetwork.this,
                    handler,
                    (InetSocketAddress)channel.socket().getRemoteSocketAddress(),
                    m_pool,
                    cipherService,
                    sslEngine);
            port.registering();

            /*
             * This means we are used by a client. No need to wait then, trigger
             * the reverse DNS lookup now.
             */
            if (dns != ReverseDNSPolicy.NONE) {
                port.resolveHostname(dns == ReverseDNSPolicy.SYNCHRONOUS);
            }

            try {
                // Register with a null attachment first; the attachment is set
                // only after setKey() so callbacks never see a half-built port.
                SelectionKey key = channel.register (m_selector, interestOps, null);
                port.setKey (key);
                port.registered();

                //Fix a bug witnessed on the mini where the registration lock and the selector wakeup contained
                //within was not enough to prevent the selector from returning the port after it was registered,
                //but before setKey was called. Suspect a bug in the selector.wakeup() or register() implementation
                //on the mac.
                //The null check in invokeCallbacks will catch the null attachment, continue, and do the work
                //next time through the selection loop
                key.attach(port);

                return port;
            } finally {
                // Track the port even if registration threw, so bookkeeping stays consistent.
                m_ports.add(port);
                m_numPorts.incrementAndGet();
            }
        }
    };

    FutureTask<Connection> ft = new FutureTask<Connection>(registerTask);
    m_tasks.offer(ft);
    m_selector.wakeup();

    try {
        return ft.get();
    } catch (Exception e) {
        throw new IOException(e);
    }
}
java
/**
 * Queues an unregister task for the given connection on the network thread.
 *
 * @return a future that completes once the connection has been unregistered
 */
Future<?> unregisterChannel (Connection c) {
    final FutureTask<Object> task = new FutureTask<Object>(getUnregisterRunnable(c), null);
    m_tasks.offer(task);
    // Wake the selector so the queued task is picked up promptly.
    m_selector.wakeup();
    return task;
}
java
/**
 * Queues work for the given port on the network thread: either an immediate
 * callback invocation (runFirst) or an interest-ops update.
 */
void addToChangeList(final VoltPort port, final boolean runFirst) {
    final Runnable task;
    if (runFirst) {
        task = new Runnable() {
            @Override
            public void run() {
                callPort(port);
            }
        };
    } else {
        task = new Runnable() {
            @Override
            public void run() {
                installInterests(port);
            }
        };
    }
    m_tasks.offer(task);
    m_selector.wakeup();
}
java
/**
 * Invokes the callback for every selected key, starting at a random offset so
 * no port is systematically favored, then wrapping around to the beginning.
 * Keys with a null attachment are skipped: registerChannel() registers with a
 * null attachment and attaches the port afterwards, so a key can legitimately
 * be selected before its port is attached.
 *
 * Fixes: (1) the null-attachment skip previously did "continue" without
 * advancing itInx even though the iterator had advanced, which desynchronized
 * the count from the iterator (NoSuchElementException / skipped ports);
 * (2) r.nextInt(0) would throw when no keys were selected.
 */
protected void invokeCallbacks(ThreadLocalRandom r) {
    final Set<SelectionKey> selectedKeys = m_selector.selectedKeys();
    final int keyCount = selectedKeys.size();
    if (keyCount == 0) {
        return; // nothing selected; also avoids r.nextInt(0) throwing
    }
    final int startInx = r.nextInt(keyCount);
    int itInx = 0;
    Iterator<SelectionKey> it = selectedKeys.iterator();

    // Skip forward to the random starting position.
    while (itInx < startInx) {
        it.next();
        itInx++;
    }

    // First pass: keys [startInx, keyCount).
    while (itInx < keyCount) {
        final Object obj = it.next().attachment();
        itInx++; // keep the count in step with the iterator, even for null attachments
        if (obj == null) {
            continue;
        }
        callPort((VoltPort) obj);
    }

    // Second pass (wrap-around): keys [0, startInx).
    itInx = 0;
    it = selectedKeys.iterator();
    while (itInx < startInx) {
        final Object obj = it.next().attachment();
        itInx++;
        if (obj == null) {
            continue;
        }
        callPort((VoltPort) obj);
    }
    selectedKeys.clear();
}
java
/**
 * Joins the given components into a single ZooKeeper path.
 * Requires at least one component.
 */
public static String path(String... components) {
    String joined = components[0];
    for (int idx = 1; idx < components.length; idx++) {
        joined = ZKUtil.joinZKPath(joined, components[idx]);
    }
    return joined;
}
java
/**
 * Builds the file name for a PBD segment from the deque nonce plus the current
 * and previous segment ids (non-quarantined form — the final flag is false).
 */
private String getSegmentFileName(long currentId, long previousId) {
    return PbdSegmentName.createName(m_nonce, currentId, previousId, false);
}
java
/**
 * Parses the previous-segment id out of a PBD segment file's name.
 *
 * @throws IllegalStateException if the file name is not a valid segment name
 */
private long getPreviousSegmentId(File file) {
    PbdSegmentName segmentName = PbdSegmentName.parseFile(m_usageSpecificLog, file);
    if (segmentName.m_result != PbdSegmentName.Result.OK) {
        throw new IllegalStateException("Invalid file name: " + file.getName());
    }
    return segmentName.m_prevId;
}
java
private void deleteStalePbdFile(File file) throws IOException { try { PBDSegment.setFinal(file, false); if (m_usageSpecificLog.isDebugEnabled()) { m_usageSpecificLog.debug("Segment " + file.getName() + " (final: " + PBDSegment.isFinal(file) + "), will be closed and deleted during init"); } file.delete(); } catch (Exception e) { if (e instanceof NoSuchFileException) { // Concurrent delete, noop } else { throw e; } } }
java
/**
 * Recovers one PBD segment found on disk during init. Quarantined segments are
 * wrapped as-is; normal segments are opened, empty ones deleted, unreadable
 * ones quarantined, and valid ones added to m_segments.
 */
private void recoverSegment(long segmentIndex, long segmentId, PbdSegmentName segmentName) throws IOException {
    PBDSegment segment;
    if (segmentName.m_quarantined) {
        // Already quarantined: no validation, just track it.
        segment = new PbdQuarantinedSegment(segmentName.m_file, segmentIndex, segmentId);
    } else {
        segment = newSegment(segmentIndex, segmentId, segmentName.m_file);
        try {
            if (segment.getNumEntries() == 0) {
                // Empty segments carry no data; drop them instead of recovering.
                if (m_usageSpecificLog.isDebugEnabled()) {
                    m_usageSpecificLog.debug("Found Empty Segment with entries: " + segment.getNumEntries() + " For: " + segment.file().getName());
                    m_usageSpecificLog.debug("Segment " + segment.file() + " (final: " + segment.isFinal() + "), will be closed and deleted during init");
                }
                segment.closeAndDelete();
                return;
            }

            // Any recovered segment that is not final should be checked
            // for internal consistency.
            if (!segment.isFinal()) {
                m_usageSpecificLog.warn("Segment " + segment.file() + " (final: " + segment.isFinal() + "), has been recovered but is not in a final state");
            } else if (m_usageSpecificLog.isDebugEnabled()) {
                m_usageSpecificLog.debug("Segment " + segment.file() + " (final: " + segment.isFinal() + "), has been recovered");
            }
            // NOTE(review): this put is repeated after the if/else below for the
            // non-quarantined success path (same key and value, so harmless but
            // redundant) — confirm whether one of the two can be dropped.
            m_segments.put(segment.segmentIndex(), segment);
        } catch (IOException e) {
            // Could not read the entry count: quarantine instead of failing init.
            m_usageSpecificLog.warn("Failed to retrieve entry count from segment " + segment.file() + ". Quarantining segment", e);
            quarantineSegment(segment);
            return;
        } finally {
            segment.close();
        }
    }
    m_segments.put(segment.segmentIndex(), segment);
}
java
/**
 * Counts the tracked segments that are currently open (not closed).
 */
int numOpenSegments() {
    return (int) m_segments.values().stream()
            .filter(segment -> !segment.isClosed())
            .count();
}
java
/**
 * Sets the expire-after-write duration for the cache being built, converted to
 * nanoseconds. May only be set once.
 *
 * @return this builder, for chaining
 * @throws IllegalStateException if expireAfterWrite was already set
 * @throws IllegalArgumentException if duration is negative
 */
public CacheBuilder<K, V> expireAfterWrite(long duration, TimeUnit unit) {
    checkState(
        expireAfterWriteNanos == UNSET_INT,
        "expireAfterWrite was already set to %s ns",
        expireAfterWriteNanos);
    checkArgument(duration >= 0, "duration cannot be negative: %s %s", duration, unit);
    this.expireAfterWriteNanos = unit.toNanos(duration);
    return this;
}
java
/**
 * Revokes a directly-granted role from this grantee.
 *
 * @throws HsqlException (X_0P503) if the role was not directly granted
 */
public void revoke(Grantee role) {
    if (hasRoleDirect(role)) {
        roles.remove(role);
        return;
    }
    throw Error.error(ErrorCode.X_0P503, role.getNameString());
}
java
/**
 * Adds this grantee and, recursively, every role reachable from it to the
 * given set. The set doubles as the visited-set, so role cycles terminate.
 *
 * @return the same set, for chaining
 */
private OrderedHashSet addGranteeAndRoles(OrderedHashSet set) {
    set.add(this);

    for (int idx = 0; idx < roles.size(); idx++) {
        Grantee reachableRole = (Grantee) roles.get(idx);
        if (!set.contains(reachableRole)) {
            reachableRole.addGranteeAndRoles(set);
        }
    }
    return set;
}
java
/**
 * Puts each directly-granted role into the map, keyed by the role's grantee
 * name and mapped to that role's own role list.
 */
public void addAllRoles(HashMap map) {
    for (int idx = 0; idx < roles.size(); idx++) {
        final Grantee directRole = (Grantee) roles.get(idx);
        map.put(directRole.granteeName.name, directRole.roles);
    }
}
java
/**
 * Drops every role and right this grantee holds and clears admin status.
 */
void clearPrivileges() {
    isAdmin = false;
    roles.clear();
    directRightsMap.clear();
    grantedRightsMap.clear();
    fullRightsMap.clear();
}
java
/**
 * Recursively determines whether this grantee reaches the given role through
 * its role graph. Every grantee on a path to the role has its cached rights
 * rebuilt via updateAllRights().
 *
 * @return true if this grantee is the role itself or holds it transitively
 */
boolean updateNestedRoles(Grantee role) {
    boolean hasNested = false;

    // The target role itself does not descend into its own roles.
    if (role != this) {
        for (int i = 0; i < roles.size(); i++) {
            Grantee currentRole = (Grantee) roles.get(i);

            hasNested |= currentRole.updateNestedRoles(role);
        }
    }

    // Only grantees that actually reach the role need their rights recomputed.
    if (hasNested) {
        updateAllRights();
    }

    return hasNested || role == this;
}
java
/**
 * Merges the rights in the given (object -> Right) map into this grantee's
 * cached fullRightsMap, accumulating both the plain rights and any grantable
 * (WITH GRANT OPTION) rights per object.
 */
void addToFullRights(HashMap map) {
    Iterator it = map.keySet().iterator();

    while (it.hasNext()) {
        Object key = it.next();
        Right add = (Right) map.get(key);
        Right existing = (Right) fullRightsMap.get(key);

        if (existing == null) {
            // First rights seen for this object: store a private copy so later
            // merges cannot mutate the caller's Right instance.
            existing = add.duplicate();
            fullRightsMap.put(key, existing);
        } else {
            existing.add(add);
        }

        if (add.grantableRights == null) {
            continue;
        }

        // Same duplicate-then-merge pattern for the grantable portion.
        if (existing.grantableRights == null) {
            existing.grantableRights = add.grantableRights.duplicate();
        } else {
            existing.grantableRights.add(add.grantableRights);
        }
    }
}
java
public void toLeftJoin() { assert((m_leftNode != null && m_rightNode != null) || (m_leftNode == null && m_rightNode == null)); if (m_leftNode == null && m_rightNode == null) { // End of recursion return; } // recursive calls if (m_leftNode instanceof BranchNode) { ((BranchNode)m_leftNode).toLeftJoin(); } if (m_rightNode instanceof BranchNode) { ((BranchNode)m_rightNode).toLeftJoin(); } // Swap own children if (m_joinType == JoinType.RIGHT) { JoinNode node = m_rightNode; m_rightNode = m_leftNode; m_leftNode = node; m_joinType = JoinType.LEFT; } }
java
/**
 * Splits the join tree into sub-trees of uniform join type. A child whose join
 * type matches this node's is descended into; a child with a different join
 * type terminates the current sub-tree: it is collected into leafNodes and
 * replaced here by a placeholder leaf carrying the negated id, so the original
 * sub-tree can be re-attached later by matching that id.
 */
@Override
protected void extractSubTree(List<JoinNode> leafNodes) {
    JoinNode[] children = {m_leftNode, m_rightNode};
    for (JoinNode child : children) {

        // Leaf nodes don't have a significant join type,
        // test for them first and never attempt to start a new tree at a leaf.
        if ( ! (child instanceof BranchNode)) {
            continue;
        }

        if (((BranchNode)child).m_joinType == m_joinType) {
            // The join type for this node is the same as the root's one
            // Keep walking down the tree
            child.extractSubTree(leafNodes);
        } else {
            // The join type for this join differs from the root's one
            // Terminate the sub-tree
            leafNodes.add(child);

            // Replace the join node with the temporary node having the id negated
            JoinNode tempNode = new TableLeafNode(
                    -child.m_id, child.m_joinExpr, child.m_whereExpr, null);
            if (child == m_leftNode) {
                m_leftNode = tempNode;
            } else {
                m_rightNode = tempNode;
            }
        }
    }
}
java
/**
 * Returns true if this node or any descendant performs a non-INNER join.
 */
@Override
public boolean hasOuterJoin() {
    assert(m_leftNode != null && m_rightNode != null);
    if (m_joinType != JoinType.INNER) {
        return true;
    }
    return m_leftNode.hasOuterJoin() || m_rightNode.hasOuterJoin();
}
java
/**
 * Recursively collects ephemeral table scans from both children into scans.
 */
@Override
public void extractEphemeralTableQueries(List<StmtEphemeralTableScan> scans) {
    for (JoinNode child : new JoinNode[] { m_leftNode, m_rightNode }) {
        if (child != null) {
            child.extractEphemeralTableQueries(scans);
        }
    }
}
java
/**
 * Returns true only if this node and every descendant use INNER joins.
 */
@Override
public boolean allInnerJoins() {
    if (m_joinType != JoinType.INNER) {
        return false;
    }
    if (m_leftNode != null && !m_leftNode.allInnerJoins()) {
        return false;
    }
    return m_rightNode == null || m_rightNode.allInnerJoins();
}
java
/**
 * Rewrites the plan graph to enforce a deterministic order when needed.
 * No-op when the caller tolerates nondeterminism (FASTER), the statement is
 * already deterministic, or the plan already imposes a total order.
 */
public static void apply(CompiledPlan plan, DeterminismMode detMode) {
    if (detMode == DeterminismMode.FASTER
            || plan.hasDeterministicStatement()
            || plan.rootPlanGraph.isOrderDeterministic()) {
        return;
    }
    plan.rootPlanGraph = recursivelyApply(plan.rootPlanGraph);
}
java
/**
 * Feeds the unique id of replay-related messages — command-log replayed
 * transactions and MP-participant sentinels — into the replay sequencer so
 * replicas track the last seen and last polled unique ids. All other message
 * types are ignored.
 */
public void updateLastSeenUniqueIds(VoltMessage message) {
    long sequenceWithUniqueId = Long.MIN_VALUE;

    // Replayed work arrives either as a TransactionInfoBaseMessage marked for
    // replay, or as a MultiPartitionParticipantMessage sentinel.
    boolean commandLog = (message instanceof TransactionInfoBaseMessage &&
            (((TransactionInfoBaseMessage)message).isForReplay()));

    boolean sentinel = message instanceof MultiPartitionParticipantMessage;

    // if replay
    if (commandLog || sentinel) {
        sequenceWithUniqueId = ((TransactionInfoBaseMessage)message).getUniqueId();
        // Update last seen and last polled txnId for replicas
        m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message);
        m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId,
                (TransactionInfoBaseMessage) message);
    }
}
java
/**
 * Folds one row of restore results into this map: rows for an already-known
 * (host, partition, table) key are merged into the existing value, otherwise
 * a new entry is created.
 */
public void parseRestoreResultRow(VoltTable vt) {
    final boolean succeeded = vt.getString("RESULT").equals("SUCCESS");
    final String errMsg = vt.getString("ERR_MSG");
    final RestoreResultKey key = new RestoreResultKey(
            (int) vt.getLong("HOST_ID"),
            (int) vt.getLong("PARTITION_ID"),
            vt.getString("TABLE"));
    if (containsKey(key)) {
        get(key).mergeData(succeeded, errMsg);
    } else {
        put(key, new RestoreResultValue(
                (int) vt.getLong("SITE_ID"),
                succeeded,
                vt.getString("HOSTNAME"),
                errMsg));
    }
}
java
/**
 * Binary-searches the (sorted) list for e using natural ordering, delegating
 * to the comparator-based overload with Ordering.natural().
 *
 * @throws NullPointerException if e is null
 */
public static <E extends Comparable> int binarySearch(
    List<? extends E> list,
    E e,
    KeyPresentBehavior presentBehavior,
    KeyAbsentBehavior absentBehavior) {
    checkNotNull(e);
    return binarySearch(list, e, Ordering.natural(), presentBehavior, absentBehavior);
}
java