code
stringlengths
73
34.1k
label
stringclasses
1 value
/**
 * JDBC metadata query for columns that are automatically updated when any
 * row value changes. Not supported by this driver: after verifying the
 * connection is still open, always signals "no support".
 *
 * @throws SQLException if the connection is closed, or always to signal
 *         that this metadata query is unsupported
 */
@Override
public ResultSet getVersionColumns(String catalog, String schema, String table) throws SQLException {
    checkClosed();
    throw SQLError.noSupport();
}
java
/**
 * Reports which JDBC CONVERT type pairs this driver supports.
 *
 * Supported conversions:
 *  - VARCHAR, VARBINARY, TIMESTAMP and OTHER convert only to VARCHAR
 *    (ALL types can be converted to VARCHAR / VoltType.STRING);
 *  - the numeric types (TINYINT..DECIMAL) convert to VARCHAR or to any
 *    other numeric type.
 * Everything else is unsupported.
 */
@Override
public boolean supportsConvert(int fromType, int toType) throws SQLException {
    checkClosed();
    switch (fromType) {
        case java.sql.Types.VARCHAR:
        case java.sql.Types.VARBINARY:
        case java.sql.Types.TIMESTAMP:
        case java.sql.Types.OTHER:
            // These sources only ever become VARCHAR.
            return toType == java.sql.Types.VARCHAR;
        case java.sql.Types.TINYINT:
        case java.sql.Types.SMALLINT:
        case java.sql.Types.INTEGER:
        case java.sql.Types.BIGINT:
        case java.sql.Types.FLOAT:
        case java.sql.Types.DECIMAL:
            // Numeric sources become VARCHAR or any numeric type.
            switch (toType) {
                case java.sql.Types.VARCHAR:
                case java.sql.Types.TINYINT:
                case java.sql.Types.SMALLINT:
                case java.sql.Types.INTEGER:
                case java.sql.Types.BIGINT:
                case java.sql.Types.FLOAT:
                case java.sql.Types.DECIMAL:
                    return true;
                default:
                    return false;
            }
        default:
            return false;
    }
}
java
/**
 * Reports whether the given {@link ResultSet} type is supported.
 * Only TYPE_SCROLL_INSENSITIVE result sets are supported by this driver.
 *
 * Idiom fix: the redundant if/return-true/return-false is collapsed into a
 * direct boolean return; behavior is unchanged.
 */
@Override
public boolean supportsResultSetType(int type) throws SQLException {
    checkClosed();
    return type == ResultSet.TYPE_SCROLL_INSENSITIVE;
}
java
/**
 * Returns true when the given connection-type string denotes an in-process
 * (embedded) database: the file, resource or memory type prefixes.
 *
 * Bug fix: the original compared Strings with '==', which tests reference
 * identity and only matches interned/constant strings; a type string built
 * at runtime (e.g. via substring or concatenation) would never match. Using
 * equals() accepts every string '==' accepted, plus equal non-identical
 * ones. Constants-first also keeps a null argument safe (returns false).
 */
public static boolean isInProcessDatabaseType(String url) {
    return S_FILE.equals(url) || S_RES.equals(url) || S_MEM.equals(url);
}
java
// Pops and returns the next object whose scheduled time (map key) is
// <= systemCurrentTimeMillis, or null when the map is empty or nothing is
// due yet. A key may map to several queued values; in that case one value
// is handed out and the remainder are re-inserted under the same key, so
// each call returns exactly one object and decrements m_size exactly once.
// NOTE(review): relies on `delayed` being a sorted map (firstKey /
// pollFirstEntry) keyed by ready-time — confirm against the field decl.
public T nextReady(long systemCurrentTimeMillis) { if (delayed.size() == 0) { return null; } // no ready objects if (delayed.firstKey() > systemCurrentTimeMillis) { return null; } Entry<Long, Object[]> entry = delayed.pollFirstEntry(); Object[] values = entry.getValue(); @SuppressWarnings("unchecked") T value = (T) values[0]; // if this map entry had multiple values, put all but one // of them back if (values.length > 1) { int prevLength = values.length; values = Arrays.copyOfRange(values, 1, values.length); assert(values.length == prevLength - 1); delayed.put(entry.getKey(), values); } m_size--; return value; }
java
private static byte[] readCatalog(String catalogUrl) throws IOException { assert (catalogUrl != null); final int MAX_CATALOG_SIZE = 40 * 1024 * 1024; // 40mb InputStream fin = null; try { URL url = new URL(catalogUrl); fin = url.openStream(); } catch (MalformedURLException ex) { // Invalid URL. Try as a file. fin = new FileInputStream(catalogUrl); } byte[] buffer = new byte[MAX_CATALOG_SIZE]; int readBytes = 0; int totalBytes = 0; try { while (readBytes >= 0) { totalBytes += readBytes; readBytes = fin.read(buffer, totalBytes, buffer.length - totalBytes - 1); } } finally { fin.close(); } return Arrays.copyOf(buffer, totalBytes); }
java
// Closes this appender: marks it closed, and if a syslog writer exists,
// sends the layout footer (only when the header/footer handshake completed
// and a footer is configured) before closing the writer. The writer
// reference is nulled on both the success and IOException paths so close()
// is idempotent; a failed close is deliberately swallowed.
synchronized public void close() { closed = true; if (sqw != null) { try { if (layoutHeaderChecked && layout != null && layout.getFooter() != null) { sendLayoutMessage(layout.getFooter()); } sqw.close(); sqw = null; } catch(java.io.IOException ex) { sqw = null; } } }
java
/**
 * Maps a syslog facility name (e.g. "KERN", "LOCAL0") to its numeric
 * LOG_* constant. Matching ignores case and surrounding whitespace.
 *
 * Idiom fix: the 20-branch equalsIgnoreCase chain is replaced by a single
 * normalization plus a switch. Locale.ENGLISH keeps the upper-casing
 * stable regardless of the default locale (ASCII facility names only).
 *
 * @param facilityName facility name; may be null
 * @return the matching LOG_* constant, or -1 if null or unrecognized
 */
public static int getFacility(String facilityName) {
    if (facilityName == null) {
        return -1;
    }
    switch (facilityName.trim().toUpperCase(java.util.Locale.ENGLISH)) {
        case "KERN":     return LOG_KERN;
        case "USER":     return LOG_USER;
        case "MAIL":     return LOG_MAIL;
        case "DAEMON":   return LOG_DAEMON;
        case "AUTH":     return LOG_AUTH;
        case "SYSLOG":   return LOG_SYSLOG;
        case "LPR":      return LOG_LPR;
        case "NEWS":     return LOG_NEWS;
        case "UUCP":     return LOG_UUCP;
        case "CRON":     return LOG_CRON;
        case "AUTHPRIV": return LOG_AUTHPRIV;
        case "FTP":      return LOG_FTP;
        case "LOCAL0":   return LOG_LOCAL0;
        case "LOCAL1":   return LOG_LOCAL1;
        case "LOCAL2":   return LOG_LOCAL2;
        case "LOCAL3":   return LOG_LOCAL3;
        case "LOCAL4":   return LOG_LOCAL4;
        case "LOCAL5":   return LOG_LOCAL5;
        case "LOCAL6":   return LOG_LOCAL6;
        case "LOCAL7":   return LOG_LOCAL7;
        default:         return -1;
    }
}
java
// Activates the appender after configuration: pre-resolves the local
// hostname when packet headers are enabled (so the first log call doesn't
// pay the lookup), sends the layout header if one is configured, and marks
// the header handshake complete so close() knows it may send the footer.
public void activateOptions() { if (header) { getLocalHostname(); } if (layout != null && layout.getHeader() != null) { sendLayoutMessage(layout.getHeader()); } layoutHeaderChecked = true; }
java
private String getPacketHeader(final long timeStamp) { if (header) { StringBuffer buf = new StringBuffer(dateFormat.format(new Date(timeStamp))); // RFC 3164 says leading space, not leading zero on days 1-9 if (buf.charAt(4) == '0') { buf.setCharAt(4, ' '); } buf.append(getLocalHostname()); buf.append(' '); return buf.toString(); } return ""; }
java
/**
 * Sends a layout-produced message (header/footer) through the syslog
 * writer, prefixing the packet header and, when enabled, the facility
 * string. No-op when the writer has not been created or is closed.
 *
 * Idiom fix: StringBuffer replaced by StringBuilder for the method-local
 * accumulation; an early return replaces the enclosing if. Behavior is
 * unchanged.
 */
private void sendLayoutMessage(final String msg) {
    if (sqw == null) {
        return;
    }
    String packet = msg;
    String hdr = getPacketHeader(new Date().getTime());
    if (facilityPrinting || hdr.length() > 0) {
        StringBuilder buf = new StringBuilder(hdr);
        if (facilityPrinting) {
            buf.append(facilityStr);
        }
        buf.append(msg);
        packet = buf.toString();
    }
    sqw.setLevel(6); // syslog severity level 6
    sqw.write(packet);
}
java
// Returns the gzip-compressed payload. Fast path: a cached copy from
// gZipData(). Otherwise the bytes are materialized from the output stream,
// which must exist, be closed, and not yet be freed — any other state (or
// an IOException while extracting) surfaces as "not readable". The output
// stream is freed afterwards in all cases (finally), so this conversion
// happens at most once.
protected byte[] getGZipData() throws SQLException { byte[] bytes = gZipData(); if (bytes != null) { return bytes; } if ((this.outputStream == null) || !this.outputStream.isClosed() || this.outputStream.isFreed()) { throw Exceptions.notReadable(); } try { setGZipData(this.outputStream.toByteArray()); return gZipData(); } catch (IOException ex) { throw Exceptions.notReadable(); } finally { this.freeOutputStream(); } }
java
// Marks this object closed and releases every associated resource: clears
// the readable/writable flags, frees both backing streams, and drops the
// cached gzip bytes so they can be garbage collected. Synchronized so
// state flips and resource release appear atomic to concurrent callers.
protected synchronized void close() { this.closed = true; setReadable(false); setWritable(false); freeOutputStream(); freeInputStream(); this.gzdata = null; }
java
// Factory for javax.xml.transform Result objects of the requested class.
// Flips this object from writable to readable, then dispatches on the
// requested type. JAXBResult is tested FIRST because it extends SAXResult
// but cannot be instantiated via a zero-args constructor, so it must not
// fall into the SAXResult branch — it deliberately falls through to the
// invalidArgument exception. A null resultClass yields a StreamResult.
protected <T extends Result>T createResult( Class<T> resultClass) throws SQLException { checkWritable(); setWritable(false); setReadable(true); if (JAXBResult.class.isAssignableFrom(resultClass)) { // Must go first presently, since JAXBResult extends SAXResult // (purely as an implmentation detail) and it's not possible // to instantiate a valid JAXBResult with a Zero-Args // constructor(or any subclass thereof, due to the finality of // its private UnmarshallerHandler) // FALL THROUGH... will throw an exception } else if ((resultClass == null) || StreamResult.class.isAssignableFrom(resultClass)) { return createStreamResult(resultClass); } else if (DOMResult.class.isAssignableFrom(resultClass)) { return createDOMResult(resultClass); } else if (SAXResult.class.isAssignableFrom(resultClass)) { return createSAXResult(resultClass); } else if (StAXResult.class.isAssignableFrom(resultClass)) { return createStAXResult(resultClass); } throw Util.invalidArgument("resultClass: " + resultClass); }
java
/**
 * Creates a SAXResult (or a user-supplied SAXResult subclass) whose SAX
 * events are bridged onto an XMLStreamWriter obtained from this object's
 * StAX result.
 *
 * Idiom fix: the four identical reflection catch blocks are collapsed
 * into one multi-catch; behavior is unchanged.
 *
 * @param resultClass SAXResult subclass to instantiate, or null for a
 *        plain SAXResult
 * @throws SQLException if the result class cannot be instantiated
 */
@SuppressWarnings("unchecked")
protected <T extends Result> T createSAXResult(Class<T> resultClass) throws SQLException {
    SAXResult result;
    try {
        result = (resultClass == null) ? new SAXResult()
                                       : (SAXResult) resultClass.newInstance();
    } catch (SecurityException | InstantiationException
             | IllegalAccessException | ClassCastException ex) {
        throw Exceptions.resultInstantiation(ex);
    }
    StAXResult staxResult = createStAXResult(null);
    XMLStreamWriter xmlWriter = staxResult.getXMLStreamWriter();
    // Adapter translating SAX callbacks into StAX writer calls.
    SAX2XMLStreamWriter handler = new SAX2XMLStreamWriter(xmlWriter);
    result.setHandler(handler);
    return (T) result;
}
java
/**
 * If the candidate expression is equal to this one, the binding is the
 * shared immutable empty list (nothing to bind); otherwise there is no
 * binding and null is returned.
 */
@Override
public List<AbstractExpression> bindingToIndexedExpression(AbstractExpression expr) {
    return equals(expr) ? s_reusableImmutableEmptyBinding : null;
}
java
// Creates a topology-aware client and connects it to the first reachable
// server in the list (topology awareness then discovers the rest). The
// IOException from an unreachable server is deliberately swallowed so the
// next candidate is tried; non-network failures (e.g. unresolved hostname
// per the comment) still propagate. Throws when no server accepted a
// connection.
public static Client getClient(ClientConfig config, String[] servers, int port) throws Exception { config.setTopologyChangeAware(true); // Set client to be topology-aware final Client client = ClientFactory.createClient(config); for (String server : servers) { // Try connecting servers one by one until we have a success try { client.createConnection(server.trim(), port); break; } catch(IOException e) { // Only swallow the exceptions from Java network or connection problems // Unresolved hostname exceptions will be thrown } } if (client.getConnectedHostList().isEmpty()) { throw new Exception("Unable to connect to any servers."); } return client; }
java
// Registers an adapter for a partition using copy-on-write: a new
// ImmutableMap is built from the old contents plus the new entry and then
// swapped in, so readers never see a partially-updated map. synchronized
// serializes concurrent writers; builder.put with an existing pid would
// throw from ImmutableMap.Builder (duplicate key).
public synchronized void addAdapter(int pid, InternalClientResponseAdapter adapter) { final ImmutableMap.Builder<Integer, InternalClientResponseAdapter> builder = ImmutableMap.builder(); builder.putAll(m_adapters); builder.put(pid, adapter); m_adapters = builder.build(); }
java
/**
 * Reports whether a table with the given name exists in the current
 * catalog context.
 */
public boolean hasTable(String name) {
    return getCatalogContext().tables.get(name) != null;
}
java
// Invokes a stored procedure on behalf of a streaming/internal caller.
// Pipeline: resolve the procedure from the catalog; build the invocation
// and round-trip it through the command log serializer; determine target
// partitions; pick the MP adapter when the call is multi-partition
// (MP_INIT_PID or >1 partitions), else the single partition's adapter; and
// hand the transaction to that adapter. Every failure path rate-limit-logs,
// bumps m_failedCount and returns false; success bumps m_submitSuccessCount.
// The boolean return is the submission result, not the procedure outcome —
// that arrives later via procCallback.
public boolean callProcedure(InternalConnectionContext caller, Function<Integer, Boolean> backPressurePredicate, InternalConnectionStatsCollector statsCollector, ProcedureCallback procCallback, String proc, Object... fieldList) { Procedure catProc = InvocationDispatcher.getProcedureFromName(proc, getCatalogContext()); if (catProc == null) { String fmt = "Cannot invoke procedure %s from streaming interface %s. Procedure not found."; m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, fmt, proc, caller); m_failedCount.incrementAndGet(); return false; } StoredProcedureInvocation task = new StoredProcedureInvocation(); task.setProcName(proc); task.setParams(fieldList); try { task = MiscUtils.roundTripForCL(task); } catch (Exception e) { String fmt = "Cannot invoke procedure %s from streaming interface %s. failed to create task."; m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, fmt, proc, caller); m_failedCount.incrementAndGet(); return false; } int[] partitions = null; try { partitions = InvocationDispatcher.getPartitionsForProcedure(catProc, task); } catch (Exception e) { String fmt = "Can not invoke procedure %s from streaming interface %s. Partition not found."; m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, e, fmt, proc, caller); m_failedCount.incrementAndGet(); return false; } boolean mp = (partitions[0] == MpInitiator.MP_INIT_PID) || (partitions.length > 1); final InternalClientResponseAdapter adapter = mp ? m_adapters.get(MpInitiator.MP_INIT_PID) : m_adapters.get(partitions[0]); InternalAdapterTaskAttributes kattrs = new InternalAdapterTaskAttributes(caller, adapter.connectionId()); final AuthUser user = getCatalogContext().authSystem.getImporterUser(); if (!adapter.createTransaction(kattrs, proc, catProc, procCallback, statsCollector, task, user, partitions, false, backPressurePredicate)) { m_failedCount.incrementAndGet(); return false; } m_submitSuccessCount.incrementAndGet(); return true; }
java
// Registers a promotable service. If this node is already the leader the
// service is promoted immediately (late registration); a promotion failure
// is fatal and crashes the local node. synchronized so registration and
// the leader check are atomic with respect to leadership changes.
synchronized void registerService(Promotable service) { m_services.add(service); if (m_isLeader) { try { service.acceptPromotion(); } catch (Exception e) { VoltDB.crashLocalVoltDB("Unable to promote global service.", true, e); } } }
java
// Resolves the result type of a (possibly chained) CASE WHEN expression.
// Three passes walk the right-spine of nested CASEWHEN nodes: (1) resolve
// each condition (parameters default to BOOLEAN) and both result branches;
// (2) aggregate all branch types into a single dataType; (3) push that
// aggregate type back down into any branch that is still untyped. Fails
// with X_42567 when no branch produced a type at all.
// NOTE(review): pass (1) hands `nodes[RIGHT]` (this node's child) as the
// parent argument to resolveTypes even after `expr` has advanced down the
// chain — confirm whether `expr.nodes[RIGHT]` was intended.
void resolveTypesForCaseWhen(Session session) { if (dataType != null) { return; } Expression expr = this; while (expr.opType == OpTypes.CASEWHEN) { expr.nodes[LEFT].resolveTypes(session, expr); if (expr.nodes[LEFT].isParam) { expr.nodes[LEFT].dataType = Type.SQL_BOOLEAN; } expr.nodes[RIGHT].nodes[LEFT].resolveTypes(session, nodes[RIGHT]); expr.nodes[RIGHT].nodes[RIGHT].resolveTypes(session, nodes[RIGHT]); expr = expr.nodes[RIGHT].nodes[RIGHT]; } expr = this; while (expr.opType == OpTypes.CASEWHEN) { dataType = Type.getAggregateType(expr.nodes[RIGHT].nodes[LEFT].dataType, dataType); dataType = Type.getAggregateType(expr.nodes[RIGHT].nodes[RIGHT].dataType, dataType); expr = expr.nodes[RIGHT].nodes[RIGHT]; } expr = this; while (expr.opType == OpTypes.CASEWHEN) { if (expr.nodes[RIGHT].nodes[LEFT].dataType == null) { expr.nodes[RIGHT].nodes[LEFT].dataType = dataType; } if (expr.nodes[RIGHT].nodes[RIGHT].dataType == null) { expr.nodes[RIGHT].nodes[RIGHT].dataType = dataType; } if (expr.nodes[RIGHT].dataType == null) { expr.nodes[RIGHT].dataType = dataType; } expr = expr.nodes[RIGHT].nodes[RIGHT]; } if (dataType == null) { throw Error.error(ErrorCode.X_42567); } }
java
// Parses a well-known-text POINT string into a GeographyPointValue.
// The precompiled wktPattern captures longitude then latitude, each as an
// integer part plus optional fraction; "+ 0.0" collapses -0.0 to +0.0.
// Latitude must be within [-90, 90] and longitude within [-180, 180];
// null input, a non-matching string, or out-of-range coordinates all raise
// IllegalArgumentException.
public static GeographyPointValue fromWKT(String param) { if (param == null) { throw new IllegalArgumentException("Null well known text argument to GeographyPointValue constructor."); } Matcher m = wktPattern.matcher(param); if (m.find()) { // Add 0.0 to avoid -0.0. double longitude = toDouble(m.group(1), m.group(2)) + 0.0; double latitude = toDouble(m.group(3), m.group(4)) + 0.0; if (Math.abs(latitude) > 90.0) { throw new IllegalArgumentException(String.format("Latitude \"%f\" out of bounds.", latitude)); } if (Math.abs(longitude) > 180.0) { throw new IllegalArgumentException(String.format("Longitude \"%f\" out of bounds.", longitude)); } return new GeographyPointValue(longitude, latitude); } else { throw new IllegalArgumentException("Cannot construct GeographyPointValue value from \"" + param + "\""); } }
java
// Formats this point as "longitude latitude" with up to 12 fractional
// digits. Coordinates within EPSILON of zero are clamped to exactly 0 so
// two points that differ only in insignificant low bits cannot format to
// visually identical-but-unequal strings (see inline comment).
String formatLngLat() { DecimalFormat df = new DecimalFormat("##0.0###########"); // Explicitly test for differences less than 1.0e-12 and // force them to be zero. Otherwise you may find a case // where two points differ in the less significant bits, but // they format as the same number. double lng = (Math.abs(m_longitude) < EPSILON) ? 0 : m_longitude; double lat = (Math.abs(m_latitude) < EPSILON) ? 0 : m_latitude; return df.format(lng) + " " + df.format(lat); }
java
public static GeographyPointValue unflattenFromBuffer(ByteBuffer inBuffer, int offset) { double lng = inBuffer.getDouble(offset); double lat = inBuffer.getDouble(offset + BYTES_IN_A_COORD); if (lat == 360.0 && lng == 360.0) { // This is a null point. return null; } return new GeographyPointValue(lng, lat); }
java
private static double normalize(double v, double range) { double a = v-Math.floor((v + (range/2))/range)*range; // Make sure that a and v have the same sign // when abs(v) = 180. if (Math.abs(a) == 180.0 && (a * v) < 0) { a *= -1; } // The addition of 0.0 is to avoid negative // zero, which just confuses things. return a + 0.0; }
java
// Scales both coordinates by alpha and renormalizes the result into valid
// longitude/latitude ranges. "+ 0.0" collapses -0.0 into +0.0 before
// normalization. Deprecated — retained for test/compatibility use.
@Deprecated public GeographyPointValue mul(double alpha) { return GeographyPointValue.normalizeLngLat(getLongitude() * alpha + 0.0, getLatitude() * alpha + 0.0); }
java
// Rotates this point by phi degrees around the given center, treating
// lng/lat as planar coordinates: translate so center is the origin, apply
// the 2-D rotation matrix, translate back, then renormalize. phi is
// converted from degrees via 2*PI*phi/360. Deprecated — planar rotation is
// only an approximation on the sphere.
@Deprecated public GeographyPointValue rotate(double phi, GeographyPointValue center) { double sinphi = Math.sin(2*Math.PI*phi/360.0); double cosphi = Math.cos(2*Math.PI*phi/360.0); // Translate to the center. double longitude = getLongitude() - center.getLongitude(); double latitude = getLatitude() - center.getLatitude(); // Rotate and translate back. return GeographyPointValue.normalizeLngLat((cosphi * longitude - sinphi * latitude) + center.getLongitude(), (sinphi * longitude + cosphi * latitude) + center.getLatitude()); }
java
// Asynchronously creates every node of the VoltZK hierarchy, then waits on
// each callback. NodeExistsException is an expected race (another host may
// create the same node concurrently) and is ignored; any other failure
// crashes the local node. Issuing all creates before waiting pipelines the
// round-trips to ZooKeeper.
public static void createPersistentZKNodes(ZooKeeper zk) { LinkedList<ZKUtil.StringCallback> callbacks = new LinkedList<ZKUtil.StringCallback>(); for (int i=0; i < VoltZK.ZK_HIERARCHY.length; i++) { ZKUtil.StringCallback cb = new ZKUtil.StringCallback(); callbacks.add(cb); zk.create(VoltZK.ZK_HIERARCHY[i], null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, cb, null); } for (ZKUtil.StringCallback cb : callbacks) { try { cb.get(); } catch (org.apache.zookeeper_voltpatches.KeeperException.NodeExistsException e) { // this is an expected race. } catch (Exception e) { VoltDB.crashLocalVoltDB(e.getMessage(), true, e); } } }
java
/**
 * Deserializes a list of mailbox JSON strings into MailboxNodeContent
 * objects. Each JSON object must carry a "HSId" field and may optionally
 * carry a "partitionId" (absent means null).
 *
 * @throws JSONException if a string is not valid JSON or lacks "HSId"
 */
public static List<MailboxNodeContent> parseMailboxContents(List<String> jsons) throws JSONException {
    final ArrayList<MailboxNodeContent> result = new ArrayList<>(jsons.size());
    for (String json : jsons) {
        final JSONObject obj = new JSONObject(json);
        final long hsId = obj.getLong("HSId");
        final Integer partitionId = obj.has("partitionId") ? obj.getInt("partitionId") : null;
        result.add(new MailboxNodeContent(hsId, partitionId));
    }
    return result;
}
java
// Publishes the MigratePartitionLeader indicator node in ZooKeeper.
// Returns true when this call created the node; if the node already exists
// (another host won the race) its data is updated instead and false is
// returned. Any other ZK/interruption/serialization failure is fatal.
// NOTE(review): the inner setData's exceptions are silently swallowed —
// the update is best-effort; confirm that losing it is acceptable.
public static boolean createMigratePartitionLeaderInfo(ZooKeeper zk, MigratePartitionLeaderInfo info) { try { zk.create(migrate_partition_leader_info, info.toBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); } catch (KeeperException e) { if (e.code() == KeeperException.Code.NODEEXISTS) { try { zk.setData(migrate_partition_leader_info, info.toBytes(), -1); } catch (KeeperException | InterruptedException | JSONException e1) { } return false; } org.voltdb.VoltDB.crashLocalVoltDB("Unable to create MigratePartitionLeader Indicator", true, e); } catch (InterruptedException | JSONException e) { org.voltdb.VoltDB.crashLocalVoltDB("Unable to create MigratePartitionLeader Indicator", true, e); } return true; }
java
// Reads the MigratePartitionLeader indicator from ZooKeeper and
// deserializes it. Best-effort: a missing node, ZK error, interruption or
// parse failure all yield null rather than propagating.
public static MigratePartitionLeaderInfo getMigratePartitionLeaderInfo(ZooKeeper zk) { try { byte[] data = zk.getData(migrate_partition_leader_info, null, null); if (data != null) { MigratePartitionLeaderInfo info = new MigratePartitionLeaderInfo(data); return info; } } catch (KeeperException | InterruptedException | JSONException e) { } return null; }
java
/**
 * If exactly one side of this comparison is date/time typed and the other
 * is a character VALUE literal, casts the literal in place to the
 * date/time type so the comparison is well-typed. Date/time types with a
 * time zone are left untouched.
 *
 * @return true when the literal was converted
 */
private boolean convertDateTimeLiteral(Session session, Expression a, Expression b) {
    if (!a.dataType.isDateTimeType()) {
        if (!b.dataType.isDateTimeType()) {
            // Neither side is date/time: nothing to do.
            return false;
        }
        // Make 'a' the date/time side.
        Expression swap = a;
        a = b;
        b = swap;
    }
    if (a.dataType.isDateTimeTypeWithZone()) {
        return false;
    }
    if (b.opType == OpTypes.VALUE && b.dataType.isCharacterType()) {
        b.valueData = a.dataType.castToType(session, b.valueData, b.dataType);
        b.dataType = a.dataType;
        return true;
    }
    return false;
}
java
// Rewrites this OR node by distributing OR over AND, i.e.
// (x AND y) OR z  ==>  (x OR z) AND (y OR z), recursing into both
// children afterwards. When only the RIGHT child is an AND, the children
// are swapped and the method restarts so the single rewrite rule applies.
// Mutates this node's opType and child links in place; the statement order
// (building temp before relinking) is essential.
void distributeOr() { if (opType != OpTypes.OR) { return; } if (nodes[LEFT].opType == OpTypes.AND) { opType = OpTypes.AND; Expression temp = new ExpressionLogical(OpTypes.OR, nodes[LEFT].nodes[RIGHT], nodes[RIGHT]); nodes[LEFT].opType = OpTypes.OR; nodes[LEFT].nodes[RIGHT] = nodes[RIGHT]; nodes[RIGHT] = temp; } else if (nodes[RIGHT].opType == OpTypes.AND) { Expression temp = nodes[LEFT]; nodes[LEFT] = nodes[RIGHT]; nodes[RIGHT] = temp; distributeOr(); return; } ((ExpressionLogical) nodes[LEFT]).distributeOr(); ((ExpressionLogical) nodes[RIGHT]).distributeOr(); }
java
boolean isSimpleBound() { if (opType == OpTypes.IS_NULL) { return true; } if (nodes[RIGHT] != null) { if (nodes[RIGHT].opType == OpTypes.VALUE) { // also true for all parameters return true; } if (nodes[RIGHT].opType == OpTypes.SQL_FUNCTION) { if (((FunctionSQL) nodes[RIGHT]).isValueFunction()) { return true; } } } return false; }
java
// Reverses this comparison in place so its operands can be exchanged:
// the operator is mirrored (>= <-> <=, > <-> <) while EQUAL and
// NOT_DISTINCT are symmetric and keep their opType, then the LEFT and
// RIGHT children are swapped. Any other operator is a caller bug and
// raises U_S0500.
void swapCondition() { int i = OpTypes.EQUAL; switch (opType) { case OpTypes.GREATER_EQUAL : i = OpTypes.SMALLER_EQUAL; break; case OpTypes.SMALLER_EQUAL : i = OpTypes.GREATER_EQUAL; break; case OpTypes.SMALLER : i = OpTypes.GREATER; break; case OpTypes.GREATER : i = OpTypes.SMALLER; break; case OpTypes.NOT_DISTINCT : i = OpTypes.NOT_DISTINCT; break; case OpTypes.EQUAL : break; default : throw Error.runtimeError(ErrorCode.U_S0500, "Expression.swapCondition"); } opType = i; Expression e = nodes[LEFT]; nodes[LEFT] = nodes[RIGHT]; nodes[RIGHT] = e; }
java
// When exactly one operand of this binary expression is integral-typed,
// asks ExpressionValue to mutate the OTHER (non-integral) child into a
// BIGINT-typed node. whichChild names the slot of that non-integral child
// in this expression's node array. Returns false when neither side is
// integral (nothing to convert); otherwise returns the mutation's result.
private boolean voltConvertBinaryIntegerLiteral(Session session, Expression lhs, Expression rhs) { Expression nonIntegralExpr; int whichChild; if (lhs.dataType.isIntegralType()) { nonIntegralExpr = rhs; whichChild = RIGHT; } else if (rhs.dataType.isIntegralType()) { nonIntegralExpr = lhs; whichChild = LEFT; } else { return false; } return ExpressionValue.voltMutateToBigintType(nonIntegralExpr, this, whichChild); }
java
// Removes a row: deletes it from every index (iterating the index list in
// reverse order) and then removes the underlying storage slot by the row's
// position.
public final void delete(Row row) { for (int i = indexList.length - 1; i >= 0; i--) { indexList[i].delete(this, row); } remove(row.getPos()); }
java
/**
 * Orders Tasks by their next scheduled execution time, earliest first.
 *
 * Idiom fix: the nested ternary chain is replaced by Long.compare, which
 * returns the same -1/0/1 values here and reads at a glance. (The original
 * already avoided the subtraction-overflow trap.)
 */
public int compare(final Object a, final Object b) {
    final long awhen = ((Task) (a)).getNextScheduled();
    final long bwhen = ((Task) (b)).getNextScheduled();
    return Long.compare(awhen, bwhen);
}
java
/**
 * Schedules a one-shot task to run {@code delay} milliseconds from now.
 *
 * @return an opaque handle identifying the scheduled task
 * @throws IllegalArgumentException if runnable is null
 */
public Object scheduleAfter(final long delay, final Runnable runnable) throws IllegalArgumentException {
    if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }
    return this.addTask(now() + delay, runnable, 0, false);
}
java
/**
 * Schedules a one-shot task to run at the given wall-clock time.
 *
 * @return an opaque handle identifying the scheduled task
 * @throws IllegalArgumentException if date or runnable is null
 */
public Object scheduleAt(final Date date, final Runnable runnable) throws IllegalArgumentException {
    if (date == null) {
        throw new IllegalArgumentException("date == null");
    } else if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }
    return this.addTask(date.getTime(), runnable, 0, false);
}
java
/**
 * Schedules a repeating task whose first run is at the given wall-clock
 * time; {@code relative} selects fixed-rate versus fixed-delay repetition.
 *
 * @return an opaque handle identifying the scheduled task
 * @throws IllegalArgumentException if date or runnable is null, or
 *         period is not positive
 */
public Object schedulePeriodicallyAt(final Date date, final long period, final Runnable runnable, final boolean relative) throws IllegalArgumentException {
    if (date == null) {
        throw new IllegalArgumentException("date == null");
    } else if (period <= 0) {
        throw new IllegalArgumentException("period <= 0");
    } else if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }
    return addTask(date.getTime(), runnable, period, relative);
}
java
/**
 * Schedules a repeating task whose first run is {@code delay} milliseconds
 * from now; {@code relative} selects fixed-rate versus fixed-delay
 * repetition.
 *
 * @return an opaque handle identifying the scheduled task
 * @throws IllegalArgumentException if period is not positive or runnable
 *         is null
 */
public Object schedulePeriodicallyAfter(final long delay, final long period, final Runnable runnable, final boolean relative) throws IllegalArgumentException {
    if (period <= 0) {
        throw new IllegalArgumentException("period <= 0");
    } else if (runnable == null) {
        throw new IllegalArgumentException("runnable == null");
    }
    return addTask(now() + delay, runnable, period, relative);
}
java
// Shuts the timer down without waiting: sets the shutdown flag, interrupts
// the runner thread if it is alive, and cancels every queued task.
// Idempotent — a second call sees isShutdown and does nothing. The runner
// reference is captured before flipping the flag so the interrupt targets
// the thread that was active at shutdown time.
public synchronized void shutdownImmediately() { if (!this.isShutdown) { final Thread runner = this.taskRunnerThread; this.isShutdown = true; if (runner != null && runner.isAlive()) { runner.interrupt(); } this.taskQueue.cancelAllTasks(); } }
java
/**
 * True when the given handle is a repeating task scheduled at a fixed
 * rate (relative scheduling with a positive period); false for
 * non-repeating tasks, fixed-delay tasks, and non-Task objects.
 */
public static boolean isFixedRate(final Object task) {
    if (!(task instanceof Task)) {
        return false;
    }
    final Task handle = (Task) task;
    return handle.relative && handle.period > 0;
}
java
/**
 * True when the given handle is a repeating task scheduled with a fixed
 * delay (non-relative scheduling with a positive period); false for
 * non-repeating tasks, fixed-rate tasks, and non-Task objects.
 */
public static boolean isFixedDelay(final Object task) {
    if (!(task instanceof Task)) {
        return false;
    }
    final Task handle = (Task) task;
    return !handle.relative && handle.period > 0;
}
java
/**
 * Returns the time the given task handle was last scheduled, or null when
 * it has no recorded last schedule (sentinel 0) or the handle is not a
 * Task.
 */
public static Date getLastScheduled(Object task) {
    if (!(task instanceof Task)) {
        return null;
    }
    final long lastMillis = ((Task) task).getLastScheduled();
    return (lastMillis == 0) ? null : new Date(lastMillis);
}
java
/**
 * Returns the next time the given task handle is due to run, or null when
 * the task is cancelled, has no next schedule (sentinel 0), or the handle
 * is not a Task.
 */
public static Date getNextScheduled(Object task) {
    if (!(task instanceof Task)) {
        return null;
    }
    final Task handle = (Task) task;
    final long nextMillis = handle.isCancelled() ? 0 : handle.getNextScheduled();
    return (nextMillis == 0) ? null : new Date(nextMillis);
}
java
// Creates a Task with the given first-run time, period and scheduling mode,
// enqueues it, and restarts the runner so a newly-earliest task is noticed.
// Rejects scheduling after shutdown. taskQueue.addTask and restart handle
// their own synchronization (see inline comments).
protected Task addTask(final long first, final Runnable runnable, final long period, boolean relative) { if (this.isShutdown) { throw new IllegalStateException("shutdown"); } final Task task = new Task(first, runnable, period, relative); // sychronized this.taskQueue.addTask(task); // sychronized this.restart(); return task; }
java
// Core scheduler loop: peeks the earliest task; if it is not yet due,
// parks on the queue (releasing the monitor) until it is due or the queue
// changes, otherwise removes it, re-enqueues repeating tasks with their
// next run time (fixed-rate tasks compensate for overruns, and tasks more
// than one period late are rescheduled immediately so they cannot saturate
// the queue head), and returns it. Returns null on shutdown, interrupt, or
// an empty queue. The peek/remove pair is synchronized on the queue so the
// removal applies to exactly the task that was peeked.
// NOTE(review): the loop condition `!this.isShutdown || Thread.interrupted()`
// keeps looping after shutdown whenever the interrupt flag is set (and
// clears it) — confirm this is intended rather than `&& !Thread.interrupted()`.
protected Task nextTask() { try { while (!this.isShutdown || Thread.interrupted()) { long now; long next; long wait; Task task; // synchronized to ensure removeTask // applies only to the peeked task, // when the computed wait <= 0 synchronized (this.taskQueue) { task = this.taskQueue.peekTask(); if (task == null) { // queue is empty break; } now = System.currentTimeMillis(); next = task.next; wait = (next - now); if (wait > 0) { // release ownership of taskQueue monitor and await // notification of task addition or cancellation, // at most until the time when the peeked task is // next supposed to execute this.taskQueue.park(wait); continue; // to top of loop } else { this.taskQueue.removeTask(); } } long period = task.period; if (period > 0) { // repeated task if (task.relative) { // using fixed rate shceduling final long late = (now - next); if (late > period) { // ensure that really late tasks don't // completely saturate the head of the // task queue period = 0; /** @todo : is -1, -2 ... fairer? */ } else if (late > 0) { // compensate for scheduling overruns period -= late; } } task.updateSchedule(now, now + period); this.taskQueue.addTask(task); } return task; } } catch (InterruptedException e) { //e.printStackTrace(); } return null; }
java
// Constructs this site's ExecutionEngine according to the backend target:
// a JNI engine for the default target, a Mockito spy wrapped around a JNI
// engine for NATIVE_EE_SPY_JNI (reflection keeps Mockito off the
// production classpath), or an IPC engine for IPC targets; any other
// target is a VoltAbortException. Buffer sizes, flush timeout and temp
// table cap come from system properties with deployment/defaults as
// fallback (TEMP_TABLE_MAX_SIZE overrides deployment for testing). After
// construction the catalog is loaded and the batch query timeout applied.
// Any failure here is fatal: it is logged and crashes the local node.
ExecutionEngine initializeEE() { String hostname = CoreUtils.getHostnameOrAddress(); HashinatorConfig hashinatorConfig = TheHashinator.getCurrentConfig(); ExecutionEngine eeTemp = null; Deployment deploy = m_context.cluster.getDeployment().get("deployment"); final int defaultDrBufferSize = Integer.getInteger("DR_DEFAULT_BUFFER_SIZE", 512 * 1024); // 512KB int configuredTimeout = Integer.getInteger("MAX_EXPORT_BUFFER_FLUSH_INTERVAL", 4*1000); final int exportFlushTimeout = configuredTimeout > 0 ? configuredTimeout : 4*1000; int tempTableMaxSize = deploy.getSystemsettings().get("systemsettings").getTemptablemaxsize(); if (System.getProperty("TEMP_TABLE_MAX_SIZE") != null) { // Allow a system property to override the deployment setting // for testing purposes. tempTableMaxSize = Integer.getInteger("TEMP_TABLE_MAX_SIZE"); } try { // NATIVE_EE_JNI and NATIVE_EE_LARGE_JNI if (m_backend.isDefaultJNITarget) { eeTemp = new ExecutionEngineJNI( m_context.cluster.getRelativeIndex(), m_siteId, m_partitionId, m_context.getNodeSettings().getLocalSitesCount(), CoreUtils.getHostIdFromHSId(m_siteId), hostname, m_context.cluster.getDrclusterid(), defaultDrBufferSize, tempTableMaxSize, hashinatorConfig, m_isLowestSiteId, exportFlushTimeout); } else if (m_backend == BackendTarget.NATIVE_EE_SPY_JNI){ Class<?> spyClass = Class.forName("org.mockito.Mockito"); Method spyMethod = spyClass.getDeclaredMethod("spy", Object.class); ExecutionEngine internalEE = new ExecutionEngineJNI( m_context.cluster.getRelativeIndex(), m_siteId, m_partitionId, m_context.getNodeSettings().getLocalSitesCount(), CoreUtils.getHostIdFromHSId(m_siteId), hostname, m_context.cluster.getDrclusterid(), defaultDrBufferSize, tempTableMaxSize, hashinatorConfig, m_isLowestSiteId, exportFlushTimeout); eeTemp = (ExecutionEngine) spyMethod.invoke(null, internalEE); } else if (m_backend.isIPC) { // set up the EE over IPC eeTemp = new ExecutionEngineIPC( m_context.cluster.getRelativeIndex(), m_siteId, m_partitionId, 
m_context.getNodeSettings().getLocalSitesCount(), CoreUtils.getHostIdFromHSId(m_siteId), hostname, m_context.cluster.getDrclusterid(), defaultDrBufferSize, tempTableMaxSize, m_backend, VoltDB.instance().getConfig().m_ipcPort, hashinatorConfig, m_isLowestSiteId, exportFlushTimeout); } else { /* This seems very bad. */ throw new VoltAbortException( String.format("Unexpected BackendTarget value %s", m_backend) ); } eeTemp.loadCatalog(m_startupConfig.m_timestamp, m_startupConfig.m_serializedCatalog); eeTemp.setBatchTimeout(m_context.cluster.getDeployment().get("deployment"). getSystemsettings().get("systemsettings").getQuerytimeout()); } // just print error info an bail if we run into an error here catch (final Exception ex) { hostLog.l7dlog( Level.FATAL, LogKeys.host_ExecutionSite_FailedConstruction.name(), new Object[] { m_siteId, m_siteIndex }, ex); VoltDB.crashLocalVoltDB(ex.getMessage(), true, ex); } return eeTemp; }
java
// Applies or releases a list of UndoActions. When undo is true the actions
// are undone in reverse (most-recent-first) order — Lists.reverse returns
// a reversed *view* backed by the original list, so the final clear() also
// empties the caller's list. When undo is false every action is released
// in forward order and the list is left intact. Null lists are a no-op.
private static void handleUndoLog(List<UndoAction> undoLog, boolean undo) { if (undoLog == null) { return; } if (undo) { undoLog = Lists.reverse(undoLog); } for (UndoAction action : undoLog) { if (undo) { action.undo(); } else { action.release(); } } if (undo) { undoLog.clear(); } }
java
// Applies a catalog update to this site. Always: swap in the new context,
// reapply the batch timeout, reload procedures and functions. MPI sites
// and updates with no EE-relevant diff commands stop there. Otherwise the
// diff commands are scanned against both the new and the old catalog to
// detect whether any DR-enabled table changed (including deletions); if a
// schema change requires snapshot isolation the site blocks until the
// local snapshot completes before calling into the EE. Finally the EE
// applies the catalog and, when DR tables changed, a CATALOG_UPDATE DR
// event carrying the serialized DR commands is generated. Returns true in
// all non-crash paths.
public boolean updateCatalog(String diffCmds, CatalogContext context, boolean requiresSnapshotIsolationboolean, boolean isMPI, long txnId, long uniqueId, long spHandle, boolean isReplay, boolean requireCatalogDiffCmdsApplyToEE, boolean requiresNewExportGeneration) { CatalogContext oldContext = m_context; m_context = context; m_ee.setBatchTimeout(m_context.cluster.getDeployment().get("deployment"). getSystemsettings().get("systemsettings").getQuerytimeout()); m_loadedProcedures.loadProcedures(m_context, isReplay); m_ee.loadFunctions(m_context); if (isMPI) { // the rest of the work applies to sites with real EEs return true; } if (requireCatalogDiffCmdsApplyToEE == false) { // empty diff cmds for the EE to apply, so skip the JNI call hostLog.debug("Skipped applying diff commands on EE."); return true; } CatalogMap<Table> tables = m_context.catalog.getClusters().get("cluster").getDatabases().get("database").getTables(); boolean DRCatalogChange = false; for (Table t : tables) { if (t.getIsdred()) { DRCatalogChange |= diffCmds.contains("tables#" + t.getTypeName()); if (DRCatalogChange) { break; } } } if (!DRCatalogChange) { // Check against old catalog for deletions CatalogMap<Table> oldTables = oldContext.catalog.getClusters().get("cluster").getDatabases().get("database").getTables(); for (Table t : oldTables) { if (t.getIsdred()) { DRCatalogChange |= diffCmds.contains(CatalogSerializer.getDeleteDiffStatement(t, "tables")); if (DRCatalogChange) { break; } } } } // if a snapshot is in process, wait for it to finish // don't bother if this isn't a schema change // if (requiresSnapshotIsolationboolean && m_snapshotter.isEESnapshotting()) { hostLog.info(String.format("Site %d performing schema change operation must block until snapshot is locally complete.", CoreUtils.getSiteIdFromHSId(m_siteId))); try { m_snapshotter.completeSnapshotWork(m_sysprocContext); hostLog.info(String.format("Site %d locally finished snapshot. 
Will update catalog now.", CoreUtils.getSiteIdFromHSId(m_siteId))); } catch (InterruptedException e) { VoltDB.crashLocalVoltDB("Unexpected Interrupted Exception while finishing a snapshot for a catalog update.", true, e); } } //Necessary to quiesce before updating the catalog //so export data for the old generation is pushed to Java. //No need to quiesce as there is no rolling of generation OLD datasources will be polled and pushed until there is no more data. //m_ee.quiesce(m_lastCommittedSpHandle); m_ee.updateCatalog(m_context.m_genId, requiresNewExportGeneration, diffCmds); if (DRCatalogChange) { final DRCatalogCommands catalogCommands = DRCatalogDiffEngine.serializeCatalogCommandsForDr(m_context.catalog, -1); generateDREvent(EventType.CATALOG_UPDATE, txnId, uniqueId, m_lastCommittedSpHandle, spHandle, catalogCommands.commands.getBytes(Charsets.UTF_8)); } return true; }
java
// Applies a settings-only catalog context change: swaps in the new
// context and reloads procedures and functions. Unlike updateCatalog, no
// EE catalog diff is applied (the inline comment notes timeout settings
// could also be refreshed here). Always returns true.
public boolean updateSettings(CatalogContext context) { m_context = context; // here you could bring the timeout settings m_loadedProcedures.loadProcedures(m_context); m_ee.loadFunctions(m_context); return true; }
java
// Asks the EE to validate row partitioning for the given tables against
// the supplied hashinator config. The parameter buffer is packed as:
// table count (int), each table id (long), then the raw hashinator config
// bytes. The EE's reply is read back as one long per table: the count of
// mispartitioned rows, in the same order as tableIds.
@Override public long[] validatePartitioning(long[] tableIds, byte[] hashinatorConfig) { ByteBuffer paramBuffer = m_ee.getParamBufferForExecuteTask(4 + (8 * tableIds.length) + 4 + hashinatorConfig.length); paramBuffer.putInt(tableIds.length); for (long tableId : tableIds) { paramBuffer.putLong(tableId); } paramBuffer.put(hashinatorConfig); ByteBuffer resultBuffer = ByteBuffer.wrap(m_ee.executeTask( TaskType.VALIDATE_PARTITIONING, paramBuffer)); long mispartitionedRows[] = new long[tableIds.length]; for (int ii = 0; ii < tableIds.length; ii++) { mispartitionedRows[ii] = resultBuffer.getLong(); } return mispartitionedRows; }
java
// Emits a DR (database replication) event through the EE. The EE is first
// quiesced up to the last committed handle so the event lands after all
// prior buffered data. Parameter buffer layout: event type ordinal (int),
// uniqueId, lastCommittedSpHandle, spHandle (longs), then txnId and a
// fresh undo token (longs, included so the event itself is undoable),
// followed by the payload length (int) and payload bytes.
public void generateDREvent(EventType type, long txnId, long uniqueId, long lastCommittedSpHandle, long spHandle, byte[] payloads) { m_ee.quiesce(lastCommittedSpHandle); ByteBuffer paramBuffer = m_ee.getParamBufferForExecuteTask(32 + 16 + payloads.length); paramBuffer.putInt(type.ordinal()); paramBuffer.putLong(uniqueId); paramBuffer.putLong(lastCommittedSpHandle); paramBuffer.putLong(spHandle); // adding txnId and undoToken to make generateDREvent undoable paramBuffer.putLong(txnId); paramBuffer.putLong(getNextUndoToken(m_currentTxnId)); paramBuffer.putInt(payloads.length); paramBuffer.put(payloads); m_ee.executeTask(TaskType.GENERATE_DR_EVENT, paramBuffer); }
java
/**
 * True when every replica being repaired has reported its repair logs
 * complete; false as soon as one incomplete replica is found.
 *
 * Idiom fix: iterates values() directly instead of entrySet(), since the
 * map keys were never used.
 */
public boolean areRepairLogsComplete() {
    for (ReplicaRepairStruct struct : m_replicaRepairStructs.values()) {
        if (!struct.logsComplete()) {
            return false;
        }
    }
    return true;
}
java
/**
 * Sends the union of all collected repair logs to every surviving replica,
 * then publishes the promotion result. Must be invoked under the same lock as
 * cancel() (the deliver lock) so a late repair-log response cannot trigger
 * corrections after cancellation.
 */
public void repairSurvivors() {
    // cancel() and repair() must be synchronized by the caller (the deliver lock,
    // currently). If cancelled and the last repair message arrives, don't send
    // out corrections!
    if (this.m_promotionResult.isCancelled()) {
        repairLogger.debug(m_whoami + "skipping repair message creation for cancelled Term.");
        return;
    }
    if (repairLogger.isDebugEnabled()) {
        repairLogger.debug(m_whoami + "received all repair logs and is repairing surviving replicas.");
    }
    for (Iv2RepairLogResponseMessage li : m_repairLogUnion) {
        // send the repair log union to all the survivors. SPIs will ignore
        // CompleteTransactionMessages for transactions which have already
        // completed, so this has the effect of making sure that any holes
        // in the repair log are filled without explicitly having to
        // discover and track them.
        VoltMessage repairMsg = createRepairMessage(li);
        if (repairLogger.isDebugEnabled()) {
            repairLogger.debug(m_whoami + "repairing: " +
                    CoreUtils.hsIdCollectionToString(m_survivors) + " with: " +
                    TxnEgo.txnIdToString(li.getTxnId()) + " " + repairMsg);
        }
        // createRepairMessage may produce null for entries needing no repair traffic.
        if (repairMsg != null) {
            m_mailbox.repairReplicasWith(m_survivors, repairMsg);
        }
    }
    // All repair traffic dispatched: publish the result with the max txn id seen.
    m_promotionResult.set(new RepairResult(m_maxSeenTxnId));
}
java
/**
 * Folds one repair-log response into the union used to repair survivors.
 * Completion messages supersede — and erase — every earlier entry with a txn
 * id at or below theirs; a fragment message is kept only when no entry for its
 * txn id is already present.
 */
void addToRepairLog(Iv2RepairLogResponseMessage msg) {
    // don't add the null payload from the first message ack to the repair log
    if (msg.getPayload() == null) {
        return;
    }
    // MP repair log has at most two messages, complete message for prior transaction
    // and fragment message for current transaction, don't add message before prior completion
    if (msg.getTxnId() <= m_maxSeenCompleteTxnId) {
        return;
    }
    // prev is only meaningful when it refers to the SAME txn id as msg.
    Iv2RepairLogResponseMessage prev = m_repairLogUnion.floor(msg);
    if (prev != null && (prev.getTxnId() != msg.getTxnId())) {
        prev = null;
    }
    if (msg.getPayload() instanceof CompleteTransactionMessage) {
        // prefer complete messages to fragment tasks. Completion message also erases prior staled messages
        m_repairLogUnion.removeIf((p) -> p.getTxnId() <= msg.getTxnId());
        m_repairLogUnion.add(msg);
        m_maxSeenCompleteTxnId = msg.getTxnId();
    } else if (prev == null) {
        // Fragment message for a txn id not yet represented in the union.
        m_repairLogUnion.add(msg);
    }
}
java
static String getSchemaPath(String projectFilePath, String path) throws IOException { File file = null; if (path.contains(".jar!")) { String ddlText = null; ddlText = VoltCompilerUtils.readFileFromJarfile(path); file = VoltProjectBuilder.writeStringToTempFile(ddlText); } else { file = new File(path); } if (!file.isAbsolute()) { // Resolve schemaPath relative to either the database definition xml file // or the working directory. if (projectFilePath != null) { file = new File(new File(projectFilePath).getParent(), path); } else { file = new File(path); } } return file.getPath(); }
java
/**
 * Rebuilds the set of user-defined function (UDF) runners from the catalog.
 * First deregisters tokens for functions that no longer exist in the catalog,
 * then instantiates a runner for every cataloged function and atomically swaps
 * in a new immutable map. Crashes the node if a UDF class cannot be loaded,
 * since catalog and runtime would otherwise disagree.
 */
public void loadFunctions(CatalogContext catalogContext) {
    final CatalogMap<Function> catalogFunctions = catalogContext.database.getFunctions();
    // Remove obsolete tokens
    for (UserDefinedFunctionRunner runner : m_udfs.values()) {
        // The function that the current UserDefinedFunctionRunner is referring to
        // does not exist in the catalog anymore, we need to remove its token.
        if (catalogFunctions.get(runner.m_functionName) == null) {
            FunctionForVoltDB.deregisterUserDefinedFunction(runner.m_functionName);
        }
    }
    // Build new UDF runners
    ImmutableMap.Builder<Integer, UserDefinedFunctionRunner> builder =
            ImmutableMap.<Integer, UserDefinedFunctionRunner>builder();
    for (final Function catalogFunction : catalogFunctions) {
        final String className = catalogFunction.getClassname();
        Class<?> funcClass = null;
        try {
            funcClass = catalogContext.classForProcedureOrUDF(className);
        } catch (final ClassNotFoundException e) {
            // Special-case the message for org.voltdb-prefixed class names;
            // either way a missing class is fatal.
            if (className.startsWith("org.voltdb.")) {
                String msg = String.format(ORGVOLTDB_FUNCCNAME_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            } else {
                String msg = String.format(UNABLETOLOAD_ERROR_FMT, className);
                VoltDB.crashLocalVoltDB(msg, false, null);
            }
        }
        Object funcInstance = null;
        try {
            // Requires a public no-arg constructor on the UDF class.
            funcInstance = funcClass.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(String.format("Error instantiating function \"%s\"", className), e);
        }
        assert(funcInstance != null);
        builder.put(catalogFunction.getFunctionid(), new UserDefinedFunctionRunner(catalogFunction, funcInstance));
    }
    loadBuiltInJavaFunctions(builder);
    // Publish the new runner map atomically.
    m_udfs = builder.build();
}
java
/**
 * Reads an entire text file, normalizing every line ending to '\n'.
 * Uses the platform default charset, matching the previous FileReader behavior.
 *
 * @param file path of the file to read
 * @return the file contents with each line terminated by '\n', or — preserving
 *         the original contract — the I/O error message on failure
 */
static String readFile(String file) {
    // try-with-resources closes the reader even when readLine() throws; the old
    // version leaked both the FileReader and BufferedReader on any IOException.
    try (BufferedReader read = new BufferedReader(new FileReader(file))) {
        StringBuilder b = new StringBuilder();
        String s;
        while ((s = read.readLine()) != null) {
            b.append(s).append('\n');
        }
        return b.toString();
    } catch (IOException e) {
        // Original behavior: report the problem as the returned string.
        return e.getMessage();
    }
}
java
static String[] getServersFromURL(String url) { // get everything between the prefix and the ? String prefix = URL_PREFIX + "//"; int end = url.length(); if (url.indexOf("?") > 0) { end = url.indexOf("?"); } String servstring = url.substring(prefix.length(), end); return servstring.split(","); }
java
/**
 * Rebuilds export datasources from the on-disk .pbd/.ad files left by a prior
 * run. Data files for streams no longer in the catalog are deleted, as are
 * advertisement (.ad) files with no matching data file. Finally, an ack
 * mailbox is set up covering on-disk partitions this host no longer owns.
 */
private void initializeGenerationFromDisk(final CatalogMap<Connector> connectors,
        final ExportDataProcessor processor, File[] files,
        List<Pair<Integer, Integer>> localPartitionsToSites, long genId) {
    // Partitions discovered on disk; filled in by addDataSource below.
    List<Integer> onDiskPartitions = new ArrayList<Integer>();
    // Names of tables the catalog currently exports (view-only excluded).
    NavigableSet<Table> streams = CatalogUtil.getExportTablesExcludeViewOnly(connectors);
    Set<String> exportedTables = new HashSet<>();
    for (Table stream : streams) {
        exportedTables.add(stream.getTypeName());
    }
    /*
     * Find all the data files. Once one is found, extract the nonce
     * and check for any advertisements related to the data files. If
     * there are orphaned advertisements, delete them.
     */
    Map<String, File> dataFiles = new HashMap<>();
    for (File data: files) {
        if (data.getName().endsWith(".pbd")) {
            PbdSegmentName pbdName = PbdSegmentName.parseFile(exportLog, data);
            if (pbdName.m_nonce != null) {
                String nonce = pbdName.m_nonce;
                String streamName = getStreamNameFromNonce(nonce);
                if (exportedTables.contains(streamName)) {
                    dataFiles.put(nonce, data);
                } else {
                    // ENG-15740, stream can be dropped while node is offline, delete .pbd files
                    // if stream is no longer in catalog
                    data.delete();
                }
            } else if (pbdName.m_result == Result.NOT_PBD) {
                exportLog.warn(data.getAbsolutePath() + " is not a PBD file.");
            } else if (pbdName.m_result == Result.INVALID_NAME) {
                exportLog.warn(data.getAbsolutePath() + " doesn't have valid PBD name.");
            }
        }
    }
    // Pair each advertisement with its data file; build a datasource per match.
    for (File ad: files) {
        if (ad.getName().endsWith(".ad")) {
            String nonce = getNonceFromAdFile(ad);
            File dataFile = dataFiles.get(nonce);
            if (dataFile != null) {
                try {
                    addDataSource(ad, localPartitionsToSites, onDiskPartitions, processor, genId);
                } catch (IOException e) {
                    VoltDB.crashLocalVoltDB("Error intializing export datasource " + ad, true, e);
                }
            } else {
                //Delete ads that have no data
                ad.delete();
            }
        }
    }
    // Count unique partitions only
    Set<Integer> allLocalPartitions = localPartitionsToSites.stream()
            .map(p -> p.getFirst())
            .collect(Collectors.toSet());
    // Partitions present on disk but no longer hosted on this node.
    Set<Integer> onDIskPartitionsSet = new HashSet<Integer>(onDiskPartitions);
    onDIskPartitionsSet.removeAll(allLocalPartitions);
    // One export mailbox per node, since we only keep one generation
    if (!onDIskPartitionsSet.isEmpty()) {
        createAckMailboxesIfNeeded(onDIskPartitionsSet);
    }
}
java
/**
 * Creates/updates export datasources to match the catalog's current export
 * tables, drops sources whose tables left the catalog, and (re)creates ack
 * mailboxes only when at least one source was actually created.
 */
void initializeGenerationFromCatalog(CatalogContext catalogContext, final CatalogMap<Connector> connectors,
        final ExportDataProcessor processor, int hostId,
        List<Pair<Integer, Integer>> localPartitionsToSites, boolean isCatalogUpdate) {
    // Update catalog version so that datasources use this version when propagating acks
    m_catalogVersion = catalogContext.catalogVersion;
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Updating to catalog version : " + m_catalogVersion);
    }
    // Collect table names of existing datasources
    Set<String> currentTables = new HashSet<>();
    synchronized(m_dataSourcesByPartition) {
        for (Iterator<Map<String, ExportDataSource>> it = m_dataSourcesByPartition.values().iterator(); it.hasNext();) {
            Map<String, ExportDataSource> sources = it.next();
            currentTables.addAll(sources.keySet());
        }
    }
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Current tables: " + currentTables);
    }
    // Now create datasources based on the catalog (if already present will not be re-created).
    // Note that we create sources on disabled connectors.
    Set<Integer> partitionsInUse =
            localPartitionsToSites.stream().map(p -> p.getFirst()).collect(Collectors.toSet());
    boolean createdSources = false;
    NavigableSet<Table> streams = CatalogUtil.getExportTablesExcludeViewOnly(connectors);
    Set<String> exportedTables = new HashSet<>();
    for (Table stream : streams) {
        addDataSources(stream, hostId, localPartitionsToSites, partitionsInUse, processor,
                catalogContext.m_genId, isCatalogUpdate);
        exportedTables.add(stream.getTypeName());
        createdSources = true;
    }
    // Flip DROPPED/ACTIVE status on sources to match the new exported set.
    updateStreamStatus(exportedTables);
    // Remove datasources that are not exported anymore
    for (String table : exportedTables) {
        currentTables.remove(table);
    }
    if (!currentTables.isEmpty()) {
        removeDataSources(currentTables);
    }
    //Only populate partitions in use if export is actually happening
    createAckMailboxesIfNeeded(createdSources ? partitionsInUse : new HashSet<Integer>());
}
java
/**
 * Marks each datasource DROPPED when its table is no longer in the exported
 * set, and restores previously dropped sources to ACTIVE when their table
 * reappears.
 */
private void updateStreamStatus(Set<String> exportedTables) {
    synchronized (m_dataSourcesByPartition) {
        for (Map<String, ExportDataSource> sources : m_dataSourcesByPartition.values()) {
            for (Map.Entry<String, ExportDataSource> entry : sources.entrySet()) {
                ExportDataSource src = entry.getValue();
                if (!exportedTables.contains(entry.getKey())) {
                    src.setStatus(ExportDataSource.StreamStatus.DROPPED);
                } else if (src.getStatus() == ExportDataSource.StreamStatus.DROPPED) {
                    // Table came back: revive the dropped source.
                    src.setStatus(ExportDataSource.StreamStatus.ACTIVE);
                }
            }
        }
    }
}
java
/**
 * Sends an empty TAKE_MASTERSHIP_RESPONSE for a partition this mailbox has no
 * datasource for, so the requester's expected-response tally still completes.
 * Wire layout: type(1) + partition(4) + signature length(4) + signature bytes
 * + requestId(8).
 */
private void sendDummyTakeMastershipResponse(long sourceHsid, long requestId, int partitionId,
        byte[] signatureBytes) {
    // msg type(1) + partition:int(4) + length:int(4) + signaturesBytes.length
    // requestId(8)
    int msgLen = 1 + 4 + 4 + signatureBytes.length + 8;
    ByteBuffer buf = ByteBuffer.allocate(msgLen);
    buf.put(ExportManager.TAKE_MASTERSHIP_RESPONSE);
    buf.putInt(partitionId);
    buf.putInt(signatureBytes.length);
    buf.put(signatureBytes);
    buf.putLong(requestId);
    BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[0], buf.array());
    m_mbox.send(sourceHsid, bpm);
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Partition " + partitionId + " mailbox hsid (" +
                CoreUtils.hsIdToString(m_mbox.getHSId()) +
                ") send dummy TAKE_MASTERSHIP_RESPONSE message(" +
                requestId + ") to " + CoreUtils.hsIdToString(sourceHsid));
    }
}
java
/**
 * Pushes the current replica-mailbox list to every datasource of the given
 * partition. When new hosts joined ({@code newHSIds} non-empty), also lets
 * each source replay acks to them and re-query for the best gap candidate.
 */
public void updateAckMailboxes(int partition, Set<Long> newHSIds) {
    ImmutableList<Long> replicaHSIds = m_replicasHSIds.get(partition);
    synchronized (m_dataSourcesByPartition) {
        Map<String, ExportDataSource> partitionMap = m_dataSourcesByPartition.get(partition);
        if (partitionMap == null) {
            // No datasources for this partition on this host.
            return;
        }
        for( ExportDataSource eds: partitionMap.values()) {
            eds.updateAckMailboxes(Pair.of(m_mbox, replicaHSIds));
            if (newHSIds != null && !newHSIds.isEmpty()) {
                // In case of newly joined or rejoined streams miss any RELEASE_BUFFER event,
                // master stream resends the event when the export mailbox is aware of new streams.
                eds.forwardAckToNewJoinedReplicas(newHSIds);
                // After rejoin, new data source may contain the data which current master doesn't have,
                // only on master stream if it is blocked by the gap
                eds.queryForBestCandidate();
            }
        }
    }
}
java
/**
 * Creates (or re-binds) an ExportDataSource for the given table on every local
 * partition. New sources get a migrate-rows deleter; existing sources are
 * re-associated with the export client from the new processor. Crashes the
 * node on I/O errors since the persistent datasource state would be unusable.
 */
private void addDataSources(Table table, int hostId, List<Pair<Integer, Integer>> localPartitionsToSites,
        Set<Integer> partitionsInUse, final ExportDataProcessor processor, final long genId,
        boolean isCatalogUpdate) {
    for (Pair<Integer, Integer> partitionAndSiteId : localPartitionsToSites) {
        /*
         * IOException can occur if there is a problem
         * with the persistent aspects of the datasource storage
         */
        int partition = partitionAndSiteId.getFirst();
        int siteId = partitionAndSiteId.getSecond();
        synchronized(m_dataSourcesByPartition) {
            try {
                Map<String, ExportDataSource> dataSourcesForPartition = m_dataSourcesByPartition.get(partition);
                if (dataSourcesForPartition == null) {
                    dataSourcesForPartition = new HashMap<String, ExportDataSource>();
                    m_dataSourcesByPartition.put(partition, dataSourcesForPartition);
                }
                final String key = table.getTypeName();
                if (!dataSourcesForPartition.containsKey(key)) {
                    // First sighting of this table on this partition: build a new datasource.
                    ExportDataSource exportDataSource = new ExportDataSource(this, processor, "database", key,
                            partition, siteId, genId, table.getColumns(), table.getPartitioncolumn(),
                            m_directory.getPath());
                    int migrateBatchSize = CatalogUtil.getPersistentMigrateBatchSize(key);
                    exportDataSource.setupMigrateRowsDeleter(migrateBatchSize);
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Creating ExportDataSource for table in catalog " + key +
                                " partition " + partition + " site " + siteId);
                    }
                    dataSourcesForPartition.put(key, exportDataSource);
                    if (isCatalogUpdate) {
                        exportDataSource.updateCatalog(table, genId);
                    }
                } else {
                    // Associate any existing EDS to the export client in the new processor
                    ExportDataSource eds = dataSourcesForPartition.get(key);
                    ExportClientBase client = processor.getExportClient(key);
                    if (client != null) {
                        // Associate to an existing export client
                        eds.setClient(client);
                        eds.setRunEveryWhere(client.isRunEverywhere());
                    } else {
                        // Reset to no export client
                        eds.setClient(null);
                        eds.setRunEveryWhere(false);
                    }
                    // Mark in catalog only if partition is in use
                    eds.markInCatalog(partitionsInUse.contains(partition));
                    if (isCatalogUpdate) {
                        eds.updateCatalog(table, genId);
                    }
                }
            } catch (IOException e) {
                VoltDB.crashLocalVoltDB(
                        "Error creating datasources for table " + table.getTypeName() +
                        " host id " + hostId, true, e);
            }
        }
    }
}
java
/**
 * Callback fired when a dropped/unused datasource has fully drained. Detaches
 * the source from the bookkeeping maps — removing the partition's mailbox when
 * its last source goes away — then closes and deletes it outside the lock.
 */
@Override
public void onSourceDrained(int partitionId, String tableName) {
    ExportDataSource source;
    synchronized(m_dataSourcesByPartition) {
        Map<String, ExportDataSource> sources = m_dataSourcesByPartition.get(partitionId);
        if (sources == null) {
            // Only an error if the partition wasn't already being removed.
            if (!m_removingPartitions.contains(partitionId)) {
                exportLog.error("Could not find export data sources for partition "
                        + partitionId + ". The export cleanup stream is being discarded.");
            }
            return;
        }
        source = sources.get(tableName);
        if (source == null) {
            exportLog.warn("Could not find export data source for signature " + partitionId +
                    " name " + tableName + ". The export cleanup stream is being discarded.");
            return;
        }
        // Remove source and partition entry if empty
        sources.remove(tableName);
        if (sources.isEmpty()) {
            m_dataSourcesByPartition.remove(partitionId);
            removeMailbox(partitionId);
        }
    }
    //Do closing outside the synchronized block. Do not wait on future since
    // we're invoked from the source's executor thread.
    exportLog.info("Drained on unused partition " + partitionId + ": " + source);
    source.closeAndDelete();
}
java
public void add(int index, Object element) { // reporter.updateCounter++; if (index > elementCount) { throw new IndexOutOfBoundsException("Index out of bounds: " + index + ">" + elementCount); } if (index < 0) { throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0"); } if (elementCount >= elementData.length) { increaseCapacity(); } for (int i = elementCount; i > index; i--) { elementData[i] = elementData[i - 1]; } elementData[index] = element; elementCount++; }
java
public boolean add(Object element) { // reporter.updateCounter++; if (elementCount >= elementData.length) { increaseCapacity(); } elementData[elementCount] = element; elementCount++; return true; }
java
/**
 * Returns the element at the given position.
 *
 * @throws IndexOutOfBoundsException on a negative or too-large index
 */
public Object get(int index) {
    // Guard clauses: reject negative indices first, then out-of-range ones.
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }
    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }
    return elementData[index];
}
java
/**
 * Removes and returns the element at {@code index}, shifting subsequent
 * elements one slot to the left.
 *
 * @throws IndexOutOfBoundsException on a negative or too-large index
 */
public Object remove(int index) {
    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }
    Object removedObj = elementData[index];
    // Bulk-shift the tail left by one instead of the former element-by-element loop.
    System.arraycopy(elementData, index + 1, elementData, index, elementCount - 1 - index);
    elementCount--;
    if (elementCount == 0) {
        clear();
    } else {
        // Null the vacated slot so the stale reference can be collected.
        elementData[elementCount] = null;
    }
    return removedObj;
}
java
/**
 * Replaces the element at {@code index} and returns the previous occupant.
 *
 * @throws IndexOutOfBoundsException on a negative or too-large index
 */
public Object set(int index, Object element) {
    // Guard clauses mirroring get(): negative first, then out-of-range.
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " < 0");
    }
    if (index >= elementCount) {
        throw new IndexOutOfBoundsException("Index out of bounds: " + index + " >= " + elementCount);
    }
    Object previous = elementData[index];
    elementData[index] = element;
    return previous;
}
java
/**
 * Compares two byte arrays for element-wise equality.
 *
 * Delegates to {@link java.util.Arrays#equals(byte[], byte[])}, which covers
 * the identity, length-mismatch, and element comparisons the old hand-rolled
 * loop performed — and additionally handles null arguments gracefully (two
 * nulls are equal, null vs non-null is not) instead of throwing NPE.
 *
 * @return true when both arrays hold the same bytes in the same order
 */
public static boolean bufEquals(byte onearray[], byte twoarray[]) {
    return java.util.Arrays.equals(onearray, twoarray);
}
java
/**
 * Opens a JDBC connection described by this object's url/username/password,
 * after loading the driver class and propagating charset/truststore settings
 * through system properties. System-property variables embedded in the url,
 * username, and password are expanded first.
 *
 * @param curDriverIn     explicit driver class, or null for the configured/default one
 * @param curCharsetIn    explicit charset, or null to fall back to the configured one
 * @param curTrustStoreIn explicit truststore path, or null to fall back to the configured one
 * @throws MalformedURLException when variable expansion fails for url/user/password
 */
public Connection getConnection(String curDriverIn, String curCharsetIn, String curTrustStoreIn)
        throws ClassNotFoundException, MalformedURLException, SQLException {
    // Local vars to satisfy compiler warnings
    String curDriver = curDriverIn;
    String curCharset = curCharsetIn;
    String curTrustStore = curTrustStoreIn;
    Properties sysProps = System.getProperties();
    if (curDriver == null) {
        // If explicit driver not specified
        curDriver = ((driver == null) ? DEFAULT_JDBC_DRIVER : driver);
    }
    if (curCharset == null && charset != null) {
        curCharset = charset;
    }
    if (curTrustStore == null && truststore != null) {
        curTrustStore = truststore;
    }
    // Mirror the effective settings into system properties (clearing when unset).
    if (curCharset == null) {
        sysProps.remove("sqlfile.charset");
    } else {
        sysProps.put("sqlfile.charset", curCharset);
    }
    if (curTrustStore == null) {
        sysProps.remove("javax.net.ssl.trustStore");
    } else {
        sysProps.put("javax.net.ssl.trustStore", curTrustStore);
    }
    String urlString = null;
    try {
        urlString = expandSysPropVars(url);
    } catch (IllegalArgumentException iae) {
        throw new MalformedURLException(iae.getMessage() + " for URL '" + url + "'");
    }
    String userString = null;
    if (username != null)
        try {
            userString = expandSysPropVars(username);
        } catch (IllegalArgumentException iae) {
            throw new MalformedURLException(iae.getMessage() + " for user name '" + username + "'");
        }
    String passwordString = null;
    if (password != null)
        try {
            passwordString = expandSysPropVars(password);
        } catch (IllegalArgumentException iae) {
            throw new MalformedURLException(iae.getMessage() + " for password");
        }
    Class.forName(curDriver);
    // This is not necessary for jdbc:odbc or if class loaded by a
    // service resource file. Consider checking for that.
    Connection c = (userString == null)
            ? DriverManager.getConnection(urlString)
            : DriverManager.getConnection(urlString, userString, passwordString);
    if (ti != null) RCData.setTI(c, ti);
    // Would like to verify the setting made by checking
    // c.getTransactionIsolation(). Unfortunately, the spec allows for
    // databases to substitute levels according to some rules, and it's
    // impossible to know what to expect since custom levels are permitted.
    // Debug:
    // System.err.println("TI set to " + ti + "\nPOST: "
    //         + SqlTool.tiToString(c.getTransactionIsolation()));
    return c;
}
java
/**
 * Maps a {@link Connection} transaction-isolation constant to its symbolic
 * name. Values that match none of the standard constants are reported with
 * their numeric value, since drivers may define custom levels.
 */
static public String tiToString(int ti) {
    if (ti == Connection.TRANSACTION_READ_UNCOMMITTED) {
        return "TRANSACTION_READ_UNCOMMITTED";
    }
    if (ti == Connection.TRANSACTION_READ_COMMITTED) {
        return "TRANSACTION_READ_COMMITTED";
    }
    if (ti == Connection.TRANSACTION_REPEATABLE_READ) {
        return "TRANSACTION_REPEATABLE_READ";
    }
    if (ti == Connection.TRANSACTION_SERIALIZABLE) {
        return "TRANSACTION_SERIALIZABLE";
    }
    if (ti == Connection.TRANSACTION_NONE) {
        return "TRANSACTION_NONE";
    }
    return "Custom Transaction Isolation numerical value: " + ti;
}
java
/**
 * Answers an ops request with a dummy (no-data) response. Passing null results
 * makes sendOpsResponse emit a payload carrying only the request id.
 */
protected void handleJSONMessageAsDummy(JSONObject obj) throws Exception {
    hostLog.info("Generating dummy response for ops request " + obj);
    sendOpsResponse(null, obj, OPS_DUMMY);
}
java
public void performOpsAction(final Connection c, final long clientHandle, final OpsSelector selector, final ParameterSet params) throws Exception { m_es.submit(new Runnable() { @Override public void run() { try { collectStatsImpl(c, clientHandle, selector, params); } catch (Exception e) { hostLog.warn("Exception while attempting to collect stats", e); // ENG-14639, prevent clients like sqlcmd from hanging on exception sendErrorResponse(c, ClientResponse.OPERATIONAL_FAILURE, "Failed to get statistics (" + e.getMessage() + ").", clientHandle); } } }); }
java
/**
 * Registers a pending ops request, arms its collection timeout, and fans the
 * JSON work description out to the ops agent on every live host. When too many
 * requests are in flight, expired entries are evicted first; if none were
 * expired the new request is rejected with GRACEFUL_FAILURE.
 */
protected void distributeOpsWork(PendingOpsRequest newRequest, JSONObject obj) throws Exception {
    if (m_pendingRequests.size() > MAX_IN_FLIGHT_REQUESTS) {
        /*
         * Defensively check for an expired request not caught
         * by timeout check. Should never happen.
         */
        Iterator<Entry<Long, PendingOpsRequest>> iter = m_pendingRequests.entrySet().iterator();
        final long now = System.currentTimeMillis();
        boolean foundExpiredRequest = false;
        while (iter.hasNext()) {
            PendingOpsRequest por = iter.next().getValue();
            if (now - por.startTime > OPS_COLLECTION_TIMEOUT * 2) {
                iter.remove();
                foundExpiredRequest = true;
            }
        }
        if (!foundExpiredRequest) {
            sendErrorResponse(newRequest.c, ClientResponse.GRACEFUL_FAILURE,
                    "Too many pending stat requests", newRequest.clientData);
            return;
        }
    }
    final long requestId = m_nextRequestId++;
    m_pendingRequests.put(requestId, newRequest);
    // Arm the timeout; checkForRequestTimeout answers with whatever arrived by then.
    newRequest.timer = m_es.schedule(new Runnable() {
        @Override
        public void run() {
            checkForRequestTimeout(requestId);
        }
    }, OPS_COLLECTION_TIMEOUT, TimeUnit.MILLISECONDS);
    // selector, subselector, interval filled in by parse...
    obj.put("requestId", requestId);
    obj.put("returnAddress", m_mailbox.getHSId());
    int siteId = CoreUtils.getSiteIdFromHSId(m_mailbox.getHSId());
    // Compress once; broadcast the same payload to each host's agent mailbox.
    byte payloadBytes[] = CompressionService.compressBytes(obj.toString(4).getBytes("UTF-8"));
    for (int hostId : m_messenger.getLiveHostIds()) {
        long agentHsId = CoreUtils.getHSIdFromHostAndSite(hostId, siteId);
        newRequest.expectedOpsResponses++;
        BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {JSON_PAYLOAD}, payloadBytes);
        m_mailbox.send(agentHsId, bpm);
    }
}
java
/**
 * Serializes the aggregated ops/stats result and writes it back to the client.
 * When no tables were collected (feature disabled / not yet available), a
 * GRACEFUL_FAILURE response with an explanatory status string is sent instead.
 */
protected void sendClientResponse(PendingOpsRequest request) {
    byte statusCode = ClientResponse.SUCCESS;
    String statusString = null;
    /*
     * It is possible not to receive a table response if a feature is not enabled
     */
    // All of the null/empty table handling/detecting/generation sucks. Just making it
    // work for now, not making it pretty. --izzy
    VoltTable responseTables[] = request.aggregateTables;
    if (responseTables == null || responseTables.length == 0) {
        responseTables = new VoltTable[0];
        statusCode = ClientResponse.GRACEFUL_FAILURE;
        statusString = "Requested info \"" + request.subselector +
                "\" is not yet available or not supported in the current configuration.";
    }
    ClientResponseImpl response = new ClientResponseImpl(statusCode,
            ClientResponse.UNINITIALIZED_APP_STATUS_CODE, null, responseTables, statusString);
    response.setClientHandle(request.clientData);
    // Wire format: 4-byte length prefix followed by the flattened response.
    ByteBuffer buf = ByteBuffer.allocate(response.getSerializedSize() + 4);
    buf.putInt(buf.capacity() - 4);
    response.flattenToBuffer(buf).flip();
    request.c.writeStream().enqueue(buf);
}
java
/**
 * Sends collected stats tables back to the requesting agent as a compressed
 * binary payload. A null results array produces a response containing only the
 * request id, signalling "unsupported / not yet available".
 * Payload layout: requestId(8) [+ per-table: length(4) + table bytes].
 */
private void sendOpsResponse(VoltTable[] results, JSONObject obj, byte payloadType) throws Exception {
    long requestId = obj.getLong("requestId");
    long returnAddress = obj.getLong("returnAddress");
    // Send a response with no data since the stats is not supported or not yet available
    if (results == null) {
        ByteBuffer responseBuffer = ByteBuffer.allocate(8);
        responseBuffer.putLong(requestId);
        byte responseBytes[] = CompressionService.compressBytes(responseBuffer.array());
        BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {payloadType}, responseBytes);
        m_mailbox.send(returnAddress, bpm);
        return;
    }
    ByteBuffer[] bufs = new ByteBuffer[results.length];
    int statbytes = 0;
    // Rewind each table buffer and total its bytes to size the response buffer.
    for (int i = 0; i < results.length; i++) {
        bufs[i] = results[i].getBuffer();
        bufs[i].position(0);
        statbytes += bufs[i].remaining();
    }
    // NOTE: the "+ +" below is a harmless unary plus left over from line wrapping.
    ByteBuffer responseBuffer = ByteBuffer.allocate(
            8 + // requestId
            4 * results.length + // length prefix for each stats table
            + statbytes);
    responseBuffer.putLong(requestId);
    for (ByteBuffer buf : bufs) {
        responseBuffer.putInt(buf.remaining());
        responseBuffer.put(buf);
    }
    byte responseBytes[] = CompressionService.compressBytes(responseBuffer.array());
    BinaryPayloadMessage bpm = new BinaryPayloadMessage(new byte[] {payloadType}, responseBytes);
    m_mailbox.send(returnAddress, bpm);
}
java
/**
 * Records the dependency between a UDF and a statement in both directions:
 * the function learns which statements use it, and the statement learns which
 * functions it depends on.
 */
private static void addUDFDependences(Function function, Statement catalogStmt) {
    Procedure proc = (Procedure) catalogStmt.getParent();
    addFunctionDependence(function, proc, catalogStmt);
    addStatementDependence(function, catalogStmt);
}
java
private static void addFunctionDependence(Function function, Procedure procedure, Statement catalogStmt) { String funcDeps = function.getStmtdependers(); Set<String> stmtSet = new TreeSet<>(); for (String stmtName : funcDeps.split(",")) { if (! stmtName.isEmpty()) { stmtSet.add(stmtName); } } String statementName = procedure.getTypeName() + ":" + catalogStmt.getTypeName(); if (stmtSet.contains(statementName)) { return; } stmtSet.add(statementName); StringBuilder sb = new StringBuilder(); // We will add this procedure:statement pair. So make sure we have // an initial comma. Note that an empty set must be represented // by an empty string. We represent the set {pp:ss, qq:tt}, // where "pp" and "qq" are procedures and "ss" and "tt" are // statements in their procedures respectively, with // the string ",pp:ss,qq:tt,". If we search for "pp:ss" we will // never find "ppp:sss" by accident. // // Do to this, when we add something to string we start with a single // comma, and then add "qq:tt," at the end. sb.append(","); for (String stmtName : stmtSet) { sb.append(stmtName + ","); } function.setStmtdependers(sb.toString()); }
java
/**
 * Adds the function's name to the statement's dependee set, stored as a
 * comma-delimited string of the form ",a,b," (leading/trailing commas) so
 * substring searches never produce false matches.
 */
private static void addStatementDependence(Function function, Statement catalogStmt) {
    // Parse the current encoding back into a sorted set.
    Set<String> fnSet = new TreeSet<>();
    for (String fnName : catalogStmt.getFunctiondependees().split(",")) {
        if (!fnName.isEmpty()) {
            fnSet.add(fnName);
        }
    }
    String functionName = function.getTypeName();
    // Set.add reports whether the entry was new; bail out on a duplicate.
    if (!fnSet.add(functionName)) {
        return;
    }
    // Re-encode: a leading comma, then "name," for each member.
    StringBuilder sb = new StringBuilder(",");
    for (String fnName : fnSet) {
        sb.append(fnName).append(',');
    }
    catalogStmt.setFunctiondependees(sb.toString());
}
java
static boolean fragmentReferencesPersistentTable(AbstractPlanNode node) { if (node == null) return false; // these nodes can read/modify persistent tables if (node instanceof AbstractScanPlanNode) return true; if (node instanceof InsertPlanNode) return true; if (node instanceof DeletePlanNode) return true; if (node instanceof UpdatePlanNode) return true; // recursively check out children for (int i = 0; i < node.getChildCount(); i++) { AbstractPlanNode child = node.getChild(i); if (fragmentReferencesPersistentTable(child)) return true; } // if nothing found, return false return false; }
java
/**
 * Builds the catalog procedure backing nibble-delete for a table.
 * Statement ids: "0" = count matching rows, "1" = delete a batch,
 * "2" = fetch the value at an offset.
 */
public static Procedure compileNibbleDeleteProcedure(Table catTable, String procName,
        Column col, ComparisonOperation comp) {
    Procedure newCatProc = addProcedure(catTable, procName);
    addStatement(catTable, newCatProc, genSelectSqlForNibbleDelete(catTable, col, comp), "0");
    addStatement(catTable, newCatProc, genDeleteSqlForNibbleDelete(catTable, col, comp), "1");
    addStatement(catTable, newCatProc, genValueAtOffsetSqlForNibbleDelete(catTable, col, comp), "2");
    return newCatProc;
}
java
public static Procedure compileMigrateProcedure(Table table, String procName, Column column, ComparisonOperation comparison) { Procedure proc = addProcedure(table, procName); // Select count(*) StringBuilder sb = new StringBuilder(); sb.append("SELECT COUNT(*) FROM " + table.getTypeName()); sb.append(" WHERE not migrating AND " + column.getName() + " " + comparison.toString() + " ?;"); addStatement(table, proc, sb.toString(), "0"); // Get cutoff value sb.setLength(0); sb.append("SELECT " + column.getName() + " FROM " + table.getTypeName()); sb.append(" WHERE not migrating ORDER BY " + column.getName()); if (comparison == ComparisonOperation.LTE || comparison == ComparisonOperation.LT) { sb.append(" ASC OFFSET ? LIMIT 1;"); } else { sb.append(" DESC OFFSET ? LIMIT 1;"); } addStatement(table, proc, sb.toString(), "1"); // Migrate sb.setLength(0); sb.append("MIGRATE FROM " + table.getTypeName()); sb.append(" WHERE not migrating AND " + column.getName() + " " + comparison.toString() + " ?;"); addStatement(table, proc, sb.toString(), "2"); return proc; }
java
/**
 * Wraps {@code collection} so that {@code constraint} is checked on every
 * element added through the returned view.
 */
public static <E> Collection<E> constrainedCollection(
        Collection<E> collection, Constraint<? super E> constraint) {
    ConstrainedCollection<E> wrapped = new ConstrainedCollection<E>(collection, constraint);
    return wrapped;
}
java
/**
 * Wraps {@code set} so that {@code constraint} is checked on every element
 * added through the returned view.
 */
public static <E> Set<E> constrainedSet(Set<E> set, Constraint<? super E> constraint) {
    ConstrainedSet<E> wrapped = new ConstrainedSet<E>(set, constraint);
    return wrapped;
}
java
/**
 * Wraps {@code sortedSet} so that {@code constraint} is checked on every
 * element added through the returned view; sort order is preserved.
 */
public static <E> SortedSet<E> constrainedSortedSet(
        SortedSet<E> sortedSet, Constraint<? super E> constraint) {
    ConstrainedSortedSet<E> wrapped = new ConstrainedSortedSet<E>(sortedSet, constraint);
    return wrapped;
}
java
/**
 * Wraps {@code list} so that {@code constraint} is checked on every element
 * added through the returned view. RandomAccess lists get a wrapper that
 * preserves the RandomAccess marker interface.
 */
public static <E> List<E> constrainedList(List<E> list, Constraint<? super E> constraint) {
    if (list instanceof RandomAccess) {
        return new ConstrainedRandomAccessList<E>(list, constraint);
    }
    return new ConstrainedList<E>(list, constraint);
}
java
/**
 * Wraps {@code listIterator} so that {@code constraint} is checked on every
 * element added or set through the returned iterator.
 */
private static <E> ListIterator<E> constrainedListIterator(
        ListIterator<E> listIterator, Constraint<? super E> constraint) {
    ConstrainedListIterator<E> wrapped = new ConstrainedListIterator<E>(listIterator, constraint);
    return wrapped;
}
java
/**
 * Creates and registers a new index structure.
 * Delegates entirely to createAndAddIndexStructure; the {@code store} argument
 * is accepted for interface compatibility but not used here.
 */
public final Index createIndex(PersistentStore store, HsqlName name, int[] columns,
        boolean[] descending, boolean[] nullsLast, boolean unique, boolean migrating,
        boolean constraint, boolean forward) {
    return createAndAddIndexStructure(name, columns, descending, nullsLast, unique,
            migrating, constraint, forward);
}
java
/**
 * Computes the result type of combining this BIT type with another type.
 * For anything other than CONCAT the aggregate (widest common) type is used.
 * For CONCAT the result precision is the sum of both operand precisions,
 * capped at maxBitPrecision for variable-width results.
 *
 * Error X_42562 is raised for non-concatenable operand types; X_42570 when
 * concatenation would have to truncate a fixed-width BIT result.
 */
public Type getCombinedType(Type other, int operation) {
    if (operation != OpTypes.CONCAT) {
        return getAggregateType(other);
    }
    Type newType;
    long newPrecision = precision + other.precision;
    switch (other.typeCode) {
        case Types.SQL_ALL_TYPES :
            // NULL operand: the combined type is simply this type.
            return this;
        case Types.SQL_BIT :
            newType = this;
            break;
        case Types.SQL_BIT_VARYING :
            newType = other;
            break;
        case Types.SQL_BINARY :
        case Types.SQL_VARBINARY :
        case Types.SQL_BLOB :
            // Let the binary type drive the combination rules.
            return other.getCombinedType(this, operation);
        default :
            throw Error.error(ErrorCode.X_42562);
    }
    if (newPrecision > maxBitPrecision) {
        if (typeCode == Types.SQL_BIT) {
            // Standard disallows type length reduction
            throw Error.error(ErrorCode.X_42570);
        }
        // Variable-width result: silently cap at the maximum precision.
        newPrecision = maxBitPrecision;
    }
    return getBitType(newType.typeCode, newPrecision);
}
java
/**
 * Pauses the cluster. The lowest site on this host performs the actual state
 * flip: it compare-and-sets the ZooKeeper operation-mode node to PAUSED using
 * the node's version for optimistic concurrency, then pauses the host
 * messenger, sets the local VoltDB mode, and fires an SNMP trap. Every site
 * then ticks its site connection so statistics are fresh, and returns a
 * single-row STATUS_OK table.
 *
 * @param ctx execution context for the site running this fragment
 * @return one VoltTable carrying STATUS_OK
 */
public VoltTable[] run(SystemProcedureExecutionContext ctx) {
    // Choose the lowest site ID on this host to actually flip the bit
    if (ctx.isLowestSiteId()) {
        VoltDBInterface voltdb = VoltDB.instance();
        OperationMode opMode = voltdb.getMode();
        if (LOG.isDebugEnabled()) {
            LOG.debug("voltdb opmode is " + opMode);
        }
        ZooKeeper zk = voltdb.getHostMessenger().getZK();
        try {
            Stat stat;
            OperationMode zkMode = null;
            Code code;
            // Optimistic-concurrency loop: read the ZK node with its version,
            // and retry the conditional write only while we keep losing the
            // race (BADVERSION) and the node is not yet PAUSED.
            do {
                stat = new Stat();
                code = Code.BADVERSION;
                try {
                    byte [] data = zk.getData(VoltZK.operationMode, false, stat);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("zkMode is " + (zkMode == null ? "(null)" : OperationMode.valueOf(data)));
                    }
                    // A missing payload falls back to the locally observed mode.
                    zkMode = data == null ? opMode : OperationMode.valueOf(data);
                    if (zkMode == PAUSED) {
                        // Someone else already paused the cluster; nothing to write.
                        if (LOG.isDebugEnabled()) {
                            LOG.debug("read node at version " + stat.getVersion()
                                    + ", txn " + ll(stat.getMzxid()));
                        }
                        break;
                    }
                    // Conditional write: only succeeds if nobody changed the
                    // node since our read (version must still match).
                    stat = zk.setData(VoltZK.operationMode, PAUSED.getBytes(), stat.getVersion());
                    code = Code.OK;
                    zkMode = PAUSED;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("!WROTE! node at version " + stat.getVersion()
                                + ", txn " + ll(stat.getMzxid()));
                    }
                    break;
                } catch (BadVersionException ex) {
                    // Lost the race; loop and re-read at the new version.
                    code = ex.code();
                }
            } while (zkMode != PAUSED && code == Code.BADVERSION);
            // Keep the final Stat (read or written) for later inspection.
            m_stat = stat;
            voltdb.getHostMessenger().pause();
            voltdb.setMode(PAUSED);
            // for snmp
            SnmpTrapSender snmp = voltdb.getSnmpTrapSender();
            if (snmp != null) {
                snmp.pause("Cluster paused.");
            }
        } catch (Exception e) {
            // ZK failures are unrecoverable here; surface them to the caller.
            throw new RuntimeException(e);
        }
    }
    // Force a tick so that stats will be updated.
    // Primarily added to get latest table stats for DR pause and empty db check.
    ctx.getSiteProcedureConnection().tick();
    VoltTable t = new VoltTable(VoltSystemProcedure.STATUS_SCHEMA);
    t.addRow(VoltSystemProcedure.STATUS_OK);
    return (new VoltTable[] {t});
}
java
/**
 * Records which generated (identity) column values this statement should
 * return, based on the JDBC generated-keys request mode. Only applies to
 * INSERT statements on tables with a single identity column; in every other
 * case the method leaves the generated-key state untouched.
 *
 * @param generate a ResultConstants RETURN_* mode
 * @param meta     request metadata carrying the column indexes or names
 */
@Override
public void setGeneratedColumnInfo(int generate, ResultMetaData meta) {

    // can support INSERT_SELECT also
    if (type != StatementTypes.INSERT) {
        return;
    }

    int colIndex = baseTable.getIdentityColumnIndex();

    // No identity column on the target table: nothing to report.
    if (colIndex == -1) {
        return;
    }

    switch (generate) {

        case ResultConstants.RETURN_NO_GENERATED_KEYS :
            return;

        case ResultConstants.RETURN_GENERATED_KEYS_COL_INDEXES :
            int[] columnIndexes = meta.getGeneratedColumnIndexes();

            // Only honor a request that names exactly the identity column.
            if (columnIndexes.length != 1) {
                return;
            }

            if (columnIndexes[0] != colIndex) {
                return;
            }

        // $FALL-THROUGH$ — validated index request behaves like the plain
        // RETURN_GENERATED_KEYS case below.
        case ResultConstants.RETURN_GENERATED_KEYS :
            generatedIndexes = new int[]{ colIndex };
            break;

        case ResultConstants.RETURN_GENERATED_KEYS_COL_NAMES :
            String[] columnNames = meta.getGeneratedColumnNames();

            // Only honor a request that names exactly the identity column.
            if (columnNames.length != 1) {
                return;
            }

            if (baseTable.findColumn(columnNames[0]) != colIndex) {
                return;
            }

            generatedIndexes = new int[]{ colIndex };
            break;
    }

    // Build result metadata describing the generated column(s) to return.
    generatedResultMetaData =
        ResultMetaData.newResultMetaData(generatedIndexes.length);

    for (int i = 0; i < generatedIndexes.length; i++) {
        ColumnSchema column = baseTable.getColumn(generatedIndexes[i]);

        generatedResultMetaData.columns[i] = column;
    }

    generatedResultMetaData.prepareData();
}
java
/**
 * Verifies the session may execute this statement: the target table must be
 * writable (unless temp), and a non-admin grantee must hold rights on every
 * referenced sequence, routine, selected range, and on the target table for
 * the statement's specific operation. Throws on the first missing right.
 *
 * @param session the executing session
 */
void checkAccessRights(Session session) {

    // Writable-target check applies regardless of grantee privileges.
    if (targetTable != null && !targetTable.isTemp()) {
        targetTable.checkDataReadOnly();
        session.checkReadWrite();
    }

    // Administrators bypass all object-level access checks.
    if (session.isAdmin()) {
        return;
    }

    for (int idx = 0; idx < sequences.length; idx++) {
        session.getGrantee().checkAccess(sequences[idx]);
    }

    for (int idx = 0; idx < routines.length; idx++) {
        // Library routines are accessible to everyone.
        if (routines[idx].isLibraryRoutine()) {
            continue;
        }

        session.getGrantee().checkAccess(routines[idx]);
    }

    for (RangeVariable range : rangeVariables) {

        // System-schema tables are readable without explicit grants.
        if (range.rangeTable.getSchemaName()
                == SqlInvariants.SYSTEM_SCHEMA_HSQLNAME) {
            continue;
        }

        session.getGrantee().checkSelect(range.rangeTable, range.usedColumns);
    }

    // Operation-specific right on the target table.
    switch (type) {

        case StatementTypes.CALL :
        case StatementTypes.SELECT_CURSOR :
            break;

        case StatementTypes.INSERT :
            session.getGrantee().checkInsert(targetTable, insertCheckColumns);
            break;

        case StatementTypes.DELETE_WHERE :
            session.getGrantee().checkDelete(targetTable);
            break;

        case StatementTypes.UPDATE_WHERE :
            session.getGrantee().checkUpdate(targetTable, updateCheckColumns);
            break;

        case StatementTypes.MERGE :
            // MERGE may both insert and update, so it needs both rights.
            session.getGrantee().checkInsert(targetTable, insertCheckColumns);
            session.getGrantee().checkUpdate(targetTable, updateCheckColumns);
            break;
    }
}
java
/**
 * Returns the result metadata for this statement. DML statements (DELETE,
 * INSERT, UPDATE, MIGRATE) produce no result columns, so they share the empty
 * metadata singleton; any other statement type reaching this method is an
 * internal error.
 *
 * @return the empty result metadata
 * @throws RuntimeException U_S0500 for unexpected statement types
 */
@Override
public ResultMetaData getResultMetaData() {

    if (type == StatementTypes.DELETE_WHERE
            || type == StatementTypes.INSERT
            || type == StatementTypes.UPDATE_WHERE
            || type == StatementTypes.MIGRATE_WHERE) {
        return ResultMetaData.emptyResultMetaData;
    }

    throw Error.runtimeError(ErrorCode.U_S0500,
                             "CompiledStatement.getResultMetaData()");
}
java