code stringlengths 73 34.1k | label stringclasses 1 value |
|---|---|
/**
 * Moves the logical file position to {@code position}.
 * For writable files whose physical length is shorter than the target,
 * the gap is first filled with zero bytes (written in chunks of at most
 * 256 KiB) so the file physically covers the seek target.
 *
 * @param position absolute byte offset to seek to
 * @throws IOException if extending the file fails (logged, then rethrown)
 */
public void seek(long position) throws IOException {
if (!readOnly && file.length() < position) {
// Chunk size for zero-padding: the full gap, capped at 256 KiB.
long tempSize = position - file.length();
if (tempSize > 1 << 18) {
tempSize = 1 << 18;
}
byte[] temp = new byte[(int) tempSize];
try {
long pos = file.length();
// Write full zero chunks while more than one chunk remains.
for (; pos < position - tempSize; pos += tempSize) {
file.seek(pos);
file.write(temp, 0, (int) tempSize);
}
// Write the final (possibly partial) chunk up to the target.
file.seek(pos);
file.write(temp, 0, (int) (position - pos));
realPosition = position;
} catch (IOException e) {
appLog.logContext(e, null);
throw e;
}
}
// Actual repositioning is deferred; only remember the requested offset.
seekPosition = position;
} | java |
/**
 * Copies exactly {@code output.length} bytes from the queued read buffers
 * into {@code output}, consuming them. If the read queue runs dry, the
 * current write buffer is flipped and "stolen" to satisfy the request.
 * Fully drained buffers are discarded as they are consumed.
 *
 * @param output destination array; filled completely
 * @throws IllegalStateException if fewer than {@code output.length} bytes
 *         are available (callers must check via tryRead() first)
 */
void getBytes(byte[] output) {
if (m_totalAvailable < output.length) {
throw new IllegalStateException("Requested " + output.length + " bytes; only have "
+ m_totalAvailable + " bytes; call tryRead() first");
}
int bytesCopied = 0;
while (bytesCopied < output.length) {
BBContainer firstC = m_readBBContainers.peekFirst();
if (firstC == null) {
// Steal the write buffer
m_poolBBContainer.b().flip();
m_readBBContainers.add(m_poolBBContainer);
firstC = m_poolBBContainer;
m_poolBBContainer = null;
}
ByteBuffer first = firstC.b();
// The availability check above guarantees queued buffers are non-empty.
assert first.remaining() > 0;
// Copy bytes from first into output
int bytesRemaining = first.remaining();
int bytesToCopy = output.length - bytesCopied;
if (bytesToCopy > bytesRemaining) bytesToCopy = bytesRemaining;
first.get(output, bytesCopied, bytesToCopy);
bytesCopied += bytesToCopy;
m_totalAvailable -= bytesToCopy;
if (first.remaining() == 0) {
// read an entire block: move it to the empty buffers list
m_readBBContainers.poll();
firstC.discard();
}
}
} | java |
/**
 * Rebuilds {@code index} from scratch: clears its accessor, then
 * re-inserts every row of the table in iteration order.
 */
void reindex(Session session, Index index) {
// Drop the existing index contents before repopulating.
setAccessor(index, null);
RowIterator rows = table.rowIterator(session);
while (rows.hasNext()) {
// may need to clear the node before insert
index.insert(session, this, rows.getNextRow());
}
} | java |
/**
 * Creates a new physical connection wrapped for XA use: loads the
 * configured JDBC driver, opens a JDBCConnection, then wires up the
 * XAResource / connection-wrapper / pooled-connection triple.
 *
 * Fixes: removed the System.err debug prints explicitly marked
 * "Comment out before public release", collapsed the three identical
 * catch blocks into one multi-catch, and chained the original exception
 * as the SQLException's cause instead of discarding it.
 *
 * @return a new XAConnection backed by a fresh physical connection
 * @throws SQLException if the driver cannot be loaded or the connection fails
 */
public XAConnection getXAConnection() throws SQLException {
try {
Class.forName(driver).newInstance();
} catch (ClassNotFoundException | IllegalAccessException
| InstantiationException e) {
// Preserve the root cause for callers instead of just its message.
throw new SQLException("Error opening connection: " + e.getMessage(), e);
}
JDBCConnection connection =
(JDBCConnection) DriverManager.getConnection(url, connProperties);
JDBCXAResource xaResource = new JDBCXAResource(connection, this);
JDBCXAConnectionWrapper xaWrapper =
new JDBCXAConnectionWrapper(connection, xaResource,
connectionDefaults);
JDBCXAConnection xaConnection = new JDBCXAConnection(xaWrapper,
xaResource);
xaWrapper.setPooledConnection(xaConnection);
return xaConnection;
} | java |
/**
 * Credentialed variant: validates that the supplied user/password match
 * this data source's configuration, then delegates to the no-arg
 * {@link #getXAConnection()}.
 *
 * @throws SQLException if validation fails or the connection cannot be made
 */
public XAConnection getXAConnection(String user,
String password) throws SQLException {
validateSpecifiedUserAndPassword(user, password);
return getXAConnection();
} | java |
/**
 * Builds a compact debug identifier of the form
 * "(T[threadName]@threadId:O[SimpleClassName]@identityHash)", with both
 * numeric parts rendered in base {@link Character#MAX_RADIX}.
 * Returns "(null)" for a null argument.
 */
static String id(Object o) {
if (o == null) {
return "(null)";
}
final Thread current = Thread.currentThread();
final String threadId = Long.toString(current.getId(), Character.MAX_RADIX);
final String identity =
Long.toString(System.identityHashCode(o), Character.MAX_RADIX);
return new StringBuilder(128)
.append("(T[").append(current.getName()).append("]@")
.append(threadId)
.append(":O[").append(o.getClass().getSimpleName())
.append("]@").append(identity)
.append(")")
.toString();
} | java |
/**
 * Registers (or replaces) the channel-change callback for {@code importer},
 * then dispatches any queued assignments that now have a registered owner.
 * The callback map is an immutable snapshot swapped via a stamped CAS loop;
 * the m_undispatched queue is guarded by its own monitor.
 *
 * @param importer non-null, non-blank importer name
 * @param callback non-null callback to invoke on channel changes
 */
public void registerCallback(String importer, ChannelChangeCallback callback) {
Preconditions.checkArgument(
importer != null && !importer.trim().isEmpty(),
"importer is null or empty"
);
callback = checkNotNull(callback, "callback is null");
// No-op after shutdown.
if (m_done.get()) return;
int [] stamp = new int[]{0};
NavigableMap<String,ChannelChangeCallback> prev = null;
NavigableMap<String,ChannelChangeCallback> next = null;
ImmutableSortedMap.Builder<String,ChannelChangeCallback> mbldr = null;
synchronized (m_undispatched) {
// CAS loop: rebuild the immutable callback map with this importer's
// entry replaced, retrying if another thread swapped it first.
do {
prev = m_callbacks.get(stamp);
mbldr = ImmutableSortedMap.naturalOrder();
mbldr.putAll(Maps.filterKeys(prev, not(equalTo(importer))));
mbldr.put(importer, callback);
next = mbldr.build();
} while (!m_callbacks.compareAndSet(prev, next, stamp[0], stamp[0]+1));
NavigableSet<String> registered = next.navigableKeySet();
NavigableSet<String> unregistered = m_unregistered.getReference();
// Drain queued assignments: dispatch ones whose importer is now
// registered, drop ones addressed to explicitly unregistered importers.
Iterator<ImporterChannelAssignment> itr = m_undispatched.iterator();
while (itr.hasNext()) {
final ImporterChannelAssignment assignment = itr.next();
if (registered.contains(assignment.getImporter())) {
final ChannelChangeCallback dispatch = next.get(assignment.getImporter());
m_buses.submit(new DistributerRunnable() {
@Override
public void susceptibleRun() throws Exception {
dispatch.onChange(assignment);
}
});
itr.remove();
} else if (unregistered.contains(assignment.getImporter())) {
itr.remove();
if (!assignment.getAdded().isEmpty()) {
LOG.warn("(" + m_hostId
+ ") discarding assignment to unregistered importer "
+ assignment);
}
}
}
}
} | java |
/**
 * Removes {@code importer}'s callback, records it in the unregistered set,
 * and discards any queued assignments addressed to it. Both shared
 * structures are immutable snapshots swapped via stamped CAS loops under
 * the m_undispatched monitor. No-op if the importer is unknown, already
 * unregistered, or the distributer has shut down.
 */
public void unregisterCallback(String importer) {
if ( importer == null
|| !m_callbacks.getReference().containsKey(importer)
|| m_unregistered.getReference().contains(importer))
{
return;
}
if (m_done.get()) return;
int [] rstamp = new int[]{0};
NavigableMap<String,ChannelChangeCallback> rprev = null;
NavigableMap<String,ChannelChangeCallback> rnext = null;
int [] ustamp = new int[]{0};
NavigableSet<String> uprev = null;
NavigableSet<String> unext = null;
synchronized(m_undispatched) {
// CAS loop: rebuild the callback map without this importer; stop
// early if another thread already removed it.
do {
rprev = m_callbacks.get(rstamp);
rnext = ImmutableSortedMap.<String,ChannelChangeCallback>naturalOrder()
.putAll(Maps.filterKeys(rprev, not(equalTo(importer))))
.build();
} while (rprev.containsKey(importer) && !m_callbacks.compareAndSet(rprev, rnext, rstamp[0], rstamp[0]+1));
// CAS loop: add this importer to the unregistered set; stop early
// if another thread already added it.
do {
uprev = m_unregistered.get(ustamp);
unext = ImmutableSortedSet.<String>naturalOrder()
.addAll(Sets.filter(uprev, not(equalTo(importer))))
.add(importer)
.build();
} while (!uprev.contains(importer) && m_unregistered.compareAndSet(uprev, unext, ustamp[0], ustamp[0]+1));
// Drop queued assignments for any importer now known unregistered.
Iterator<ImporterChannelAssignment> itr = m_undispatched.iterator();
while (itr.hasNext()) {
final ImporterChannelAssignment assignment = itr.next();
if (unext.contains(assignment.getImporter())) {
itr.remove();
}
}
}
} | java |
/**
 * Shuts the distributer down exactly once (guarded by m_done): stops both
 * executors, waits for them to drain, then deletes this host's ZooKeeper
 * nodes.
 *
 * Fix: an interrupted await now restores the thread's interrupt status
 * before the wrapped exception is thrown, instead of silently clearing it.
 */
public void shutdown() {
if (m_done.compareAndSet(false, true)) {
m_es.shutdown();
m_buses.shutdown();
DeleteNode deleteHost = new DeleteNode(joinZKPath(HOST_DN, m_hostId));
DeleteNode deleteCandidate = new DeleteNode(m_candidate);
try {
m_es.awaitTermination(365, TimeUnit.DAYS);
} catch (InterruptedException e) {
// Preserve the interrupt for callers further up the stack.
Thread.currentThread().interrupt();
throw loggedDistributerException(e, "interrupted while waiting for executor termination");
}
try {
m_buses.awaitTermination(365, TimeUnit.DAYS);
} catch (InterruptedException e) {
// Preserve the interrupt for callers further up the stack.
Thread.currentThread().interrupt();
throw loggedDistributerException(e, "interrupted while waiting for executor termination");
}
deleteHost.onComplete();
deleteCandidate.onComplete();
}
} | java |
/**
 * EventBus dead-event handler: an ImporterChannelAssignment that found no
 * subscriber is re-posted if its importer is now registered, dropped (with
 * a warning when it carried additions) if the importer was explicitly
 * unregistered, and otherwise parked in m_undispatched for later delivery.
 *
 * Fix: corrected the log-message typo "disgarding" -> "discarding" to
 * match the identical warning emitted from registerCallback().
 */
@Subscribe
public void undispatched(DeadEvent e) {
if (!m_done.get() && e.getEvent() instanceof ImporterChannelAssignment) {
ImporterChannelAssignment assignment = (ImporterChannelAssignment)e.getEvent();
synchronized (m_undispatched) {
NavigableSet<String> registered = m_callbacks.getReference().navigableKeySet();
NavigableSet<String> unregistered = m_unregistered.getReference();
if (registered.contains(assignment.getImporter())) {
// Owner showed up since the original post: retry delivery.
m_eb.post(assignment);
} else if (!assignment.getAdded().isEmpty()
&& unregistered.contains(assignment.getImporter())) {
LOG.warn("(" + m_hostId
+ ") discarding assignment to unregistered importer "
+ assignment);
} else {
// Unknown importer: park until it registers.
m_undispatched.add(assignment);
}
}
}
} | java |
/**
 * Returns the next element. In chained mode, drains it1 first, then it2;
 * otherwise walks the elements array.
 *
 * Bug fix: the recursive next() calls used to advance past an exhausted
 * chained iterator discarded their results and fell through to the
 * non-chained code path below. They now return the recursive result.
 *
 * @throws NoSuchElementException when no elements remain
 */
public Object next() {
// for chained iterators
if (chained) {
if (it1 == null) {
if (it2 == null) {
throw new NoSuchElementException();
}
if (it2.hasNext()) {
return it2.next();
}
it2 = null;
return next(); // was: next(); — result was discarded
} else {
if (it1.hasNext()) {
return it1.next();
}
it1 = null;
return next(); // was: next(); — result was discarded
}
}
// for other iterators
if (hasNext()) {
return elements[i++];
}
throw new NoSuchElementException();
} | java |
/**
 * Collects the site ids for every partition in {@code partitions} into a
 * single list (duplicates preserved, in partition order).
 */
public List<Long> getSitesForPartitions(int[] partitions) {
ArrayList<Long> result = new ArrayList<Long>();
for (int partition : partitions) {
result.addAll(getSitesForPartition(partition));
}
return result;
} | java |
/**
 * Array-returning variant of {@link #getSitesForPartitions}. Delegates to
 * it rather than duplicating the accumulation loop (the two copies had
 * already drifted apart in name only and were identical in behavior).
 */
public long[] getSitesForPartitionsAsArray(int[] partitions) {
ArrayList<Long> all_sites =
new ArrayList<Long>(getSitesForPartitions(partitions));
return longListToArray(all_sites);
} | java |
/**
 * Reads {@code blocks} 512-byte blocks from the compressed stream into
 * readBuffer, looping until all bytes arrive. A premature end-of-stream
 * returns quietly (VoltDB extension) instead of throwing EOFException.
 *
 * @param blocks number of 512-byte blocks to read
 * @throws IOException if the underlying stream read fails
 */
protected void readCompressedBlocks(int blocks) throws IOException {
int bytesSoFar = 0;
int requiredBytes = 512 * blocks;
// This method works with individual bytes!
int i;
while (bytesSoFar < requiredBytes) {
i = readStream.read(readBuffer, bytesSoFar,
requiredBytes - bytesSoFar);
if (i < 0) {
// A VoltDB extension to disable tagging eof as an error.
return;
/* disable 3 lines ...
throw new EOFException(
RB.singleton.getString(
RB.DECOMPRESS_RANOUT, bytesSoFar, requiredBytes));
... disabled 3 lines */
// End of VoltDB extension
}
// Track both the running total and this call's progress.
bytesRead += i;
bytesSoFar += i;
}
} | java |
/**
 * Copies {@code original} into a new array of length {@code newLength},
 * truncating or padding with nulls as needed (same contract as
 * {@code java.util.Arrays.copyOf}).
 */
static <T> T[] arraysCopyOf(T[] original, int newLength) {
T[] result = newArray(original, newLength);
int copyCount = Math.min(original.length, newLength);
System.arraycopy(original, 0, result, 0, copyCount);
return result;
} | java |
/**
 * Installs the initial (version 0) hashinator: constructs it from the raw
 * config, records it as the pristine baseline, caches it, and publishes it
 * as the current global instance.
 */
public static void initialize(Class<? extends TheHashinator> hashinatorImplementation, byte config[]) {
TheHashinator hashinator = constructHashinator( hashinatorImplementation, config, false);
m_pristineHashinator = hashinator;
m_cachedHashinators.put(0L, hashinator);
instance.set(Pair.of(0L, hashinator));
} | java |
/**
 * Builds a standalone hashinator from the given config without touching
 * the cached/global instance state.
 */
public static TheHashinator getHashinator(Class<? extends TheHashinator> hashinatorImplementation,
byte config[], boolean cooked) {
return constructHashinator(hashinatorImplementation, config, cooked);
} | java |
/**
 * Reflectively constructs a hashinator via the implementation's
 * (byte[], boolean) constructor.
 *
 * Fix: the result of Throwables.propagate(e) was discarded, leaving a
 * dead "return null" fallback that forced callers to consider a null
 * result; throwing the returned RuntimeException makes the failure path
 * explicit and removes the unreachable return.
 *
 * @param configBytes serialized config, raw or "cooked" per {@code cooked}
 */
public static TheHashinator
constructHashinator(
Class<? extends TheHashinator> hashinatorImplementation,
byte configBytes[], boolean cooked) {
try {
Constructor<? extends TheHashinator> constructor =
hashinatorImplementation.getConstructor(byte[].class, boolean.class);
return constructor.newInstance(configBytes, cooked);
} catch (Exception e) {
throw Throwables.propagate(e);
}
} | java |
/**
 * Computes a CRC32C checksum of the serialized hashinator config, used as
 * a cheap signature for comparing configurations.
 */
static public long computeConfigurationSignature(byte [] config) {
PureJavaCrc32C crc = new PureJavaCrc32C();
crc.update(config);
return crc.getValue();
} | java |
/**
 * Hashes {@code invocationParameter} (interpreted as {@code partitionType})
 * to a partition id using the current global hashinator instance.
 */
public static int getPartitionForParameter(VoltType partitionType, Object invocationParameter) {
return instance.get().getSecond().getHashedPartitionForParameter(partitionType, invocationParameter);
} | java |
/**
 * Installs the hashinator for {@code version} as the global instance if it
 * is newer than the current one, reusing a cached instance for the version
 * when available. Returns the (possibly shared) hashinator together with
 * an UndoAction: if this call performed the swap, undo() attempts to roll
 * the instance back to the prior snapshot; otherwise undo() is a no-op.
 */
public static Pair<? extends UndoAction, TheHashinator> updateHashinator(
Class<? extends TheHashinator> hashinatorImplementation,
long version,
byte configBytes[],
boolean cooked) {
//Use a cached/canonical hashinator if possible
TheHashinator existingHashinator = m_cachedHashinators.get(version);
if (existingHashinator == null) {
existingHashinator = constructHashinator(hashinatorImplementation, configBytes, cooked);
// putIfAbsent may lose a race; adopt the winner's instance if so.
TheHashinator tempVal = m_cachedHashinators.putIfAbsent( version, existingHashinator);
if (tempVal != null) {
existingHashinator = tempVal;
}
}
//Do a CAS loop to maintain a global instance
while (true) {
final Pair<Long, ? extends TheHashinator> snapshot = instance.get();
if (version > snapshot.getFirst()) {
final Pair<Long, ? extends TheHashinator> update =
Pair.of(version, existingHashinator);
if (instance.compareAndSet(snapshot, update)) {
if (!m_elasticallyModified) {
if (!update.getSecond().pIsPristine()) {
// This is not a lock protected (atomic) but it should be fine because
// release() should only be called by the one thread that successfully
// updated the hashinator
hostLogger.debug("The Hashinator has been elastically modified.");
m_elasticallyModified = true;
}
}
// Note: Only undo is ever called and only from a failure in @BalancePartitions
return Pair.of(new UndoAction() {
@Override
public void release() {}
@Override
public void undo() {
boolean rolledBack = instance.compareAndSet(update, snapshot);
if (!rolledBack) {
hostLogger.info(
"Didn't roll back hashinator because it wasn't set to expected hashinator");
}
}
}, existingHashinator);
}
} else {
// Current instance is same-or-newer: nothing to swap, undo is a no-op.
return Pair.of(new UndoAction() {
@Override
public void release() {}
@Override
public void undo() {}
}, existingHashinator);
}
}
} | java |
/**
 * Returns the token ranges owned by {@code partition} according to the
 * current global hashinator.
 */
public static Map<Integer, Integer> getRanges(int partition) {
return instance.get().getSecond().pGetRanges(partition);
} | java |
/**
 * Snapshots the current global hashinator: packages its cooked config
 * bytes together with its version for persistence.
 */
public static HashinatorSnapshotData serializeConfiguredHashinator()
throws IOException
{
Pair<Long, ? extends TheHashinator> currentInstance = instance.get();
byte[] cookedData = currentInstance.getSecond().getCookedBytes();
return new HashinatorSnapshotData(cookedData, currentInstance.getFirst());
} | java |
/**
 * Convenience wrapper: updates the global hashinator using the configured
 * implementation class and cooked (serialized-for-snapshot) config bytes.
 */
public static Pair< ? extends UndoAction, TheHashinator> updateConfiguredHashinator(long version, byte config[]) {
return updateHashinator(getConfiguredHashinatorClass(), version, config, true);
} | java |
/**
 * Returns a copy of the cached partition-keys table for the given key
 * type, or null for unsupported types. A semiDeepCopy is returned because
 * handing out the shared cached table would let callers corrupt its
 * internal buffer offsets and positions (e.g. from an NT procedure).
 */
public static VoltTable getPartitionKeys(TheHashinator hashinator, VoltType type) {
switch (type) {
case INTEGER:
return hashinator.m_integerPartitionKeys.get().semiDeepCopy();
case STRING:
return hashinator.m_stringPartitionKeys.get().semiDeepCopy();
case VARBINARY:
return hashinator.m_varbinaryPartitionKeys.get().semiDeepCopy();
default:
// Unsupported partition-key type.
return null;
}
} | java |
/**
 * Appends the DR id range [startDrId, endDrId] to the tracker. Asserts the
 * range is well-formed and strictly follows the tracker's current span.
 */
public void append(long startDrId, long endDrId, long spUniqueId, long mpUniqueId) {
assert(startDrId <= endDrId && (m_map.isEmpty() || startDrId > end(m_map.span())));
addRange(startDrId, endDrId, spUniqueId, mpUniqueId);
} | java |
/**
 * Drops all tracked DR ids below {@code newTruncationPoint}. Ranges wholly
 * below the point are removed; a range straddling it is trimmed to start
 * at the point. If no range contains the point, a degenerate
 * [point, point] range is added so the truncation point stays tracked.
 * No-op when the point precedes the first tracked id.
 */
public void truncate(long newTruncationPoint) {
if (newTruncationPoint < getFirstDrId()) {
return;
}
final Iterator<Range<Long>> iter = m_map.asRanges().iterator();
while (iter.hasNext()) {
final Range<Long> next = iter.next();
if (end(next) < newTruncationPoint) {
// Entirely below the truncation point: discard.
iter.remove();
} else if (next.contains(newTruncationPoint)) {
// Straddles the point: keep only the upper part.
iter.remove();
m_map.add(range(newTruncationPoint, end(next)));
return;
} else {
// Ranges are ordered; everything further is above the point.
break;
}
}
m_map.add(range(newTruncationPoint, newTruncationPoint));
} | java |
/**
 * Merges another tracker into this one: unions the DR id ranges, advances
 * the safe point to the later of the two (truncating below it), and keeps
 * the max of each unique-id watermark.
 */
public void mergeTracker(DRConsumerDrIdTracker tracker) {
final long newSafePoint = Math.max(tracker.getSafePointDrId(), getSafePointDrId());
m_map.addAll(tracker.m_map);
truncate(newSafePoint);
m_lastSpUniqueId = Math.max(m_lastSpUniqueId, tracker.m_lastSpUniqueId);
m_lastMpUniqueId = Math.max(m_lastMpUniqueId, tracker.m_lastMpUniqueId);
} | java |
/**
 * Reads one length-framed message from the channel and parses it as UTF-8
 * JSON.
 * NOTE(review): this decodes the buffer's full backing array; assumes
 * readMessage() returns a heap buffer whose array is exactly the message
 * (offset 0, no trailing slack) — confirm against MessagingChannel.
 */
private JSONObject readJSONObjFromWire(MessagingChannel messagingChannel) throws IOException, JSONException {
ByteBuffer messageBytes = messagingChannel.readMessage();
JSONObject jsObj = new JSONObject(new String(messageBytes.array(), StandardCharsets.UTF_8));
return jsObj;
} | java |
/**
 * Reads the remote node's JSON handshake response and, when
 * {@code checkVersion} is set, validates version/build compatibility.
 * Mismatched build strings on the same version, or an incompatible
 * version, crash the local node. On success the remote version is added
 * to {@code activeVersions} and the response returned.
 */
private JSONObject processJSONResponse(MessagingChannel messagingChannel,
Set<String> activeVersions,
boolean checkVersion) throws IOException, JSONException
{
// read the json response from socketjoiner with version info
JSONObject jsonResponse = readJSONObjFromWire(messagingChannel);
if (!checkVersion) {
return jsonResponse;
}
VersionChecker versionChecker = m_acceptor.getVersionChecker();
String remoteVersionString = jsonResponse.getString(VERSION_STRING);
String remoteBuildString = jsonResponse.getString(BUILD_STRING);
boolean remoteAcceptsLocalVersion = jsonResponse.getBoolean(VERSION_COMPATIBLE);
if (remoteVersionString.equals(versionChecker.getVersionString())) {
if (!versionChecker.getBuildString().equals(remoteBuildString)) {
// ignore test/eclipse build string so tests still work
if (!versionChecker.getBuildString().equals("VoltDB") && !remoteBuildString.equals("VoltDB")) {
org.voltdb.VoltDB.crashLocalVoltDB("For VoltDB version " + versionChecker.getVersionString() +
" git tag/hash is not identical across the cluster. Node join failed.\n" +
" joining build string: " + versionChecker.getBuildString() + "\n" +
" existing build string: " + remoteBuildString, false, null);
return null;
}
}
}
else if (!remoteAcceptsLocalVersion) {
if (!versionChecker.isCompatibleVersionString(remoteVersionString)) {
org.voltdb.VoltDB.crashLocalVoltDB("Cluster contains nodes running VoltDB version " + remoteVersionString +
" which is incompatibile with local version " + versionChecker.getVersionString() +
".\n", false, null);
return null;
}
}
//Do this only after we think we are compatible.
activeVersions.add(remoteVersionString);
return jsonResponse;
} | java |
/**
 * Opens a SocketChannel to the cluster leader, retrying every 250 ms on
 * transient connect failures (warning every 8th attempt). In PROBE mode a
 * single failure returns null instead of retrying.
 *
 * Fixes: the channel opened before a failed connect() is now closed
 * (previously a file descriptor leaked on every failed attempt), and an
 * interrupted retry-sleep restores the thread's interrupt status.
 */
private SocketChannel createLeaderSocket(
SocketAddress hostAddr,
ConnectStrategy mode) throws IOException
{
SocketChannel socket = null;
int connectAttempts = 0;
do {
SocketChannel channel = null;
try {
channel = SocketChannel.open();
channel.socket().connect(hostAddr, 5000);
socket = channel;
}
catch (java.net.ConnectException
|java.nio.channels.UnresolvedAddressException
|java.net.NoRouteToHostException
|java.net.PortUnreachableException e)
{
// Close the half-open channel so failed attempts don't leak FDs.
if (channel != null) {
try { channel.close(); } catch (IOException ignored) {}
}
if (mode == ConnectStrategy.PROBE) {
return null;
}
if ((++connectAttempts % 8) == 0) {
LOG.warn("Joining primary failed: " + e + " retrying..");
}
try {
Thread.sleep(250); // milliseconds
}
catch (InterruptedException dontcare) {
// Preserve interrupt status for callers; keep retrying.
Thread.currentThread().interrupt();
}
}
} while (socket == null);
return socket;
} | java |
/**
 * Connects to the given host, retrying every 250 ms on ConnectException
 * until the connection succeeds. (SocketChannel.open(SocketAddress)
 * closes the channel itself when the connect fails, so no cleanup is
 * needed here.)
 *
 * Fix: an interrupted retry-sleep now restores the thread's interrupt
 * status instead of silently clearing it.
 */
private SocketChannel connectToHost(SocketAddress hostAddr)
throws IOException
{
SocketChannel socket = null;
while (socket == null) {
try {
socket = SocketChannel.open(hostAddr);
}
catch (java.net.ConnectException e) {
LOG.warn("Joining host failed: " + e.getMessage() + " retrying..");
try {
Thread.sleep(250); // milliseconds
}
catch (InterruptedException dontcare) {
// Preserve interrupt status for callers; keep retrying.
Thread.currentThread().interrupt();
}
}
}
return socket;
} | java |
/**
 * Performs the REQUEST_HOSTID handshake with the leader: sends our
 * version, internal port/interface, and acceptor-decorated config as a
 * length-framed UTF-8 JSON message, then reads back the leader's
 * version-validated info and the host-id assignment.
 *
 * @return the leader's info and the host-id response, paired
 */
private RequestHostIdResponse requestHostId (
MessagingChannel messagingChannel,
Set<String> activeVersions) throws Exception
{
VersionChecker versionChecker = m_acceptor.getVersionChecker();
activeVersions.add(versionChecker.getVersionString());
JSONObject jsObj = new JSONObject();
jsObj.put(TYPE, ConnectionType.REQUEST_HOSTID.name());
// put the version compatibility status in the json
jsObj.put(VERSION_STRING, versionChecker.getVersionString());
// Advertise the port we are going to listen on based on config
jsObj.put(PORT, m_internalPort);
// If config specified an internal interface use that.
// Otherwise the leader will echo back what we connected on
if (!m_internalInterface.isEmpty()) {
jsObj.put(ADDRESS, m_internalInterface);
}
// communicate configuration and node state
jsObj = m_acceptor.decorate(jsObj, Optional.empty());
jsObj.put(MAY_EXCHANGE_TS, true);
// Frame the message: 4-byte length prefix, then UTF-8 JSON payload.
byte jsBytes[] = jsObj.toString(4).getBytes(StandardCharsets.UTF_8);
ByteBuffer requestHostIdBuffer = ByteBuffer.allocate(4 + jsBytes.length);
requestHostIdBuffer.putInt(jsBytes.length);
requestHostIdBuffer.put(jsBytes).flip();
messagingChannel.writeMessage(requestHostIdBuffer);
// read the json response from socketjoiner with version info and validate it
JSONObject leaderInfo = processJSONResponse(messagingChannel, activeVersions, true);
// read the json response sent by HostMessenger with HostID
JSONObject jsonObj = readJSONObjFromWire(messagingChannel);
return new RequestHostIdResponse(leaderInfo, jsonObj);
} | java |
/**
 * Adds a planned fragment (no inline fragment plan) to this task message.
 */
public void addFragment(byte[] planHash, int outputDepId, ByteBuffer parameterSet) {
addFragment(planHash, null, outputDepId, parameterSet);
} | java |
/**
 * Adds a custom (ad-hoc planned) fragment carrying its own serialized
 * plan and originating SQL text.
 * NOTE(review): stmtText.getBytes() uses the platform default charset;
 * presumably UTF-8 is intended — confirm against the deserializing side
 * before changing.
 */
public void addCustomFragment(byte[] planHash, int outputDepId, ByteBuffer parameterSet, byte[] fragmentPlan, String stmtText) {
FragmentData item = new FragmentData();
item.m_planHash = planHash;
item.m_outputDepId = outputDepId;
item.m_parameterSet = parameterSet;
item.m_fragmentPlan = fragmentPlan;
item.m_stmtText = stmtText.getBytes();
m_items.add(item);
} | java |
/**
 * Factory: builds a FragmentTaskMessage containing exactly one fragment,
 * serializing {@code params} (if any) into the fragment's parameter
 * buffer. Serialization failure is fatal (crashes the local node).
 */
public static FragmentTaskMessage createWithOneFragment(long initiatorHSId,
long coordinatorHSId,
long txnId,
long uniqueId,
boolean isReadOnly,
byte[] planHash,
int outputDepId,
ParameterSet params,
boolean isFinal,
boolean isForReplay,
boolean isNPartTxn,
long timestamp) {
ByteBuffer parambytes = null;
if (params != null) {
parambytes = ByteBuffer.allocate(params.getSerializedSize());
try {
params.flattenToBuffer(parambytes);
parambytes.flip();
}
catch (IOException e) {
// Should not happen for an in-memory buffer; treat as fatal.
VoltDB.crashLocalVoltDB("Failed to serialize parameter for fragment: " + params.toString(), true, e);
}
}
FragmentTaskMessage ret = new FragmentTaskMessage(initiatorHSId, coordinatorHSId,
txnId, uniqueId, isReadOnly, isFinal,
isForReplay, isNPartTxn, timestamp);
ret.addFragment(planHash, outputDepId, parambytes);
return ret;
} | java |
/**
 * Marks this message as an empty restart placeholder and attaches a
 * single fragment with an empty parameter set under {@code outputDepId}.
 * If serializing the empty set somehow fails, a minimal 2-byte
 * zero-parameter-count buffer is used instead.
 */
public void setEmptyForRestart(int outputDepId) {
m_emptyForRestart = true;
ParameterSet blank = ParameterSet.emptyParameterSet();
ByteBuffer mt = ByteBuffer.allocate(blank.getSerializedSize());
try {
blank.flattenToBuffer(mt);
}
catch (IOException ioe) {
// Shouldn't ever happen, just bail out to not-obviously equivalent behavior
mt = ByteBuffer.allocate(2);
mt.putShort((short)0);
}
addFragment(EMPTY_HASH, outputDepId, mt);
} | java |
/**
 * Returns an immutable copy of {@code table}. An existing ImmutableTable
 * is returned as-is (safe: it is covariant in its type parameters);
 * otherwise the cells are snapshotted, with 0- and 1-cell tables routed
 * to the cheaper of() factories.
 */
public static <R, C, V> ImmutableTable<R, C, V> copyOf(
Table<? extends R, ? extends C, ? extends V> table) {
if (table instanceof ImmutableTable) {
@SuppressWarnings("unchecked")
ImmutableTable<R, C, V> parameterizedTable = (ImmutableTable<R, C, V>) table;
return parameterizedTable;
} else {
int size = table.size();
switch (size) {
case 0:
return of();
case 1:
Cell<? extends R, ? extends C, ? extends V> onlyCell =
Iterables.getOnlyElement(table.cellSet());
return ImmutableTable.<R, C, V>of(
onlyCell.getRowKey(), onlyCell.getColumnKey(), onlyCell.getValue());
default:
ImmutableSet.Builder<Cell<R, C, V>> cellSetBuilder =
new ImmutableSet.Builder<Cell<R, C, V>>(size);
for (Cell<? extends R, ? extends C, ? extends V> cell : table.cellSet()) {
/*
 * Must cast to be able to create a Cell<R, C, V> rather than a
 * Cell<? extends R, ? extends C, ? extends V>
 */
cellSetBuilder.add(
cellOf((R) cell.getRowKey(), (C) cell.getColumnKey(), (V) cell.getValue()));
}
return RegularImmutableTable.forCells(cellSetBuilder.build());
}
}
} | java |
/**
 * Atomically swaps in a new socket, closing the old one. If end-of-stream
 * has already been signaled (m_eos), the new socket is closed too and the
 * field is cleared instead.
 */
private void replaceSocket(Socket newSocket) {
synchronized (m_socketLock) {
closeSocket(m_socket);
if (m_eos.get()) {
// Already shut down: don't keep a live socket around.
closeSocket(newSocket);
m_socket = null;
} else {
m_socket = newSocket;
}
}
} | java |
/**
 * Writes the watch tables to {@code pwriter}: grouped by path (each path
 * followed by the hex session ids watching it) when {@code byPath} is
 * true, otherwise grouped by session (each hex session id followed by the
 * paths it watches).
 */
public synchronized void dumpWatches(PrintWriter pwriter, boolean byPath) {
if (byPath) {
// One section per path, listing watching session ids.
for (Entry<String, HashSet<Watcher>> entry : watchTable.entrySet()) {
pwriter.println(entry.getKey());
for (Watcher watcher : entry.getValue()) {
pwriter.print("\t0x");
pwriter.print(Long.toHexString(((ServerCnxn) watcher).getSessionId()));
pwriter.print("\n");
}
}
return;
}
// One section per session, listing its watched paths.
for (Entry<Watcher, HashSet<String>> entry : watch2Paths.entrySet()) {
pwriter.print("0x");
pwriter.println(Long.toHexString(((ServerCnxn) entry.getKey()).getSessionId()));
for (String path : entry.getValue()) {
pwriter.print("\t");
pwriter.println(path);
}
}
} | java |
/**
 * Initializes this catalog item's parent map and type name.
 *
 * @param name non-null type name
 * @throws CatalogException if {@code name} is null
 */
void setBaseValues(CatalogMap<? extends CatalogType> parentMap, String name) {
if (name == null) {
throw new CatalogException("Null value where it shouldn't be.");
}
m_parentMap = parentMap;
m_typename = name;
} | java |
/**
 * Recursively asserts that every catalog-typed field, catalog reference,
 * and catalog-map entry declared on this object belongs to the same
 * Catalog instance as this object.
 *
 * Bug fix: the CatalogMap check previously tested
 * CatalogMap.class.isAssignableFrom(field.getClass()) — field.getClass()
 * is always java.lang.reflect.Field, so the map branch never executed.
 * It now tests field.getType(), matching the other two checks.
 */
public void validate() throws IllegalArgumentException, IllegalAccessException {
for (Field field : getClass().getDeclaredFields()) {
if (CatalogType.class.isAssignableFrom(field.getType())) {
CatalogType ct = (CatalogType) field.get(this);
assert(ct.getCatalog() == getCatalog()) : ct.getCatalogPath() + " has wrong catalog";
}
if (CatalogReference.class.isAssignableFrom(field.getType())) {
@SuppressWarnings("unchecked")
CatalogReference<? extends CatalogType> cr = (CatalogReference<? extends CatalogType>) field.get(this);
if (cr.m_value != null) {
assert(cr.m_value.getCatalog() == getCatalog()) : cr.m_value.getCatalogPath() + " has wrong catalog";
}
}
if (CatalogMap.class.isAssignableFrom(field.getType())) {
@SuppressWarnings("unchecked")
CatalogMap<? extends CatalogType> cm = (CatalogMap<? extends CatalogType>) field.get(this);
// Validate each child and recurse into it.
for (CatalogType ct : cm) {
assert(ct.getCatalog() == getCatalog()) : ct.getCatalogPath() + " has wrong catalog";
ct.validate();
}
}
}
} | java |
/**
 * Serializes the set of partitions with disabled external streams as a
 * JSON array under the DISABLED_EXTERNAL_STREAMS key.
 */
private void writeExternalStreamStates(JSONStringer stringer) throws JSONException {
stringer.key(DISABLED_EXTERNAL_STREAMS).array();
for (int partition : m_disabledExternalStreams) {
stringer.value(partition);
}
stringer.endArray();
} | java |
/**
 * Unions the rows of all non-null operand tables into one result table
 * whose schema and status code come from the first non-null operand.
 * Returns null when every operand is null (or the collection is empty).
 */
public static VoltTable unionTables(Collection<VoltTable> operands) {
VoltTable result = null;
// Locate the first non-null table to get the schema
for (VoltTable vt : operands) {
if (vt != null) {
result = new VoltTable(vt.getTableSchema());
result.setStatusCode(vt.getStatusCode());
break;
}
}
if (result != null) {
result.addTables(operands);
// Leave the result positioned at the start for the caller.
result.resetRowPosition();
}
return result;
} | java |
/**
 * Returns true if any STRING column of any row of {@code t} contains
 * {@code s} (lower-casing both sides when {@code caseSenstive} is false).
 * Null column values are skipped; an empty table yields false.
 * NOTE(review): scanning starts from fetchRow(0) and then advanceRow() —
 * presumably this visits every row exactly once; confirm VoltTableRow
 * advance semantics when positioned via fetchRow.
 */
public static boolean tableContainsString(VoltTable t, String s, boolean caseSenstive) {
if (t.getRowCount() == 0) {
return false;
}
if (!caseSenstive) {
s = s.toLowerCase();
}
VoltTableRow row = t.fetchRow(0);
do {
for (int i = 0; i < t.getColumnCount(); i++) {
if (t.getColumnType(i) == VoltType.STRING) {
String value = row.getString(i);
if (value == null) {
continue;
}
if (!caseSenstive) {
value = value.toLowerCase();
}
if (value.contains(s)) {
return true;
}
}
}
} while (row.advanceRow());
return false;
} | java |
/**
 * Materializes one table row as an Object[] of its column values, each
 * fetched with its declared column type.
 */
public static Object[] tableRowAsObjects(VoltTableRow row) {
final int columnCount = row.getColumnCount();
Object[] values = new Object[columnCount];
for (int col = 0; col < columnCount; col++) {
values[col] = row.get(col, row.getColumnType(col));
}
return values;
} | java |
/**
 * Wraps a VoltTable's rows as a sequential java.util.stream.Stream.
 */
public static Stream<VoltTableRow> stream(VoltTable table) {
return StreamSupport.stream(new VoltTableSpliterator(table, 0, table.getRowCount()), false);
} | java |
/**
 * Registers {@code bean} under its parent's path in the internal
 * bean/path maps and, unless the bean is hidden, with the platform MBean
 * server. Registration failures are logged and rethrown.
 *
 * @param parent may be null for a root bean; otherwise must already be
 *        registered (its path is looked up)
 */
public void register(ZKMBeanInfo bean, ZKMBeanInfo parent)
throws JMException
{
assert bean != null;
String path = null;
if (parent != null) {
path = mapBean2Path.get(parent);
assert path != null;
}
path = makeFullPath(path, parent);
// Track the bean even if hidden, so children can resolve their paths.
mapBean2Path.put(bean, path);
mapName2Bean.put(bean.getName(), bean);
if(bean.isHidden())
return;
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName oname = makeObjectName(path, bean);
try {
mbs.registerMBean(bean, oname);
} catch (JMException e) {
LOG.warn("Failed to register MBean " + bean.getName());
throw e;
}
} | java |
/**
 * Unregisters a non-hidden bean from the platform MBean server; a null
 * path (bean never registered) is a no-op. Failures are logged and
 * rethrown.
 */
private void unregister(String path,ZKMBeanInfo bean) throws JMException {
if(path==null)
return;
if (!bean.isHidden()) {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
mbs.unregisterMBean(makeObjectName(path, bean));
} catch (JMException e) {
LOG.warn("Failed to unregister MBean " + bean.getName());
throw e;
}
}
} | java |
/**
 * Unregisters {@code bean} from the MBean server and removes it from the
 * internal maps. InstanceNotFoundException is tolerated (expected when
 * multiple ZooKeeper servers share one JVM, e.g. in tests); other JMX
 * failures are logged and swallowed so cleanup always completes.
 */
public void unregister(ZKMBeanInfo bean) {
if(bean==null)
return;
String path=mapBean2Path.get(bean);
try {
unregister(path,bean);
}
catch (InstanceNotFoundException e) {
LOG.warn("InstanceNotFoundException during unregister usually means more than one Zookeeper server has been running in a single JVM");
LOG.warn("InstanceNotFoundException during unregister can be safely ignored during automated tests.");
}
catch (JMException e) {
LOG.warn("Error during unregister", e);
}
// Always drop the bookkeeping entries, even when JMX unregister failed.
mapBean2Path.remove(bean);
mapName2Bean.remove(bean.getName());
} | java |
/**
 * Unregisters every tracked bean from the MBean server (failures logged,
 * not rethrown) and clears both bookkeeping maps.
 */
public void unregisterAll() {
for(Map.Entry<ZKMBeanInfo,String> e: mapBean2Path.entrySet()) {
try {
// Map entry is bean -> path; the helper takes (path, bean).
unregister(e.getValue(), e.getKey());
} catch (JMException e1) {
LOG.warn("Error during unregister", e1);
}
}
mapBean2Path.clear();
mapName2Bean.clear();
} | java |
/**
 * Joins name segments onto a path prefix with "/" separators.
 * A null prefix starts from "/"; a "/" prefix is used verbatim; any other
 * prefix gets a trailing "/". Null segments are skipped.
 */
public String makeFullPath(String prefix, String... name) {
final String base;
if (prefix == null) {
base = "/";
} else if (prefix.equals("/")) {
base = prefix;
} else {
base = prefix + "/";
}
StringBuilder sb = new StringBuilder(base);
boolean needSeparator = false;
for (String segment : name) {
if (segment == null) {
continue;
}
if (needSeparator) {
sb.append("/");
} else {
needSeparator = true;
}
sb.append(segment);
}
return sb.toString();
} | java |
/**
 * Builds the JMX ObjectName for {@code bean} at {@code path} by tokenizing
 * the path and the bean name into the shared domain, trimming the trailing
 * separator added by tokenize. Returns null for a null path. Malformed
 * names are logged and rethrown.
 */
protected ObjectName makeObjectName(String path, ZKMBeanInfo bean)
throws MalformedObjectNameException
{
if(path==null)
return null;
StringBuilder beanName = new StringBuilder(CommonNames.DOMAIN + ":");
int counter=0;
counter=tokenize(beanName,path,counter);
tokenize(beanName,bean.getName(),counter);
// Drop the trailing separator left by the last tokenize call.
beanName.deleteCharAt(beanName.length()-1);
try {
return new ObjectName(beanName.toString());
} catch (MalformedObjectNameException e) {
LOG.warn("Invalid name \"" + beanName.toString() + "\" for class "
+ bean.getClass().toString());
throw e;
}
} | java |
/**
 * Returns the underlying type code of this user-defined type.
 *
 * @throws RuntimeException (U_S0500) if this type has no user type modifier
 */
@Override
public final int getType() {
if (userTypeModifier == null) {
throw Error.runtimeError(ErrorCode.U_S0500, "Type");
}
return userTypeModifier.getType();
} | java |
/**
 * SQL CAST semantics: delegates directly to convertToType.
 */
public Object castToType(SessionInterface session, Object a, Type type) {
return convertToType(session, a, type);
} | java |
/**
 * JDBC conversion entry point: delegates directly to convertToType.
 */
public Object convertToTypeJDBC(SessionInterface session, Object a,
Type type) {
return convertToType(session, a, type);
} | java |
/**
 * Maps an internal SQL type code to its JDBC java.sql.Types equivalent;
 * codes without a distinct JDBC counterpart pass through unchanged.
 */
public static int getJDBCTypeCode(int type) {
switch (type) {
case Types.SQL_BLOB :
return Types.BLOB;
case Types.SQL_CLOB :
return Types.CLOB;
case Types.SQL_BIGINT :
return Types.BIGINT;
case Types.SQL_BINARY :
return Types.BINARY;
case Types.SQL_VARBINARY :
return Types.VARBINARY;
case Types.SQL_BIT :
case Types.SQL_BIT_VARYING :
return Types.BIT;
default :
return type;
}
} | java |
/**
 * Central type factory: maps an SQL type code plus collation/precision/
 * scale to a concrete Type instance. FLOAT precision above 53 is
 * rejected; NUMERIC/DECIMAL default their precision and (VoltDB
 * extension) force scale to 12; unknown codes raise U_S0500.
 */
public static Type getType(int type, int collation, long precision,
int scale) {
switch (type) {
case Types.SQL_ALL_TYPES :
return SQL_ALL_TYPES;
// return SQL_ALL_TYPES; // needs changes to Expression type resolution
case Types.SQL_CHAR :
case Types.SQL_VARCHAR :
case Types.VARCHAR_IGNORECASE :
case Types.SQL_CLOB :
return CharacterType.getCharacterType(type, precision);
case Types.SQL_INTEGER :
return SQL_INTEGER;
case Types.SQL_SMALLINT :
return SQL_SMALLINT;
case Types.SQL_BIGINT :
return SQL_BIGINT;
case Types.TINYINT :
return TINYINT;
case Types.SQL_FLOAT :
if (precision > 53) {
throw Error.error(ErrorCode.X_42592, "" + precision);
}
// $FALL-THROUGH$
case Types.SQL_REAL :
case Types.SQL_DOUBLE :
return SQL_DOUBLE;
case Types.SQL_NUMERIC :
case Types.SQL_DECIMAL :
if (precision == 0) {
precision = NumberType.defaultNumericPrecision;
}
// A VoltDB extension to disable variable scale decimals
scale = 12;
// End of VoltDB extension
return NumberType.getNumberType(type, precision, scale);
case Types.SQL_BOOLEAN :
return SQL_BOOLEAN;
case Types.SQL_BINARY :
case Types.SQL_VARBINARY :
case Types.SQL_BLOB :
return BinaryType.getBinaryType(type, precision);
case Types.SQL_BIT :
case Types.SQL_BIT_VARYING :
return BitType.getBitType(type, precision);
case Types.SQL_DATE :
case Types.SQL_TIME :
case Types.SQL_TIME_WITH_TIME_ZONE :
case Types.SQL_TIMESTAMP :
case Types.SQL_TIMESTAMP_WITH_TIME_ZONE :
return DateTimeType.getDateTimeType(type, scale);
case Types.SQL_INTERVAL_YEAR :
case Types.SQL_INTERVAL_YEAR_TO_MONTH :
case Types.SQL_INTERVAL_MONTH :
case Types.SQL_INTERVAL_DAY :
case Types.SQL_INTERVAL_DAY_TO_HOUR :
case Types.SQL_INTERVAL_DAY_TO_MINUTE :
case Types.SQL_INTERVAL_DAY_TO_SECOND :
case Types.SQL_INTERVAL_HOUR :
case Types.SQL_INTERVAL_HOUR_TO_MINUTE :
case Types.SQL_INTERVAL_HOUR_TO_SECOND :
case Types.SQL_INTERVAL_MINUTE :
case Types.SQL_INTERVAL_MINUTE_TO_SECOND :
case Types.SQL_INTERVAL_SECOND :
return IntervalType.getIntervalType(type, precision, scale);
case Types.VOLT_GEOGRAPHY_POINT :
return VOLT_GEOGRAPHY_POINT;
case Types.VOLT_GEOGRAPHY :
return new VoltGeographyType(precision);
case Types.OTHER :
return OTHER;
default :
throw Error.runtimeError(ErrorCode.U_S0500, "Type");
}
} | java |
/**
 * AWT event hook (legacy 1.0 event model): on any scrollbar event,
 * refreshes the cached scroll offsets (iX in pixels from the horizontal
 * bar, iY derived from row height times the vertical bar value),
 * repaints, and consumes the event. Everything else is passed to super.
 */
public boolean handleEvent(Event e) {
switch (e.id) {
case Event.SCROLL_LINE_UP :
case Event.SCROLL_LINE_DOWN :
case Event.SCROLL_PAGE_UP :
case Event.SCROLL_PAGE_DOWN :
case Event.SCROLL_ABSOLUTE :
iX = sbHoriz.getValue();
iY = iRowHeight * sbVert.getValue();
repaint();
return true;
}
return super.handleEvent(e);
} | java |
public static byte[] getConfigureBytes(int partitionCount, int tokenCount) {
Preconditions.checkArgument(partitionCount > 0);
Preconditions.checkArgument(tokenCount > partitionCount);
Buckets buckets = new Buckets(partitionCount, tokenCount);
ElasticHashinator hashinator = new ElasticHashinator(buckets.getTokens());
return hashinator.getConfigBytes();
} | java |
/**
 * Serializes the off-heap (token, partition) ring into a byte array:
 * a 4-byte entry count followed by m_tokenCount pairs of
 * (4-byte token, 4-byte partition id).
 * Entries are verified to be in ascending token order as they are read.
 */
private byte[] toBytes() {
    ByteBuffer buf = ByteBuffer.allocate(4 + (m_tokenCount * 8));
    buf.putInt(m_tokenCount);
    int lastToken = Integer.MIN_VALUE;
    for (int ii = 0; ii < m_tokenCount; ii++) {
        // Each off-heap entry is 8 bytes: token at +0, partition id at +4.
        final long ptr = m_tokens + (ii * 8);
        final int token = Bits.unsafe.getInt(ptr);
        // The off-heap array must be sorted by token.
        Preconditions.checkArgument(token >= lastToken);
        lastToken = token;
        final int pid = Bits.unsafe.getInt(ptr + 4);
        buf.putInt(token);
        buf.putInt(pid);
    }
    return buf.array();
}
/**
 * Returns a new hashinator with the given tokens merged onto the ring.
 * Existing tokens that the additions supersede are dropped: a token at the
 * same position is replaced outright, and an intermediate token that is
 * being "moved forward" within the same bucket for the same partition is
 * removed. Hashinators are immutable, so a fresh instance is returned.
 *
 * @param tokensToAdd token -> partition entries to place on the ring
 * @return a new hashinator reflecting the merged ring
 */
public ElasticHashinator addTokens(NavigableMap<Integer, Integer> tokensToAdd)
{
    // figure out the interval
    long interval = deriveTokenInterval(m_tokensMap.get().keySet());
    Map<Integer, Integer> tokens = Maps.newTreeMap();
    for (Map.Entry<Integer, Integer> e : m_tokensMap.get().entrySet()) {
        // An incoming token at the exact same position replaces this one.
        if (tokensToAdd.containsKey(e.getKey())) {
            continue;
        }
        // see if we are moving an intermediate token forward
        if (isIntermediateToken(e.getKey(), interval)) {
            Map.Entry<Integer, Integer> floorEntry = tokensToAdd.floorEntry(e.getKey());
            // If the two tokens belong to the same partition and bucket, we are moving the one on the ring
            // forward, so remove it from the ring
            if (floorEntry != null &&
                floorEntry.getValue().equals(e.getValue()) &&
                containingBucket(floorEntry.getKey(), interval) == containingBucket(e.getKey(), interval)) {
                continue;
            }
        }
        tokens.put(e.getKey(), e.getValue());
    }
    tokens.putAll(tokensToAdd);
    return new ElasticHashinator(ImmutableSortedMap.copyOf(tokens));
}
/**
 * Finds, for each token owned by the given partition, the ring entry that
 * immediately precedes it (wrapping around at the start of the ring), and
 * returns those predecessors that belong to a DIFFERENT partition.
 *
 * @param partition the partition whose predecessors are wanted
 * @return map of predecessor token -> predecessor partition id
 */
@Override
public Map<Integer, Integer> pPredecessors(int partition) {
    Map<Integer, Integer> predecessors = new TreeMap<Integer, Integer>();
    UnmodifiableIterator<Map.Entry<Integer,Integer>> iter = m_tokensMap.get().entrySet().iterator();
    Set<Integer> pTokens = new HashSet<Integer>();
    // First pass: collect every token owned by the requested partition.
    while (iter.hasNext()) {
        Map.Entry<Integer, Integer> next = iter.next();
        if (next.getValue() == partition) {
            pTokens.add(next.getKey());
        }
    }
    for (Integer token : pTokens) {
        Map.Entry<Integer, Integer> predecessor = null;
        if (token != null) {
            predecessor = m_tokensMap.get().headMap(token).lastEntry();
            // If null, it means partition is the first one on the ring, so predecessor
            // should be the last entry on the ring because it wraps around.
            if (predecessor == null) {
                predecessor = m_tokensMap.get().lastEntry();
            }
        }
        // Predecessors owned by the same partition are not reported.
        if (predecessor != null && predecessor.getValue() != partition) {
            predecessors.put(predecessor.getKey(), predecessor.getValue());
        }
    }
    return predecessors;
}
/**
 * Returns the ring entry (token, partition) immediately preceding the given
 * token, wrapping around to the last entry when the token is the smallest
 * on the ring.
 *
 * @param partition the partition the token is expected to map to
 * @param token     a token owned by that partition
 * @throws IllegalArgumentException if the token does not map to the partition
 * @throws RuntimeException if the token is the only entry on the ring
 */
@Override
public Pair<Integer, Integer> pPredecessor(int partition, int token) {
    Integer partForToken = m_tokensMap.get().get(token);
    if (partForToken != null && partForToken == partition) {
        Map.Entry<Integer, Integer> predecessor = m_tokensMap.get().headMap(token).lastEntry();
        // Wrap around: the smallest token's predecessor is the ring's last entry.
        if (predecessor == null) {
            predecessor = m_tokensMap.get().lastEntry();
        }
        if (predecessor.getKey() != token) {
            return Pair.of(predecessor.getKey(), predecessor.getValue());
        } else {
            // given token is the only one on the ring, umpossible
            throw new RuntimeException("There is only one token on the hash ring");
        }
    } else {
        // given token doesn't map to partition
        throw new IllegalArgumentException("The given token " + token +
                                           " does not map to partition " + partition);
    }
}
/**
 * Computes the token ranges owned by the given partition, as a map of
 * inclusive range start -> inclusive range end. The trailing open range is
 * closed at Integer.MAX_VALUE.
 * NOTE(review): the wrap-around segment below the first token is not emitted
 * here ("first" is only consulted by the assert) -- confirm callers expect that.
 *
 * @param partition the partition whose ranges are wanted
 * @return map of inclusive (start -> end) token ranges
 */
@Override
public Map<Integer, Integer> pGetRanges(int partition) {
    Map<Integer, Integer> ranges = new TreeMap<Integer, Integer>();
    Integer first = null; // start of the very first token on the ring
    Integer start = null; // start of a range
    UnmodifiableIterator<Map.Entry<Integer,Integer>> iter = m_tokensMap.get().entrySet().iterator();
    // Iterate through the token map to find the ranges assigned to
    // the given partition
    while (iter.hasNext()) {
        Map.Entry<Integer, Integer> next = iter.next();
        int token = next.getKey();
        int pid = next.getValue();
        if (first == null) {
            first = token;
        }
        // if start is not null, there's an open range, now is
        // the time to close it.
        // else there is no open range, keep on going.
        if (start != null) {
            //Range end is inclusive so do token - 1
            ranges.put(start, token - 1);
            start = null;
        }
        if (pid == partition) {
            // if start is null, there's no open range, start one.
            start = token;
        }
    }
    // if there is an open range when we get here
    // It is the last token which implicity ends at the next max value
    if (start != null) {
        assert first != null;
        ranges.put(start, Integer.MAX_VALUE);
    }
    return ranges;
}
/**
 * Serializes the ring into the gzip-compressed "cooked" format: a 4-byte
 * entry count, then the sorted tokens emitted byte-plane by byte-plane
 * (most-significant plane first) to improve compressibility, then the
 * partition ids as plain 4-byte ints.
 *
 * @throws RuntimeException if gzip compression fails
 */
private byte[] toCookedBytes()
{
    // Allocate for a int pair per token/partition ID entry, plus a size.
    ByteBuffer buf = ByteBuffer.allocate(4 + (m_tokenCount * 8));
    buf.putInt(m_tokenCount);
    // Keep tokens and partition ids separate to aid compression.
    for (int zz = 3; zz >= 0; zz--) {
        int lastToken = Integer.MIN_VALUE;
        for (int ii = 0; ii < m_tokenCount; ii++) {
            int token = Bits.unsafe.getInt(m_tokens + (ii * 8));
            // Off-heap entries must already be sorted by token.
            Preconditions.checkArgument(token >= lastToken);
            lastToken = token;
            // Emit only byte plane zz of this token.
            token = token >>> (zz * 8);
            token = token & 0xFF;
            buf.put((byte)token);
        }
    }
    // Partition ids follow, 4 bytes each (stored at +4 past each token).
    for (int ii = 0; ii < m_tokenCount; ii++) {
        buf.putInt(Bits.unsafe.getInt(m_tokens + (ii * 8) + 4));
    }
    try {
        return CompressionService.gzipBytes(buf.array());
    } catch (IOException e) {
        throw new RuntimeException("Failed to compress bytes", e);
    }
}
/**
 * Records that {@code bytes} more hashinator data has been allocated. When
 * the running total exceeds HASHINATOR_GC_THRESHHOLD, logs a warning and
 * spawns a background thread (at most one alive at a time) that requests a
 * GC and reports the total again two seconds later.
 * Synchronized so the check-and-spawn of m_emergencyGCThread is atomic.
 *
 * @param bytes newly allocated byte count to add to the running total
 */
private static synchronized void trackAllocatedHashinatorBytes(long bytes) {
    final long allocated = m_allocatedHashinatorBytes.addAndGet(bytes);
    if (allocated > HASHINATOR_GC_THRESHHOLD) {
        hostLogger.warn(allocated + " bytes of hashinator data has been allocated");
        // Only start a new GC thread when no previous one is still running.
        if (m_emergencyGCThread == null || m_emergencyGCThread.getState() == State.TERMINATED) {
            m_emergencyGCThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    hostLogger.warn("Invoking System.gc() to recoup hashinator bytes");
                    System.gc();
                    try {
                        // Give the collector a moment before reporting.
                        Thread.sleep(2000);
                    } catch (InterruptedException e) {}
                    hostLogger.info(m_allocatedHashinatorBytes.get() + " bytes of hashinator allocated after GC");
                }
            }, "Hashinator GC thread");
            m_emergencyGCThread.start();
        }
    }
}
private static long deriveTokenInterval(ImmutableSortedSet<Integer> tokens)
{
long interval = 0;
int count = 4;
int prevToken = Integer.MIN_VALUE;
UnmodifiableIterator<Integer> tokenIter = tokens.iterator();
while (tokenIter.hasNext() && count-- > 0) {
int nextToken = tokenIter.next();
interval = Math.max(interval, nextToken - prevToken);
prevToken = nextToken;
}
return interval;
} | java |
/**
 * Returns the start token of the fixed-width bucket containing the given
 * token, with buckets of width {@code interval} anchored at
 * Integer.MIN_VALUE. The arithmetic is done in long space to avoid int
 * overflow when offsetting by Integer.MIN_VALUE.
 * NOTE(review): divides by interval -- assumes interval > 0; confirm callers
 * never pass 0 (deriveTokenInterval can return 0 for an empty token set).
 */
private static int containingBucket(int token, long interval)
{
    return (int) ((((long) token - Integer.MIN_VALUE) / interval) * interval + Integer.MIN_VALUE);
}
@Override
public boolean isOrderDeterministic() {
assert(m_children != null);
assert(m_children.size() == 1);
// This implementation is very close to AbstractPlanNode's implementation of this
// method, except that we assert just one child.
// Java doesn't allow calls to super-super-class methods via super.super.
AbstractPlanNode child = m_children.get(0);
if (! child.isOrderDeterministic()) {
m_nondeterminismDetail = child.m_nondeterminismDetail;
return false;
}
return true;
} | java |
private void logBatch(final CatalogContext context,
final AdHocPlannedStmtBatch batch,
final Object[] userParams)
{
final int numStmts = batch.getPlannedStatementCount();
final int numParams = userParams == null ? 0 : userParams.length;
final String readOnly = batch.readOnly ? "yes" : "no";
final String singlePartition = batch.isSinglePartitionCompatible() ? "yes" : "no";
final String user = getUsername();
final String[] groupNames = context.authSystem.getGroupNamesForUser(user);
final String groupList = StringUtils.join(groupNames, ',');
//String[] stmtArray = batch.stmts.stream().map(s -> new String(s.sql, Charsets.UTF_8)).toArray(String[]::new);
adhocLog.debug(String.format(
"=== statements=%d parameters=%d read-only=%s single-partition=%s user=%s groups=[%s]",
numStmts, numParams, readOnly, singlePartition, user, groupList));
for (int i = 0; i < batch.getPlannedStatementCount(); i++) {
AdHocPlannedStatement stmt = batch.getPlannedStatement(i);
String sql = stmt.sql == null ? "SQL_UNKNOWN" : new String(stmt.sql, Charsets.UTF_8);
adhocLog.debug(String.format("Statement #%d: %s", i + 1, sql));
}
if (userParams != null) {
for (int i = 0; i < userParams.length; ++i) {
Object value = userParams[i];
final String valueString = (value != null ? value.toString() : "NULL");
adhocLog.debug(String.format("Parameter #%d: %s", i + 1, valueString));
}
}
} | java |
static CompletableFuture<ClientResponse> processExplainDefaultProc(AdHocPlannedStmtBatch planBatch) {
Database db = VoltDB.instance().getCatalogContext().database;
// there better be one statement if this is really SQL
// from a default procedure
assert(planBatch.getPlannedStatementCount() == 1);
AdHocPlannedStatement ahps = planBatch.getPlannedStatement(0);
String sql = new String(ahps.sql, StandardCharsets.UTF_8);
String explain = planBatch.explainStatement(0, db, false);
VoltTable vt = new VoltTable(new VoltTable.ColumnInfo("STATEMENT_NAME", VoltType.STRING),
new VoltTable.ColumnInfo( "SQL_STATEMENT", VoltType.STRING),
new VoltTable.ColumnInfo( "EXECUTION_PLAN", VoltType.STRING));
vt.addRow("sql0", sql, explain);
ClientResponseImpl response =
new ClientResponseImpl(
ClientResponseImpl.SUCCESS,
ClientResponse.UNINITIALIZED_APP_STATUS_CODE,
null,
new VoltTable[] { vt },
null);
CompletableFuture<ClientResponse> fut = new CompletableFuture<>();
fut.complete(response);
return fut;
} | java |
/**
 * Wraps a planned ad hoc batch into the matching system procedure
 * invocation and submits it. Chooses between the single-partition and
 * multi-partition, read-only and read-write @AdHoc variants from the
 * batch's properties; SWAP TABLES routes to its own sysproc.
 *
 * @param plannedStmtBatch the planned batch to execute
 * @param isSwapTables     true when this batch is a SWAP TABLES operation
 * @return future for the client response of the submitted procedure
 * @throws VoltTypeException if the partitioning parameter cannot be typed
 */
private final CompletableFuture<ClientResponse> createAdHocTransaction(
        final AdHocPlannedStmtBatch plannedStmtBatch,
        final boolean isSwapTables)
        throws VoltTypeException
{
    ByteBuffer buf = null;
    try {
        buf = plannedStmtBatch.flattenPlanArrayToBuffer();
    }
    catch (IOException e) {
        // Failing to serialize our own plans is unrecoverable.
        VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
    }
    assert(buf.hasArray());
    // create the execution site task
    String procedureName = null;
    Object[] params = null;
    // pick the sysproc based on the presence of partition info
    // HSQL (or PostgreSQL) does not specifically implement AdHoc SP
    // -- instead, use its always-SP implementation of AdHoc
    boolean isSinglePartition = plannedStmtBatch.isSinglePartitionCompatible() || m_isConfiguredForNonVoltDBBackend;
    if (isSwapTables) {
        procedureName = "@SwapTablesCore";
        params = new Object[] { buf.array() };
    }
    else if (isSinglePartition) {
        if (plannedStmtBatch.isReadOnly()) {
            procedureName = "@AdHoc_RO_SP";
        }
        else {
            procedureName = "@AdHoc_RW_SP";
        }
        int type = VoltType.NULL.getValue();
        // replicated table read is single-part without a partitioning param
        // I copied this from below, but I'm not convinced that the above statement is correct
        // or that the null behavior here either (a) ever actually happens or (b) has the
        // desired intent.
        Object partitionParam = plannedStmtBatch.partitionParam();
        byte[] param = null;
        if (partitionParam != null) {
            type = VoltType.typeFromClass(partitionParam.getClass()).getValue();
            param = VoltType.valueToBytes(partitionParam);
        }
        // Send the partitioning parameter and its type along so that the site can check if
        // it's mis-partitioned. Type is needed to re-hashinate for command log re-init.
        params = new Object[] { param, (byte)type, buf.array() };
    }
    else {
        if (plannedStmtBatch.isReadOnly()) {
            procedureName = "@AdHoc_RO_MP";
        }
        else {
            procedureName = "@AdHoc_RW_MP";
        }
        params = new Object[] { buf.array() };
    }
    return callProcedure(procedureName, params);
}
private void collectParameterValueExpressions(AbstractExpression expr, List<AbstractExpression> pves) {
if (expr == null) {
return;
}
if (expr instanceof TupleValueExpression || expr instanceof AggregateExpression) {
// Create a matching PVE for this expression to be used on the EE side
// to get the original expression value
addCorrelationParameterValueExpression(expr, pves);
return;
}
collectParameterValueExpressions(expr.getLeft(), pves);
collectParameterValueExpressions(expr.getRight(), pves);
if (expr.getArgs() != null) {
for (AbstractExpression arg : expr.getArgs()) {
collectParameterValueExpressions(arg, pves);
}
}
} | java |
public static Result newPSMResult(int type, String label, Object value) {
Result result = newResult(ResultConstants.VALUE);
result.errorCode = type;
result.mainString = label;
result.valueData = value;
return result;
} | java |
public static Result newPreparedExecuteRequest(Type[] types,
long statementId) {
Result result = newResult(ResultConstants.EXECUTE);
result.metaData = ResultMetaData.newSimpleResultMetaData(types);
result.statementID = statementId;
result.navigator.add(ValuePool.emptyObjectArray);
return result;
} | java |
public static Result newCallResponse(Type[] types, long statementId,
Object[] values) {
Result result = newResult(ResultConstants.CALL_RESPONSE);
result.metaData = ResultMetaData.newSimpleResultMetaData(types);
result.statementID = statementId;
result.navigator.add(values);
return result;
} | java |
public static Result newUpdateResultRequest(Type[] types, long id) {
Result result = newResult(ResultConstants.UPDATE_RESULT);
result.metaData = ResultMetaData.newUpdateResultMetaData(types);
result.id = id;
result.navigator.add(new Object[]{});
return result;
} | java |
public void setPreparedResultUpdateProperties(Object[] parameterValues) {
if (navigator.getSize() == 1) {
((RowSetNavigatorClient) navigator).setData(0, parameterValues);
} else {
navigator.clear();
navigator.add(parameterValues);
}
} | java |
public void setPreparedExecuteProperties(Object[] parameterValues,
int maxRows, int fetchSize) {
mode = ResultConstants.EXECUTE;
if (navigator.getSize() == 1) {
((RowSetNavigatorClient) navigator).setData(0, parameterValues);
} else {
navigator.clear();
navigator.add(parameterValues);
}
updateCount = maxRows;
this.fetchSize = fetchSize;
} | java |
public static Result newBatchedExecuteResponse(int[] updateCounts,
Result generatedResult, Result e) {
Result result = newResult(ResultConstants.BATCHEXECRESPONSE);
result.addChainedResult(generatedResult);
result.addChainedResult(e);
Type[] types = new Type[]{ Type.SQL_INTEGER };
result.metaData = ResultMetaData.newSimpleResultMetaData(types);
Object[][] table = new Object[updateCounts.length][];
for (int i = 0; i < updateCounts.length; i++) {
table[i] = new Object[]{ ValuePool.getInt(updateCounts[i]) };
}
((RowSetNavigatorClient) result.navigator).setData(table);
return result;
} | java |
public void setPrepareOrExecuteProperties(String sql, int maxRows,
int fetchSize, int statementReturnType, int resultSetType,
int resultSetConcurrency, int resultSetHoldability, int keyMode,
int[] generatedIndexes, String[] generatedNames) {
mainString = sql;
updateCount = maxRows;
this.fetchSize = fetchSize;
this.statementReturnType = statementReturnType;
rsScrollability = resultSetType;
rsConcurrency = resultSetConcurrency;
rsHoldability = resultSetHoldability;
generateKeys = keyMode;
generatedMetaData =
ResultMetaData.newGeneratedColumnsMetaData(generatedIndexes,
generatedNames);
} | java |
private static void reset()
{
description = null;
argName = null;
longopt = null;
type = String.class;
required = false;
numberOfArgs = Option.UNINITIALIZED;
optionalArg = false;
valuesep = (char) 0;
} | java |
public static OptionBuilder hasOptionalArgs()
{
OptionBuilder.numberOfArgs = Option.UNLIMITED_VALUES;
OptionBuilder.optionalArg = true;
return INSTANCE;
} | java |
public static OptionBuilder hasOptionalArgs(int numArgs)
{
OptionBuilder.numberOfArgs = numArgs;
OptionBuilder.optionalArg = true;
return INSTANCE;
} | java |
public static Option create() throws IllegalArgumentException
{
if (longopt == null)
{
OptionBuilder.reset();
throw new IllegalArgumentException("must specify longopt");
}
return create(null);
} | java |
private String checkProcedureIdentifier(
final String identifier, final String statement
) throws VoltCompilerException {
String retIdent = checkIdentifierStart(identifier, statement);
if (retIdent.contains(".")) {
String msg = String.format(
"Invalid procedure name containing dots \"%s\" in DDL: \"%s\"",
identifier, statement.substring(0,statement.length()-1));
throw m_compiler.new VoltCompilerException(msg);
}
return retIdent;
} | java |
/**
 * Resolves the remote socket address to a hostname and, when the lookup
 * yields a real name (not just the dotted address echoed back), updates
 * the cached hostname / display strings. Runs inline when synchronous,
 * otherwise submits the lookup to the ReverseDNSCache background queue.
 *
 * @param synchronous true to block on the (possibly slow) DNS lookup
 */
void resolveHostname(boolean synchronous) {
    Runnable r = new Runnable() {
        @Override
        public void run() {
            String remoteHost = ReverseDNSCache.hostnameOrAddress(m_remoteSocketAddress.getAddress());
            // Only rewrite the cached strings when DNS produced an actual
            // hostname rather than echoing back the IP address.
            if (!remoteHost.equals(m_remoteSocketAddress.getAddress().getHostAddress())) {
                m_remoteHostname = remoteHost;
                m_remoteHostAndAddressAndPort = remoteHost + m_remoteHostAndAddressAndPort;
                m_toString = VoltPort.this.toString() + ":" + m_remoteHostAndAddressAndPort;
            }
        }
    };
    if (synchronous) {
        // Caller needs the name immediately; perform the lookup inline.
        r.run();
    } else {
        /*
         * Start the reverse DNS lookup in background because it might be
         * very slow if the hostname is not specified in local /etc/hosts.
         */
        try {
            ReverseDNSCache.submit(r);
        } catch (RejectedExecutionException e) {
            // Best-effort: a full queue just means we keep the raw address.
            networkLog.debug(
                    "Reverse DNS lookup for " + m_remoteSocketAddress + " rejected because the queue was full");
        }
    }
}
/**
 * Adds and removes selector interest ops for this port. Synchronized on
 * m_lock so the interest-set change is atomic with reads of m_running.
 * When the set actually changed and the port is not currently running,
 * the change is handed to the network thread; a pending write is attempted
 * optimistically without waiting for the selector.
 *
 * @param opsToAdd    SelectionKey ops to add to the interest set
 * @param opsToRemove SelectionKey ops to remove from the interest set
 */
public void setInterests(int opsToAdd, int opsToRemove) {
    // must be done atomically with changes to m_running
    synchronized(m_lock) {
        int oldInterestOps = m_interestOps;
        m_interestOps = (m_interestOps | opsToAdd) & (~opsToRemove);
        if (oldInterestOps != m_interestOps && !m_running) {
            /*
             * If this is a write, optimistically assume the write
             * will succeed and try it without using the selector
             */
            m_network.addToChangeList(this, (opsToAdd & SelectionKey.OP_WRITE) != 0);
        }
    }
}
public static String adHocSQLFromInvocationForDebug(StoredProcedureInvocation invocation) {
assert(invocation.getProcName().startsWith("@AdHoc"));
ParameterSet params = invocation.getParams();
// the final param is the byte array we need
byte[] serializedBatchData = (byte[]) params.getParam(params.size() - 1);
Pair<Object[], AdHocPlannedStatement[]> data = decodeSerializedBatchData(serializedBatchData);
Object[] userparams = data.getFirst();
AdHocPlannedStatement[] statements = data.getSecond();
StringBuilder sb = new StringBuilder();
if (statements.length == 0) {
sb.append("ADHOC INVOCATION HAS NO SQL");
}
else if (statements.length == 1) {
sb.append(adHocSQLStringFromPlannedStatement(statements[0], userparams));
}
else { // > 1
sb.append("BEGIN ADHOC_SQL_BATCH {\n");
for (AdHocPlannedStatement stmt : statements) {
sb.append(adHocSQLStringFromPlannedStatement(stmt, userparams)).append("\n");
}
sb.append("} END ADHOC_SQL_BATCH");
}
return sb.toString();
} | java |
public static String adHocSQLStringFromPlannedStatement(AdHocPlannedStatement statement, Object[] userparams) {
final int MAX_PARAM_LINE_CHARS = 120;
StringBuilder sb = new StringBuilder();
String sql = new String(statement.sql, Charsets.UTF_8);
sb.append(sql);
Object[] params = paramsForStatement(statement, userparams);
// convert params to strings of a certain max length
for (int i = 0; i < params.length; i++) {
Object param = params[i];
String paramLineStr = String.format(" Param %d: %s", i, param.toString());
// trim param line if it's silly long
if (paramLineStr.length() > MAX_PARAM_LINE_CHARS) {
paramLineStr = paramLineStr.substring(0, MAX_PARAM_LINE_CHARS - 3);
paramLineStr += "...";
}
sb.append('\n').append(paramLineStr);
}
return sb.toString();
} | java |
public static Pair<Object[], AdHocPlannedStatement[]> decodeSerializedBatchData(byte[] serializedBatchData) {
// Collections must be the same size since they all contain slices of the same data.
assert(serializedBatchData != null);
ByteBuffer buf = ByteBuffer.wrap(serializedBatchData);
AdHocPlannedStatement[] statements = null;
Object[] userparams = null;
try {
userparams = AdHocPlannedStmtBatch.userParamsFromBuffer(buf);
statements = AdHocPlannedStmtBatch.planArrayFromBuffer(buf);
}
catch (IOException e) {
throw new VoltAbortException(e);
}
return new Pair<Object[], AdHocPlannedStatement[]>(userparams, statements);
} | java |
static Object[] paramsForStatement(AdHocPlannedStatement statement, Object[] userparams) {
// When there are no user-provided parameters, statements may have parameterized constants.
if (userparams.length > 0) {
return userparams;
} else {
return statement.extractedParamArray();
}
} | java |
public static void writeToFile(byte[] catalogBytes, File file) throws IOException {
JarOutputStream jarOut = new JarOutputStream(new FileOutputStream(file));
JarInputStream jarIn = new JarInputStream(new ByteArrayInputStream(catalogBytes));
JarEntry catEntry = null;
JarInputStreamReader reader = new JarInputStreamReader();
while ((catEntry = jarIn.getNextJarEntry()) != null) {
byte[] value = reader.readEntryFromStream(jarIn);
String key = catEntry.getName();
assert (value != null);
JarEntry entry = new JarEntry(key);
entry.setSize(value.length);
entry.setTime(System.currentTimeMillis());
jarOut.putNextEntry(entry);
jarOut.write(value);
jarOut.flush();
jarOut.closeEntry();
}
jarOut.finish();
jarIn.close();
} | java |
public long getCRC() {
PureJavaCrc32 crc = new PureJavaCrc32();
for (Entry<String, byte[]> e : super.entrySet()) {
if (e.getKey().equals("buildinfo.txt") || e.getKey().equals("catalog-report.html")) {
continue;
}
// Hacky way to skip the first line of the autogenerated ddl, which
// has a date which changes and causes test failures
if (e.getKey().equals(VoltCompiler.AUTOGEN_DDL_FILE_NAME)) {
byte[] ddlbytes = e.getValue();
int index = 0;
while (ddlbytes[index] != '\n') {
index++;
}
byte[] newddlbytes = Arrays.copyOfRange(ddlbytes, index, ddlbytes.length);
crc.update(e.getKey().getBytes(Constants.UTF8ENCODING));
crc.update(newddlbytes);
}
else {
crc.update(e.getKey().getBytes(Constants.UTF8ENCODING));
crc.update(e.getValue());
}
}
return crc.getValue();
} | java |
public void removeClassFromJar(String classname)
{
for (String innerclass : getLoader().getInnerClassesForClass(classname)) {
remove(classToFileName(innerclass));
}
remove(classToFileName(classname));
} | java |
public static ParsedCall parseJDBCCall(String jdbcCall) throws SQLParser.Exception
{
Matcher m = PAT_CALL_WITH_PARAMETERS.matcher(jdbcCall);
if (m.matches()) {
String sql = m.group(1);
int parameterCount = PAT_CLEAN_CALL_PARAMETERS.matcher(m.group(2)).replaceAll("").length();
return new ParsedCall(sql, parameterCount);
}
m = PAT_CALL_WITHOUT_PARAMETERS.matcher(jdbcCall);
if (m.matches()) {
return new ParsedCall(m.group(1), 0);
}
return null;
} | java |
/**
 * Returns the result data type of an aggregate/set function applied to an
 * operand of the given type: COUNT is always BIGINT; AVG preserves the
 * operand type; SUM widens (small ints -> BIGINT, BIGINT -> big DECIMAL,
 * floats -> DOUBLE, DECIMAL doubles its precision); MIN/MAX preserve;
 * EVERY/SOME require BOOLEAN; STDDEV/VAR require numeric and yield DOUBLE;
 * APPROX_COUNT_DISTINCT (VoltDB) requires a fixed-width type and yields
 * BIGINT.
 *
 * @param setType the OpTypes aggregate code
 * @param type    operand type; may be null only for COUNT(*)
 * @throws HsqlException (X_42565) for incompatible operand types,
 *         (U_S0500) for a null operand on a non-COUNT aggregate or an
 *         unknown aggregate code
 */
static Type getType(int setType, Type type) {
    if (setType == OpTypes.COUNT) {
        return Type.SQL_BIGINT;
    }
    // A VoltDB extension to handle aggfnc(*) syntax errors.
    // If the argument node does not have
    // a data type, it may be '*'. If the
    // operation is COUNT (optype == 71) this is
    // just fine. But if it's anything else this
    // is a syntax error.
    if (type == null) {
        throw Error.error(ErrorCode.U_S0500);
    }
    // End of VoltDB extension
    // Collapse every interval flavor to a single code for the switches below.
    int dataType = type.isIntervalType() ? Types.SQL_INTERVAL
                                         : type.typeCode;
    switch (setType) {
        case OpTypes.AVG : {
            switch (dataType) {
                case Types.TINYINT :
                case Types.SQL_SMALLINT :
                case Types.SQL_INTEGER :
                case Types.SQL_BIGINT :
                case Types.SQL_REAL :
                case Types.SQL_FLOAT :
                case Types.SQL_DOUBLE :
                case Types.SQL_NUMERIC :
                case Types.SQL_DECIMAL :
                case Types.SQL_INTERVAL :
                    return type;
                default :
                    throw Error.error(ErrorCode.X_42565);
            }
        }
        case OpTypes.SUM : {
            switch (dataType) {
                case Types.TINYINT :
                case Types.SQL_SMALLINT :
                case Types.SQL_INTEGER :
                    return Type.SQL_BIGINT;
                case Types.SQL_BIGINT :
                    return Type.SQL_DECIMAL_BIGINT_SQR;
                case Types.SQL_REAL :
                case Types.SQL_FLOAT :
                case Types.SQL_DOUBLE :
                    return Type.SQL_DOUBLE;
                case Types.SQL_NUMERIC :
                case Types.SQL_DECIMAL :
                    // Double the precision so the sum cannot overflow its type.
                    return Type.getType(type.typeCode, 0,
                                        type.precision * 2, type.scale);
                case Types.SQL_INTERVAL :
                    return IntervalType.newIntervalType(
                        type.typeCode, DTIType.maxIntervalPrecision,
                        type.scale);
                default :
                    throw Error.error(ErrorCode.X_42565);
            }
        }
        case OpTypes.MIN :
        case OpTypes.MAX :
            return type;
        case OpTypes.EVERY :
        case OpTypes.SOME :
            if (type.isBooleanType()) {
                return Type.SQL_BOOLEAN;
            }
            break;
        case OpTypes.STDDEV_POP :
        case OpTypes.STDDEV_SAMP :
        case OpTypes.VAR_POP :
        case OpTypes.VAR_SAMP :
            if (type.isNumberType()) {
                return Type.SQL_DOUBLE;
            }
            break;
        // A VoltDB extension for APPROX_COUNT_DISTINCT
        case OpTypes.APPROX_COUNT_DISTINCT :
            switch (dataType) {
                case Types.TINYINT :
                case Types.SQL_SMALLINT :
                case Types.SQL_INTEGER :
                case Types.SQL_BIGINT :
                case Types.SQL_DECIMAL :
                case Types.SQL_TIMESTAMP :
                    return Type.SQL_BIGINT;
                default:
                    // We only support fixed-width types for this
                    // aggregate function.
                    //
                    // FLOAT is not supported since this function
                    // relies on different values having different bit
                    // patterns, and the same values being the
                    // same. Floating point numbers don't hold to
                    // this---e.g., positive and negative zero.
                    //
                    // Incompatible data types in operation
                    throw Error.error(ErrorCode.X_42565);
            }
        // End of VoltDB extension for APPROX_COUNT_DISTINCT
        default :
            throw Error.runtimeError(ErrorCode.U_S0500, "SetFunction");
    }
    // Reached when EVERY/SOME or STDDEV/VAR got an incompatible operand.
    throw Error.error(ErrorCode.X_42565);
}
public static URI makeFileLoggerURL(File dataDir, File dataLogDir){
return URI.create(makeURIString(dataDir.getPath(),dataLogDir.getPath(),null));
} | java |
public static boolean isValidSnapshot(File f) throws IOException {
if (f==null || Util.getZxidFromName(f.getName(), "snapshot") == -1)
return false;
// Check for a valid snapshot
RandomAccessFile raf = new RandomAccessFile(f, "r");
// including the header and the last / bytes
// the snapshot should be atleast 10 bytes
if (raf.length() < 10) {
return false;
}
try {
raf.seek(raf.length() - 5);
byte bytes[] = new byte[5];
int readlen = 0;
int l;
while(readlen < 5 &&
(l = raf.read(bytes, readlen, bytes.length - readlen)) >= 0) {
readlen += l;
}
if (readlen != bytes.length) {
LOG.info("Invalid snapshot " + f
+ " too short, len = " + readlen);
return false;
}
ByteBuffer bb = ByteBuffer.wrap(bytes);
int len = bb.getInt();
byte b = bb.get();
if (len != 1 || b != '/') {
LOG.info("Invalid snapshot " + f + " len = " + len
+ " byte = " + (b & 0xff));
return false;
}
} finally {
raf.close();
}
return true;
} | java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.