repo
stringlengths 1
191
⌀ | file
stringlengths 23
351
| code
stringlengths 0
5.32M
| file_length
int64 0
5.32M
| avg_line_length
float64 0
2.9k
| max_line_length
int64 0
288k
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/StatsOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.cache.ServerStatistics;
import org.infinispan.hotrod.impl.cache.ServerStatisticsImpl;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements to the stats operation as defined by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class StatsOperation extends RetryOnFailureOperation<ServerStatistics> {
private ServerStatisticsImpl result;
private int numStats = -1;
public StatsOperation(OperationContext operationContext, CacheOptions options) {
super(operationContext, STATS_REQUEST, STATS_RESPONSE, options, null);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndRead(channel);
}
@Override
protected void reset() {
super.reset();
result = null;
numStats = -1;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (numStats < 0) {
numStats = ByteBufUtil.readVInt(buf);
result = new ServerStatisticsImpl();
decoder.checkpoint();
}
while (result.size() < numStats) {
String statName = ByteBufUtil.readString(buf);
String statValue = ByteBufUtil.readString(buf);
result.addStats(statName, statValue);
decoder.checkpoint();
}
complete(result);
}
}
| 1,642
| 29.425926
| 115
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/GetOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements "get" operation as described by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod protocol
* specification</a>.
*
* @since 14.0
*/
public class GetOperation<K, V> extends AbstractKeyOperation<K, V> {
public GetOperation(OperationContext operationContext,
K key, byte[] keyBytes, CacheOptions options,
DataFormat dataFormat) {
super(operationContext, GET_REQUEST, GET_RESPONSE, key, keyBytes, options, dataFormat);
}
@Override
public void executeOperation(Channel channel) {
scheduleRead(channel);
sendArrayOperation(channel, keyBytes);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
throw new IllegalStateException("Get operation not called manually.");
}
}
| 1,109
| 30.714286
| 119
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/CacheOperationsFactory.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.transaction.xa.Xid;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.commons.util.IntSet;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.event.impl.ClientListenerNotifier;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.cache.CacheTopologyInfo;
import org.infinispan.hotrod.impl.cache.ClientStatistics;
import org.infinispan.hotrod.impl.cache.RemoteCache;
import org.infinispan.hotrod.impl.consistenthash.ConsistentHash;
import org.infinispan.hotrod.impl.iteration.KeyTracker;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.query.RemoteQuery;
import org.infinispan.hotrod.impl.transaction.entry.Modification;
import org.infinispan.hotrod.impl.transaction.operations.PrepareTransactionOperation;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
import org.infinispan.hotrod.telemetry.impl.TelemetryService;
import io.netty.channel.Channel;
/**
* Factory for {@link org.infinispan.hotrod.impl.operations.HotRodOperation} objects.
*
* @since 14.0
*/
public class CacheOperationsFactory implements HotRodConstants {
private static final Log log = LogFactory.getLog(CacheOperationsFactory.class, Log.class);
private final ThreadLocal<Integer> flagsMap = new ThreadLocal<>();
private final OperationContext cacheContext;
private final OperationContext defaultContext;
public CacheOperationsFactory(ChannelFactory channelFactory, String cacheName, Codec codec, ClientListenerNotifier listenerNotifier, HotRodConfiguration configuration, ClientStatistics clientStatistics) {
TelemetryService telemetryService = null;
try {
telemetryService = TelemetryService.create();
} catch (Throwable e) {
// missing dependency => no context to propagate to the server
log.noOpenTelemetryAPI(e);
}
this.cacheContext = new OperationContext(channelFactory, codec, listenerNotifier, configuration, clientStatistics, telemetryService, cacheName);
this.defaultContext = new OperationContext(channelFactory, codec, listenerNotifier, configuration, clientStatistics, telemetryService, null);
}
public CacheOperationsFactory(ChannelFactory channelFactory, Codec codec, ClientListenerNotifier listenerNotifier, HotRodConfiguration configuration) {
this(channelFactory, null, codec, listenerNotifier, configuration, null);
}
public OperationContext getDefaultContext() {
return defaultContext;
}
public OperationContext getCacheContext() {
return cacheContext;
}
public Codec getCodec() {
return cacheContext.getCodec();
}
public void setCodec(Codec codec) {
cacheContext.setCodec(codec);
defaultContext.setCodec(codec);
}
public <K, V> GetOperation<K, V> newGetKeyOperation(K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
return new GetOperation<>(cacheContext, key, keyBytes, options, dataFormat);
}
public <K, V> GetAllParallelOperation<K, V> newGetAllOperation(Set<byte[]> keys, CacheOptions options, DataFormat dataFormat) {
return new GetAllParallelOperation<>(cacheContext, keys, options, dataFormat);
}
public <K, V> GetAndRemoveOperation<K, V> newGetAndRemoveOperation(K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
return new GetAndRemoveOperation<>(cacheContext, key, keyBytes, options, dataFormat);
}
public <K> RemoveOperation<K> newRemoveOperation(K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
return new RemoveOperation<>(cacheContext, key, keyBytes, options, dataFormat);
}
public <K, V> RemoveIfUnmodifiedOperation<K, V> newRemoveIfUnmodifiedOperation(K key, byte[] keyBytes, long version, CacheOptions options, DataFormat dataFormat) {
return new RemoveIfUnmodifiedOperation<>(cacheContext, key, keyBytes, version, options, dataFormat);
}
public <K, V> ReplaceIfUnmodifiedOperation<K, V> newReplaceIfUnmodifiedOperation(K key, byte[] keyBytes, byte[] value, long version, CacheWriteOptions options, DataFormat dataFormat) {
return new ReplaceIfUnmodifiedOperation<>(cacheContext, key, keyBytes, value, version, options, dataFormat);
}
public <K, V> GetWithMetadataOperation<K, V> newGetWithMetadataOperation(K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
return newGetWithMetadataOperation(key, keyBytes, options, dataFormat, null);
}
public <K, V> GetWithMetadataOperation<K, V> newGetWithMetadataOperation(K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat, SocketAddress listenerServer) {
return new GetWithMetadataOperation<>(cacheContext, key, keyBytes, options, dataFormat, listenerServer);
}
public StatsOperation newStatsOperation(CacheOptions options) {
return new StatsOperation(cacheContext, options);
}
public <K, V> PutOperation<K, V> newPutKeyValueOperation(K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
return new PutOperation<>(cacheContext, key, keyBytes, value, options, dataFormat);
}
public <K> SetOperation<K> newSetKeyValueOperation(K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
return new SetOperation<>(cacheContext, key, keyBytes, value, options, dataFormat);
}
public PutAllParallelOperation newPutAllOperation(Map<byte[], byte[]> map, CacheWriteOptions options, DataFormat dataFormat) {
return new PutAllParallelOperation(cacheContext, map, options, dataFormat);
}
public <K, V> PutIfAbsentOperation<K, V> newPutIfAbsentOperation(K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
return new PutIfAbsentOperation<>(cacheContext, key, keyBytes, value, options, dataFormat);
}
public <K> SetIfAbsentOperation<K> newSetIfAbsentOperation(K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
return new SetIfAbsentOperation<>(cacheContext, key, keyBytes, value, options, dataFormat);
}
public <K, V> ReplaceOperation<K, V> newReplaceOperation(K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
return new ReplaceOperation<>(cacheContext, key, keyBytes, value, options, dataFormat);
}
public ContainsKeyOperation newContainsKeyOperation(Object key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
return new ContainsKeyOperation(cacheContext, key, keyBytes, options, dataFormat);
}
public ClearOperation newClearOperation() {
return new ClearOperation(cacheContext, CacheOptions.DEFAULT);
}
public <K> BulkGetKeysOperation<K> newBulkGetKeysOperation(int scope, CacheOptions options, DataFormat dataFormat) {
return new BulkGetKeysOperation<>(cacheContext, options, scope, dataFormat);
}
public AddClientListenerOperation newAddClientListenerOperation(Object listener, CacheOptions options, DataFormat dataFormat) {
return new AddClientListenerOperation(cacheContext, options, listener, null, null, dataFormat, null);
}
public AddClientListenerOperation newAddClientListenerOperation(Object listener, byte[][] filterFactoryParams, byte[][] converterFactoryParams, CacheOptions options, DataFormat dataFormat) {
return new AddClientListenerOperation(cacheContext, options, listener, filterFactoryParams, converterFactoryParams, dataFormat, null);
}
public RemoveClientListenerOperation newRemoveClientListenerOperation(Object listener, CacheOptions options) {
return new RemoveClientListenerOperation(cacheContext, options, listener);
}
public AddBloomNearCacheClientListenerOperation newAddNearCacheListenerOperation(Object listener, CacheOptions options, DataFormat dataFormat, int bloomFilterBits, RemoteCache<?, ?> remoteCache) {
return new AddBloomNearCacheClientListenerOperation(cacheContext, options, listener, dataFormat, bloomFilterBits, remoteCache);
}
public UpdateBloomFilterOperation newUpdateBloomFilterOperation(CacheOptions options, SocketAddress address, byte[] bloomBytes) {
return new UpdateBloomFilterOperation(cacheContext, options, address, bloomBytes);
}
/**
* Construct a ping request directed to a particular node.
*
* @param releaseChannel
* @return a ping operation for a particular node
*/
public PingOperation newPingOperation(boolean releaseChannel) {
return new PingOperation(cacheContext, releaseChannel);
}
/**
* Construct a fault tolerant ping request. This operation should be capable to deal with nodes being down, so it
* will find the first node successful node to respond to the ping.
*
* @return a ping operation for the cluster
*/
public FaultTolerantPingOperation newFaultTolerantPingOperation() {
return new FaultTolerantPingOperation(cacheContext, CacheOptions.DEFAULT);
}
public QueryOperation newQueryOperation(RemoteQuery remoteQuery, CacheOptions options, DataFormat dataFormat, boolean withHitCount) {
return new QueryOperation(cacheContext, options, remoteQuery, dataFormat, withHitCount);
}
public SizeOperation newSizeOperation(CacheOptions options) {
return new SizeOperation(cacheContext, options);
}
public <T> ExecuteOperation<T> newExecuteOperation(String taskName, Map<String, byte[]> marshalledParams, Object key, CacheOptions options, DataFormat dataFormat) {
return new ExecuteOperation<>(cacheContext, options, taskName, marshalledParams, key, dataFormat);
}
public AdminOperation newAdminOperation(String taskName, Map<String, byte[]> marshalledParams, CacheOptions options) {
return new AdminOperation(cacheContext, options, taskName, marshalledParams);
}
public CacheTopologyInfo getCacheTopologyInfo() {
return cacheContext.getChannelFactory().getCacheTopologyInfo(cacheContext.getCacheNameBytes());
}
/**
* Returns a map containing for each address all of its primarily owned segments. If the primary segments are not
* known an empty map will be returned instead
*
* @return map containing addresses and their primary segments
*/
public Map<SocketAddress, Set<Integer>> getPrimarySegmentsByAddress() {
return cacheContext.getChannelFactory().getPrimarySegmentsByAddress(cacheContext.getCacheNameBytes());
}
public ConsistentHash getConsistentHash() {
return cacheContext.getChannelFactory().getConsistentHash(cacheContext.getCacheNameBytes());
}
public int getTopologyId() {
return cacheContext.getClientTopology().get().getTopologyId();
}
public IterationStartOperation newIterationStartOperation(String filterConverterFactory, byte[][] filterParameters, IntSet segments, int batchSize, boolean metadata, CacheOptions options, DataFormat dataFormat, SocketAddress targetAddress) {
return new IterationStartOperation(cacheContext, options, filterConverterFactory, filterParameters, segments, batchSize, metadata, dataFormat, targetAddress);
}
public IterationEndOperation newIterationEndOperation(byte[] iterationId, CacheOptions options, Channel channel) {
return new IterationEndOperation(cacheContext, options, iterationId, channel);
}
public <K, E> IterationNextOperation<K, E> newIterationNextOperation(byte[] iterationId, Channel channel, KeyTracker segmentKeyTracker, CacheOptions options, DataFormat dataFormat) {
return new IterationNextOperation<>(cacheContext, options, iterationId, channel, segmentKeyTracker, dataFormat);
}
public <K> GetStreamOperation<K> newGetStreamOperation(K key, byte[] keyBytes, int offset, CacheOptions options) {
return new GetStreamOperation<>(cacheContext, key, keyBytes, offset, options);
}
public <K> PutStreamOperation<K> newPutStreamOperation(K key, byte[] keyBytes, long version, CacheWriteOptions options) {
return new PutStreamOperation<>(cacheContext, key, keyBytes, options, version);
}
public <K> PutStreamOperation<K> newPutStreamOperation(K key, byte[] keyBytes, CacheWriteOptions options) {
return new PutStreamOperation<>(cacheContext, key, keyBytes, options, PutStreamOperation.VERSION_PUT);
}
public <K> PutStreamOperation<K> newPutIfAbsentStreamOperation(K key, byte[] keyBytes, CacheWriteOptions options) {
return new PutStreamOperation<>(cacheContext, key, keyBytes, options, PutStreamOperation.VERSION_PUT_IF_ABSENT);
}
public AuthMechListOperation newAuthMechListOperation(Channel channel) {
return new AuthMechListOperation(cacheContext, channel);
}
public AuthOperation newAuthOperation(Channel channel, String saslMechanism, byte[] response) {
return new AuthOperation(cacheContext, channel, saslMechanism, response);
}
public PrepareTransactionOperation newPrepareTransactionOperation(Xid xid, boolean onePhaseCommit, List<Modification> modifications, boolean recoverable, long timeoutMs) {
return new PrepareTransactionOperation(cacheContext, xid, onePhaseCommit, modifications, recoverable, timeoutMs);
}
}
| 13,598
| 49.366667
| 244
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/OperationContext.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.DEFAULT_CACHE_NAME_BYTES;
import java.util.concurrent.atomic.AtomicReference;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.event.impl.ClientListenerNotifier;
import org.infinispan.hotrod.impl.ClientTopology;
import org.infinispan.hotrod.impl.HotRodTransport;
import org.infinispan.hotrod.impl.cache.ClientStatistics;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
import org.infinispan.hotrod.telemetry.impl.TelemetryService;
/**
* @since 14.0
**/
public class OperationContext {
private final ChannelFactory channelFactory;
private final AtomicReference<ClientTopology> clientTopology;
private final ClientListenerNotifier listenerNotifier;
private final HotRodConfiguration configuration;
private final ClientStatistics clientStatistics;
private final TelemetryService telemetryService;
private final byte[] cacheNameBytes;
private final String cacheName;
private Codec codec;
public OperationContext(ChannelFactory channelFactory, Codec codec, ClientListenerNotifier listenerNotifier,
HotRodConfiguration configuration, ClientStatistics clientStatistics,
TelemetryService telemetryService, String cacheName) {
this.channelFactory = channelFactory;
this.codec = codec;
this.listenerNotifier = listenerNotifier;
this.configuration = configuration;
this.clientStatistics = clientStatistics;
this.telemetryService = telemetryService;
this.cacheName = cacheName;
this.cacheNameBytes = cacheName == null ? DEFAULT_CACHE_NAME_BYTES : HotRodTransport.cacheNameBytes(cacheName);
this.clientTopology = channelFactory != null ?
channelFactory.createTopologyId(cacheNameBytes) :
new AtomicReference<>(new ClientTopology(-1, configuration.clientIntelligence()));
}
public ChannelFactory getChannelFactory() {
return channelFactory;
}
public AtomicReference<ClientTopology> getClientTopology() {
return clientTopology;
}
public ClientListenerNotifier getListenerNotifier() {
return listenerNotifier;
}
public HotRodConfiguration getConfiguration() {
return configuration;
}
public ClientStatistics getClientStatistics() {
return clientStatistics;
}
public TelemetryService getTelemetryService() {
return telemetryService;
}
public byte[] getCacheNameBytes() {
return cacheNameBytes;
}
public String getCacheName() {
return cacheName;
}
public Codec getCodec() {
return codec;
}
public void setCodec(Codec codec) {
this.codec = codec;
}
}
| 2,859
| 32.255814
| 117
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/RetryOnFailureOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.io.IOException;
import java.net.SocketAddress;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.exceptions.HotRodClientException;
import org.infinispan.hotrod.exceptions.RemoteIllegalLifecycleStateException;
import org.infinispan.hotrod.exceptions.RemoteNodeSuspectException;
import org.infinispan.hotrod.exceptions.TransportException;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.transport.netty.ChannelOperation;
import org.infinispan.hotrod.impl.transport.netty.ChannelRecord;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.channel.Channel;
import io.netty.handler.codec.DecoderException;
/**
* Base class for all the operations that need retry logic: if the operation fails due to connection problems, try with
* another available connection.
*
* @since 14.0
*/
public abstract class RetryOnFailureOperation<T> extends HotRodOperation<T> implements ChannelOperation {
protected static final Log log = LogFactory.getLog(RetryOnFailureOperation.class, Log.class);
private int retryCount = 0;
private Set<SocketAddress> failedServers = null;
protected RetryOnFailureOperation(OperationContext operationContext, short requestCode, short responseCode, CacheOptions options, DataFormat dataFormat) {
super(operationContext, requestCode, responseCode, options, dataFormat);
if (operationContext.getTelemetryService() != null) {
operationContext.getTelemetryService().injectSpanContext(header);
}
}
@Override
public CompletionStage<T> execute() {
assert !isDone();
try {
if (log.isTraceEnabled()) {
log.tracef("Requesting channel for operation %s", this);
}
fetchChannelAndInvoke(retryCount, failedServers);
} catch (Exception e) {
// if there's a bug before the operation is registered the operation wouldn't be completed
completeExceptionally(e);
}
return this;
}
@Override
public void invoke(Channel channel) {
try {
if (log.isTraceEnabled()) {
log.tracef("About to start executing operation %s on %s", this, channel);
}
executeOperation(channel);
} catch (Throwable t) {
completeExceptionally(t);
} finally {
releaseChannel(channel);
}
}
@Override
public void cancel(SocketAddress address, Throwable cause) {
cause = handleException(cause, null, address);
if (cause != null) {
completeExceptionally(cause);
}
}
private void retryIfNotDone() {
if (isDone()) {
if (log.isTraceEnabled()) {
log.tracef("Not retrying as done (exceptionally=%s), retryCount=%d", this.isCompletedExceptionally(), retryCount);
}
} else {
reset();
fetchChannelAndInvoke(retryCount, failedServers);
}
}
// hook for stateful operations
protected void reset() {
// The exception may happen when we try to fetch the channel; at this time the operation
// is not registered yet and timeoutFuture is null
if (timeoutFuture != null) {
timeoutFuture.cancel(false);
timeoutFuture = null;
}
// Update the topology age in case the retry is connecting to a new cluster
header.topologyAge(operationContext.getChannelFactory().getTopologyAge());
}
private Set<SocketAddress> addFailedServer(SocketAddress address) {
if (failedServers == null) {
failedServers = new HashSet<>();
}
if (log.isTraceEnabled())
log.tracef("Add %s to failed servers", address);
failedServers.add(address);
return failedServers;
}
@Override
public void channelInactive(Channel channel) {
if (isDone()) {
return;
}
SocketAddress address = ChannelRecord.of(channel).getUnresolvedAddress();
addFailedServer(address);
logAndRetryOrFail(HOTROD.connectionClosed(address, address));
}
@Override
public void exceptionCaught(Channel channel, Throwable cause) {
SocketAddress address = channel == null ? null : ChannelRecord.of(channel).getUnresolvedAddress();
cause = handleException(cause, channel, address);
if (cause != null) {
// ctx.close() triggers channelInactive; we want to complete this to signal that no retries are expected
try {
completeExceptionally(cause);
} finally {
if (channel != null) {
HOTROD.closingChannelAfterError(channel, cause);
channel.close();
}
}
}
}
protected Throwable handleException(Throwable cause, Channel channel, SocketAddress address) {
while (cause instanceof DecoderException && cause.getCause() != null) {
cause = cause.getCause();
}
if (cause instanceof RemoteIllegalLifecycleStateException || cause instanceof IOException || cause instanceof TransportException) {
if (Thread.interrupted()) {
// Don't invalidate the transport if our thread was interrupted
completeExceptionally(new InterruptedException());
return null;
}
if (address != null) {
addFailedServer(address);
}
if (channel != null) {
// We need to remove decoder even if we're about to close the channel
// because otherwise we would be notified through channelInactive and we would retry (again).
HeaderDecoder headerDecoder = (HeaderDecoder) channel.pipeline().get(HeaderDecoder.NAME);
if (headerDecoder != null) {
channel.pipeline().remove(HeaderDecoder.NAME);
}
HOTROD.closingChannelAfterError(channel, cause);
channel.close();
if (headerDecoder != null) {
headerDecoder.failoverClientListeners();
}
}
logAndRetryOrFail(cause);
return null;
} else if (cause instanceof RemoteNodeSuspectException) {
// TODO Clients should never receive a RemoteNodeSuspectException, see ISPN-11636
logAndRetryOrFail(cause);
return null;
} else if (cause instanceof HotRodClientException && ((HotRodClientException) cause).isServerError()) {
// fail the operation (don't retry) but don't close the channel
completeExceptionally(cause);
return null;
} else {
return cause;
}
}
protected void logAndRetryOrFail(Throwable e) {
if (retryCount < operationContext.getChannelFactory().getMaxRetries()) {
if (log.isTraceEnabled()) {
log.tracef(e, "Exception encountered in %s. Retry %d out of %d", this, retryCount, operationContext.getChannelFactory().getMaxRetries());
}
retryCount++;
operationContext.getChannelFactory().incrementRetryCount();
retryIfNotDone();
} else {
HOTROD.exceptionAndNoRetriesLeft(retryCount, operationContext.getChannelFactory().getMaxRetries(), e);
completeExceptionally(e);
}
}
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
operationContext.getChannelFactory().fetchChannelAndInvoke(failedServers, operationContext.getCacheNameBytes(), this);
}
/**
* Perform the operation-specific request/response I/O on the specified channel. If an error occurs during I/O, this
* class will detect it and retry the operation with a different channel by invoking the executeOperation method
* again.
*
* @param channel the channel to use for I/O
*/
protected abstract void executeOperation(Channel channel);
}
| 8,072
| 36.724299
| 157
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/ClientListenerOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.util.concurrent.ThreadLocalRandom;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ChannelRecord;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import org.infinispan.hotrod.impl.transport.netty.HotRodClientDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
public abstract class ClientListenerOperation extends RetryOnFailureOperation<SocketAddress> {
public final byte[] listenerId;
public final Object listener;
// Holds which address we are currently executing the operation on
protected SocketAddress address;
protected ClientListenerOperation(OperationContext operationContext, short requestCode, short responseCode,
CacheOptions options, byte[] listenerId, DataFormat dataFormat, Object listener) {
super(operationContext, requestCode, responseCode, options, dataFormat);
this.listenerId = listenerId;
this.listener = listener;
}
protected static byte[] generateListenerId() {
ThreadLocalRandom random = ThreadLocalRandom.current();
byte[] listenerId = new byte[16];
ByteBuffer bb = ByteBuffer.wrap(listenerId);
bb.putLong(random.nextLong());
bb.putLong(random.nextLong());
return listenerId;
}
public String getCacheName() {
return operationContext.getCacheName();
}
@Override
protected final void executeOperation(Channel channel) {
// Note: since the HeaderDecoder now supports decoding both operations and events we don't have to
// wait until all operations complete; the server will deliver responses and we'll just handle them regardless
// of the order
if (!channel.isActive()) {
channelInactive(channel);
return;
}
this.address = ChannelRecord.of(channel).getUnresolvedAddress();
actualExecute(channel);
}
protected abstract void actualExecute(Channel channel);
protected void cleanup(Channel channel) {
channel.eventLoop().execute(() -> {
if (!operationContext.getCodec().allowOperationsAndEvents()) {
if (channel.isOpen()) {
operationContext.getChannelFactory().releaseChannel(channel);
}
}
HotRodClientDecoder decoder = channel.pipeline().get(HotRodClientDecoder.class);
if (decoder != null) {
decoder.removeListener(listenerId);
}
});
}
@Override
public void releaseChannel(Channel channel) {
if (operationContext.getCodec().allowOperationsAndEvents()) {
super.releaseChannel(channel);
}
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (HotRodConstants.isSuccess(status)) {
decoder.addListener(listenerId);
operationContext.getListenerNotifier().startClientListener(listenerId);
} else {
// this releases the channel
operationContext.getListenerNotifier().removeClientListener(listenerId);
throw HOTROD.failedToAddListener(listener, status);
}
complete(address);
}
@Override
public boolean completeExceptionally(Throwable ex) {
if (!isDone()) {
operationContext.getListenerNotifier().removeClientListener(listenerId);
}
return super.completeExceptionally(ex);
}
public void postponeTimeout(Channel channel) {
assert !isDone();
timeoutFuture.cancel(false);
timeoutFuture = null;
scheduleTimeout(channel);
}
@Override
protected void addParams(StringBuilder sb) {
sb.append("listenerId=").append(Util.printArray(listenerId));
}
abstract public ClientListenerOperation copy();
}
| 4,080
| 33.584746
| 119
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/GetAndRemoveOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
import io.netty.buffer.ByteBuf;
/**
* Implement "remove" operation as described in <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class GetAndRemoveOperation<K, V> extends AbstractRemoveOperation<K, CacheEntry<K, V>> {
public GetAndRemoveOperation(OperationContext operationContext,
K key, byte[] keyBytes, CacheOptions options,
DataFormat dataFormat) {
super(operationContext, key, keyBytes, options, dataFormat);
}
@Override
void completeNotExist() {
complete(null);
}
@Override
protected int flags() {
return super.flags() | PrivateHotRodFlag.FORCE_RETURN_VALUE.getFlagInt();
}
@Override
void completeExisted(ByteBuf buf, short status) {
CacheEntry<K, V> result = returnPossiblePrevValue(buf, status);
statsDataRemove();
complete(result); // NO_ERROR_STATUS
}
}
| 1,123
| 27.1
| 112
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AbstractKeyOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.Set;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.api.common.CacheEntryVersion;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.VersionedOperationResponse;
import org.infinispan.hotrod.impl.cache.CacheEntryImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryMetadataImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryVersionImpl;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import io.netty.buffer.ByteBuf;
/**
* Basic class for all hot rod operations that manipulate a key.
*
* @since 14.0
*/
public abstract class AbstractKeyOperation<K, T> extends StatsAffectingRetryingOperation<T> {
protected final K key;
protected final byte[] keyBytes;
protected AbstractKeyOperation(OperationContext operationContext, short requestCode, short responseCode,
K key, byte[] keyBytes, CacheOptions options,
DataFormat dataFormat) {
super(operationContext, requestCode, responseCode, options, dataFormat);
this.key = key;
this.keyBytes = keyBytes;
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
if (retryCount == 0) {
operationContext.getChannelFactory().fetchChannelAndInvoke(key == null ? keyBytes : key, failedServers, operationContext.getCacheNameBytes(), this);
} else {
operationContext.getChannelFactory().fetchChannelAndInvoke(failedServers, operationContext.getCacheNameBytes(), this);
}
}
protected <V> CacheEntry<K, V> returnPossiblePrevValue(ByteBuf buf, short status) {
return operationContext.getCodec().returnPossiblePrevValue(operationKey(), buf, status, dataFormat(), flags(),
operationContext.getConfiguration().getClassAllowList(), operationContext.getChannelFactory().getMarshaller());
}
public K operationKey() {
if (key == null) {
return dataFormat().keyToObj(keyBytes, operationContext.getConfiguration().getClassAllowList());
}
return key;
}
protected <V> VersionedOperationResponse<CacheEntry<K, V>> returnVersionedOperationResponse(ByteBuf buf, short status) {
VersionedOperationResponse.RspCode code;
if (HotRodConstants.isSuccess(status)) {
code = VersionedOperationResponse.RspCode.SUCCESS;
} else if (HotRodConstants.isNotExecuted(status)) {
code = VersionedOperationResponse.RspCode.MODIFIED_KEY;
} else if (HotRodConstants.isNotExist(status)) {
code = VersionedOperationResponse.RspCode.NO_SUCH_KEY;
} else {
throw new IllegalStateException("Unknown response status: " + Integer.toHexString(status));
}
CacheEntry<K, V> prevValue = returnPossiblePrevValue(buf, status);
return new VersionedOperationResponse<>(prevValue, code);
}
@Override
protected void addParams(StringBuilder sb) {
sb.append(", key=").append(key == null ? Util.printArray(keyBytes) : key);
}
public static <K, V> CacheEntry<K, V> readEntry(ByteBuf buf, K key, DataFormat dataFormat, ClassAllowList allowList) {
short flags = buf.readUnsignedByte();
long creation = -1;
int lifespan = -1;
long lastUsed = -1;
int maxIdle = -1;
if ((flags & INFINITE_LIFESPAN) != INFINITE_LIFESPAN) {
creation = buf.readLong();
lifespan = ByteBufUtil.readVInt(buf);
}
if ((flags & INFINITE_MAXIDLE) != INFINITE_MAXIDLE) {
lastUsed = buf.readLong();
maxIdle = ByteBufUtil.readVInt(buf);
}
CacheEntryExpiration expiration;
if (lifespan < 0) {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.IMMORTAL;
} else {
expiration = CacheEntryExpiration.withMaxIdle(Duration.ofSeconds(maxIdle));
}
} else {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.withLifespan(Duration.ofSeconds(lifespan));
} else {
expiration = CacheEntryExpiration.withLifespanAndMaxIdle(Duration.ofSeconds(lifespan), Duration.ofSeconds(maxIdle));
}
}
CacheEntryVersion version = new CacheEntryVersionImpl(buf.readLong());
if (log.isTraceEnabled()) {
log.tracef("Received version: %s", version);
}
V value = dataFormat.valueToObj(ByteBufUtil.readArray(buf), allowList);
return new CacheEntryImpl<>(key, value, new CacheEntryMetadataImpl(creation, lastUsed, expiration, version));
}
}
| 4,931
| 40.79661
| 157
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/PingOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.concurrent.CompletableFuture;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.transport.netty.ChannelOperation;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.handler.codec.DecoderException;
/**
* Corresponds to the "ping" operation as defined in <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class PingOperation extends HotRodOperation<PingResponse> implements ChannelOperation {
private static final Log log = LogFactory.getLog(PingOperation.class);
private final boolean releaseChannel;
public PingOperation(OperationContext operationContext, boolean releaseChannel) {
this(operationContext, PING_REQUEST, PING_RESPONSE, releaseChannel);
}
protected PingOperation(OperationContext operationContext, short requestCode, short responseCode, boolean releaseChannel) {
super(operationContext, requestCode, responseCode, CacheOptions.DEFAULT);
this.releaseChannel = releaseChannel;
}
@Override
public void invoke(Channel channel) {
sendHeaderAndRead(channel);
if (releaseChannel) {
releaseChannel(channel);
}
}
@Override
public void cancel(SocketAddress address, Throwable cause) {
completeExceptionally(cause);
}
@Override
public CompletableFuture<PingResponse> execute() {
throw new UnsupportedOperationException("Cannot execute directly");
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
throw new IllegalStateException("Ping response not called manually.");
}
@Override
public void exceptionCaught(Channel channel, Throwable cause) {
while (cause instanceof DecoderException && cause.getCause() != null) {
cause = cause.getCause();
}
PingResponse pingResponse = new PingResponse(cause);
if (pingResponse.isCacheNotFound()) {
complete(pingResponse);
} else {
super.exceptionCaught(channel, cause);
}
}
}
| 2,356
| 31.736111
| 126
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AdminOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
/**
* AdminOperation. A special type of {@link ExecuteOperation} which returns the result of an admin operation which is
* always represented as a JSON object. The actual parsing and interpretation of the result is up to the caller.
*
* @since 14.0
*/
public class AdminOperation extends ExecuteOperation<String> {
AdminOperation(OperationContext operationContext, CacheOptions options, String taskName, Map<String, byte[]> marshalledParams) {
super(operationContext, options, taskName, marshalledParams, null, null);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
byte[] bytes = ByteBufUtil.readArray(buf);
complete(new String(bytes, StandardCharsets.UTF_8));
}
}
| 1,070
| 35.931034
| 131
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/PutOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.impl.DataFormat;
/**
* Implements "put" as defined by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod protocol
* specification</a>.
*
* @since 14.0
*/
public class PutOperation<K, V> extends AbstractPutOperation<K, CacheEntry<K, V>> {
public PutOperation(OperationContext operationContext, K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
super(operationContext, key, keyBytes, value, options, dataFormat);
}
@Override
protected int flags() {
return super.flags() | PrivateHotRodFlag.FORCE_RETURN_VALUE.getFlagInt();
}
}
| 789
| 31.916667
| 147
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/PutStreamOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.exceptions.InvalidResponseException;
import org.infinispan.hotrod.impl.protocol.ChannelOutputStream;
import org.infinispan.hotrod.impl.protocol.ChannelOutputStreamListener;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Streaming put operation
*
* @since 14.0
*/
public class PutStreamOperation<K> extends AbstractKeyOperation<K, OutputStream> implements ChannelOutputStreamListener {
static final long VERSION_PUT = 0;
static final long VERSION_PUT_IF_ABSENT = -1;
private final long version;
private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
public PutStreamOperation(OperationContext operationContext,
K key, byte[] keyBytes,
CacheWriteOptions options, long version) {
super(operationContext, PUT_STREAM_REQUEST, PUT_STREAM_RESPONSE, key, keyBytes, options, null);
this.version = version;
}
@Override
public void executeOperation(Channel channel) {
scheduleRead(channel);
Codec codec = operationContext.getCodec();
CacheEntryExpiration.Impl expiration = (CacheEntryExpiration.Impl) ((CacheWriteOptions) options).expiration();
ByteBuf buf = channel.alloc().buffer(codec.estimateHeaderSize(header) + ByteBufUtil.estimateArraySize(keyBytes) +
codec.estimateExpirationSize(expiration) + 8);
codec.writeHeader(buf, header);
ByteBufUtil.writeArray(buf, keyBytes);
codec.writeExpirationParams(buf, expiration);
buf.writeLong(version);
channel.writeAndFlush(buf);
complete(new ChannelOutputStream(channel, this));
}
@Override
public void releaseChannel(Channel channel) {
}
@Override
public boolean completeExceptionally(Throwable ex) {
closeFuture.completeExceptionally(ex);
return super.completeExceptionally(ex);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (HotRodConstants.isSuccess(status) || HotRodConstants.isNotExecuted(status) && (version != VERSION_PUT)) {
if (HotRodConstants.isSuccess(status)) {
statsDataStore();
}
closeFuture.complete(null);
} else {
closeFuture.completeExceptionally(new InvalidResponseException("Unexpected response status: " + Integer.toHexString(status)));
}
}
@Override
public void onError(Channel channel, Throwable error) {
completeExceptionally(error);
}
@Override
public void onClose(Channel channel) throws IOException {
try {
closeFuture.join();
} catch (CompletionException e) {
throw new IOException(e.getCause());
} finally {
// When the channel is closed during the operation it's already released; don't do that again
if (channel.isActive()) {
operationContext.getChannelFactory().releaseChannel(channel);
}
}
}
}
| 3,529
| 35.391753
| 135
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/QuerySerializer.java
|
package org.infinispan.hotrod.impl.operations;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.infinispan.commons.dataconversion.MediaType.APPLICATION_JSON;
import static org.infinispan.commons.dataconversion.MediaType.MATCH_ALL;
import java.io.IOException;
import org.infinispan.api.common.query.QueryRequest;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.dataconversion.internal.Json;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.hotrod.exceptions.HotRodClientException;
import org.infinispan.hotrod.impl.query.QueryResponse;
import org.infinispan.hotrod.impl.query.RemoteQuery;
import org.infinispan.protostream.ProtobufUtil;
import org.infinispan.protostream.SerializationContext;
/**
* @since 9.4
*/
enum QuerySerializer {
JSON(APPLICATION_JSON) {
@Override
byte[] serializeQueryRequest(RemoteQuery remoteQuery, QueryRequest queryRequest) {
Json object = Json.make(queryRequest);
return object.toString().getBytes(UTF_8);
}
@Override
QueryResponse readQueryResponse(Marshaller marshaller, RemoteQuery remoteQuery, byte[] bytesResponse) {
Json response = Json.read(new String(bytesResponse, UTF_8));
//return new JsonClientQueryResponse(response);
return null;
}
},
DEFAULT(MATCH_ALL) {
@Override
byte[] serializeQueryRequest(RemoteQuery remoteQuery, QueryRequest queryRequest) {
final SerializationContext serCtx = remoteQuery.getSerializationContext();
Marshaller marshaller;
if (serCtx != null) {
try {
return ProtobufUtil.toByteArray(serCtx, queryRequest);
} catch (IOException e) {
throw new HotRodClientException(e);
}
} else {
marshaller = remoteQuery.getCache().getHotRodTransport().getMarshaller();
try {
return marshaller.objectToByteBuffer(queryRequest);
} catch (IOException e) {
throw new HotRodClientException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new HotRodClientException(e);
}
}
}
@Override
QueryResponse readQueryResponse(Marshaller marshaller, RemoteQuery remoteQuery, byte[] bytesResponse) {
SerializationContext serCtx = remoteQuery.getSerializationContext();
if (serCtx != null) {
try {
return ProtobufUtil.fromByteArray(serCtx, bytesResponse, QueryResponse.class);
} catch (IOException e) {
throw new HotRodClientException(e);
}
} else {
try {
return (QueryResponse) marshaller.objectFromByteBuffer(bytesResponse);
} catch (IOException | ClassNotFoundException e) {
throw new HotRodClientException(e);
}
}
}
};
private final MediaType mediaType;
QuerySerializer(MediaType mediaType) {
this.mediaType = mediaType;
}
static QuerySerializer findByMediaType(MediaType mediaType) {
return mediaType != null && mediaType.match(APPLICATION_JSON) ? JSON : DEFAULT;
}
@Override
public String toString() {
return mediaType.getTypeSubtype();
}
abstract byte[] serializeQueryRequest(RemoteQuery remoteQuery, QueryRequest queryRequest);
abstract QueryResponse readQueryResponse(Marshaller marshaller, RemoteQuery remoteQuery, byte[] bytesResponse);
}
| 3,593
| 34.584158
| 114
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AddBloomNearCacheClientListenerOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.event.impl.ClientEventDispatcher;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.cache.RemoteCache;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HotRodClientDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
*
*/
public class AddBloomNearCacheClientListenerOperation extends ClientListenerOperation {
private final int bloomFilterBits;
private final RemoteCache<?, ?> remoteCache;
protected AddBloomNearCacheClientListenerOperation(OperationContext operationContext, CacheOptions options,
Object listener,
DataFormat dataFormat,
int bloomFilterBits, RemoteCache<?, ?> remoteCache) {
this(operationContext, options, generateListenerId(), listener, dataFormat, bloomFilterBits, remoteCache);
}
private AddBloomNearCacheClientListenerOperation(OperationContext operationContext,
CacheOptions options,
byte[] listenerId, Object listener,
DataFormat dataFormat,
int bloomFilterBits, RemoteCache<?, ?> remoteCache) {
super(operationContext, ADD_BLOOM_FILTER_NEAR_CACHE_LISTENER_REQUEST, ADD_BLOOM_FILTER_NEAR_CACHE_LISTENER_RESPONSE,
options, listenerId, dataFormat, listener);
this.bloomFilterBits = bloomFilterBits;
this.remoteCache = remoteCache;
}
public AddBloomNearCacheClientListenerOperation copy() {
return new AddBloomNearCacheClientListenerOperation(operationContext, options, listenerId, listener, dataFormat(), bloomFilterBits, remoteCache);
}
@Override
protected void actualExecute(Channel channel) {
channel.pipeline().get(HotRodClientDecoder.class).registerOperation(channel, this);
operationContext.getListenerNotifier().addDispatcher(ClientEventDispatcher.create(this,
address, () -> cleanup(channel), remoteCache));
ByteBuf buf = channel.alloc().buffer();
Codec codec = operationContext.getCodec();
codec.writeHeader(buf, header);
ByteBufUtil.writeArray(buf, listenerId);
codec.writeBloomFilter(buf, bloomFilterBits);
channel.writeAndFlush(buf);
}
}
| 2,682
| 43.716667
| 151
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/RemoveClientListenerOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.concurrent.CompletableFuture;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ChannelOperation;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Remove client listener operation. In order to avoid issues with concurrent event consumption, removing client
* listener operation is sent in a separate connection to the one used for event consumption, but it must go to the same
* node where the listener has been added.
*/
public class RemoveClientListenerOperation extends HotRodOperation<Void> implements ChannelOperation {
private final Object listener;
private byte[] listenerId;
protected RemoveClientListenerOperation(OperationContext operationContext, CacheOptions options, Object listener) {
super(operationContext, REMOVE_CLIENT_LISTENER_REQUEST, REMOVE_CLIENT_LISTENER_RESPONSE, options);
this.listener = listener;
}
protected void fetchChannelAndInvoke() {
listenerId = operationContext.getListenerNotifier().findListenerId(listener);
if (listenerId != null) {
SocketAddress address = operationContext.getListenerNotifier().findAddress(listenerId);
operationContext.getChannelFactory().fetchChannelAndInvoke(address, this);
} else {
complete(null);
}
}
@Override
public void invoke(Channel channel) {
scheduleRead(channel);
sendArrayOperation(channel, listenerId);
releaseChannel(channel);
}
@Override
public void cancel(SocketAddress address, Throwable cause) {
completeExceptionally(cause);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (HotRodConstants.isSuccess(status) || HotRodConstants.isNotExecuted(status)) {
operationContext.getListenerNotifier().removeClientListener(listenerId);
}
complete(null);
}
@Override
public CompletableFuture<Void> execute() {
try {
fetchChannelAndInvoke();
} catch (Exception e) {
completeExceptionally(e);
}
return this;
}
}
| 2,335
| 33.352941
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AuthOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.util.concurrent.CompletableFuture;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Performs a step in the challenge/response authentication operation
*
* @since 14.0
*/
public class AuthOperation extends HotRodOperation<byte[]> {
private final Channel channel;
private final String saslMechanism;
private final byte[] response;
public AuthOperation(OperationContext operationContext, Channel channel, String saslMechanism, byte[] response) {
super(operationContext, AUTH_REQUEST, AUTH_RESPONSE, CacheOptions.DEFAULT);
this.channel = channel;
this.saslMechanism = saslMechanism;
this.response = response;
}
@Override
public CompletableFuture<byte[]> execute() {
if (!channel.isActive()) {
throw HOTROD.channelInactive(channel.remoteAddress(), channel.remoteAddress());
}
byte[] saslMechBytes = saslMechanism.getBytes(HOTROD_STRING_CHARSET);
scheduleRead(channel);
Codec codec = operationContext.getCodec();
ByteBuf buf = channel.alloc().buffer(codec.estimateHeaderSize(header) +
ByteBufUtil.estimateArraySize(saslMechBytes) + ByteBufUtil.estimateArraySize(response));
codec.writeHeader(buf, header);
ByteBufUtil.writeArray(buf, saslMechBytes);
ByteBufUtil.writeArray(buf, response);
channel.writeAndFlush(buf);
return this;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
boolean complete = buf.readUnsignedByte() > 0;
byte[] challenge = ByteBufUtil.readArray(buf);
complete(complete ? null : challenge);
}
}
| 1,998
| 31.770492
| 116
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AbstractPutIfAbsentOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements "putIfAbsent" operation as described in <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public abstract class AbstractPutIfAbsentOperation<K, T> extends AbstractKeyValueOperation<K, T> {
public AbstractPutIfAbsentOperation(OperationContext operationContext,
K key, byte[] keyBytes, byte[] value,
CacheWriteOptions options,
DataFormat dataFormat) {
super(operationContext, PUT_IF_ABSENT_REQUEST, PUT_IF_ABSENT_RESPONSE, key, keyBytes, value, options, dataFormat);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
sendKeyValueOperation(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (HotRodConstants.isNotExecuted(status)) {
completeResponseExistent(buf, status);
} else {
completeResponseNotExistent(buf, status);
}
}
/**
* Complete the response on case where the key already has a value associated with.
*
* @param buf: the response buffer.
* @param status: the response status.
*/
abstract void completeResponseExistent(ByteBuf buf, short status);
/**
* Complete the response on case where the key was not associated with a value previously.
*
* @param buf: the response buffer.
* @param status: the response status.
*/
abstract void completeResponseNotExistent(ByteBuf buf, short status);
}
| 1,876
| 31.929825
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/SizeOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
public class SizeOperation extends RetryOnFailureOperation<Integer> {
protected SizeOperation(OperationContext operationContext, CacheOptions options) {
super(operationContext, SIZE_REQUEST, SIZE_RESPONSE, options, null);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
complete(ByteBufUtil.readVInt(buf));
}
}
| 780
| 29.038462
| 85
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/FaultTolerantPingOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.handler.codec.DecoderException;
/**
* A fault tolerant ping operation that can survive to node failures.
*
* @since 14.0
*/
public class FaultTolerantPingOperation extends RetryOnFailureOperation<PingResponse> {
protected FaultTolerantPingOperation(OperationContext operationContext, CacheOptions options) {
super(operationContext, PING_REQUEST, PING_RESPONSE, options, null);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
throw new IllegalStateException("Fault tolerant ping not called manually.");
}
@Override
protected Throwable handleException(Throwable cause, Channel channel, SocketAddress address) {
while (cause instanceof DecoderException && cause.getCause() != null) {
cause = cause.getCause();
}
PingResponse pingResponse = new PingResponse(cause);
if (pingResponse.isCacheNotFound()) {
complete(pingResponse);
return null;
}
return super.handleException(cause, channel, address);
}
}
| 1,429
| 30.086957
| 98
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/PutIfAbsentOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.jboss.logging.BasicLogger;
import io.netty.buffer.ByteBuf;
/**
* Implements "putIfAbsent" operation as described in <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class PutIfAbsentOperation<K, V> extends AbstractPutIfAbsentOperation<K, CacheEntry<K, V>> {
private static final BasicLogger log = LogFactory.getLog(PutIfAbsentOperation.class);
public PutIfAbsentOperation(OperationContext operationContext,
K key, byte[] keyBytes, byte[] value,
CacheWriteOptions options,
DataFormat dataFormat) {
super(operationContext, key, keyBytes, value, options, dataFormat);
}
@Override
void completeResponseExistent(ByteBuf buf, short status) {
CacheEntry<K, V> prevValue = returnPossiblePrevValue(buf, status);
if (HotRodConstants.hasPrevious(status)) {
statsDataRead(true);
}
if (log.isTraceEnabled()) {
log.tracef("Returning from putIfAbsent: %s", prevValue);
}
complete(prevValue);
}
@Override
void completeResponseNotExistent(ByteBuf buf, short status) {
if (log.isTraceEnabled()) {
log.tracef("Returning from putIfAbsent created new entry");
}
statsDataStore();
complete(null);
}
@Override
protected int flags() {
return super.flags() | PrivateHotRodFlag.FORCE_RETURN_VALUE.getFlagInt();
}
}
| 1,753
| 30.321429
| 119
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AbstractPutOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements "put" as defined by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod protocol
* specification</a>.
*
* @since 14.0
*/
public abstract class AbstractPutOperation<K, T> extends AbstractKeyValueOperation<K, T> {
public AbstractPutOperation(OperationContext operationContext, K key, byte[] keyBytes, byte[] value, CacheWriteOptions options, DataFormat dataFormat) {
super(operationContext, PUT_REQUEST, PUT_RESPONSE, key, keyBytes, value, options, dataFormat);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
sendKeyValueOperation(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
throw new IllegalStateException("Put operation not called manually.");
}
}
| 1,109
| 32.636364
| 155
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/BulkGetKeysOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.marshall.MarshallerUtil.bytes2obj;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Reads all keys. Similar to <a href="http://community.jboss.org/wiki/HotRodBulkGet-Design">BulkGet</a>, but without
* the entry values.
*
* @since 14.0
*/
public class BulkGetKeysOperation<K> extends StatsAffectingRetryingOperation<Set<K>> {
private final int scope;
private final Set<K> result = new HashSet<>();
public BulkGetKeysOperation(OperationContext operationContext, CacheOptions options, int scope, DataFormat dataFormat) {
super(operationContext, BULK_GET_KEYS_REQUEST, BULK_GET_KEYS_RESPONSE, options, dataFormat);
this.scope = scope;
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
Codec codec = operationContext.getCodec();
ByteBuf buf = channel.alloc().buffer(codec.estimateHeaderSize(header) + ByteBufUtil.estimateVIntSize(scope));
codec.writeHeader(buf, header);
ByteBufUtil.writeVInt(buf, scope);
channel.writeAndFlush(buf);
}
@Override
protected void reset() {
super.reset();
result.clear();
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
while (buf.readUnsignedByte() == 1) { //there's more!
result.add(bytes2obj(operationContext.getChannelFactory().getMarshaller(), ByteBufUtil.readArray(buf), dataFormat().isObjectStorage(), operationContext.getConfiguration().getClassAllowList()));
decoder.checkpoint();
}
complete(result);
}
}
| 1,991
| 33.947368
| 202
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/ContainsKeyOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements "containsKey" operation as described in <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class ContainsKeyOperation<K> extends AbstractKeyOperation<K, Boolean> {
public ContainsKeyOperation(OperationContext operationContext, K key, byte[] keyBytes, CacheOptions options, DataFormat dataFormat) {
super(operationContext, CONTAINS_KEY_REQUEST, CONTAINS_KEY_RESPONSE, key, keyBytes, options, dataFormat);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
sendArrayOperation(channel, keyBytes);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
complete(!HotRodConstants.isNotExist(status) && HotRodConstants.isSuccess(status));
}
}
| 1,184
| 33.852941
| 136
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/GetAllParallelOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
/**
*
*/
public class GetAllParallelOperation<K, V> extends ParallelHotRodOperation<Map<K, V>, GetAllOperation<K, V>> {
private final Set<byte[]> keys;
protected GetAllParallelOperation(OperationContext operationContext, Set<byte[]> keys, CacheOptions options, DataFormat dataFormat) {
super(operationContext, options, dataFormat);
this.keys = keys;
}
@Override
protected List<GetAllOperation<K, V>> mapOperations() {
Map<SocketAddress, Set<byte[]>> splittedKeys = new HashMap<>();
for (byte[] key : keys) {
SocketAddress socketAddress = operationContext.getChannelFactory().getHashAwareServer(key, operationContext.getCacheNameBytes());
Set<byte[]> keys = splittedKeys.computeIfAbsent(socketAddress, k -> new HashSet<>());
keys.add(key);
}
return splittedKeys.values().stream().map(
keysSubset -> new GetAllOperation<K, V>(operationContext, keysSubset, options, dataFormat())).collect(Collectors.toList());
}
@Override
protected Map<K, V> createCollector() {
return new HashMap<>();
}
@Override
protected void combine(Map<K, V> collector, Map<K, V> result) {
collector.putAll(result);
}
}
| 1,551
| 30.04
| 138
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/IterationNextOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.api.common.CacheEntryMetadata;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.cache.CacheEntryImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryMetadataImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryVersionImpl;
import org.infinispan.hotrod.impl.iteration.KeyTracker;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* @since 14.0
*/
public class IterationNextOperation<K, E> extends HotRodOperation<IterationNextResponse<K, E>> {
private final byte[] iterationId;
private final Channel channel;
private final KeyTracker segmentKeyTracker;
private byte[] finishedSegments;
private int entriesSize = -1;
private List<CacheEntry<K, E>> entries;
private int projectionsSize;
private int untrackedEntries;
protected IterationNextOperation(OperationContext operationContext, CacheOptions options, byte[] iterationId, Channel channel,
KeyTracker segmentKeyTracker, DataFormat dataFormat) {
super(operationContext, ITERATION_NEXT_REQUEST, ITERATION_NEXT_RESPONSE, options, dataFormat);
this.iterationId = iterationId;
this.channel = channel;
this.segmentKeyTracker = segmentKeyTracker;
}
@Override
public CompletableFuture<IterationNextResponse<K, E>> execute() {
if (!channel.isActive()) {
throw HOTROD.channelInactive(channel.remoteAddress(), channel.remoteAddress());
}
scheduleRead(channel);
sendArrayOperation(channel, iterationId);
return this;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (entriesSize < 0) {
finishedSegments = ByteBufUtil.readArray(buf);
entriesSize = ByteBufUtil.readVInt(buf);
if (entriesSize == 0) {
IntSet finishedSegmentSet = IntSets.from(finishedSegments);
segmentKeyTracker.segmentsFinished(finishedSegmentSet);
complete(new IterationNextResponse(status, Collections.emptyList(), finishedSegmentSet, false));
return;
}
entries = new ArrayList<>(entriesSize);
projectionsSize = -1;
decoder.checkpoint();
}
if (projectionsSize < 0) {
projectionsSize = operationContext.getCodec().readProjectionSize(buf);
decoder.checkpoint();
}
while (entries.size() + untrackedEntries < entriesSize) {
short meta = operationContext.getCodec().readMeta(buf);
long creation = -1;
int lifespan = -1;
long lastUsed = -1;
int maxIdle = -1;
CacheEntryMetadata metadata;
if (meta == 1) {
short flags = buf.readUnsignedByte();
if ((flags & INFINITE_LIFESPAN) != INFINITE_LIFESPAN) {
creation = buf.readLong();
lifespan = ByteBufUtil.readVInt(buf);
}
if ((flags & INFINITE_MAXIDLE) != INFINITE_MAXIDLE) {
lastUsed = buf.readLong();
maxIdle = ByteBufUtil.readVInt(buf);
}
CacheEntryExpiration expiration;
if (lifespan < 0) {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.IMMORTAL;
} else {
expiration = CacheEntryExpiration.withMaxIdle(Duration.ofSeconds(maxIdle));
}
} else {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.withLifespan(Duration.ofSeconds(lifespan));
} else {
expiration = CacheEntryExpiration.withLifespanAndMaxIdle(Duration.ofSeconds(lifespan), Duration.ofSeconds(maxIdle));
}
}
metadata = new CacheEntryMetadataImpl(creation, lastUsed, expiration, new CacheEntryVersionImpl(buf.readLong()));
} else {
metadata = new CacheEntryMetadataImpl();
}
byte[] key = ByteBufUtil.readArray(buf);
E value;
if (projectionsSize > 1) {
Object[] projections = new Object[projectionsSize];
for (int j = 0; j < projectionsSize; j++) {
projections[j] = unmarshallValue(ByteBufUtil.readArray(buf));
}
value = (E) projections;
} else {
value = unmarshallValue(ByteBufUtil.readArray(buf));
}
if (segmentKeyTracker.track(key, status, operationContext.getConfiguration().getClassAllowList())) {
K unmarshallKey = dataFormat().keyToObj(key, operationContext.getConfiguration().getClassAllowList());
entries.add(new CacheEntryImpl<>(unmarshallKey, value, metadata));
} else {
untrackedEntries++;
}
decoder.checkpoint();
}
IntSet finishedSegmentSet = IntSets.from(finishedSegments);
segmentKeyTracker.segmentsFinished(finishedSegmentSet);
if (HotRodConstants.isInvalidIteration(status)) {
throw HOTROD.errorRetrievingNext(new String(iterationId, HOTROD_STRING_CHARSET));
}
complete(new IterationNextResponse<>(status, entries, finishedSegmentSet, entriesSize > 0));
}
private <M> M unmarshallValue(byte[] bytes) {
return dataFormat().valueToObj(bytes, operationContext.getConfiguration().getClassAllowList());
}
}
| 6,048
| 39.326667
| 134
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/PutAllParallelOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.infinispan.api.common.CacheWriteOptions;
import org.infinispan.hotrod.impl.DataFormat;
/**
*
*/
public class PutAllParallelOperation extends ParallelHotRodOperation<Void, PutAllOperation> {
protected final Map<byte[], byte[]> map;
public PutAllParallelOperation(OperationContext operationContext, Map<byte[], byte[]> map, CacheWriteOptions options,
DataFormat dataFormat) {
super(operationContext, options, dataFormat);
this.map = map;
}
@Override
protected List<PutAllOperation> mapOperations() {
Map<SocketAddress, Map<byte[], byte[]>> splittedMaps = new HashMap<>();
for (Map.Entry<byte[], byte[]> entry : map.entrySet()) {
SocketAddress socketAddress = operationContext.getChannelFactory().getHashAwareServer(entry.getKey(), operationContext.getCacheNameBytes());
Map<byte[], byte[]> keyValueMap = splittedMaps.computeIfAbsent(socketAddress, k -> new HashMap<>());
keyValueMap.put(entry.getKey(), entry.getValue());
}
return splittedMaps.values().stream().map(
mapSubset -> new PutAllOperation(operationContext, mapSubset, (CacheWriteOptions) options, dataFormat())).collect(Collectors.toList());
}
@Override
protected Void createCollector() {
return null;
}
@Override
protected void combine(Void collector, Void result) {
// Nothing to do
}
}
| 1,614
| 31.3
| 149
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/GetAllOperation.java
|
package org.infinispan.hotrod.impl.operations;
import java.net.SocketAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.DataFormat;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Implements "getAll" as defined by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod protocol
* specification</a>.
*
* @since 14.0
*/
public class GetAllOperation<K, V> extends StatsAffectingRetryingOperation<Map<K, V>> {
protected final Set<byte[]> keys;
private Map<K, V> result;
private int size = -1;
public GetAllOperation(OperationContext operationContext,
Set<byte[]> keys, CacheOptions options, DataFormat dataFormat) {
super(operationContext, GET_ALL_REQUEST, GET_ALL_RESPONSE, options, dataFormat);
this.keys = keys;
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
int bufSize = operationContext.getCodec().estimateHeaderSize(header) + ByteBufUtil.estimateVIntSize(keys.size());
for (byte[] key : keys) {
bufSize += ByteBufUtil.estimateArraySize(key);
}
ByteBuf buf = channel.alloc().buffer(bufSize);
operationContext.getCodec().writeHeader(buf, header);
ByteBufUtil.writeVInt(buf, keys.size());
for (byte[] key : keys) {
ByteBufUtil.writeArray(buf, key);
}
channel.writeAndFlush(buf);
}
@Override
protected void reset() {
super.reset();
result = null;
size = -1;
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
operationContext.getChannelFactory().fetchChannelAndInvoke(keys.iterator().next(), failedServers, operationContext.getCacheNameBytes(), this);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (size < 0) {
size = ByteBufUtil.readVInt(buf);
result = new HashMap<>(size);
decoder.checkpoint();
}
while (result.size() < size) {
K key = dataFormat().keyToObj(ByteBufUtil.readArray(buf), operationContext.getConfiguration().getClassAllowList());
V value = dataFormat().valueToObj(ByteBufUtil.readArray(buf), operationContext.getConfiguration().getClassAllowList());
result.put(key, value);
decoder.checkpoint();
}
statsDataRead(true, size);
statsDataRead(false, keys.size() - size);
complete(result);
}
}
| 2,725
| 32.243902
| 148
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/ClearOperation.java
|
package org.infinispan.hotrod.impl.operations;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Corresponds to clear operation as defined by <a href="http://community.jboss.org/wiki/HotRodProtocol">Hot Rod
* protocol specification</a>.
*
* @since 14.0
*/
public class ClearOperation extends RetryOnFailureOperation<Void> {
public ClearOperation(OperationContext operationContext, CacheOptions options) {
super(operationContext, CLEAR_REQUEST, CLEAR_RESPONSE, options, null);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
complete(null);
}
}
| 864
| 26.903226
| 112
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/operations/AuthMechListOperation.java
|
package org.infinispan.hotrod.impl.operations;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Obtains a list of SASL authentication mechanisms supported by the server
*
* @since 14.0
*/
public class AuthMechListOperation extends HotRodOperation<List<String>> {
private final Channel channel;
private int mechCount = -1;
private List<String> result;
public AuthMechListOperation(OperationContext operationContext, Channel channel) {
super(operationContext, AUTH_MECH_LIST_REQUEST, AUTH_MECH_LIST_RESPONSE, CacheOptions.DEFAULT);
this.channel = channel;
}
@Override
public CompletableFuture<List<String>> execute() {
if (!channel.isActive()) {
throw HOTROD.channelInactive(channel.remoteAddress(), channel.remoteAddress());
}
scheduleRead(channel);
sendHeader(channel);
return this;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (mechCount < 0) {
mechCount = ByteBufUtil.readVInt(buf);
result = new ArrayList<>(mechCount);
decoder.checkpoint();
}
while (result.size() < mechCount) {
result.add(ByteBufUtil.readString(buf));
decoder.checkpoint();
}
complete(result);
}
}
| 1,645
| 28.927273
| 101
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/query/QueryResponse.java
|
package org.infinispan.hotrod.impl.query;
/**
* @since 14.0
**/
public class QueryResponse {
}
| 98
| 11.375
| 41
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/query/RemoteQuery.java
|
package org.infinispan.hotrod.impl.query;
import org.infinispan.hotrod.impl.cache.RemoteCache;
import org.infinispan.protostream.SerializationContext;
/**
* @since 14.0
**/
public class RemoteQuery {
public SerializationContext getSerializationContext() {
return null;
}
public Object getQueryRequest() {
return null;
}
public RemoteCache<Object, Object> getCache() {
return null;
}
}
| 427
| 18.454545
| 58
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/tcp/RoundRobinBalancingStrategy.java
|
package org.infinispan.hotrod.impl.transport.tcp;
import java.net.SocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import org.infinispan.hotrod.configuration.FailoverRequestBalancingStrategy;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
/**
* Round-robin implementation for {@link FailoverRequestBalancingStrategy}.
*
* @since 14.0
*/
public class RoundRobinBalancingStrategy implements FailoverRequestBalancingStrategy {
private static final Log log = LogFactory.getLog(RoundRobinBalancingStrategy.class);
private int index;
private SocketAddress[] servers;
@Override
public void setServers(Collection<SocketAddress> servers) {
this.servers = servers.toArray(new SocketAddress[servers.size()]);
// Always start with a random server after a topology update
index = ThreadLocalRandom.current().nextInt(this.servers.length);
if (log.isTraceEnabled()) {
log.tracef("New server list is: " + Arrays.toString(this.servers));
}
}
/**
* @param failedServers Servers that should not be returned (if any other are available)
*/
@Override
public SocketAddress nextServer(Set<SocketAddress> failedServers) {
for (int i = 0;; ++i) {
SocketAddress server = getServerByIndex(index++);
// don't allow index to overflow and have a negative value
if (index >= servers.length)
index = 0;
if (failedServers == null || !failedServers.contains(server) || i >= failedServers.size()) {
if (log.isTraceEnabled()) {
if (failedServers == null)
log.tracef("Found server %s", server);
else
log.tracef("Found server %s, with failed servers %s", server, failedServers.toString());
}
return server;
}
}
}
private SocketAddress getServerByIndex(int pos) {
return servers[pos];
}
public SocketAddress[] getServers() {
return servers;
}
}
| 2,137
| 30.441176
| 106
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/handler/CacheRequestProcessor.java
|
package org.infinispan.hotrod.impl.transport.handler;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.time.Duration;
import java.util.List;
import java.util.Set;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.api.common.CacheEntryMetadata;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.configuration.ProtocolVersion;
import org.infinispan.hotrod.exceptions.InvalidResponseException;
import org.infinispan.hotrod.impl.cache.CacheEntryImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryMetadataImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryVersionImpl;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.AbstractKeyOperation;
import org.infinispan.hotrod.impl.operations.AbstractPutOperation;
import org.infinispan.hotrod.impl.operations.GetOperation;
import org.infinispan.hotrod.impl.operations.HotRodOperation;
import org.infinispan.hotrod.impl.operations.PingResponse;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.protocol.HeaderParams;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
public class CacheRequestProcessor {
private static final Log log = LogFactory.getLog(CacheRequestProcessor.class);
protected final ChannelFactory channelFactory;
private final HotRodConfiguration configuration;
public CacheRequestProcessor(ChannelFactory channelFactory, HotRodConfiguration configuration) {
this.channelFactory = channelFactory;
this.configuration = configuration;
}
public void pingResponse(HotRodOperation<Object> operation, short status, short protocolVersion,
MediaType keyMediaType, MediaType valueMediaType, Set<Short> serverOps) {
if (!HotRodConstants.isSuccess(status)) {
String hexStatus = Integer.toHexString(status);
if (log.isTraceEnabled())
log.tracef("Unknown response status: %s", hexStatus);
throw new InvalidResponseException("Unexpected response status: " + hexStatus);
}
ProtocolVersion version = ProtocolVersion.getBestVersion(protocolVersion);
PingResponse response = new PingResponse(status, version, keyMediaType, valueMediaType, serverOps);
if (response.getVersion() != null && configuration.version() == ProtocolVersion.PROTOCOL_VERSION_AUTO) {
channelFactory.getCacheOperationsFactory().setCodec(Codec.forProtocol(response.getVersion()));
}
operation.complete(response);
}
public void getResponse(HotRodOperation<Object> operation, short status, byte[] data) {
assert operation instanceof GetOperation : "Operation not get: " + operation.getClass();
GetOperation<Object, Object> op = (GetOperation<Object, Object>) operation;
op.statsDataRead(!HotRodConstants.isNotExist(status) && HotRodConstants.isSuccess(status));
op.complete(op.dataFormat().valueToObj(data, configuration.getClassAllowList()));
}
public void putResponse(HotRodOperation<Object> operation, Object value, short status) {
assert operation instanceof AbstractPutOperation : "Operation is not put: " + operation.getClass();
AbstractPutOperation<Object, Object> op = (AbstractPutOperation) operation;
op.statsDataStore();
if (HotRodConstants.hasPrevious(status)) op.statsDataRead(true);
op.complete(value);
}
public void topologyUpdate(HotRodOperation<?> operation, int responseTopologyId, InetSocketAddress[] addresses,
List<List<Integer>> segmentOwners, short hashFunctionVersion) {
HeaderParams params = (HeaderParams) operation.header();
SocketAddress[][] segmentOwnersArray = null;
if (segmentOwners != null) {
segmentOwnersArray = new SocketAddress[segmentOwners.size()][];
for (int i = 0; i < segmentOwners.size(); i++) {
List<Integer> ownersInSegment = segmentOwners.get(i);
segmentOwnersArray[i] = new SocketAddress[ownersInSegment.size()];
for (int j = 0; j < ownersInSegment.size(); j++) {
segmentOwnersArray[i][j] = addresses[ownersInSegment.get(j)];
}
}
}
channelFactory.receiveTopology(params.cacheName(), params.getTopologyAge(), responseTopologyId, addresses,
segmentOwnersArray, hashFunctionVersion);
}
public CacheEntry<?, ?> createCacheEntry(HotRodOperation<?> operation, long creation, int lifespan,
long lastUsed, int maxIdle, long version, byte[] value) {
assert operation instanceof AbstractKeyOperation : "Hot Rod operation type not accepted: " + operation.getClass();
AbstractKeyOperation<?, ?> op = (AbstractKeyOperation<?, ?>) operation;
CacheEntryExpiration expiration;
if (lifespan < 0) {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.IMMORTAL;
} else {
expiration = CacheEntryExpiration.withMaxIdle(Duration.ofSeconds(maxIdle));
}
} else {
if (maxIdle < 0) {
expiration = CacheEntryExpiration.withLifespan(Duration.ofSeconds(lifespan));
} else {
expiration = CacheEntryExpiration.withLifespanAndMaxIdle(Duration.ofSeconds(lifespan), Duration.ofSeconds(maxIdle));
}
}
Object v = parseToObject(operation, value);
CacheEntryMetadata metadata = new CacheEntryMetadataImpl(creation, lastUsed, expiration, new CacheEntryVersionImpl(version));
return new CacheEntryImpl<>(op.operationKey(), v, metadata);
}
public Object parseToObject(HotRodOperation<?> operation, byte[] data) {
assert operation.header() instanceof HeaderParams : "Unknown header type";
HeaderParams params = (HeaderParams) operation.header();
return params.dataFormat().valueToObj(data, configuration.getClassAllowList());
}
}
| 6,186
| 49.713115
| 131
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/IOURingNativeTransport.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.util.concurrent.ExecutorService;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.SocketChannel;
import io.netty.incubator.channel.uring.IOUringDatagramChannel;
import io.netty.incubator.channel.uring.IOUringEventLoopGroup;
import io.netty.incubator.channel.uring.IOUringSocketChannel;
/**
* @since 14.0
**/
public class IOURingNativeTransport {
public static Class<? extends SocketChannel> socketChannelClass() {
return IOUringSocketChannel.class;
}
public static EventLoopGroup createEventLoopGroup(int maxExecutors, ExecutorService executorService) {
return new IOUringEventLoopGroup(maxExecutors, executorService);
}
public static Class<? extends DatagramChannel> datagramChannelClass() {
return IOUringDatagramChannel.class;
}
}
| 912
| 30.482759
| 105
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelInboundHandlerDefaults.java
|
package org.infinispan.hotrod.impl.transport.netty;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandler;
/**
* This is effectively the same as {@link io.netty.channel.ChannelInboundHandlerAdapter} but allows
* to be inherited in a class with another superclass.
*/
public interface ChannelInboundHandlerDefaults extends ChannelInboundHandler {
@Override
default void channelRegistered(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelRegistered();
}
@Override
default void channelUnregistered(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelUnregistered();
}
@Override
default void channelActive(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelActive();
}
@Override
default void channelInactive(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelInactive();
}
@Override
default void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
ctx.fireChannelRead(msg);
}
@Override
default void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelReadComplete();
}
@Override
default void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
ctx.fireUserEventTriggered(evt);
}
@Override
default void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
ctx.fireChannelWritabilityChanged();
}
@Override
default void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
ctx.fireExceptionCaught(cause);
}
@Override
default void handlerAdded(ChannelHandlerContext ctx) throws Exception {
// noop
}
@Override
default void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
// noop
}
}
| 1,865
| 27.272727
| 99
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/HeaderDecoder.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_CREATED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_EXPIRED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_MODIFIED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_REMOVED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.COUNTER_EVENT_RESPONSE;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.event.impl.AbstractClientEvent;
import org.infinispan.hotrod.exceptions.TransportException;
import org.infinispan.hotrod.impl.counter.HotRodCounterEvent;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.AddClientListenerOperation;
import org.infinispan.hotrod.impl.operations.HotRodOperation;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.util.Signal;
public class HeaderDecoder extends HintedReplayingDecoder<HeaderDecoder.State> {
private static final Log log = LogFactory.getLog(HeaderDecoder.class);
// used for HeaderOrEventDecoder, too, as the function is similar
public static final String NAME = "header-decoder";
private final OperationContext operationContext;
// operations may be registered in any thread, and are removed in event loop thread
private final ConcurrentMap<Long, HotRodOperation<?>> incomplete = new ConcurrentHashMap<>();
private final List<byte[]> listeners = new ArrayList<>();
private volatile boolean closing;
HotRodOperation<?> operation;
private short status;
private short receivedOpCode;
public HeaderDecoder(OperationContext operationContext) {
super(State.READ_MESSAGE_ID);
this.operationContext = operationContext;
}
@Override
public boolean isSharable() {
return false;
}
public void registerOperation(Channel channel, HotRodOperation<?> operation) {
if (log.isTraceEnabled()) {
log.tracef("Registering operation %s(%08X) with id %d on %s",
operation, System.identityHashCode(operation), operation.header().messageId(), channel);
}
if (closing) {
throw HOTROD.noMoreOperationsAllowed();
}
HotRodOperation<?> prev = incomplete.put(operation.header().messageId(), operation);
assert prev == null : "Already registered: " + prev + ", new: " + operation;
operation.scheduleTimeout(channel);
}
public void tryCompleteExceptionally(long messageId, Throwable t) {
HotRodOperation<?> op = getAndRemove(messageId);
if (op != null) op.completeExceptionally(t);
else log.errorf(t, "Not found operation %d to complete with exception", messageId);
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
try {
Codec codec = operationContext.getCodec();
switch (state()) {
case READ_MESSAGE_ID:
long messageId = codec.readMessageId(in);
receivedOpCode = codec.readOpCode(in);
switch (receivedOpCode) {
case CACHE_ENTRY_CREATED_EVENT_RESPONSE:
case CACHE_ENTRY_MODIFIED_EVENT_RESPONSE:
case CACHE_ENTRY_REMOVED_EVENT_RESPONSE:
case CACHE_ENTRY_EXPIRED_EVENT_RESPONSE:
if (codec.allowOperationsAndEvents()) {
operation = messageId == 0 ? null : incomplete.get(messageId);
} else {
operation = null;
messageId = 0;
}
// The operation may be null even if the messageId was set: the server does not really wait
// until all events are sent, only until these are queued. In such case the operation may
// complete earlier.
if (operation != null && !(operation instanceof AddClientListenerOperation)) {
throw HOTROD.operationIsNotAddClientListener(messageId, operation.toString());
} else if (log.isTraceEnabled()) {
log.tracef("Received event for request %d", messageId, operation);
}
checkpoint(State.READ_CACHE_EVENT);
// the loop in HintedReplayingDecoder will call decode again
return;
case COUNTER_EVENT_RESPONSE:
checkpoint(State.READ_COUNTER_EVENT);
// the loop in HintedReplayingDecoder will call decode again
return;
}
if (messageId == 0) {
// let's read the header even at this stage; it should throw an error and the other throw statement
// won't be reached
codec.readHeader(in, receivedOpCode, null, operationContext.getChannelFactory(), ctx.channel().remoteAddress());
throw new IllegalStateException("Should be never reached");
}
// we can remove the operation at this point since we'll read no more in this state
loadCurrent(messageId);
if (log.isTraceEnabled()) {
log.tracef("Received response for request %d, %s", messageId, operation);
}
checkpoint(State.READ_HEADER);
// fall through
case READ_HEADER:
if (log.isTraceEnabled()) {
log.tracef("Decoding header for message %s", HotRodConstants.Names.of(receivedOpCode));
}
status = codec.readHeader(in, receivedOpCode, operation.header(), operationContext.getChannelFactory(), ctx.channel().remoteAddress());
checkpoint(State.READ_PAYLOAD);
// fall through
case READ_PAYLOAD:
if (log.isTraceEnabled()) {
log.tracef("Decoding payload for message %s", HotRodConstants.Names.of(receivedOpCode));
}
operation.acceptResponse(in, status, this);
checkpoint(State.READ_MESSAGE_ID);
break;
case READ_CACHE_EVENT:
if (log.isTraceEnabled()) {
log.tracef("Decoding cache event %s", HotRodConstants.Names.of(receivedOpCode));
}
AbstractClientEvent cacheEvent;
try {
cacheEvent = codec.readCacheEvent(in, operationContext.getListenerNotifier()::getCacheDataFormat,
receivedOpCode, operationContext.getConfiguration().getClassAllowList(), ctx.channel().remoteAddress());
} catch (Signal signal) {
throw signal;
} catch (Throwable t) {
log.unableToReadEventFromServer(t, ctx.channel().remoteAddress());
throw t;
}
if (operation != null) {
((AddClientListenerOperation) operation).postponeTimeout(ctx.channel());
}
invokeEvent(cacheEvent.getListenerId(), cacheEvent);
checkpoint(State.READ_MESSAGE_ID);
break;
case READ_COUNTER_EVENT:
if (log.isTraceEnabled()) {
log.tracef("Decoding counter event %s", HotRodConstants.Names.of(receivedOpCode));
}
HotRodCounterEvent counterEvent;
try {
counterEvent = codec.readCounterEvent(in);
} catch (Signal signal) {
throw signal;
} catch (Throwable t) {
HOTROD.unableToReadEventFromServer(t, ctx.channel().remoteAddress());
throw t;
}
invokeEvent(counterEvent.getListenerId(), counterEvent);
checkpoint(State.READ_MESSAGE_ID);
break;
}
} catch (Signal signal) {
throw signal;
} catch (Exception e) {
// If this is server error make sure to restart the state of decoder
checkpoint(State.READ_MESSAGE_ID);
throw e;
}
}
private void invokeEvent(byte[] listenerId, Object cacheEvent) {
try {
operationContext.getListenerNotifier().invokeEvent(listenerId, cacheEvent);
} catch (Exception e) {
HOTROD.unexpectedErrorConsumingEvent(cacheEvent, e);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
if (operation != null) {
operation.exceptionCaught(ctx.channel(), cause);
} else {
TransportException transportException = log.errorFromUnknownOperation(ctx.channel(), cause, ctx.channel().remoteAddress());
for (HotRodOperation<?> op : incomplete.values()) {
try {
op.exceptionCaught(ctx.channel(), transportException);
} catch (Throwable t) {
HOTROD.errorf(t, "Failed to complete %s", op);
}
}
if (log.isTraceEnabled()) {
log.tracef(cause, "Requesting %s close due to exception", ctx.channel());
}
ctx.close();
}
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
for (HotRodOperation<?> op : incomplete.values()) {
try {
op.channelInactive(ctx.channel());
} catch (Throwable t) {
HOTROD.errorf(t, "Failed to complete %s", op);
}
}
failoverClientListeners();
}
protected void resumeOperation(ByteBuf buf, long messageId, short opCode, short status) {
// At this stage, the headers was consumed in the buffer,
// but since it is delegating from elsewhere, the state is (likely) at READ_MESSAGE_ID.
try {
switch (state()) {
case READ_MESSAGE_ID:
receivedOpCode = opCode;
loadCurrent(messageId);
this.status = status;
checkpoint(State.READ_PAYLOAD);
//fallthrough;
case READ_PAYLOAD:
if (log.isTraceEnabled()) {
log.tracef("Decoding payload for message %s", HotRodConstants.Names.of(receivedOpCode));
}
operation.acceptResponse(buf, status, this);
checkpoint(State.READ_MESSAGE_ID);
break;
default:
throw new IllegalStateException("Delegate with state: " + state());
}
} catch (Exception e) {
state(State.READ_MESSAGE_ID);
throw e;
}
}
@Override
protected boolean isHandlingMessage() {
return state() != State.READ_MESSAGE_ID;
}
public void loadCurrent(long messageId) {
operation = getAndRemove(messageId);
if (operation == null) throw HOTROD.unknownMessageId(messageId);
}
private HotRodOperation<?> getAndRemove(long messageId) {
if (operation != null && operation.header().messageId() == messageId) return operation;
return incomplete.remove(messageId);
}
public void failoverClientListeners() {
for (byte[] listenerId : listeners) {
operationContext.getListenerNotifier().failoverClientListener(listenerId);
}
}
public CompletableFuture<Void> allCompleteFuture() {
return CompletableFuture.allOf(incomplete.values().toArray(new CompletableFuture[0]));
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof ChannelPoolCloseEvent) {
closing = true;
allCompleteFuture().whenComplete((nil, throwable) -> {
ctx.channel().close();
});
} else if (evt instanceof IdleStateEvent) {
// If we have incomplete operations this channel is not idle!
if (!incomplete.isEmpty()) {
return;
}
}
ctx.fireUserEventTriggered(evt);
}
/**
* {@inheritDoc}
*
* Checkpoint is exposed for implementations of {@link HotRodOperation}
*/
@Override
public void checkpoint() {
super.checkpoint();
}
public int registeredOperations() {
return incomplete.size();
}
public void addListener(byte[] listenerId) {
if (log.isTraceEnabled()) {
log.tracef("Decoder %08X adding listener %s", hashCode(), Util.printArray(listenerId));
}
listeners.add(listenerId);
}
// must be called from event loop thread!
public void removeListener(byte[] listenerId) {
boolean removed = listeners.removeIf(id -> Arrays.equals(id, listenerId));
if (log.isTraceEnabled()) {
log.tracef("Decoder %08X removed? %s listener %s", hashCode(), Boolean.toString(removed), Util.printArray(listenerId));
}
}
enum State {
READ_MESSAGE_ID,
READ_HEADER,
READ_PAYLOAD,
READ_CACHE_EVENT, READ_COUNTER_EVENT,
}
}
| 13,691
| 40.240964
| 150
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/InitialPingHandler.java
|
package org.infinispan.hotrod.impl.transport.netty;
import org.infinispan.hotrod.impl.operations.PingOperation;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
public class InitialPingHandler extends ActivationHandler {
private static final Log log = LogFactory.getLog(InitialPingHandler.class);
static final String NAME = "initial-ping-handler";
private final PingOperation ping;
public InitialPingHandler(PingOperation ping) {
this.ping = ping;
}
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
Channel channel = ctx.channel();
if (log.isTraceEnabled()) {
log.tracef("Activating channel %s", channel);
}
ChannelRecord channelRecord = ChannelRecord.of(channel);
ping.invoke(channel);
ping.whenComplete((result, throwable) -> {
if (log.isTraceEnabled()) {
log.tracef("Initial ping completed with result %s/%s", result, throwable);
}
if (throwable != null) {
channelRecord.completeExceptionally(throwable);
} else {
channelRecord.complete(channel);
}
});
ctx.pipeline().remove(this);
}
}
| 1,329
| 30.666667
| 86
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/EPollAvailable.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import io.netty.channel.epoll.Epoll;
// This is a separate class for better replacement within Quarkus as it doesn't support native EPoll
final class EPollAvailable {
private static final String USE_EPOLL_PROPERTY = "infinispan.server.channel.epoll";
private static final boolean IS_LINUX = System.getProperty("os.name").toLowerCase().startsWith("linux");
private static final boolean EPOLL_DISABLED = System.getProperty(USE_EPOLL_PROPERTY, "true").equalsIgnoreCase("false");
// Has to be after other static variables to ensure they are initialized
static final boolean USE_NATIVE_EPOLL = useNativeEpoll();
private static boolean useNativeEpoll() {
try {
Class.forName("io.netty.channel.epoll.Epoll", true, EPollAvailable.class.getClassLoader());
if (Epoll.isAvailable()) {
return !EPOLL_DISABLED && IS_LINUX;
} else {
if (IS_LINUX) {
HOTROD.epollNotAvailable(Epoll.unavailabilityCause().toString());
}
}
} catch (ClassNotFoundException e) {
if (IS_LINUX) {
HOTROD.epollNotAvailable(e.getMessage());
}
}
return false;
}
}
| 1,303
| 36.257143
| 122
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ClientBaseDecoder.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import static org.infinispan.hotrod.impl.transport.netty.HintedReplayingDecoder.REPLAY;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.hotrod.configuration.ClientIntelligence;
import org.infinispan.hotrod.impl.operations.HotRodOperation;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.handler.CacheRequestProcessor;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.DecoderException;
import io.netty.util.Signal;
abstract class ClientBaseDecoder extends ByteToMessageDecoder {
protected static final Log log = LogFactory.getLog(ClientBaseDecoder.class);
public static final Signal DELEGATE = Signal.valueOf(ClientBaseDecoder.class.getName() + ".DELEGATE");
protected final HeaderDecoder delegate;
protected final CacheRequestProcessor responseHandler;
ClientBaseDecoder(HeaderDecoder delegate, CacheRequestProcessor responseHandler) {
this.delegate = delegate;
this.responseHandler = responseHandler;
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
try {
delegate.exceptionCaught(ctx, cause);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@Override
protected void callDecode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
if (!delegate.isHandlingMessage()) {
try {
decode(ctx, in, out);
} catch (DecoderException de) {
throw de;
} catch (Exception e) {
throw new DecoderException(e);
}
}
if (delegate.isHandlingMessage())
callInverseDecode(ctx, in, out);
}
private void callInverseDecode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
if (delegate.isHandlingMessage())
delegate.callDecode(ctx, in, out);
if (!delegate.isHandlingMessage())
callDecode(ctx, in, out);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
delegate.channelInactive(ctx);
}
@Override
public boolean isSharable() {
return false;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
delegate.userEventTriggered(ctx, evt);
}
public void failoverClientListeners() {
delegate.failoverClientListeners();
}
protected void delegateParsing(ByteBuf buf, long messageId, short opCode, short status) {
try {
delegate.replayable.setCumulation(buf);
delegate.resumeOperation(delegate.replayable, messageId, opCode, status);
} catch (Signal replay) {
replay.expect(REPLAY);
delegate.checkAndAdvance(buf);
} finally {
delegate.replayable.setCumulation(null);
}
if (delegate.isHandlingMessage()) throw DELEGATE;
}
protected boolean operationResponseHasError(long messageId, short opCode) {
HotRodOperation<?> op = delegate.operation;
if (op.header().responseCode() != opCode) {
if (opCode == HotRodConstants.ERROR_RESPONSE) {
return true;
}
throw HOTROD.invalidResponse(new String(op.header().cacheName()), op.header().responseCode(), opCode);
}
return false;
}
public void removeListener(byte[] id) {
delegate.removeListener(id);
}
protected boolean isHashDistributionAware(long messageId) {
HotRodOperation<?> op = delegate.operation;
return op.header().clientIntelligence() == ClientIntelligence.HASH_DISTRIBUTION_AWARE.getValue();
}
public void registerOperation(Channel channel, HotRodOperation<?> op) {
delegate.registerOperation(channel, op);
}
public int registeredOperations() {
return delegate.registeredOperations();
}
public <T extends HotRodOperation<?>> T current() {
return (T) delegate.operation;
}
protected <K, V> Map<K, V> allocMap(int size) {
return size == 0 ? Collections.emptyMap() : new HashMap<>(size * 4/3, 0.75f);
}
protected <T> List<T> allocList(int size) {
return size == 0 ? Collections.emptyList() : new ArrayList<>(size);
}
protected <T> Set<T> allocSet(int size) {
return size == 0 ? Collections.emptySet() : new HashSet<>(size);
}
}
| 4,799
| 30.788079
| 111
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/IdleStateHandlerProvider.java
|
package org.infinispan.hotrod.impl.transport.netty;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleStateEvent;
@Sharable
public class IdleStateHandlerProvider extends ChannelInboundHandlerAdapter {
private final int minIdle;
private final ChannelPool channelPool;
private final static Log log = LogFactory.getLog(IdleStateHandlerProvider.class);
static final String NAME = "idle-state-handler-provider";
public IdleStateHandlerProvider(int minIdle, ChannelPool channelPool) {
this.minIdle = minIdle;
this.channelPool = channelPool;
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
if (evt instanceof IdleStateEvent) {
if (channelPool.getIdle() > minIdle && ChannelRecord.of(ctx.channel()).isIdle()) {
log.debugf("Closing idle channel %s", ctx.channel());
ctx.close();
}
} else {
ctx.fireUserEventTriggered(evt);
}
}
}
| 1,207
| 32.555556
| 91
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelPoolCloseEvent.java
|
package org.infinispan.hotrod.impl.transport.netty;
public class ChannelPoolCloseEvent {
public static final ChannelPoolCloseEvent INSTANCE = new ChannelPoolCloseEvent();
private ChannelPoolCloseEvent() {}
}
| 216
| 26.125
| 84
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelOutboundHandlerDefaults.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.net.SocketAddress;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandler;
import io.netty.channel.ChannelPromise;
/**
* This is effectively the same as {@link io.netty.channel.ChannelOutboundHandlerAdapter} but allows
* to be inherited in a class with another superclass.
*/
public interface ChannelOutboundHandlerDefaults extends ChannelOutboundHandler {
@Override
default void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) throws Exception {
ctx.bind(localAddress, promise);
}
@Override
default void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) throws Exception {
ctx.connect(remoteAddress, localAddress, promise);
}
@Override
default void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
ctx.disconnect(promise);
}
@Override
default void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
ctx.close(promise);
}
@Override
default void deregister(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
ctx.deregister(promise);
}
@Override
default void read(ChannelHandlerContext ctx) throws Exception {
ctx.read();
}
@Override
default void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
ctx.write(msg, promise);
}
@Override
default void flush(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
@Override
default void handlerAdded(ChannelHandlerContext ctx) throws Exception {
// noop
}
@Override
default void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
// noop
}
@Override
default void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
ctx.fireExceptionCaught(cause);
}
}
| 2,036
| 28.521739
| 150
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/HintedReplayingDecoder.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.util.Collections;
import java.util.List;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.DecoderException;
import io.netty.util.Signal;
/**
* Copy-paste of {@link io.netty.handler.codec.ReplayingDecoder} which is hinted to not attempt decoding unless enough
* bytes are read.
*
* The decoder does not expect pass different message up the pipeline, this is a terminal read operation.
*/
public abstract class HintedReplayingDecoder<S> extends ByteToMessageDecoder {
public static final Signal REPLAY = Signal.valueOf(HintedReplayingDecoder.class.getName() + ".REPLAY");
// We don't expect decode() to use the out param
private static final List<Object> NO_WRITE_LIST = Collections.emptyList();
final HintingByteBuf replayable = new HintingByteBuf(this);
private S state;
private int checkpoint = -1;
private int requiredReadableBytes = 0;
/**
* Creates a new instance with no initial state (i.e: {@code null}).
*/
protected HintedReplayingDecoder() {
this(null);
}
/**
* Creates a new instance with the specified initial state.
*/
protected HintedReplayingDecoder(S initialState) {
state = initialState;
}
/**
* Stores the internal cumulative buffer's reader position.
*/
protected void checkpoint() {
checkpoint = replayable.buffer != null
? replayable.buffer.readerIndex()
: internalBuffer().readerIndex();
}
/**
* Stores the internal cumulative buffer's reader position and updates
* the current decoder state.
*/
protected void checkpoint(S state) {
checkpoint();
state(state);
}
/**
* Returns the current state of this decoder.
* @return the current state of this decoder
*/
protected S state() {
return state;
}
protected abstract boolean isHandlingMessage();
/**
* Sets the current state of this decoder.
* @return the old state of this decoder
*/
protected S state(S newState) {
S oldState = state;
state = newState;
return oldState;
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
replayable.terminate();
super.channelInactive(ctx);
}
@Override
public void callDecode(ChannelHandlerContext ctx, ByteBuf in, List<Object> xxx) {
if (in.readableBytes() < requiredReadableBytes) {
// noop, wait for further reads
return;
}
replayable.setCumulation(in);
try {
while (isHandlingMessage() && in.isReadable() && !ctx.isRemoved() && ctx.channel().isActive()) {
checkpoint = in.readerIndex();
try {
decode(ctx, replayable, NO_WRITE_LIST);
requiredReadableBytes = 0;
// TODO: unset cumulation
} catch (Signal replay) {
replay.expect(REPLAY);
// Check if this handler was removed before continuing the loop.
// If it was removed, it is not safe to continue to operate on the buffer.
//
// See https://github.com/netty/netty/issues/1664
if (ctx.isRemoved()) {
break;
}
// Return to the checkpoint (or oldPosition) and retry.
int checkpoint = this.checkpoint;
if (checkpoint >= 0) {
in.readerIndex(checkpoint);
} else {
// Called by cleanup() - no need to maintain the readerIndex
// anymore because the buffer has been released already.
}
break;
} catch (Throwable t) {
requiredReadableBytes = 0;
throw t;
}
}
} catch (DecoderException e) {
throw e;
} catch (Throwable cause) {
throw new DecoderException(cause);
} finally {
replayable.setCumulation(null);
}
}
void checkAndAdvance(ByteBuf buf) {
if (this.checkpoint >= 0) {
buf.readerIndex(checkpoint);
}
}
void requireWriterIndex(int index) {
// TODO: setCumulator to composite if the number of bytes is too high
requiredReadableBytes = index - checkpoint;
}
}
| 4,450
| 29.486301
| 118
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/AuthHandler.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static io.netty.util.internal.EmptyArrays.EMPTY_BYTES;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Function;
import javax.security.auth.Subject;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import org.infinispan.hotrod.configuration.AuthenticationConfiguration;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.CacheOperationsFactory;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
class AuthHandler extends ActivationHandler {
private static final Log log = LogFactory.getLog(AuthHandler.class);
private static final String AUTH_INT = "auth-int";
private static final String AUTH_CONF = "auth-conf";
static final String NAME = "auth-handler";
private final AuthenticationConfiguration authentication;
private final SaslClient saslClient;
private final CacheOperationsFactory cacheOperationsFactory;
AuthHandler(AuthenticationConfiguration authentication, SaslClient saslClient,
CacheOperationsFactory cacheOperationsFactory) {
this.authentication = authentication;
this.saslClient = saslClient;
this.cacheOperationsFactory = cacheOperationsFactory;
}
@Override
public void channelActive(ChannelHandlerContext ctx) {
Channel channel = ctx.channel();
cacheOperationsFactory.newAuthMechListOperation(channel).execute().thenCompose(serverMechs -> {
if (!serverMechs.contains(authentication.saslMechanism())) {
throw HOTROD.unsupportedMech(authentication.saslMechanism(), serverMechs);
}
if (log.isTraceEnabled()) {
log.tracef("Authenticating using mech: %s", authentication.saslMechanism());
}
byte response[];
if (saslClient.hasInitialResponse()) {
try {
response = evaluateChallenge(saslClient, EMPTY_BYTES, authentication.clientSubject());
} catch (SaslException e) {
throw new CompletionException(e);
}
} else {
response = EMPTY_BYTES;
}
return cacheOperationsFactory.newAuthOperation(channel, authentication.saslMechanism(), response).execute();
}).thenCompose(new ChallengeEvaluator(channel, saslClient)).thenRun(() -> {
String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
if (qop != null && (qop.equalsIgnoreCase(AUTH_INT) || qop.equalsIgnoreCase(AUTH_CONF))) {
channel.pipeline().addFirst(
new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4),
new SaslDecoderEncoder(saslClient));
} else {
try {
saslClient.dispose();
} catch (SaslException e) {
channel.pipeline().fireExceptionCaught(e);
}
}
channel.pipeline().remove(this);
channel.pipeline().fireUserEventTriggered(ActivationHandler.ACTIVATION_EVENT);
}).exceptionally(throwable -> {
while (throwable instanceof CompletionException && throwable.getCause() != null) {
throwable = throwable.getCause();
}
channel.pipeline().fireExceptionCaught(throwable);
return null;
});
}
private byte[] evaluateChallenge(final SaslClient saslClient, final byte[] challenge, Subject clientSubject) throws SaslException {
if(clientSubject != null) {
try {
return Subject.doAs(clientSubject,
(PrivilegedExceptionAction<byte[]>) () -> saslClient.evaluateChallenge(challenge));
} catch (PrivilegedActionException e) {
Throwable cause = e.getCause();
if (cause instanceof SaslException) {
throw (SaslException)cause;
} else {
throw new RuntimeException(cause);
}
}
} else {
return saslClient.evaluateChallenge(challenge);
}
}
private class ChallengeEvaluator implements Function<byte[], CompletableFuture<byte[]>> {
private final Channel channel;
private final SaslClient saslClient;
private ChallengeEvaluator(Channel channel, SaslClient saslClient) {
this.channel = channel;
this.saslClient = saslClient;
}
@Override
public CompletableFuture<byte[]> apply(byte[] challenge) {
if (!saslClient.isComplete() && challenge != null) {
byte[] response;
try {
response = evaluateChallenge(saslClient, challenge, authentication.clientSubject());
} catch (SaslException e) {
throw new CompletionException(e);
}
if (response != null) {
return cacheOperationsFactory.newAuthOperation(channel, authentication.saslMechanism(), response)
.execute().thenCompose(this);
}
}
return CompletableFuture.completedFuture(null);
}
}
}
| 5,448
| 39.066176
| 134
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/SslHandshakeExceptionHandler.java
|
package org.infinispan.hotrod.impl.transport.netty;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.ssl.SslHandshakeCompletionEvent;
@Sharable
public class SslHandshakeExceptionHandler extends ChannelInboundHandlerAdapter {
public static final SslHandshakeExceptionHandler INSTANCE = new SslHandshakeExceptionHandler();
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt instanceof SslHandshakeCompletionEvent) {
if (evt != SslHandshakeCompletionEvent.SUCCESS) {
SslHandshakeCompletionEvent sslEvent = (SslHandshakeCompletionEvent) evt;
ctx.fireExceptionCaught(sslEvent.cause());
}
ctx.pipeline().remove(this);
} else {
ctx.fireUserEventTriggered(evt);
}
}
}
| 934
| 36.4
| 98
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/HintingByteBuf.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.transport.netty.HintedReplayingDecoder.REPLAY;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.nio.channels.GatheringByteChannel;
import java.nio.channels.ScatteringByteChannel;
import java.nio.charset.Charset;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.SwappedByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.ByteProcessor;
import io.netty.util.internal.StringUtil;
/**
* Copy-paste of {@link io.netty.handler.codec.ReplayingDecoderByteBuf} which hints {@link HintedReplayingDecoder}
* to not try decoding until requested bytes are received.
*/
public class HintingByteBuf extends ByteBuf {
private final HintedReplayingDecoder<?> decoder;
ByteBuf buffer;
private boolean terminated;
private SwappedByteBuf swapped;
HintingByteBuf(HintedReplayingDecoder<?> decoder) {
this.decoder = decoder;
}
void setCumulation(ByteBuf buffer) {
this.buffer = buffer;
}
void terminate() {
terminated = true;
}
@Override
public int capacity() {
if (terminated) {
return buffer.capacity();
} else {
return Integer.MAX_VALUE;
}
}
@Override
public ByteBuf capacity(int newCapacity) {
throw reject();
}
@Override
public int maxCapacity() {
return capacity();
}
@Override
public ByteBufAllocator alloc() {
return buffer.alloc();
}
@Override
public boolean isReadOnly() {
return false;
}
@SuppressWarnings("deprecation")
@Override
public ByteBuf asReadOnly() {
return Unpooled.unmodifiableBuffer(this);
}
@Override
public boolean isDirect() {
return buffer.isDirect();
}
@Override
public boolean hasArray() {
return false;
}
@Override
public byte[] array() {
throw new UnsupportedOperationException();
}
@Override
public int arrayOffset() {
throw new UnsupportedOperationException();
}
@Override
public boolean hasMemoryAddress() {
return false;
}
@Override
public long memoryAddress() {
throw new UnsupportedOperationException();
}
@Override
public ByteBuf clear() {
throw reject();
}
@Override
public boolean equals(Object obj) {
return this == obj;
}
@Override
public int compareTo(ByteBuf buffer) {
throw reject();
}
@Override
public ByteBuf copy() {
throw reject();
}
@Override
public ByteBuf copy(int index, int length) {
checkIndex(index, length);
return buffer.copy(index, length);
}
@Override
public ByteBuf discardReadBytes() {
throw reject();
}
@Override
public ByteBuf ensureWritable(int writableBytes) {
throw reject();
}
@Override
public int ensureWritable(int minWritableBytes, boolean force) {
throw reject();
}
@Override
public ByteBuf duplicate() {
throw reject();
}
@Override
public ByteBuf retainedDuplicate() {
throw reject();
}
@Override
public boolean getBoolean(int index) {
checkIndex(index, 1);
return buffer.getBoolean(index);
}
@Override
public byte getByte(int index) {
checkIndex(index, 1);
return buffer.getByte(index);
}
@Override
public short getUnsignedByte(int index) {
checkIndex(index, 1);
return buffer.getUnsignedByte(index);
}
@Override
public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
checkIndex(index, length);
buffer.getBytes(index, dst, dstIndex, length);
return this;
}
@Override
public ByteBuf getBytes(int index, byte[] dst) {
checkIndex(index, dst.length);
buffer.getBytes(index, dst);
return this;
}
@Override
public ByteBuf getBytes(int index, ByteBuffer dst) {
throw reject();
}
@Override
public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
checkIndex(index, length);
buffer.getBytes(index, dst, dstIndex, length);
return this;
}
@Override
public ByteBuf getBytes(int index, ByteBuf dst, int length) {
throw reject();
}
@Override
public ByteBuf getBytes(int index, ByteBuf dst) {
throw reject();
}
@Override
public int getBytes(int index, GatheringByteChannel out, int length) {
throw reject();
}
@Override
public int getBytes(int index, FileChannel out, long position, int length) {
throw reject();
}
@Override
public ByteBuf getBytes(int index, OutputStream out, int length) {
throw reject();
}
@Override
public int getInt(int index) {
checkIndex(index, 4);
return buffer.getInt(index);
}
@Override
public int getIntLE(int index) {
checkIndex(index, 4);
return buffer.getIntLE(index);
}
@Override
public long getUnsignedInt(int index) {
checkIndex(index, 4);
return buffer.getUnsignedInt(index);
}
@Override
public long getUnsignedIntLE(int index) {
checkIndex(index, 4);
return buffer.getUnsignedIntLE(index);
}
@Override
public long getLong(int index) {
checkIndex(index, 8);
return buffer.getLong(index);
}
@Override
public long getLongLE(int index) {
checkIndex(index, 8);
return buffer.getLongLE(index);
}
@Override
public int getMedium(int index) {
checkIndex(index, 3);
return buffer.getMedium(index);
}
@Override
public int getMediumLE(int index) {
checkIndex(index, 3);
return buffer.getMediumLE(index);
}
@Override
public int getUnsignedMedium(int index) {
checkIndex(index, 3);
return buffer.getUnsignedMedium(index);
}
@Override
public int getUnsignedMediumLE(int index) {
checkIndex(index, 3);
return buffer.getUnsignedMediumLE(index);
}
@Override
public short getShort(int index) {
checkIndex(index, 2);
return buffer.getShort(index);
}
@Override
public short getShortLE(int index) {
checkIndex(index, 2);
return buffer.getShortLE(index);
}
@Override
public int getUnsignedShort(int index) {
checkIndex(index, 2);
return buffer.getUnsignedShort(index);
}
@Override
public int getUnsignedShortLE(int index) {
checkIndex(index, 2);
return buffer.getUnsignedShortLE(index);
}
@Override
public char getChar(int index) {
checkIndex(index, 2);
return buffer.getChar(index);
}
@Override
public float getFloat(int index) {
checkIndex(index, 4);
return buffer.getFloat(index);
}
@Override
public double getDouble(int index) {
checkIndex(index, 8);
return buffer.getDouble(index);
}
@Override
public CharSequence getCharSequence(int index, int length, Charset charset) {
checkIndex(index, length);
return buffer.getCharSequence(index, length, charset);
}
@Override
public int hashCode() {
throw reject();
}
@Override
public int indexOf(int fromIndex, int toIndex, byte value) {
if (fromIndex == toIndex) {
return -1;
}
if (Math.max(fromIndex, toIndex) > buffer.writerIndex()) {
decoder.requireWriterIndex(Math.max(fromIndex, toIndex));
throw REPLAY;
}
return buffer.indexOf(fromIndex, toIndex, value);
}
@Override
public int bytesBefore(byte value) {
int bytes = buffer.bytesBefore(value);
if (bytes < 0) {
throw REPLAY;
}
return bytes;
}
@Override
public int bytesBefore(int length, byte value) {
return bytesBefore(buffer.readerIndex(), length, value);
}
@Override
public int bytesBefore(int index, int length, byte value) {
final int writerIndex = buffer.writerIndex();
if (index >= writerIndex) {
throw REPLAY;
}
if (index <= writerIndex - length) {
return buffer.bytesBefore(index, length, value);
}
int res = buffer.bytesBefore(index, writerIndex - index, value);
if (res < 0) {
throw REPLAY;
} else {
return res;
}
}
@Override
public int forEachByte(ByteProcessor processor) {
int ret = buffer.forEachByte(processor);
if (ret < 0) {
throw REPLAY;
} else {
return ret;
}
}
@Override
public int forEachByte(int index, int length, ByteProcessor processor) {
final int writerIndex = buffer.writerIndex();
if (index >= writerIndex) {
decoder.requireWriterIndex(index);
throw REPLAY;
}
if (index <= writerIndex - length) {
return buffer.forEachByte(index, length, processor);
}
int ret = buffer.forEachByte(index, writerIndex - index, processor);
if (ret < 0) {
throw REPLAY;
} else {
return ret;
}
}
@Override
public int forEachByteDesc(ByteProcessor processor) {
if (terminated) {
return buffer.forEachByteDesc(processor);
} else {
throw reject();
}
}
@Override
public int forEachByteDesc(int index, int length, ByteProcessor processor) {
if (index + length > buffer.writerIndex()) {
decoder.requireWriterIndex(index + length);
throw REPLAY;
}
return buffer.forEachByteDesc(index, length, processor);
}
@Override
public ByteBuf markReaderIndex() {
buffer.markReaderIndex();
return this;
}
@Override
public ByteBuf markWriterIndex() {
throw reject();
}
@Override
public ByteOrder order() {
return buffer.order();
}
@Override
public ByteBuf order(ByteOrder endianness) {
if (endianness == null) {
throw new NullPointerException("endianness");
}
if (endianness == order()) {
return this;
}
SwappedByteBuf swapped = this.swapped;
if (swapped == null) {
this.swapped = swapped = new SwappedByteBuf(this);
}
return swapped;
}
@Override
public boolean isReadable() {
return buffer.isReadable(); // see readableBytes();
}
@Override
public boolean isReadable(int size) {
return buffer.isReadable(size); // see readableBytes();
}
@Override
public int readableBytes() {
// Contrary to ReplayingDecoderByteBuf we will provide the correct number.
// If someone reads past this number we'll throw the Signal as usual, but we're not fooling anyone.
// This is useful when injecting another handler below the decoder (as GetStreamOperation does),
// as we can hand over the buffered bytes to be consumed immediately.
return buffer.readableBytes();
}
@Override
public boolean readBoolean() {
checkReadableBytes(1);
return buffer.readBoolean();
}
@Override
public byte readByte() {
checkReadableBytes(1);
return buffer.readByte();
}
@Override
public short readUnsignedByte() {
checkReadableBytes(1);
return buffer.readUnsignedByte();
}
@Override
public ByteBuf readBytes(byte[] dst, int dstIndex, int length) {
checkReadableBytes(length);
buffer.readBytes(dst, dstIndex, length);
return this;
}
@Override
public ByteBuf readBytes(byte[] dst) {
checkReadableBytes(dst.length);
buffer.readBytes(dst);
return this;
}
@Override
public ByteBuf readBytes(ByteBuffer dst) {
throw reject();
}
@Override
public ByteBuf readBytes(ByteBuf dst, int dstIndex, int length) {
checkReadableBytes(length);
buffer.readBytes(dst, dstIndex, length);
return this;
}
@Override
public ByteBuf readBytes(ByteBuf dst, int length) {
throw reject();
}
@Override
public ByteBuf readBytes(ByteBuf dst) {
checkReadableBytes(dst.writableBytes());
buffer.readBytes(dst);
return this;
}
@Override
public int readBytes(GatheringByteChannel out, int length) {
throw reject();
}
@Override
public int readBytes(FileChannel out, long position, int length) {
throw reject();
}
@Override
public ByteBuf readBytes(int length) {
checkReadableBytes(length);
return buffer.readBytes(length);
}
@Override
public ByteBuf readSlice(int length) {
checkReadableBytes(length);
return buffer.readSlice(length);
}
@Override
public ByteBuf readRetainedSlice(int length) {
checkReadableBytes(length);
return buffer.readRetainedSlice(length);
}
@Override
public ByteBuf readBytes(OutputStream out, int length) {
throw reject();
}
@Override
public int readerIndex() {
return buffer.readerIndex();
}
@Override
public ByteBuf readerIndex(int readerIndex) {
buffer.readerIndex(readerIndex);
return this;
}
@Override
public int readInt() {
checkReadableBytes(4);
return buffer.readInt();
}
@Override
public int readIntLE() {
checkReadableBytes(4);
return buffer.readIntLE();
}
@Override
public long readUnsignedInt() {
checkReadableBytes(4);
return buffer.readUnsignedInt();
}
@Override
public long readUnsignedIntLE() {
checkReadableBytes(4);
return buffer.readUnsignedIntLE();
}
@Override
public long readLong() {
checkReadableBytes(8);
return buffer.readLong();
}
@Override
public long readLongLE() {
checkReadableBytes(8);
return buffer.readLongLE();
}
@Override
public int readMedium() {
checkReadableBytes(3);
return buffer.readMedium();
}
@Override
public int readMediumLE() {
checkReadableBytes(3);
return buffer.readMediumLE();
}
@Override
public int readUnsignedMedium() {
checkReadableBytes(3);
return buffer.readUnsignedMedium();
}
@Override
public int readUnsignedMediumLE() {
checkReadableBytes(3);
return buffer.readUnsignedMediumLE();
}
@Override
public short readShort() {
checkReadableBytes(2);
return buffer.readShort();
}
@Override
public short readShortLE() {
checkReadableBytes(2);
return buffer.readShortLE();
}
@Override
public int readUnsignedShort() {
checkReadableBytes(2);
return buffer.readUnsignedShort();
}
@Override
public int readUnsignedShortLE() {
checkReadableBytes(2);
return buffer.readUnsignedShortLE();
}
@Override
public char readChar() {
checkReadableBytes(2);
return buffer.readChar();
}
@Override
public float readFloat() {
checkReadableBytes(4);
return buffer.readFloat();
}
@Override
public double readDouble() {
checkReadableBytes(8);
return buffer.readDouble();
}
@Override
public CharSequence readCharSequence(int length, Charset charset) {
checkReadableBytes(length);
return buffer.readCharSequence(length, charset);
}
@Override
public ByteBuf resetReaderIndex() {
buffer.resetReaderIndex();
return this;
}
@Override
public ByteBuf resetWriterIndex() {
throw reject();
}
@Override
public ByteBuf setBoolean(int index, boolean value) {
throw reject();
}
@Override
public ByteBuf setByte(int index, int value) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, byte[] src) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, ByteBuffer src) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, ByteBuf src, int length) {
throw reject();
}
@Override
public ByteBuf setBytes(int index, ByteBuf src) {
throw reject();
}
@Override
public int setBytes(int index, InputStream in, int length) {
throw reject();
}
@Override
public ByteBuf setZero(int index, int length) {
throw reject();
}
@Override
public int setBytes(int index, ScatteringByteChannel in, int length) {
throw reject();
}
@Override
public int setBytes(int index, FileChannel in, long position, int length) {
throw reject();
}
@Override
public ByteBuf setIndex(int readerIndex, int writerIndex) {
throw reject();
}
@Override
public ByteBuf setInt(int index, int value) {
throw reject();
}
@Override
public ByteBuf setIntLE(int index, int value) {
throw reject();
}
@Override
public ByteBuf setLong(int index, long value) {
throw reject();
}
@Override
public ByteBuf setLongLE(int index, long value) {
throw reject();
}
@Override
public ByteBuf setMedium(int index, int value) {
throw reject();
}
@Override
public ByteBuf setMediumLE(int index, int value) {
throw reject();
}
@Override
public ByteBuf setShort(int index, int value) {
throw reject();
}
@Override
public ByteBuf setShortLE(int index, int value) {
throw reject();
}
@Override
public ByteBuf setChar(int index, int value) {
throw reject();
}
@Override
public ByteBuf setFloat(int index, float value) {
throw reject();
}
@Override
public ByteBuf setDouble(int index, double value) {
throw reject();
}
@Override
public ByteBuf skipBytes(int length) {
checkReadableBytes(length);
buffer.skipBytes(length);
return this;
}
@Override
public ByteBuf slice() {
throw reject();
}
@Override
public ByteBuf retainedSlice() {
throw reject();
}
@Override
public ByteBuf slice(int index, int length) {
checkIndex(index, length);
return buffer.slice(index, length);
}
@Override
public ByteBuf retainedSlice(int index, int length) {
checkIndex(index, length);
return buffer.slice(index, length);
}
@Override
public int nioBufferCount() {
return buffer.nioBufferCount();
}
@Override
public ByteBuffer nioBuffer() {
throw reject();
}
@Override
public ByteBuffer nioBuffer(int index, int length) {
checkIndex(index, length);
return buffer.nioBuffer(index, length);
}
@Override
public ByteBuffer[] nioBuffers() {
throw reject();
}
@Override
public ByteBuffer[] nioBuffers(int index, int length) {
checkIndex(index, length);
return buffer.nioBuffers(index, length);
}
@Override
public ByteBuffer internalNioBuffer(int index, int length) {
checkIndex(index, length);
return buffer.internalNioBuffer(index, length);
}
@Override
public String toString(int index, int length, Charset charset) {
checkIndex(index, length);
return buffer.toString(index, length, charset);
}
@Override
public String toString(Charset charsetName) {
throw reject();
}
@Override
public String toString() {
return StringUtil.simpleClassName(this) + '(' +
"ridx=" +
readerIndex() +
", " +
"widx=" +
writerIndex() +
')';
}
@Override
public boolean isWritable() {
return false;
}
@Override
public boolean isWritable(int size) {
return false;
}
@Override
public int writableBytes() {
return 0;
}
@Override
public int maxWritableBytes() {
return 0;
}
@Override
public ByteBuf writeBoolean(boolean value) {
throw reject();
}
@Override
public ByteBuf writeByte(int value) {
throw reject();
}
@Override
public ByteBuf writeBytes(byte[] src, int srcIndex, int length) {
throw reject();
}
@Override
public ByteBuf writeBytes(byte[] src) {
throw reject();
}
@Override
public ByteBuf writeBytes(ByteBuffer src) {
throw reject();
}
@Override
public ByteBuf writeBytes(ByteBuf src, int srcIndex, int length) {
throw reject();
}
@Override
public ByteBuf writeBytes(ByteBuf src, int length) {
throw reject();
}
@Override
public ByteBuf writeBytes(ByteBuf src) {
throw reject();
}
@Override
public int writeBytes(InputStream in, int length) {
throw reject();
}
@Override
public int writeBytes(ScatteringByteChannel in, int length) {
throw reject();
}
@Override
public int writeBytes(FileChannel in, long position, int length) {
throw reject();
}
@Override
public ByteBuf writeInt(int value) {
throw reject();
}
@Override
public ByteBuf writeIntLE(int value) {
throw reject();
}
@Override
public ByteBuf writeLong(long value) {
throw reject();
}
@Override
public ByteBuf writeLongLE(long value) {
throw reject();
}
@Override
public ByteBuf writeMedium(int value) {
throw reject();
}
@Override
public ByteBuf writeMediumLE(int value) {
throw reject();
}
@Override
public ByteBuf writeZero(int length) {
throw reject();
}
@Override
public int writerIndex() {
return buffer.writerIndex();
}
@Override
public ByteBuf writerIndex(int writerIndex) {
throw reject();
}
@Override
public ByteBuf writeShort(int value) {
throw reject();
}
@Override
public ByteBuf writeShortLE(int value) {
throw reject();
}
@Override
public ByteBuf writeChar(int value) {
throw reject();
}
@Override
public ByteBuf writeFloat(float value) {
throw reject();
}
@Override
public ByteBuf writeDouble(double value) {
throw reject();
}
@Override
public int setCharSequence(int index, CharSequence sequence, Charset charset) {
throw reject();
}
@Override
public int writeCharSequence(CharSequence sequence, Charset charset) {
throw reject();
}
private void checkIndex(int index, int length) {
if (index + length > buffer.writerIndex()) {
decoder.requireWriterIndex(index + length);
throw REPLAY;
}
}
private void checkReadableBytes(int readableBytes) {
if (buffer.readableBytes() < readableBytes) {
decoder.requireWriterIndex(buffer.readerIndex() + readableBytes);
throw REPLAY;
}
}
@Override
public ByteBuf discardSomeReadBytes() {
throw reject();
}
@Override
public int refCnt() {
return buffer.refCnt();
}
@Override
public ByteBuf retain() {
throw reject();
}
@Override
public ByteBuf retain(int increment) {
throw reject();
}
@Override
public ByteBuf touch() {
buffer.touch();
return this;
}
@Override
public ByteBuf touch(Object hint) {
buffer.touch(hint);
return this;
}
@Override
public boolean release() {
throw reject();
}
@Override
public boolean release(int decrement) {
throw reject();
}
@Override
public ByteBuf unwrap() {
throw reject();
}
private static UnsupportedOperationException reject() {
return new UnsupportedOperationException("not a replayable operation");
}
}
| 23,753
| 20.002653
| 114
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/DefaultTransportFactory.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.util.concurrent.ExecutorService;
import org.infinispan.hotrod.configuration.TransportFactory;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.SocketChannel;
/**
* Default implementation of the {@link TransportFactory} interface which uses epoll if available and nio otherwise.
*/
public class DefaultTransportFactory implements TransportFactory {
public Class<? extends SocketChannel> socketChannelClass() {
return NativeTransport.socketChannelClass();
}
public EventLoopGroup createEventLoopGroup(int maxExecutors, ExecutorService executorService) {
return NativeTransport.createEventLoopGroup(maxExecutors, executorService);
}
@Override
public Class<? extends DatagramChannel> datagramChannelClass() {
return NativeTransport.datagramChannelClass();
}
}
| 937
| 32.5
| 116
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelInitializer.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.io.File;
import java.net.SocketAddress;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.security.Provider;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import javax.net.ssl.SNIHostName;
import javax.net.ssl.SSLParameters;
import javax.security.auth.Subject;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslClientFactory;
import javax.security.sasl.SaslException;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.util.SaslUtils;
import org.infinispan.commons.util.SslContextFactory;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.HotRod;
import org.infinispan.hotrod.configuration.AuthenticationConfiguration;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.configuration.SslConfiguration;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.CacheOperationsFactory;
import org.infinispan.hotrod.impl.transport.handler.CacheRequestProcessor;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.handler.ssl.ClientAuth;
import io.netty.handler.ssl.JdkSslContext;
import io.netty.handler.ssl.SslContext;
import io.netty.handler.ssl.SslContextBuilder;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.timeout.IdleStateHandler;
class ChannelInitializer extends io.netty.channel.ChannelInitializer<Channel> {
private static final Log log = LogFactory.getLog(ChannelInitializer.class);
private final Bootstrap bootstrap;
private final SocketAddress unresolvedAddress;
private final CacheOperationsFactory cacheOperationsFactory;
private final HotRodConfiguration configuration;
private final ChannelFactory channelFactory;
private ChannelPool channelPool;
private volatile boolean isFirstPing = true;
private static final Provider[] SECURITY_PROVIDERS;
static {
// Register only the providers that matter to us
List<Provider> providers = new ArrayList<>();
for (String name : Arrays.asList(
"org.wildfly.security.sasl.plain.WildFlyElytronSaslPlainProvider",
"org.wildfly.security.sasl.digest.WildFlyElytronSaslDigestProvider",
"org.wildfly.security.sasl.external.WildFlyElytronSaslExternalProvider",
"org.wildfly.security.sasl.oauth2.WildFlyElytronSaslOAuth2Provider",
"org.wildfly.security.sasl.scram.WildFlyElytronSaslScramProvider",
"org.wildfly.security.sasl.gssapi.WildFlyElytronSaslGssapiProvider",
"org.wildfly.security.sasl.gs2.WildFlyElytronSaslGs2Provider"
)) {
Provider provider = Util.getInstance(name, HotRod.class.getClassLoader());
providers.add(provider);
}
SECURITY_PROVIDERS = providers.toArray(new Provider[0]);
}
ChannelInitializer(Bootstrap bootstrap, SocketAddress unresolvedAddress, CacheOperationsFactory cacheOperationsFactory, HotRodConfiguration configuration, ChannelFactory channelFactory) {
this.bootstrap = bootstrap;
this.unresolvedAddress = unresolvedAddress;
this.cacheOperationsFactory = cacheOperationsFactory;
this.configuration = configuration;
this.channelFactory = channelFactory;
}
CompletableFuture<Channel> createChannel() {
ChannelFuture connect = bootstrap.clone().connect();
ActivationFuture activationFuture = new ActivationFuture();
connect.addListener(activationFuture);
return activationFuture;
}
@Override
protected void initChannel(Channel channel) throws Exception {
if (log.isTraceEnabled()) {
log.tracef("Created channel %s", channel);
}
if (configuration.security().ssl().enabled()) {
initSsl(channel);
}
AuthenticationConfiguration authentication = configuration.security().authentication();
if (authentication.enabled()) {
initAuthentication(channel, authentication);
}
if (configuration.connectionPool().minEvictableIdleTime() > 0) {
channel.pipeline().addLast("idle-state-handler",
new IdleStateHandler(0, 0, configuration.connectionPool().minEvictableIdleTime(), TimeUnit.MILLISECONDS));
}
ChannelRecord channelRecord = new ChannelRecord(unresolvedAddress, channelPool);
channel.attr(ChannelRecord.KEY).set(channelRecord);
if (isFirstPing) {
isFirstPing = false;
channel.pipeline().addLast(InitialPingHandler.NAME, new InitialPingHandler(cacheOperationsFactory.newPingOperation(false)));
} else {
channel.pipeline().addLast(ActivationHandler.NAME, ActivationHandler.INSTANCE);
}
HeaderDecoder delegate = new HeaderDecoder(cacheOperationsFactory.getDefaultContext());
channel.pipeline().addLast(HeaderDecoder.NAME, new HotRodClientDecoder(delegate, new CacheRequestProcessor(channelFactory, configuration)));
if (configuration.connectionPool().minEvictableIdleTime() > 0) {
// This handler needs to be the last so that HeaderDecoder has the chance to cancel the idle event
channel.pipeline().addLast(IdleStateHandlerProvider.NAME,
new IdleStateHandlerProvider(configuration.connectionPool().minIdle(), channelPool));
}
}
private void initSsl(Channel channel) {
SslConfiguration ssl = configuration.security().ssl();
SslContext sslContext;
if (ssl.sslContext() == null) {
SslContextBuilder builder = SslContextBuilder.forClient();
try {
if (ssl.keyStoreFileName() != null) {
builder.keyManager(new SslContextFactory()
.keyStoreFileName(ssl.keyStoreFileName())
.keyStoreType(ssl.keyStoreType())
.keyStorePassword(ssl.keyStorePassword())
.keyAlias(ssl.keyAlias())
.classLoader(HotRod.class.getClassLoader())
.provider(ssl.provider())
.getKeyManagerFactory());
}
if (ssl.trustStoreFileName() != null) {
if ("pem".equalsIgnoreCase(ssl.trustStoreType())) {
builder.trustManager(new File(ssl.trustStoreFileName()));
} else {
builder.trustManager(new SslContextFactory()
.trustStoreFileName(ssl.trustStoreFileName())
.trustStoreType(ssl.trustStoreType())
.trustStorePassword(ssl.trustStorePassword())
.provider(ssl.provider())
.classLoader(HotRod.class.getClassLoader())
.getTrustManagerFactory());
}
}
if (ssl.protocol() != null) {
builder.protocols(ssl.protocol());
}
if (ssl.ciphers() != null) {
builder.ciphers(Arrays.asList(ssl.ciphers()));
}
if (ssl.provider() != null) {
Provider provider = SslContextFactory.findProvider(ssl.provider(), SslContext.class.getSimpleName(), "TLS");
builder.sslContextProvider(provider);
}
sslContext = builder.build();
} catch (Exception e) {
throw new CacheConfigurationException(e);
}
} else {
sslContext = new JdkSslContext(ssl.sslContext(), true, ClientAuth.NONE);
}
SslHandler sslHandler = sslContext.newHandler(channel.alloc(), ssl.sniHostName(), -1);
if (ssl.sniHostName() != null) {
SSLParameters sslParameters = sslHandler.engine().getSSLParameters();
sslParameters.setServerNames(Collections.singletonList(new SNIHostName(ssl.sniHostName())));
sslHandler.engine().setSSLParameters(sslParameters);
}
channel.pipeline().addFirst(sslHandler,
SslHandshakeExceptionHandler.INSTANCE);
}
private void initAuthentication(Channel channel, AuthenticationConfiguration authentication) throws PrivilegedActionException, SaslException {
SaslClient saslClient;
SaslClientFactory scf = getSaslClientFactory(authentication);
SslHandler sslHandler = channel.pipeline().get(SslHandler.class);
Principal principal = sslHandler != null ? sslHandler.engine().getSession().getLocalPrincipal() : null;
String authorizationId = principal != null ? principal.getName() : null;
if (authentication.clientSubject() != null) {
// We must use Subject.doAs() instead of Security.doAs()
saslClient = Subject.doAs(authentication.clientSubject(), (PrivilegedExceptionAction<SaslClient>) () ->
scf.createSaslClient(new String[]{authentication.saslMechanism()}, authorizationId, "hotrod",
authentication.serverName(), authentication.saslProperties(), authentication.callbackHandler())
);
} else {
saslClient = scf.createSaslClient(new String[]{authentication.saslMechanism()}, authorizationId, "hotrod",
authentication.serverName(), authentication.saslProperties(), authentication.callbackHandler());
}
channel.pipeline().addLast(AuthHandler.NAME, new AuthHandler(authentication, saslClient, cacheOperationsFactory));
}
private SaslClientFactory getSaslClientFactory(AuthenticationConfiguration configuration) {
if (log.isTraceEnabled()) {
log.tracef("Attempting to load SaslClientFactory implementation with mech=%s, props=%s",
configuration.saslMechanism(), configuration.saslProperties());
}
Collection<SaslClientFactory> clientFactories = SaslUtils.getSaslClientFactories(this.getClass().getClassLoader(), SECURITY_PROVIDERS, true);
for (SaslClientFactory saslFactory : clientFactories) {
try {
String[] saslFactoryMechs = saslFactory.getMechanismNames(configuration.saslProperties());
for (String supportedMech : saslFactoryMechs) {
if (supportedMech.equals(configuration.saslMechanism())) {
if (log.isTraceEnabled()) {
log.tracef("Loaded SaslClientFactory: %s", saslFactory.getClass().getName());
}
return saslFactory;
}
}
} catch (Throwable t) {
// Catch any errors that can happen when calling to a Sasl mech
log.tracef("Error while trying to obtain mechanism names supported by SaslClientFactory: %s", saslFactory.getClass().getName());
}
}
throw new IllegalStateException("SaslClientFactory implementation not found");
}
void setChannelPool(ChannelPool channelPool) {
this.channelPool = channelPool;
}
private static class ActivationFuture extends CompletableFuture<Channel> implements ChannelFutureListener, BiConsumer<Channel, Throwable> {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) {
Channel channel = future.channel();
ChannelRecord.of(channel).whenComplete(this);
} else {
completeExceptionally(future.cause());
}
}
@Override
public void accept(Channel channel, Throwable throwable) {
if (throwable != null) {
completeExceptionally(throwable);
} else {
complete(channel);
}
}
}
}
| 11,934
| 45.439689
| 190
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelFactory.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.Util.await;
import static org.infinispan.hotrod.impl.Util.wrapBytes;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.WrappedByteArray;
import org.infinispan.commons.marshall.WrappedBytes;
import org.infinispan.commons.util.Immutables;
import org.infinispan.commons.util.ProcessorInfo;
import org.infinispan.hotrod.configuration.ClientIntelligence;
import org.infinispan.hotrod.configuration.ClusterConfiguration;
import org.infinispan.hotrod.configuration.FailoverRequestBalancingStrategy;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.configuration.ServerConfiguration;
import org.infinispan.hotrod.event.impl.ClientListenerNotifier;
import org.infinispan.hotrod.impl.ClientTopology;
import org.infinispan.hotrod.impl.ConfigurationProperties;
import org.infinispan.hotrod.impl.MarshallerRegistry;
import org.infinispan.hotrod.impl.cache.CacheTopologyInfo;
import org.infinispan.hotrod.impl.cache.TopologyInfo;
import org.infinispan.hotrod.impl.consistenthash.ConsistentHash;
import org.infinispan.hotrod.impl.consistenthash.ConsistentHashFactory;
import org.infinispan.hotrod.impl.consistenthash.SegmentConsistentHash;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.CacheOperationsFactory;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.topology.CacheInfo;
import org.infinispan.hotrod.impl.topology.ClusterInfo;
import org.infinispan.hotrod.impl.transport.netty.ChannelPool.ChannelEventType;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.resolver.AddressResolverGroup;
import io.netty.resolver.dns.DnsNameResolverBuilder;
import io.netty.resolver.dns.RoundRobinDnsAddressResolverGroup;
import net.jcip.annotations.GuardedBy;
import net.jcip.annotations.ThreadSafe;
/**
* Central component providing connections to remote server. Most of the code originates in TcpTransportFactory.
*
* @since 14.0
*/
@ThreadSafe
public class ChannelFactory {
public static final String DEFAULT_CLUSTER_NAME = "___DEFAULT-CLUSTER___";
private static final Log log = LogFactory.getLog(ChannelFactory.class, Log.class);
private final ReadWriteLock lock = new ReentrantReadWriteLock();
private final ConcurrentMap<SocketAddress, ChannelPool> channelPoolMap = new ConcurrentHashMap<>();
private final Function<SocketAddress, ChannelPool> newPool = this::newPool;
private EventLoopGroup eventLoopGroup;
private ExecutorService executorService;
private CacheOperationsFactory cacheOperationsFactory;
private HotRodConfiguration configuration;
private int maxRetries;
private Marshaller marshaller;
private ClientListenerNotifier listenerNotifier;
@GuardedBy("lock")
private volatile TopologyInfo topologyInfo;
private List<ClusterInfo> clusters;
private MarshallerRegistry marshallerRegistry;
private final LongAdder totalRetries = new LongAdder();
@GuardedBy("lock")
private CompletableFuture<Void> clusterSwitchStage;
// Servers for which the last connection attempt failed and which have no established connections
@GuardedBy("lock")
private final Set<SocketAddress> failedServers = new HashSet<>();
public void start(Codec codec, HotRodConfiguration configuration, Marshaller marshaller, ExecutorService executorService,
ClientListenerNotifier listenerNotifier, MarshallerRegistry marshallerRegistry) {
this.marshallerRegistry = marshallerRegistry;
lock.writeLock().lock();
try {
this.marshaller = marshaller;
this.configuration = configuration;
this.executorService = executorService;
this.listenerNotifier = listenerNotifier;
int asyncThreads = maxAsyncThreads(executorService, configuration);
// static field with default is private in MultithreadEventLoopGroup
int eventLoopThreads =
Integer.getInteger("io.netty.eventLoopThreads", ProcessorInfo.availableProcessors() * 2);
// Note that each event loop opens a selector which counts
int maxExecutors = Math.min(asyncThreads, eventLoopThreads);
this.eventLoopGroup = configuration.transportFactory().createEventLoopGroup(maxExecutors, executorService);
List<InetSocketAddress> initialServers = new ArrayList<>();
for (ServerConfiguration server : configuration.servers()) {
initialServers.add(InetSocketAddress.createUnresolved(server.host(), server.port()));
}
ClusterInfo mainCluster = new ClusterInfo(DEFAULT_CLUSTER_NAME, initialServers, configuration.clientIntelligence());
List<ClusterInfo> clustersDefinitions = new ArrayList<>();
if (log.isDebugEnabled()) {
log.debugf("Statically configured servers: %s", initialServers);
log.debugf("Tcp no delay = %b; client socket timeout = %d ms; connect timeout = %d ms",
configuration.tcpNoDelay(), configuration.socketTimeout(), configuration.connectionTimeout());
}
if (!configuration.clusters().isEmpty()) {
for (ClusterConfiguration clusterConfiguration : configuration.clusters()) {
List<InetSocketAddress> alternateServers = new ArrayList<>();
for (ServerConfiguration server : clusterConfiguration.getServers()) {
alternateServers.add(InetSocketAddress.createUnresolved(server.host(), server.port()));
}
ClientIntelligence intelligence = clusterConfiguration.getClientIntelligence() != null ?
clusterConfiguration.getClientIntelligence() :
configuration.clientIntelligence();
ClusterInfo alternateCluster =
new ClusterInfo(clusterConfiguration.getClusterName(), alternateServers, intelligence);
log.debugf("Add secondary cluster: %s", alternateCluster);
clustersDefinitions.add(alternateCluster);
}
clustersDefinitions.add(mainCluster);
}
clusters = Immutables.immutableListCopy(clustersDefinitions);
topologyInfo = new TopologyInfo(configuration, mainCluster);
cacheOperationsFactory = new CacheOperationsFactory(this, codec, listenerNotifier, configuration);
maxRetries = configuration.maxRetries();
WrappedByteArray defaultCacheName = wrapBytes(HotRodConstants.DEFAULT_CACHE_NAME_BYTES);
topologyInfo.getOrCreateCacheInfo(defaultCacheName);
} finally {
lock.writeLock().unlock();
}
pingServersIgnoreException();
}
private int maxAsyncThreads(ExecutorService executorService, HotRodConfiguration configuration) {
if (executorService instanceof ThreadPoolExecutor) {
return ((ThreadPoolExecutor) executorService).getMaximumPoolSize();
}
// Note: this is quite dangerous, if someone sets different executor factory and does not update this setting
// we might deadlock
return new ConfigurationProperties(configuration.asyncExecutorFactory().properties()).getDefaultExecutorFactoryPoolSize();
}
public MarshallerRegistry getMarshallerRegistry() {
return marshallerRegistry;
}
private ChannelPool newPool(SocketAddress address) {
log.debugf("Creating new channel pool for %s", address);
DnsNameResolverBuilder builder = new DnsNameResolverBuilder()
.channelType(configuration.transportFactory().datagramChannelClass())
.ttl(configuration.dnsResolverMinTTL(), configuration.dnsResolverMaxTTL())
.negativeTtl(configuration.dnsResolverNegativeTTL());
AddressResolverGroup<?> dnsResolver = new RoundRobinDnsAddressResolverGroup(builder);
Bootstrap bootstrap = new Bootstrap()
.group(eventLoopGroup)
.channel(configuration.transportFactory().socketChannelClass())
.resolver(dnsResolver)
.remoteAddress(address)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, configuration.connectionTimeout())
.option(ChannelOption.SO_KEEPALIVE, configuration.tcpKeepAlive())
.option(ChannelOption.TCP_NODELAY, configuration.tcpNoDelay())
.option(ChannelOption.SO_RCVBUF, 1024576);
int maxConnections = configuration.connectionPool().maxActive();
if (maxConnections < 0) {
maxConnections = Integer.MAX_VALUE;
}
ChannelInitializer channelInitializer = createChannelInitializer(address, bootstrap);
bootstrap.handler(channelInitializer);
ChannelPool pool = new ChannelPool(bootstrap.config().group().next(), address, channelInitializer,
configuration.connectionPool().exhaustedAction(), this::onConnectionEvent,
configuration.connectionPool().maxWait(), maxConnections,
configuration.connectionPool().maxPendingRequests());
channelInitializer.setChannelPool(pool);
return pool;
}
protected ChannelInitializer createChannelInitializer(SocketAddress address, Bootstrap bootstrap) {
return new ChannelInitializer(bootstrap, address, cacheOperationsFactory, configuration, this);
}
public CacheOperationsFactory getCacheOperationsFactory() {
return cacheOperationsFactory;
}
private void pingServersIgnoreException() {
Collection<InetSocketAddress> servers = topologyInfo.getAllServers();
for (SocketAddress addr : servers) {
// Go through all statically configured nodes and force a
// connection to be established and a ping message to be sent.
try {
await(fetchChannelAndInvoke(addr, cacheOperationsFactory.newPingOperation(true)));
} catch (Exception e) {
// Ping's objective is to retrieve a potentially newer
// version of the Hot Rod cluster topology, so ignore
// exceptions from nodes that might not be up any more.
if (log.isTraceEnabled())
log.tracef(e, "Ignoring exception pinging configured servers %s to establish a connection",
servers);
}
}
}
public void destroy() {
try {
channelPoolMap.values().forEach(ChannelPool::close);
eventLoopGroup.shutdownGracefully(0, 0, TimeUnit.MILLISECONDS).get();
executorService.shutdownNow();
} catch (Exception e) {
log.warn("Exception while shutting down the connection pool.", e);
}
}
public CacheTopologyInfo getCacheTopologyInfo(byte[] cacheName) {
lock.readLock().lock();
try {
return topologyInfo.getCacheTopologyInfo(cacheName);
} finally {
lock.readLock().unlock();
}
}
public Map<SocketAddress, Set<Integer>> getPrimarySegmentsByAddress(byte[] cacheName) {
lock.readLock().lock();
try {
return topologyInfo.getPrimarySegmentsByServer(cacheName);
} finally {
lock.readLock().unlock();
}
}
public <T extends ChannelOperation> T fetchChannelAndInvoke(Set<SocketAddress> failedServers, byte[] cacheName,
T operation) {
return fetchChannelAndInvoke(failedServers, cacheName, operation, true);
}
public <T extends ChannelOperation> T fetchChannelAndInvoke(Set<SocketAddress> failedServers, byte[] cacheName,
T operation, boolean checkServer) {
SocketAddress server;
// Need the write lock because FailoverRequestBalancingStrategy is not thread-safe
lock.writeLock().lock();
try {
if (failedServers != null) {
CompletableFuture<Void> switchStage = this.clusterSwitchStage;
if (switchStage != null) {
switchStage.whenComplete((__, t) -> fetchChannelAndInvoke(failedServers, cacheName, operation));
return operation;
}
}
CacheInfo cacheInfo = topologyInfo.getCacheInfo(wrapBytes(cacheName));
FailoverRequestBalancingStrategy balancer = cacheInfo.getBalancer();
server = balancer.nextServer(failedServers);
} finally {
lock.writeLock().unlock();
}
return checkServer
? fetchChannelAndInvoke(server, cacheName, operation)
: fetchChannelAndInvoke(server, operation);
}
private <T extends ChannelOperation> T fetchChannelAndInvoke(SocketAddress preferred, byte[] cacheName, T operation) {
boolean suspect;
lock.readLock().lock();
try {
suspect = failedServers.contains(preferred);
} finally {
lock.readLock().unlock();
}
if (suspect) {
if (log.isTraceEnabled()) log.tracef("Server %s is suspected, trying another for %s", preferred, operation);
return fetchChannelAndInvoke(failedServers, cacheName, operation, false);
}
return fetchChannelAndInvoke(preferred, operation);
}
public <T extends ChannelOperation> T fetchChannelAndInvoke(SocketAddress server, T operation) {
ChannelPool pool = channelPoolMap.computeIfAbsent(server, newPool);
pool.acquire(operation);
return operation;
}
private void closeChannelPools(Set<? extends SocketAddress> servers) {
for (SocketAddress server : servers) {
HOTROD.removingServer(server);
ChannelPool pool = channelPoolMap.remove(server);
if (pool != null) {
pool.close();
}
}
// We don't care if the server is failed any more
lock.writeLock().lock();
try {
this.failedServers.removeAll(servers);
} finally {
lock.writeLock().unlock();
}
}
public SocketAddress getHashAwareServer(Object key, byte[] cacheName) {
CacheInfo cacheInfo = topologyInfo.getCacheInfo(wrapBytes(cacheName));
if (cacheInfo != null && cacheInfo.getConsistentHash() != null) {
return cacheInfo.getConsistentHash().getServer(key);
}
return null;
}
public <T extends ChannelOperation> T fetchChannelAndInvoke(Object key, Set<SocketAddress> failedServers,
byte[] cacheName, T operation) {
CacheInfo cacheInfo = topologyInfo.getCacheInfo(wrapBytes(cacheName));
if (cacheInfo != null && cacheInfo.getConsistentHash() != null) {
SocketAddress server = cacheInfo.getConsistentHash().getServer(key);
if (server != null && (failedServers == null || !failedServers.contains(server))) {
return fetchChannelAndInvoke(server, cacheName, operation);
}
}
return fetchChannelAndInvoke(failedServers, cacheName, operation);
}
public void releaseChannel(Channel channel) {
// Due to ISPN-7955 we need to keep addresses unresolved. However resolved and unresolved addresses
// are not deemed equal, and that breaks the comparison in channelPool - had we used channel.remoteAddress()
// we'd create another pool for this resolved address. Therefore we need to find out appropriate pool this
// channel belongs using the attribute.
ChannelRecord record = ChannelRecord.of(channel);
record.release(channel);
}
public void receiveTopology(byte[] cacheName, int responseTopologyAge, int responseTopologyId,
InetSocketAddress[] addresses, SocketAddress[][] segmentOwners,
short hashFunctionVersion) {
WrappedByteArray wrappedCacheName = wrapBytes(cacheName);
lock.writeLock().lock();
try {
CacheInfo cacheInfo = topologyInfo.getCacheInfo(wrappedCacheName);
assert cacheInfo != null : "The cache info must exist before receiving a topology update";
// Only accept the update if it's from the current age and the topology id is greater than the current one
// Relies on TopologyInfo.switchCluster() to update the topologyAge for caches first
if (responseTopologyAge == cacheInfo.getTopologyAge() && responseTopologyId != cacheInfo.getTopologyId()) {
List<InetSocketAddress> addressList = Arrays.asList(addresses);
HOTROD.newTopology(responseTopologyId, responseTopologyAge, addresses.length, addressList);
CacheInfo newCacheInfo;
if (hashFunctionVersion >= 0) {
SegmentConsistentHash consistentHash =
createConsistentHash(segmentOwners, hashFunctionVersion, cacheInfo.getCacheName());
newCacheInfo = cacheInfo.withNewHash(responseTopologyAge, responseTopologyId, addressList,
consistentHash, segmentOwners.length);
} else {
newCacheInfo = cacheInfo.withNewServers(responseTopologyAge, responseTopologyId, addressList);
}
updateCacheInfo(wrappedCacheName, newCacheInfo, false);
} else {
if (log.isTraceEnabled())
log.tracef("[%s] Ignoring outdated topology: topology id = %s, topology age = %s, servers = %s",
cacheInfo.getCacheName(), responseTopologyId, responseTopologyAge,
Arrays.toString(addresses));
}
} finally {
lock.writeLock().unlock();
}
}
private SegmentConsistentHash createConsistentHash(SocketAddress[][] segmentOwners, short hashFunctionVersion,
String cacheNameString) {
if (log.isTraceEnabled()) {
if (hashFunctionVersion == 0)
log.tracef("[%s] Not using a consistent hash function (hash function version == 0).",
cacheNameString);
else
log.tracef("[%s] Updating client hash function with %s number of segments",
cacheNameString, segmentOwners.length);
}
return topologyInfo.createConsistentHash(segmentOwners.length, hashFunctionVersion, segmentOwners);
}
@GuardedBy("lock")
protected void updateCacheInfo(WrappedBytes cacheName, CacheInfo newCacheInfo, boolean quiet) {
List<InetSocketAddress> newServers = newCacheInfo.getServers();
CacheInfo oldCacheInfo = topologyInfo.getCacheInfo(cacheName);
List<InetSocketAddress> oldServers = oldCacheInfo.getServers();
Set<SocketAddress> addedServers = new HashSet<>(newServers);
oldServers.forEach(addedServers::remove);
Set<SocketAddress> removedServers = new HashSet<>(oldServers);
newServers.forEach(removedServers::remove);
if (log.isTraceEnabled()) {
String cacheNameString = newCacheInfo.getCacheName();
log.tracef("[%s] Current list: %s", cacheNameString, oldServers);
log.tracef("[%s] New list: %s", cacheNameString, newServers);
log.tracef("[%s] Added servers: %s", cacheNameString, addedServers);
log.tracef("[%s] Removed servers: %s", cacheNameString, removedServers);
}
// First add new servers. For servers that went down, the returned transport will fail for now
for (SocketAddress server : addedServers) {
HOTROD.newServerAdded(server);
fetchChannelAndInvoke(server, new ReleaseChannelOperation(quiet));
}
// Then update the server list for new operations
topologyInfo.updateCacheInfo(cacheName, oldCacheInfo, newCacheInfo);
// TODO Do not close a server pool until the server has been removed from all cache infos
// And finally remove the failed servers
closeChannelPools(removedServers);
if (!removedServers.isEmpty()) {
listenerNotifier.failoverListeners(removedServers);
}
}
public Collection<InetSocketAddress> getServers() {
lock.readLock().lock();
try {
return topologyInfo.getAllServers();
} finally {
lock.readLock().unlock();
}
}
public Collection<InetSocketAddress> getServers(byte[] cacheName) {
lock.readLock().lock();
try {
return topologyInfo.getServers(wrapBytes(cacheName));
} finally {
lock.readLock().unlock();
}
}
/**
* Note that the returned <code>ConsistentHash</code> may not be thread-safe.
*/
public ConsistentHash getConsistentHash(byte[] cacheName) {
lock.readLock().lock();
try {
return topologyInfo.getCacheInfo(wrapBytes(cacheName)).getConsistentHash();
} finally {
lock.readLock().unlock();
}
}
public ConsistentHashFactory getConsistentHashFactory() {
return topologyInfo.getConsistentHashFactory();
}
public boolean isTcpNoDelay() {
return configuration.tcpNoDelay();
}
public boolean isTcpKeepAlive() {
return configuration.tcpKeepAlive();
}
public int getMaxRetries() {
return maxRetries;
}
public AtomicReference<ClientTopology> createTopologyId(byte[] cacheName) {
lock.writeLock().lock();
try {
return topologyInfo.getOrCreateCacheInfo(wrapBytes(cacheName)).getClientTopologyRef();
} finally {
lock.writeLock().unlock();
}
}
public int getTopologyId(byte[] cacheName) {
return topologyInfo.getCacheInfo(wrapBytes(cacheName)).getTopologyId();
}
public void onConnectionEvent(ChannelPool pool, ChannelEventType type) {
boolean allInitialServersFailed;
lock.writeLock().lock();
try {
// TODO Replace with a simpler "pool healthy/unhealthy" event?
if (type == ChannelEventType.CONNECTED) {
failedServers.remove(pool.getAddress());
return;
} else if (type == ChannelEventType.CONNECT_FAILED) {
if (pool.getConnected() == 0) {
failedServers.add(pool.getAddress());
}
} else {
// Nothing to do
return;
}
if (log.isTraceEnabled())
log.tracef("Connection attempt failed, we now have %d servers with no established connections: %s",
failedServers.size(), failedServers);
allInitialServersFailed = failedServers.containsAll(topologyInfo.getCluster().getInitialServers());
if (!allInitialServersFailed || clusters.isEmpty()) {
resetCachesWithFailedServers();
}
} finally {
lock.writeLock().unlock();
}
if (allInitialServersFailed && !clusters.isEmpty()) {
trySwitchCluster();
}
}
private void trySwitchCluster() {
int ageBeforeSwitch;
ClusterInfo cluster;
lock.writeLock().lock();
try {
ageBeforeSwitch = topologyInfo.getTopologyAge();
cluster = topologyInfo.getCluster();
if (clusterSwitchStage != null) {
if (log.isTraceEnabled())
log.tracef("Cluster switch is already in progress for topology age %d", ageBeforeSwitch);
return;
}
clusterSwitchStage = new CompletableFuture<>();
} finally {
lock.writeLock().unlock();
}
checkServersAlive(cluster.getInitialServers())
.thenCompose(alive -> {
if (alive) {
// The live check removed the server from failedServers when it established a connection
if (log.isTraceEnabled()) log.tracef("Cluster %s is still alive, not switching", cluster);
return CompletableFuture.completedFuture(null);
}
if (log.isTraceEnabled())
log.tracef("Trying to switch cluster away from '%s'", cluster.getName());
return findLiveCluster(cluster, ageBeforeSwitch);
})
.thenAccept(newCluster -> {
if (newCluster != null) {
automaticSwitchToCluster(newCluster, cluster, ageBeforeSwitch);
}
})
.whenComplete((__, t) -> completeClusterSwitch());
}
@GuardedBy("lock")
private void resetCachesWithFailedServers() {
List<WrappedBytes> failedCaches = new ArrayList<>();
List<String> nameStrings = new ArrayList<>();
topologyInfo.forEachCache((cacheNameBytes, cacheInfo) -> {
List<InetSocketAddress> cacheServers = cacheInfo.getServers();
boolean currentServersHaveFailed = failedServers.containsAll(cacheServers);
boolean canReset = !cacheServers.equals(topologyInfo.getCluster().getInitialServers());
if (currentServersHaveFailed && canReset) {
failedCaches.add(cacheNameBytes);
nameStrings.add(cacheInfo.getCacheName());
}
});
if (!failedCaches.isEmpty()) {
HOTROD.revertCacheToInitialServerList(nameStrings);
for (WrappedBytes cacheNameBytes : failedCaches) {
topologyInfo.reset(cacheNameBytes);
}
}
}
private void completeClusterSwitch() {
CompletableFuture<Void> localStage;
lock.writeLock().lock();
try {
localStage = this.clusterSwitchStage;
this.clusterSwitchStage = null;
} finally {
lock.writeLock().unlock();
}
// An automatic cluster switch could be cancelled by a manual switch,
// and a manual cluster switch would not have a stage to begin with
if (localStage != null) {
localStage.complete(null);
}
}
private CompletionStage<ClusterInfo> findLiveCluster(ClusterInfo failedCluster, int ageBeforeSwitch) {
List<ClusterInfo> candidateClusters = new ArrayList<>();
for (ClusterInfo cluster : clusters) {
String clusterName = cluster.getName();
if (!clusterName.equals(failedCluster.getName()))
candidateClusters.add(cluster);
}
Iterator<ClusterInfo> clusterIterator = candidateClusters.iterator();
return findLiveCluster0(false, null, clusterIterator, ageBeforeSwitch);
}
private CompletionStage<ClusterInfo> findLiveCluster0(boolean alive, ClusterInfo testedCluster,
Iterator<ClusterInfo> clusterIterator, int ageBeforeSwitch) {
lock.writeLock().lock();
try {
if (clusterSwitchStage == null || topologyInfo.getTopologyAge() != ageBeforeSwitch) {
log.debugf("Cluster switch already completed by another thread, bailing out");
return CompletableFuture.completedFuture(null);
}
} finally {
lock.writeLock().unlock();
}
if (alive) return CompletableFuture.completedFuture(testedCluster);
if (!clusterIterator.hasNext()) {
log.debugf("All cluster addresses viewed and none worked: %s", clusters);
return CompletableFuture.completedFuture(null);
}
ClusterInfo nextCluster = clusterIterator.next();
return checkServersAlive(nextCluster.getInitialServers())
.thenCompose(aliveNext -> findLiveCluster0(aliveNext, nextCluster, clusterIterator, ageBeforeSwitch));
}
private CompletionStage<Boolean> checkServersAlive(Collection<InetSocketAddress> servers) {
if (servers.isEmpty())
return CompletableFuture.completedFuture(false);
AtomicInteger remainingResponses = new AtomicInteger(servers.size());
CompletableFuture<Boolean> allFuture = new CompletableFuture<>();
for (SocketAddress server : servers) {
fetchChannelAndInvoke(server, cacheOperationsFactory.newPingOperation(true)).whenComplete((result, throwable) -> {
if (throwable != null) {
if (log.isTraceEnabled()) {
log.tracef(throwable, "Error checking whether this server is alive: %s", server);
}
if (remainingResponses.decrementAndGet() == 0) {
allFuture.complete(false);
}
} else {
// One successful response is enough to be able to switch to this cluster
log.tracef("Ping to server %s succeeded", server);
allFuture.complete(true);
}
});
}
return allFuture;
}
private void automaticSwitchToCluster(ClusterInfo newCluster, ClusterInfo failedCluster, int ageBeforeSwitch) {
lock.writeLock().lock();
try {
if (clusterSwitchStage == null || topologyInfo.getTopologyAge() != ageBeforeSwitch) {
log.debugf("Cluster switch already completed by another thread, bailing out");
return;
}
topologyInfo.switchCluster(newCluster);
} finally {
lock.writeLock().unlock();
}
if (!newCluster.getName().equals(DEFAULT_CLUSTER_NAME))
HOTROD.switchedToCluster(newCluster.getName());
else
HOTROD.switchedBackToMainCluster();
}
/**
* Switch to an alternate cluster (or from an alternate cluster back to the main cluster).
*
* <p>Overrides any automatic cluster switch in progress, which may be useful
* when the automatic switch takes too long.</p>
*/
public boolean manualSwitchToCluster(String clusterName) {
if (clusters.isEmpty()) {
log.debugf("No alternative clusters configured, so can't switch cluster");
return false;
}
ClusterInfo cluster = findCluster(clusterName);
if (cluster == null) {
log.debugf("Cluster named %s does not exist in the configuration", clusterName);
return false;
}
lock.writeLock().lock();
boolean shouldComplete = false;
try {
if (clusterSwitchStage != null) {
log.debugf("Another cluster switch is already in progress, overriding it");
shouldComplete = true;
}
log.debugf("Switching to cluster %s, servers: %s", clusterName, cluster.getInitialServers());
topologyInfo.switchCluster(cluster);
} finally {
lock.writeLock().unlock();
}
if (!clusterName.equals(DEFAULT_CLUSTER_NAME))
HOTROD.manuallySwitchedToCluster(clusterName);
else
HOTROD.manuallySwitchedBackToMainCluster();
if (shouldComplete) {
completeClusterSwitch();
}
return true;
}
public Marshaller getMarshaller() {
return marshaller;
}
public String getCurrentClusterName() {
return topologyInfo.getCluster().getName();
}
public int getTopologyAge() {
return topologyInfo.getTopologyAge();
}
private ClusterInfo findCluster(String clusterName) {
for (ClusterInfo cluster : clusters) {
if (cluster.getName().equals(clusterName))
return cluster;
}
return null;
}
/**
* Note that the returned <code>RequestBalancingStrategy</code> may not be thread-safe.
*/
public FailoverRequestBalancingStrategy getBalancer(byte[] cacheName) {
lock.readLock().lock();
try {
return topologyInfo.getCacheInfo(wrapBytes(cacheName)).getBalancer();
} finally {
lock.readLock().unlock();
}
}
public int socketTimeout() {
return configuration.socketTimeout();
}
public int getNumActive(SocketAddress address) {
ChannelPool pool = channelPoolMap.get(address);
return pool == null ? 0 : pool.getActive();
}
public int getNumIdle(SocketAddress address) {
ChannelPool pool = channelPoolMap.get(address);
return pool == null ? 0 : pool.getIdle();
}
public int getNumActive() {
return channelPoolMap.values().stream().mapToInt(ChannelPool::getActive).sum();
}
public int getNumIdle() {
return channelPoolMap.values().stream().mapToInt(ChannelPool::getIdle).sum();
}
public HotRodConfiguration getConfiguration() {
return configuration;
}
public long getRetries() {
return totalRetries.longValue();
}
public void incrementRetryCount() {
totalRetries.increment();
}
public ClientIntelligence getClientIntelligence() {
lock.readLock().lock();
try {
return topologyInfo.getCluster().getIntelligence();
} finally {
lock.readLock().unlock();
}
}
private class ReleaseChannelOperation implements ChannelOperation {
private final boolean quiet;
private ReleaseChannelOperation(boolean quiet) {
this.quiet = quiet;
}
@Override
public void invoke(Channel channel) {
releaseChannel(channel);
}
@Override
public void cancel(SocketAddress address, Throwable cause) {
if (!quiet) {
HOTROD.failedAddingNewServer(address, cause);
}
}
}
}
| 33,861
| 39.945586
| 128
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/NativeTransport.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.util.concurrent.ExecutorService;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollDatagramChannel;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioDatagramChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
// This is a separate class for easier replacement within Quarkus
public final class NativeTransport {
private static final boolean IS_LINUX = System.getProperty("os.name").toLowerCase().startsWith("linux");
private static final String USE_EPOLL_PROPERTY = "infinispan.server.channel.epoll";
private static final String USE_IOURING_PROPERTY = "infinispan.server.channel.iouring";
private static final boolean EPOLL_DISABLED = System.getProperty(USE_EPOLL_PROPERTY, "true").equalsIgnoreCase("false");
private static final boolean IOURING_DISABLED = System.getProperty(USE_IOURING_PROPERTY, "true").equalsIgnoreCase("false");
// Has to be after other static variables to ensure they are initialized
static final boolean USE_NATIVE_EPOLL = useNativeEpoll();
static final boolean USE_NATIVE_IOURING = useNativeIOUring();
private static boolean useNativeEpoll() {
try {
Class.forName("io.netty.channel.epoll.Epoll", true, NativeTransport.class.getClassLoader());
if (Epoll.isAvailable()) {
return !EPOLL_DISABLED && IS_LINUX;
} else {
if (IS_LINUX) {
HOTROD.epollNotAvailable(Epoll.unavailabilityCause().toString());
}
}
} catch (ClassNotFoundException e) {
if (IS_LINUX) {
HOTROD.epollNotAvailable(e.getMessage());
}
}
return false;
}
private static boolean useNativeIOUring() {
try {
Class.forName("io.netty.incubator.channel.uring.IOUring", true, NativeTransport.class.getClassLoader());
if (io.netty.incubator.channel.uring.IOUring.isAvailable()) {
return !IOURING_DISABLED && IS_LINUX;
} else {
if (IS_LINUX) {
HOTROD.ioUringNotAvailable(io.netty.incubator.channel.uring.IOUring.unavailabilityCause().toString());
}
}
} catch (ClassNotFoundException e) {
if (IS_LINUX) {
HOTROD.ioUringNotAvailable(e.getMessage());
}
}
return false;
}
public static Class<? extends SocketChannel> socketChannelClass() {
if (USE_NATIVE_EPOLL) {
return EpollSocketChannel.class;
} else if (USE_NATIVE_IOURING) {
return IOURingNativeTransport.socketChannelClass();
} else {
return NioSocketChannel.class;
}
}
public static Class<? extends DatagramChannel> datagramChannelClass() {
if (USE_NATIVE_EPOLL) {
return EpollDatagramChannel.class;
} else if (USE_NATIVE_IOURING) {
return IOURingNativeTransport.datagramChannelClass();
} else {
return NioDatagramChannel.class;
}
}
public static EventLoopGroup createEventLoopGroup(int maxExecutors, ExecutorService executorService) {
if (USE_NATIVE_EPOLL) {
return new EpollEventLoopGroup(maxExecutors, executorService);
} else if (USE_NATIVE_IOURING) {
return IOURingNativeTransport.createEventLoopGroup(maxExecutors, executorService);
} else {
return new NioEventLoopGroup(maxExecutors, executorService);
}
}
}
| 3,772
| 37.896907
| 126
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ByteBufUtil.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.commons.io.SignedNumeric.encode;
import javax.transaction.xa.Xid;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.transaction.manager.RemoteXid;
import org.infinispan.commons.util.Util;
import io.netty.buffer.ByteBuf;
/**
* Helper methods for writing varints, arrays and strings to {@link ByteBuf}.
*/
public final class ByteBufUtil {
private ByteBufUtil() {}
public static byte[] readArray(ByteBuf buf) {
int length = readVInt(buf);
byte[] bytes = new byte[length];
buf.readBytes(bytes, 0, length);
return bytes;
}
public static byte[] readMaybeArray(ByteBuf bf) {
int idx = bf.readerIndex();
int length = readMaybeVInt(bf);
if (idx == bf.readerIndex()) {
return null;
}
return readMaybeRangedArray(bf, length);
}
public static byte[] readMaybeRangedArray(ByteBuf bf, int length) {
if (length == 0) {
return Util.EMPTY_BYTE_ARRAY;
}
if (!bf.isReadable(length)) {
bf.resetReaderIndex();
return null;
}
byte[] bytes = new byte[length];
bf.readBytes(bytes, 0, length);
return bytes;
}
public static String readString(ByteBuf bf) {
bf.markReaderIndex();
int idx = bf.readerIndex();
int length = readMaybeVInt(bf);
if (idx == bf.readerIndex()) {
return null;
}
if (length == 0) {
return "";
}
if (!bf.isReadable(length)) {
bf.resetReaderIndex();
return null;
}
int startIndex = bf.readerIndex();
bf.skipBytes(length);
return bf.toString(startIndex, length, HotRodConstants.HOTROD_STRING_CHARSET);
}
public static void writeString(ByteBuf buf, String string) {
if (string != null && !string.isEmpty()) {
writeArray(buf, string.getBytes(HotRodConstants.HOTROD_STRING_CHARSET));
} else {
writeVInt(buf, 0);
}
}
public static void writeOptionalString(ByteBuf buf, String string) {
if (string == null) {
writeSignedVInt(buf, -1);
} else {
writeOptionalArray(buf, string.getBytes(HotRodConstants.HOTROD_STRING_CHARSET));
}
}
public static void writeArray(ByteBuf buf, byte[] toAppend) {
writeVInt(buf, toAppend.length);
buf.writeBytes(toAppend);
}
public static void writeArray(ByteBuf buf, byte[] toAppend, int offset, int count) {
writeVInt(buf, count);
buf.writeBytes(toAppend, offset, count);
}
public static int estimateArraySize(byte[] array) {
return estimateVIntSize(array.length) + array.length;
}
public static int estimateVIntSize(int value) {
return (32 - Integer.numberOfLeadingZeros(value)) / 7 + 1;
}
public static void writeOptionalArray(ByteBuf buf, byte[] toAppend) {
writeSignedVInt(buf, toAppend.length);
buf.writeBytes(toAppend);
}
public static void writeVInt(ByteBuf buf, int i) {
while ((i & ~0x7F) != 0) {
buf.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
buf.writeByte((byte) i);
}
public static void writeSignedVInt(ByteBuf buf, int i) {
writeVInt(buf, encode(i));
}
public static void writeVLong(ByteBuf buf, long i) {
while ((i & ~0x7F) != 0) {
buf.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
buf.writeByte((byte) i);
}
public static int estimateVLongSize(long value) {
return (64 - Long.numberOfLeadingZeros(value)) / 7 + 1;
}
public static long readVLong(ByteBuf buf) {
byte b = buf.readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = buf.readByte();
i |= (b & 0x7FL) << shift;
}
return i;
}
public static int readVInt(ByteBuf buf) {
byte b = buf.readByte();
int i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = buf.readByte();
i |= (b & 0x7FL) << shift;
}
return i;
}
public static int readMaybeVInt(ByteBuf buf) {
buf.markReaderIndex();
int i = 0;
for (int shift = 0; buf.isReadable(); shift += 7) {
byte b = buf.readByte();
i |= (b & 0x7FL) << shift;
if ((b & 0x80) == 0)
return i;
}
buf.resetReaderIndex();
return Integer.MIN_VALUE;
}
public static String limitedHexDump(ByteBuf buf) {
return Util.hexDump(buf::getByte, buf.readerIndex(), buf.readableBytes());
}
/**
* Estimates the {@link Xid} encoding size.
* <p>
* If the instance is a {@link RemoteXid}, the estimation is accurate. Otherwise, the max size is used.
*
* @param xid the {@link Xid} instance to test.
* @return the estimated size.
*/
public static int estimateXidSize(Xid xid) {
if (xid instanceof RemoteXid) {
return ((RemoteXid) xid).estimateSize();
} else {
// Worst case.
// To be more accurate, we need to invoke getGlobalTransactionId and getBranchQualifier that will most likely
//create and copy the array
return estimateVIntSize(xid.getFormatId()) + Xid.MAXBQUALSIZE + Xid.MAXGTRIDSIZE;
}
}
/**
* Writes the {@link Xid} to the {@link ByteBuf}.
*
* @param buf the buffer to write to.
* @param xid the {@link Xid} to encode
*/
public static void writeXid(ByteBuf buf, Xid xid) {
if (xid instanceof RemoteXid) {
((RemoteXid) xid).writeTo(buf);
} else {
ByteBufUtil.writeSignedVInt(buf, xid.getFormatId());
writeArray(buf, xid.getGlobalTransactionId());
writeArray(buf, xid.getBranchQualifier());
}
}
}
| 5,842
| 27.227053
| 118
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ActivationHandler.java
|
package org.infinispan.hotrod.impl.transport.netty;
import org.infinispan.hotrod.exceptions.TransportException;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import io.netty.channel.Channel;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
/**
* Handler that is added to the end of pipeline during channel creation and handshake.
* Its task is to complete {@link ChannelRecord}.
*/
@Sharable
class ActivationHandler extends ChannelInboundHandlerAdapter {
static final String NAME = "activation-handler";
private static final Log log = LogFactory.getLog(ActivationHandler.class);
static final ActivationHandler INSTANCE = new ActivationHandler();
static final Object ACTIVATION_EVENT = new Object();
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
if (log.isTraceEnabled()) {
log.tracef("Activating channel %s", ctx.channel());
}
ChannelRecord.of(ctx.channel()).complete(ctx.channel());
ctx.pipeline().remove(this);
}
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
if (evt == ACTIVATION_EVENT) {
channelActive(ctx);
} else {
ctx.fireUserEventTriggered(evt);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
Channel channel = ctx.channel();
if (log.isTraceEnabled()) {
log.tracef(cause, "Failed to activate channel %s", channel);
}
try {
ctx.close();
} finally {
ChannelRecord channelRecord = ChannelRecord.of(channel);
// With sync Hot Rod any failure to fetch a transport from pool was wrapped in TransportException
channelRecord.completeExceptionally(new TransportException(cause, channelRecord.getUnresolvedAddress()));
}
}
}
| 2,024
| 35.160714
| 114
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelPool.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.net.SocketAddress;
import java.util.Deque;
import java.util.NoSuchElementException;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.concurrent.locks.StampedLock;
import java.util.function.BiConsumer;
import org.infinispan.hotrod.configuration.ExhaustedAction;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import io.netty.channel.Channel;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.ScheduledFuture;
import io.netty.util.internal.PlatformDependent;
/**
* This is a custom implementation of {@link io.netty.channel.Channel} pooling.
* Compared to {@link io.netty.channel.pool.ChannelPool} implementations in Netty it does not enforce context switch before writing to the channel.
* **Update**: Netty enforces going through event loop later on by delegating the write through {@link io.netty.channel.AbstractChannelHandlerContext.WriteTask}.
* So writing the socket in caller thread is still TODO.
* <p>
* It should be also more allocation-efficient since it does not create futures and invokes the callback directly if the
* channel is available.
* <p>
* The connections are handled LIFO, pending requests are handled FIFO.
*/
class ChannelPool {
enum ChannelEventType { CONNECTED, CLOSED_IDLE, CLOSED_ACTIVE, CONNECT_FAILED}
private static final AtomicIntegerFieldUpdater<TimeoutCallback> invokedUpdater = AtomicIntegerFieldUpdater.newUpdater(TimeoutCallback.class, "invoked");
private static final Log log = LogFactory.getLog(ChannelPool.class);
private static final int MAX_FULL_CHANNELS_SEEN = 10;
private final Deque<Channel> channels = PlatformDependent.newConcurrentDeque();
private final Deque<ChannelOperation> callbacks = PlatformDependent.newConcurrentDeque();
private final EventExecutor executor;
private final SocketAddress address;
private final ChannelInitializer newChannelInvoker;
private final ExhaustedAction exhaustedAction;
private final BiConsumer<ChannelPool, ChannelEventType> connectionFailureListener;
private final long maxWait;
private final int maxConnections;
private final int maxPendingRequests;
private final AtomicInteger created = new AtomicInteger();
private final AtomicInteger active = new AtomicInteger();
private final AtomicInteger connected = new AtomicInteger();
private final StampedLock lock = new StampedLock();
private volatile boolean terminated = false;
private volatile boolean suspected = false;
ChannelPool(EventExecutor executor, SocketAddress address, ChannelInitializer newChannelInvoker,
ExhaustedAction exhaustedAction, BiConsumer<ChannelPool, ChannelEventType> connectionFailureListener,
long maxWait, int maxConnections, int maxPendingRequests) {
this.connectionFailureListener = connectionFailureListener;
this.executor = executor;
this.address = address;
this.newChannelInvoker = newChannelInvoker;
this.exhaustedAction = exhaustedAction;
this.maxWait = maxWait;
this.maxConnections = maxConnections;
this.maxPendingRequests = maxPendingRequests;
}
public void acquire(ChannelOperation callback) {
if (terminated) {
callback.cancel(address, new RejectedExecutionException("Pool was terminated"));
return;
}
// We could acquire an active channel and submit the callback.
if (executeDirectlyIfPossible(callback)) return;
// wait action
if (maxWait > 0) {
TimeoutCallback timeoutCallback = new TimeoutCallback(callback);
timeoutCallback.timeoutFuture = executor.schedule(timeoutCallback, maxWait, TimeUnit.MILLISECONDS);
callback = timeoutCallback;
}
// Between the check time and adding the callback to the queue, we could have a channel available.
// Let's just try again.
if (!executeOrEnqueue(callback)) {
boolean remove = false;
try {
remove = executeDirectlyIfPossible(callback);
} finally {
if (remove) {
callbacks.remove(callback);
}
}
}
}
boolean executeDirectlyIfPossible(ChannelOperation callback) {
Channel channel;
int fullChannelsSeen = 0;
while ((channel = channels.pollFirst()) != null) {
if (!channel.isActive()) {
// The channel was closed while idle but not removed - just forget it
continue;
}
if (!channel.isWritable() || channel.pipeline().get(HotRodClientDecoder.class).registeredOperations() >= maxPendingRequests) {
channels.addLast(channel);
// prevent looping on non-writable channels
if (++fullChannelsSeen < MAX_FULL_CHANNELS_SEEN) {
continue;
} else {
break;
}
}
return activateChannel(channel, callback, false);
}
int current = created.get();
while (current < maxConnections) {
if (created.compareAndSet(current, current + 1)) {
int currentActive = active.incrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Creating new channel, created = %d, active = %d", address, current + 1, currentActive);
// create new connection and apply callback
createAndInvoke(callback);
return true;
}
current = created.get();
}
// reached max connections
switch (exhaustedAction) {
case EXCEPTION:
throw new NoSuchElementException("Reached maximum number of connections");
case WAIT:
break;
case CREATE_NEW:
int currentCreated = created.incrementAndGet();
int currentActive = active.incrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Creating new channel, created = %d, active = %d", address, currentCreated, currentActive);
createAndInvoke(callback);
return true;
default:
throw new IllegalArgumentException(String.valueOf(exhaustedAction));
}
return false;
}
private boolean executeOrEnqueue(ChannelOperation callback) {
Channel channel;
// To prevent adding channel and callback concurrently we'll synchronize all additions
// TODO: completely lock-free algorithm would be better
long stamp = lock.writeLock();
try {
for (;;) {
// at this point we won't be picky and use non-writable channel anyway
channel = channels.pollFirst();
if (channel == null) {
if (log.isTraceEnabled()) log.tracef("[%s] No channel available, adding callback to the queue %s", address, callback);
callbacks.addLast(callback);
return false;
} else if (channel.isActive()) {
break;
}
}
} finally {
lock.unlockWrite(stamp);
}
return activateChannel(channel, callback, false);
}
private void createAndInvoke(ChannelOperation callback) {
try {
newChannelInvoker.createChannel().whenComplete((channel, throwable) -> {
if (throwable != null) {
int currentActive = active.decrementAndGet();
if (currentActive < 0) {
HOTROD.invalidActiveCountAfterClose(channel);
}
int currentCreated = created.decrementAndGet();
if (currentCreated < 0) {
HOTROD.invalidCreatedCountAfterClose(channel);
}
if (log.isTraceEnabled()) log.tracef(throwable, "[%s] Channel could not be created, created = %d, active = %d, connected = %d",
address, currentCreated, currentActive, connected.get());
connectionFailureListener.accept(this, ChannelEventType.CONNECT_FAILED);
callback.cancel(address, throwable);
maybeRejectPendingCallbacks(throwable);
} else {
suspected = false;
int currentConnected = connected.incrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Channel connected, created = %d, active = %d, connected = %d",
address, created.get(), active.get(), currentConnected);
callback.invoke(channel);
connectionFailureListener.accept(this, ChannelEventType.CONNECTED);
}
});
} catch (Throwable t) {
int currentActive = active.decrementAndGet();
int currentCreated = created.decrementAndGet();
if (log.isTraceEnabled()) log.tracef(t, "[%s] Channel could not be created, created = %d, active = %d, connected = %d",
address, currentCreated, currentActive, connected.get());
if (currentCreated < 0) {
HOTROD.warnf("Invalid created count after channel create failure");
}
if (currentActive < 0) {
HOTROD.warnf("Invalid active count after channel create failure");
}
callback.cancel(address, t);
maybeRejectPendingCallbacks(t);
}
}
/**
* Release a channel back into the pool after an operation has finished.
*/
public void release(Channel channel, ChannelRecord record) {
// The channel can be closed when it's idle (due to idle timeout or closed connection)
if (record.isIdle()) {
HOTROD.warnf("Cannot release channel %s because it is idle", channel);
return;
}
if (record.setIdleAndIsClosed()) {
if (log.isTraceEnabled()) log.tracef("[%s] Attempt to release already closed channel %s, active = %d",
address, channel, active.get());
return;
}
int currentActive = active.decrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Released channel %s, active = %d", address, channel, currentActive);
if (currentActive < 0) {
HOTROD.warnf("[%s] Invalid active count after releasing channel %s", address, channel);
}
ChannelOperation callback;
// We're protecting against concurrent acquires, concurrent releases are fine
// hopefully the acquire will usually get the channel through the fast (non-locking) path
long stamp = lock.readLock();
try {
callback = callbacks.pollFirst();
if (callback == null) {
channels.addFirst(channel);
return;
}
} finally {
lock.unlockRead(stamp);
}
activateChannel(channel, callback, true);
}
/**
* Update counts after a channel has been closed.
*/
public void releaseClosedChannel(Channel channel, ChannelRecord channelRecord) {
if (channel.isActive()) {
HOTROD.warnf("Channel %s cannot be released because it is not closed", channel);
return;
}
boolean idle = channelRecord.closeAndWasIdle();
int currentCreated = created.decrementAndGet();
int currentActive = !idle ? active.decrementAndGet() : active.get();
int currentConnected = connected.decrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Closed channel %s, created = %s, idle = %b, active = %d, connected = %d",
address, channel, currentCreated, idle, currentActive, currentConnected);
if (currentCreated < 0) {
HOTROD.warnf("Invalid created count after closing channel %s", channel);
}
if (currentActive < 0) {
HOTROD.warnf("Invalid active count after closing channel %s", channel);
}
connectionFailureListener.accept( this, idle ? ChannelEventType.CLOSED_IDLE : ChannelEventType.CLOSED_ACTIVE);
}
private boolean activateChannel(Channel channel, ChannelOperation callback, boolean useExecutor) {
if (!channel.isActive()) return false;
int currentActive = active.incrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Activated record %s, created = %d, active = %d", address, channel, created.get(), currentActive);
ChannelRecord record = ChannelRecord.of(channel);
record.setAcquired();
if (useExecutor) {
// Do not execute another operation in releasing thread, we could run out of stack
executor.execute(() -> {
try {
callback.invoke(channel);
} catch (Throwable t) {
log.tracef(t, "Closing channel %s due to exception", channel);
discardChannel(channel);
}
});
} else {
try {
callback.invoke(channel);
} catch (Throwable t) {
log.tracef(t, "Closing channel %s due to exception", channel);
discardChannel(channel);
throw t;
}
}
return true;
}
private void discardChannel(Channel channel) {
channel.close();
}
public SocketAddress getAddress() {
return address;
}
public int getActive() {
return active.get();
}
public int getIdle() {
return Math.max(0, created.get() - active.get());
}
public int getConnected() {
return connected.get();
}
public void close() {
terminated = true;
long stamp = lock.writeLock();
try {
RejectedExecutionException cause = new RejectedExecutionException("Pool was terminated");
callbacks.forEach(callback -> callback.cancel(address, cause));
channels.forEach(channel -> {
// We don't want to fail all operations on given channel,
// e.g. when moving from unresolved to resolved addresses
channel.pipeline().fireUserEventTriggered(ChannelPoolCloseEvent.INSTANCE);
});
} finally {
lock.unlockWrite(stamp);
}
}
public void inspectPool() {
if (terminated || suspected || getConnected() > 0 || getActive() > 0) return;
ChannelOperation cb = acquireHead();
if (cb != null) {
int currentCreated = created.incrementAndGet();
int currentActive = active.incrementAndGet();
if (log.isTraceEnabled()) log.tracef("[%s] Creating new channel to inspect server, created = %d, active = %d", address, currentCreated, currentActive);
suspected = true;
createAndInvoke(cb);
}
}
private void maybeRejectPendingCallbacks(Throwable t) {
if (terminated || !suspected || getConnected() > 0 || getActive() > 0) return;
ChannelOperation cb;
while ((cb = acquireHead()) != null) {
cb.cancel(address, t);
}
}
private ChannelOperation acquireHead() {
long stamp = lock.readLock();
try {
return callbacks.pollFirst();
} finally {
lock.unlockRead(stamp);
}
}
@Override
public String toString() {
return "ChannelPool[" +
"address=" + address +
", maxWait=" + maxWait +
", maxConnections=" + maxConnections +
", maxPendingRequests=" + maxPendingRequests +
", created=" + created +
", active=" + active +
", connected=" + connected +
", suspected=" + suspected +
", terminated=" + terminated +
']';
}
private class TimeoutCallback implements ChannelOperation, Runnable {
final ChannelOperation callback;
volatile ScheduledFuture<?> timeoutFuture;
@SuppressWarnings("unused")
volatile int invoked = 0;
private TimeoutCallback(ChannelOperation callback) {
this.callback = callback;
}
@Override
public void run() {
callbacks.remove(this);
if (invokedUpdater.compareAndSet(this, 0, 1)) {
callback.cancel(address, new TimeoutException("Timed out waiting for connection"));
}
}
@Override
public void invoke(Channel channel) {
ScheduledFuture<?> timeoutFuture = this.timeoutFuture;
if (timeoutFuture != null) {
timeoutFuture.cancel(false);
}
if (invokedUpdater.compareAndSet(this, 0, 1)) {
callback.invoke(channel);
}
}
@Override
public void cancel(SocketAddress address, Throwable cause) {
throw new UnsupportedOperationException();
}
}
}
| 16,814
| 38.658019
| 161
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelRecord.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.net.SocketAddress;
import java.util.concurrent.CompletableFuture;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.util.AttributeKey;
import io.netty.util.concurrent.GenericFutureListener;
/**
* This class serves multiple purposes:
*
* 1) Activation: this extends {@link CompletableFuture} which is completed once the connection and initial handshake
* are completed.
* 2) Storage for unresolved address and pool info.
*/
public class ChannelRecord extends CompletableFuture<Channel> implements GenericFutureListener<ChannelFuture> {
private static final Log log = LogFactory.getLog(ChannelRecord.class);
static AttributeKey<ChannelRecord> KEY = AttributeKey.newInstance("activation");
private final SocketAddress unresolvedAddress;
private final ChannelPool channelPool;
private boolean closed = false;
private boolean acquired = true;
ChannelRecord(SocketAddress unresolvedAddress, ChannelPool channelPool) {
this.unresolvedAddress = unresolvedAddress;
this.channelPool = channelPool;
}
public static ChannelRecord of(Channel channel) {
return channel.attr(KEY).get();
}
public SocketAddress getUnresolvedAddress() {
return unresolvedAddress;
}
@Override
public boolean complete(Channel channel) {
// Only add the listener once (or never, if completed exceptionally)
boolean complete = super.complete(channel);
if (complete) {
channel.closeFuture().addListener(this);
}
return complete;
}
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (log.isTraceEnabled()) {
if (!future.isSuccess()) {
log.tracef(future.cause(), "Channel %s is closed, see exception for details", get());
}
}
channelPool.releaseClosedChannel(future.channel(), this);
channelPool.inspectPool();
}
synchronized void setAcquired() {
assert !acquired;
acquired = true;
}
public synchronized boolean isIdle() {
return !acquired;
}
public synchronized boolean setIdleAndIsClosed() {
assert acquired;
acquired = false;
return closed;
}
public synchronized boolean closeAndWasIdle() {
assert !closed;
closed = true;
return !acquired;
}
public void release(Channel channel) {
channelPool.release(channel, this);
}
}
| 2,604
| 27.315217
| 117
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ChannelOperation.java
|
package org.infinispan.hotrod.impl.transport.netty;
import java.net.SocketAddress;
import io.netty.channel.Channel;
/**
* A callback to be invoked on a channel.
*/
public interface ChannelOperation {
/**
* Invoked on an active channel ready to be written
*/
void invoke(Channel channel);
/**
* Invoked when the callback cannot be invoked due to timeout or terminated pool.
* @param address
* @param cause
*/
void cancel(SocketAddress address, Throwable cause);
}
| 505
| 21
| 84
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/ParserUtils.java
|
package org.infinispan.hotrod.impl.transport.netty;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_CREATED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_EXPIRED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_MODIFIED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.CACHE_ENTRY_REMOVED_EVENT_RESPONSE;
import static org.infinispan.hotrod.impl.protocol.HotRodConstants.COUNTER_EVENT_RESPONSE;
public class ParserUtils {
public static boolean isEntryEventOp(short opCode) {
return opCode == CACHE_ENTRY_CREATED_EVENT_RESPONSE
|| opCode == CACHE_ENTRY_MODIFIED_EVENT_RESPONSE
|| opCode == CACHE_ENTRY_REMOVED_EVENT_RESPONSE
|| opCode == CACHE_ENTRY_EXPIRED_EVENT_RESPONSE;
}
public static boolean isCounterEventOp(short opCode) {
return opCode == COUNTER_EVENT_RESPONSE;
}
}
| 995
| 44.272727
| 102
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/Intrinsics.java
|
package org.infinispan.hotrod.impl.transport.netty;
import io.netty.buffer.ByteBuf;
public class Intrinsics {
public static int vInt(ByteBuf buf) {
return ByteBufUtil.readMaybeVInt(buf);
}
public static long vLong(ByteBuf buf) {
buf.markReaderIndex();
if (buf.readableBytes() > 0) {
byte b = buf.readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
if (buf.readableBytes() == 0) {
buf.resetReaderIndex();
return Long.MIN_VALUE;
}
b = buf.readByte();
i |= (b & 0x7FL) << shift;
}
return i;
} else {
buf.resetReaderIndex();
return Long.MIN_VALUE;
}
}
public static long long_(ByteBuf buf) {
if (buf.readableBytes() >= 8) {
return buf.readLong();
}
return 0;
}
public static byte byte_(ByteBuf buffer) {
if (buffer.isReadable()) {
return buffer.readByte();
}
return 0;
}
public static short uByte(ByteBuf buffer) {
if (buffer.isReadable()) {
return buffer.readUnsignedByte();
}
return 0;
}
public static short vShort(ByteBuf buffer) {
if (buffer.readableBytes() >= Short.BYTES)
return buffer.readShort();
return 0;
}
public static int uShort(ByteBuf buf) {
if (buf.readableBytes() >= Short.BYTES)
return buf.readUnsignedShort();
return 0;
}
public static byte[] array(ByteBuf buf) {
buf.markReaderIndex();
return ByteBufUtil.readMaybeArray(buf);
}
public static String string(ByteBuf buf) {
return ByteBufUtil.readString(buf);
}
}
| 1,733
| 23.083333
| 59
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transport/netty/SaslDecoderEncoder.java
|
package org.infinispan.hotrod.impl.transport.netty;
import javax.security.sasl.SaslClient;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
public class SaslDecoderEncoder implements ChannelInboundHandlerDefaults, ChannelOutboundHandlerDefaults {
private final SaslClient saslClient;
public SaslDecoderEncoder(SaslClient saslClient) {
this.saslClient = saslClient;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
if (!(msg instanceof ByteBuf)) {
throw new IllegalArgumentException(String.valueOf(msg));
}
ByteBuf buf = (ByteBuf) msg;
// the correct buf size is guaranteed by prepended LengthFieldBaseFrameDecoder
byte[] decoded;
if (buf.hasArray()) {
decoded = saslClient.unwrap(buf.array(), buf.arrayOffset() + buf.readerIndex(), buf.readableBytes());
} else {
byte[] bytes = new byte[buf.readableBytes()];
buf.getBytes(buf.readerIndex(), bytes);
decoded = saslClient.unwrap(bytes, 0, bytes.length);
}
buf.release();
ctx.fireChannelRead(Unpooled.wrappedBuffer(decoded));
}
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
if (!(msg instanceof ByteBuf)) {
throw new IllegalArgumentException(String.valueOf(msg));
}
ByteBuf buf = (ByteBuf) msg;
byte[] encoded;
if (buf.hasArray()) {
encoded = saslClient.wrap(buf.array(), buf.arrayOffset() + buf.readerIndex(), buf.readableBytes());
} else {
byte[] bytes = new byte[buf.readableBytes()];
buf.getBytes(buf.readerIndex(), bytes);
encoded = saslClient.wrap(bytes, 0, bytes.length);
}
buf.release();
ctx.write(Unpooled.wrappedBuffer(Unpooled.copyInt(encoded.length), Unpooled.wrappedBuffer(encoded)), promise);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
ctx.fireExceptionCaught(cause);
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
// noop
}
@Override
public void handlerRemoved(ChannelHandlerContext ctx) throws Exception {
// noop
}
}
| 2,381
| 33.028571
| 116
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/NotificationManager.java
|
package org.infinispan.hotrod.impl.counter;
import static org.infinispan.hotrod.impl.Util.await;
import java.net.SocketAddress;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.function.Consumer;
import org.infinispan.hotrod.impl.counter.operation.AddListenerOperation;
import org.infinispan.hotrod.impl.counter.operation.RemoveListenerOperation;
import org.infinispan.hotrod.event.impl.ClientListenerNotifier;
import org.infinispan.hotrod.event.impl.CounterEventDispatcher;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.commons.util.concurrent.NonReentrantLock;
import org.infinispan.counter.api.CounterListener;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.counter.api.Handle;
/**
* A Hot Rod client notification manager for a single {@link CounterManager}.
* <p>
* This handles all the users listeners.
*
* @since 14.0
*/
public class NotificationManager {
private static final Log log = LogFactory.getLog(NotificationManager.class, Log.class);
private static final CompletableFuture<Short> NO_ERROR_FUTURE = CompletableFuture.completedFuture((short) HotRodConstants.NO_ERROR_STATUS);
private final byte[] listenerId;
private final ClientListenerNotifier notifier;
private final CounterOperationFactory factory;
private final ConcurrentMap<String, List<Consumer<HotRodCounterEvent>>> clientListeners = new ConcurrentHashMap<>();
private final Lock lock = new NonReentrantLock();
private volatile CounterEventDispatcher dispatcher;
NotificationManager(ClientListenerNotifier notifier, CounterOperationFactory factory) {
this.notifier = notifier;
this.factory = factory;
this.listenerId = new byte[16];
ThreadLocalRandom.current().nextBytes(listenerId);
}
public <T extends CounterListener> Handle<T> addListener(String counterName, T listener) {
if (log.isTraceEnabled()) {
log.tracef("Add listener for counter '%s'", counterName);
}
CounterEventDispatcher dispatcher = this.dispatcher;
if (dispatcher != null) {
return registerListener(counterName, listener, dispatcher.address());
}
log.debugf("ALock %s", lock);
lock.lock();
try {
dispatcher = this.dispatcher;
return registerListener(counterName, listener, dispatcher == null ? null : dispatcher.address());
} finally {
lock.unlock();
log.debugf("AUnLock %s", lock);
}
}
private <T extends CounterListener> Handle<T> registerListener(String counterName, T listener, SocketAddress address) {
HandleImpl<T> handle = new HandleImpl<>(counterName, listener);
clientListeners.computeIfAbsent(counterName, name -> {
AddListenerOperation op = factory.newAddListenerOperation(counterName, listenerId, address);
if (await(op.execute())) {
if (address == null) {
this.dispatcher = new CounterEventDispatcher(listenerId, clientListeners, op.getChannel().remoteAddress(), this::failover, op::cleanup);
notifier.addDispatcher(dispatcher);
notifier.startClientListener(listenerId);
}
}
return new CopyOnWriteArrayList<>();
}).add(handle);
return handle;
}
private void removeListener(String counterName, HandleImpl<?> handle) {
if (log.isTraceEnabled()) {
log.tracef("Remove listener for counter '%s'", counterName);
}
clientListeners.computeIfPresent(counterName, (name, list) -> {
list.remove(handle);
if (list.isEmpty()) {
if (dispatcher != null) {
RemoveListenerOperation op = factory.newRemoveListenerOperation(counterName, listenerId, dispatcher.address());
if (!await(op.execute())) {
log.debugf("Failed to remove counter listener %s on server side", counterName);
}
}
return null;
}
return list;
});
}
private CompletableFuture<Void> failover() {
dispatcher = null;
Iterator<String> iterator = clientListeners.keySet().iterator();
if (!iterator.hasNext()) {
return null;
}
CompletableFuture<Void> cf = new CompletableFuture<>();
String firstCounterName = iterator.next();
AddListenerOperation op = factory.newAddListenerOperation(firstCounterName, listenerId, null);
log.debugf("Lock %s", lock);
lock.lock();
if (dispatcher == null) {
op.execute().whenComplete((useChannel, throwable) -> {
if (throwable != null) {
lock.unlock();
log.debugf(throwable, "Failed to failover counter listener %s", firstCounterName);
cf.completeExceptionally(throwable);
} else {
AtomicInteger counter = new AtomicInteger(1);
SocketAddress address;
try {
if (useChannel) {
log.debugf("Creating new counter event dispatcher on %s", op.getChannel());
dispatcher = new CounterEventDispatcher(listenerId, clientListeners, op.getChannel().remoteAddress(), this::failover, op::cleanup);
notifier.addDispatcher(dispatcher);
notifier.startClientListener(listenerId);
}
address = dispatcher.address();
} catch (Throwable t) {
cf.completeExceptionally(t);
return;
} finally {
lock.unlock();
log.debugf("UnLock %s", lock);
}
while (iterator.hasNext()) {
String counterName = iterator.next();
factory.newAddListenerOperation(counterName, listenerId, address).execute()
.whenComplete((useChannel2, throwable2) -> {
if (throwable2 != null) {
log.debugf(throwable2, "Failed to failover counter listener %s", counterName);
cf.completeExceptionally(throwable2);
} else {
if (useChannel2) {
cf.completeExceptionally(new IllegalStateException("Unexpected to use another channel for the same counter"));
}
if (counter.decrementAndGet() == 0) {
cf.complete(null);
}
}
});
}
if (counter.decrementAndGet() == 0) {
cf.complete(null);
}
}
});
return cf;
} else {
lock.unlock();
log.debugf("UnLock %s", lock);
return null;
}
}
public void stop() {
log.debugf("Stopping %s (%s)", this, lock);
lock.lock();
try {
CompletableFuture[] futures = clientListeners.keySet().stream().map(counterName ->
factory.newRemoveListenerOperation(counterName, listenerId, dispatcher.address()).execute())
.toArray(CompletableFuture[]::new);
await(CompletableFuture.allOf(futures));
clientListeners.clear();
} finally {
lock.unlock();
}
}
private class HandleImpl<T extends CounterListener> implements Handle<T>, Consumer<HotRodCounterEvent> {
private final T listener;
private final String counterName;
private HandleImpl(String counterName, T listener) {
this.counterName = counterName;
this.listener = listener;
}
@Override
public T getCounterListener() {
return listener;
}
@Override
public void remove() {
removeListener(counterName, this);
}
@Override
public void accept(HotRodCounterEvent event) {
try {
listener.onUpdate(event);
} catch (Throwable t) {
log.debug("Exception in user listener", t);
}
}
}
}
| 8,594
| 38.426606
| 152
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/RemoteCounterManager.java
|
package org.infinispan.hotrod.impl.counter;
import static org.infinispan.hotrod.impl.Util.await;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Supplier;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.event.impl.ClientListenerNotifier;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.commons.logging.Log;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.counter.api.CounterType;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.WeakCounter;
import org.infinispan.counter.exception.CounterException;
/**
* A {@link CounterManager} implementation for Hot Rod clients.
*
* @since 14.0
*/
public class RemoteCounterManager implements CounterManager {
private static final Log commonsLog = LogFactory.getLog(RemoteCounterManager.class, Log.class);
private final Map<String, Object> counters;
private CounterOperationFactory factory;
private NotificationManager notificationManager;
public RemoteCounterManager() {
counters = new ConcurrentHashMap<>();
}
public void start(ChannelFactory channelFactory, Codec codec, HotRodConfiguration configuration, ClientListenerNotifier listenerNotifier) {
this.factory = new CounterOperationFactory(configuration, channelFactory, codec);
this.notificationManager = new NotificationManager(listenerNotifier, factory);
}
@Override
public StrongCounter getStrongCounter(String name) {
return getOrCreateCounter(name, StrongCounter.class, this::createStrongCounter,
() -> commonsLog.invalidCounterType("Strong", "Weak"));
}
@Override
public WeakCounter getWeakCounter(String name) {
return getOrCreateCounter(name, WeakCounter.class, this::createWeakCounter,
() -> commonsLog.invalidCounterType("Weak", "Strong"));
}
@Override
public boolean defineCounter(String name, CounterConfiguration configuration) {
return await(factory.newDefineCounterOperation(name, configuration).execute());
}
@Override
public void undefineCounter(String name) {
}
@Override
public boolean isDefined(String name) {
return await(factory.newIsDefinedOperation(name).execute());
}
@Override
public CounterConfiguration getConfiguration(String counterName) {
return await(factory.newGetConfigurationOperation(counterName).execute());
}
@Override
public void remove(String counterName) {
await(factory.newRemoveOperation(counterName, true).execute());
}
@Override
public Collection<String> getCounterNames() {
return await(factory.newGetCounterNamesOperation().execute());
}
public void stop() {
if (notificationManager != null) {
notificationManager.stop();
}
}
private <T> T getOrCreateCounter(String name, Class<T> tClass, Function<String, T> createFunction,
Supplier<CounterException> invalidCounter) {
Object counter = counters.computeIfAbsent(name, createFunction);
if (!tClass.isInstance(counter)) {
throw invalidCounter.get();
}
return tClass.cast(counter);
}
private void assertWeakCounter(CounterConfiguration configuration) {
if (configuration.type() != CounterType.WEAK) {
throw commonsLog.invalidCounterType("Weak", "Strong");
}
}
private WeakCounter createWeakCounter(String counterName) {
CounterConfiguration configuration = getConfiguration(counterName);
if (configuration == null) {
throw commonsLog.undefinedCounter(counterName);
}
assertWeakCounter(configuration);
return new WeakCounterImpl(counterName, configuration, factory, notificationManager);
}
private StrongCounter createStrongCounter(String counterName) {
CounterConfiguration configuration = getConfiguration(counterName);
if (configuration == null) {
throw commonsLog.undefinedCounter(counterName);
}
assertStrongCounter(configuration);
return new StrongCounterImpl(counterName, configuration, factory, notificationManager);
}
private void assertStrongCounter(CounterConfiguration configuration) {
if (configuration.type() == CounterType.WEAK) {
throw commonsLog.invalidCounterType("Strong", "Weak");
}
}
}
| 4,633
| 34.374046
| 142
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/BaseCounter.java
|
package org.infinispan.hotrod.impl.counter;
import java.util.concurrent.CompletableFuture;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.CounterListener;
import org.infinispan.counter.api.Handle;
public class BaseCounter {
protected final String name;
protected final CounterConfiguration configuration;
protected final CounterOperationFactory factory;
private final NotificationManager notificationManager;
BaseCounter(CounterConfiguration configuration, String name, CounterOperationFactory factory,
NotificationManager notificationManager) {
this.configuration = configuration;
this.name = name;
this.factory = factory;
this.notificationManager = notificationManager;
}
public String getName() {
return name;
}
public CompletableFuture<Void> reset() {
return factory.newResetOperation(name, useConsistentHash()).execute().toCompletableFuture();
}
public CompletableFuture<Void> remove() {
return factory.newRemoveOperation(name, useConsistentHash()).execute().toCompletableFuture();
}
public CounterConfiguration getConfiguration() {
return configuration;
}
public <T extends CounterListener> Handle<T> addListener(T listener) {
return notificationManager.addListener(name, listener);
}
boolean useConsistentHash() {
return false;
}
}
| 1,415
| 29.12766
| 99
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/HotRodCounterEvent.java
|
package org.infinispan.hotrod.impl.counter;
import org.infinispan.counter.api.CounterEvent;
import org.infinispan.counter.api.CounterState;
/**
* A {@link CounterEvent} implementation for the Hot Rod client.
*
* @since 14.0
*/
public class HotRodCounterEvent implements CounterEvent {
private final byte[] listenerId;
private final String counterName;
private final long oldValue;
private final CounterState oldState;
private final long newValue;
private final CounterState newState;
public HotRodCounterEvent(byte[] listenerId, String counterName, long oldValue, CounterState oldState, long newValue,
CounterState newState) {
this.listenerId = listenerId;
this.counterName = counterName;
this.oldValue = oldValue;
this.oldState = oldState;
this.newValue = newValue;
this.newState = newState;
}
public byte[] getListenerId() {
return listenerId;
}
public String getCounterName() {
return counterName;
}
@Override
public long getOldValue() {
return oldValue;
}
@Override
public CounterState getOldState() {
return oldState;
}
@Override
public long getNewValue() {
return newValue;
}
@Override
public CounterState getNewState() {
return newState;
}
@Override
public String toString() {
return "HotRodCounterEvent{" +
"counterName='" + counterName + '\'' +
", oldValue=" + oldValue +
", oldState=" + oldState +
", newValue=" + newValue +
", newState=" + newState +
'}';
}
}
| 1,652
| 22.956522
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/StrongCounterImpl.java
|
package org.infinispan.hotrod.impl.counter;
import static org.infinispan.hotrod.impl.Util.await;
import java.util.concurrent.CompletableFuture;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.SyncStrongCounter;
/**
* A {@link StrongCounter} implementation for Hot Rod clients.
*
* @since 14.0
*/
class StrongCounterImpl extends BaseCounter implements StrongCounter {
private final SyncStrongCounter syncCounter;
StrongCounterImpl(String name, CounterConfiguration configuration, CounterOperationFactory operationFactory,
NotificationManager notificationManager) {
super(configuration, name, operationFactory, notificationManager);
this.syncCounter = new Sync();
}
public CompletableFuture<Long> getValue() {
return factory.newGetValueOperation(name, useConsistentHash()).execute().toCompletableFuture();
}
public CompletableFuture<Long> addAndGet(long delta) {
return factory.newAddOperation(name, delta, useConsistentHash()).execute().toCompletableFuture();
}
@Override
public CompletableFuture<Long> compareAndSwap(long expect, long update) {
return factory.newCompareAndSwapOperation(name, expect, update, super.getConfiguration()).execute().toCompletableFuture();
}
@Override
public SyncStrongCounter sync() {
return syncCounter;
}
@Override
public CompletableFuture<Long> getAndSet(long value) {
return factory.newSetOperation(name, value, useConsistentHash()).execute().toCompletableFuture();
}
@Override
boolean useConsistentHash() {
return true;
}
private class Sync implements SyncStrongCounter {
@Override
public long addAndGet(long delta) {
return await(StrongCounterImpl.this.addAndGet(delta));
}
@Override
public void reset() {
await(StrongCounterImpl.this.reset());
}
@Override
public long getValue() {
return await(StrongCounterImpl.this.getValue());
}
@Override
public long compareAndSwap(long expect, long update) {
return await(StrongCounterImpl.this.compareAndSwap(expect, update));
}
@Override
public long getAndSet(long value) {
return await(StrongCounterImpl.this.getAndSet(value));
}
@Override
public String getName() {
return name;
}
@Override
public CounterConfiguration getConfiguration() {
return configuration;
}
@Override
public void remove() {
await(StrongCounterImpl.this.remove());
}
}
}
| 2,683
| 26.958333
| 128
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/CounterOperationFactory.java
|
package org.infinispan.hotrod.impl.counter;
import java.net.SocketAddress;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.impl.HotRodTransport;
import org.infinispan.hotrod.impl.counter.operation.AddListenerOperation;
import org.infinispan.hotrod.impl.counter.operation.AddOperation;
import org.infinispan.hotrod.impl.counter.operation.CompareAndSwapOperation;
import org.infinispan.hotrod.impl.counter.operation.DefineCounterOperation;
import org.infinispan.hotrod.impl.counter.operation.GetConfigurationOperation;
import org.infinispan.hotrod.impl.counter.operation.GetCounterNamesOperation;
import org.infinispan.hotrod.impl.counter.operation.GetValueOperation;
import org.infinispan.hotrod.impl.counter.operation.IsDefinedOperation;
import org.infinispan.hotrod.impl.counter.operation.RemoveListenerOperation;
import org.infinispan.hotrod.impl.counter.operation.RemoveOperation;
import org.infinispan.hotrod.impl.counter.operation.ResetOperation;
import org.infinispan.hotrod.impl.counter.operation.SetOperation;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
/**
* A operation factory that builds counter operations.
*
* @since 14.0
*/
public class CounterOperationFactory {
public static final byte[] COUNTER_CACHE_NAME = HotRodTransport.cacheNameBytes("org.infinispan.COUNTER");
private final OperationContext operationContext;
CounterOperationFactory(HotRodConfiguration configuration, ChannelFactory channelFactory, Codec codec) {
this.operationContext = new OperationContext(channelFactory, codec, null, configuration, null, null, "org.infinispan.COUNTER");
}
IsDefinedOperation newIsDefinedOperation(String counterName) {
return new IsDefinedOperation(operationContext, counterName);
}
GetConfigurationOperation newGetConfigurationOperation(String counterName) {
return new GetConfigurationOperation(operationContext, counterName);
}
DefineCounterOperation newDefineCounterOperation(String counterName, CounterConfiguration cfg) {
return new DefineCounterOperation(operationContext, counterName, cfg);
}
RemoveOperation newRemoveOperation(String counterName, boolean useConsistentHash) {
return new RemoveOperation(operationContext, counterName, useConsistentHash);
}
AddOperation newAddOperation(String counterName, long delta, boolean useConsistentHash) {
return new AddOperation(operationContext, counterName, delta, useConsistentHash);
}
GetValueOperation newGetValueOperation(String counterName, boolean useConsistentHash) {
return new GetValueOperation(operationContext, counterName, useConsistentHash);
}
ResetOperation newResetOperation(String counterName, boolean useConsistentHash) {
return new ResetOperation(operationContext, counterName, useConsistentHash);
}
CompareAndSwapOperation newCompareAndSwapOperation(String counterName, long expect, long update,
CounterConfiguration counterConfiguration) {
return new CompareAndSwapOperation(operationContext, counterName, expect, update, counterConfiguration);
}
SetOperation newSetOperation(String counterName, long value, boolean useConsistentHash) {
return new SetOperation(operationContext, counterName, value, useConsistentHash);
}
GetCounterNamesOperation newGetCounterNamesOperation() {
return new GetCounterNamesOperation(operationContext);
}
AddListenerOperation newAddListenerOperation(String counterName, byte[] listenerId, SocketAddress server) {
return new AddListenerOperation(operationContext, counterName, listenerId, server);
}
RemoveListenerOperation newRemoveListenerOperation(String counterName, byte[] listenerId, SocketAddress server) {
return new RemoveListenerOperation(operationContext, counterName, listenerId, server);
}
}
| 4,091
| 46.034483
| 133
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/CounterHelper.java
|
package org.infinispan.hotrod.impl.counter;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.WeakCounter;
/**
* A helper class for {@link StrongCounter} and {@link WeakCounter}.
*
* @since 14.0
*/
class CounterHelper {
private final CounterOperationFactory factory;
CounterHelper(CounterOperationFactory factory) {
this.factory = factory;
}
}
| 402
| 18.190476
| 68
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/WeakCounterImpl.java
|
package org.infinispan.hotrod.impl.counter;
import static org.infinispan.hotrod.impl.Util.await;
import java.util.concurrent.CompletableFuture;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.SyncWeakCounter;
import org.infinispan.counter.api.WeakCounter;
/**
* A {@link WeakCounter} implementation for Hot Rod client.
*
* @since 14.0
*/
class WeakCounterImpl extends BaseCounter implements WeakCounter {
private final SyncWeakCounter syncCounter;
WeakCounterImpl(String name, CounterConfiguration configuration, CounterOperationFactory operationFactory,
NotificationManager notificationManager) {
super(configuration, name, operationFactory, notificationManager);
syncCounter = new Sync();
}
@Override
public long getValue() {
return await(factory.newGetValueOperation(name, useConsistentHash()).execute());
}
@Override
public CompletableFuture<Void> add(long delta) {
return factory.newAddOperation(name, delta, useConsistentHash()).execute().thenRun(() -> {}).toCompletableFuture();
}
@Override
public SyncWeakCounter sync() {
return syncCounter;
}
private class Sync implements SyncWeakCounter {
@Override
public String getName() {
return name;
}
@Override
public long getValue() {
return WeakCounterImpl.this.getValue();
}
@Override
public void add(long delta) {
await(WeakCounterImpl.this.add(delta));
}
@Override
public void reset() {
await(WeakCounterImpl.this.reset());
}
@Override
public CounterConfiguration getConfiguration() {
return configuration;
}
@Override
public void remove() {
await(WeakCounterImpl.this.remove());
}
}
}
| 1,855
| 24.424658
| 121
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/AddOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.counter.exception.CounterOutOfBoundsException;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Add operation.
* <p>
* Adds the {@code delta} to the counter's value and returns the result.
* <p>
* It can throw a {@link CounterOutOfBoundsException} if the counter is bounded and the it has been reached.
*
* @since 14.0
*/
public class AddOperation extends BaseCounterOperation<Long> {
private static final Log commonsLog = LogFactory.getLog(AddOperation.class, Log.class);
private final long delta;
public AddOperation(OperationContext operationContext,
String counterName, long delta, boolean useConsistentHash) {
super(operationContext, COUNTER_ADD_AND_GET_REQUEST, COUNTER_ADD_AND_GET_RESPONSE, counterName, useConsistentHash);
this.delta = delta;
}
@Override
protected void executeOperation(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, 8);
buf.writeLong(delta);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
assertBoundaries(status);
assert status == NO_ERROR_STATUS;
complete(buf.readLong());
}
private void assertBoundaries(short status) {
if (status == NOT_EXECUTED_WITH_PREVIOUS) {
if (delta > 0) {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.UPPER_BOUND);
} else {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.LOWER_BOUND);
}
}
}
}
| 1,920
| 32.12069
| 121
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/AddListenerOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import java.net.SocketAddress;
import java.util.Set;
import org.infinispan.counter.api.CounterListener;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.WeakCounter;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import org.infinispan.hotrod.impl.transport.netty.HotRodClientDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* An add listener operation for {@link StrongCounter#addListener(CounterListener)} and {@link
* WeakCounter#addListener(CounterListener)}
*
* @since 14.0
*/
public class AddListenerOperation extends BaseCounterOperation<Boolean> {
private final byte[] listenerId;
private final SocketAddress server;
private Channel channel;
public AddListenerOperation(OperationContext operationContext, String counterName, byte[] listenerId, SocketAddress server) {
super(operationContext, COUNTER_ADD_LISTENER_REQUEST, COUNTER_ADD_LISTENER_RESPONSE, counterName, false);
this.listenerId = listenerId;
this.server = server;
}
public Channel getChannel() {
return channel;
}
@Override
protected void executeOperation(Channel channel) {
this.channel = channel;
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, ByteBufUtil.estimateArraySize(listenerId));
ByteBufUtil.writeArray(buf, listenerId);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
if (status != NO_ERROR_STATUS) {
complete(false);
} else {
decoder.addListener(listenerId);
complete(true);
}
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
if (server == null) {
super.fetchChannelAndInvoke(retryCount, failedServers);
} else {
operationContext.getChannelFactory().fetchChannelAndInvoke(server, this);
}
}
@Override
public void releaseChannel(Channel channel) {
if (operationContext.getCodec().allowOperationsAndEvents()) {
//we aren't using this channel. we can release it
super.releaseChannel(channel);
}
}
public void cleanup() {
// To prevent releasing concurrently from the channel and closing it
channel.eventLoop().execute(() -> {
if (log.isTraceEnabled()) {
log.tracef("Cleanup for %s on %s", this, channel);
}
if (!operationContext.getCodec().allowOperationsAndEvents()) {
if (channel.isOpen()) {
super.releaseChannel(channel);
}
}
HotRodClientDecoder decoder = channel.pipeline().get(HotRodClientDecoder.class);
if (decoder != null) {
decoder.removeListener(listenerId);
}
});
}
}
| 3,064
| 31.956989
| 128
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/GetCounterNamesOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import java.util.ArrayList;
import java.util.Collection;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter operation for {@link CounterManager#getCounterNames()}.
*
* @since 14.0
*/
public class GetCounterNamesOperation extends BaseCounterOperation<Collection<String>> {
private int size;
private Collection<String> names;
public GetCounterNamesOperation(OperationContext operationContext) {
super(operationContext, COUNTER_GET_NAMES_REQUEST, COUNTER_GET_NAMES_RESPONSE, "", false);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
sendHeader(channel);
setCacheName();
}
@Override
protected void reset() {
super.reset();
names = null;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
assert status == NO_ERROR_STATUS;
if (names == null) {
size = ByteBufUtil.readVInt(buf);
names = new ArrayList<>(size);
decoder.checkpoint();
}
while (names.size() < size) {
names.add(ByteBufUtil.readString(buf));
decoder.checkpoint();
}
complete(names);
}
}
| 1,516
| 26.581818
| 96
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/ResetOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.WeakCounter;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter operation for {@link StrongCounter#reset()} and {@link WeakCounter#reset()}.
*
* @since 14.0
*/
public class ResetOperation extends BaseCounterOperation<Void> {
public ResetOperation(OperationContext operationContext, String counterName, boolean useConsistentHash) {
super(operationContext, COUNTER_RESET_REQUEST, COUNTER_RESET_RESPONSE, counterName, useConsistentHash);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndCounterNameAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
complete(null);
}
}
| 1,025
| 30.090909
| 109
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/RemoveOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.api.WeakCounter;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter operation for {@link CounterManager#remove(String)}, {@link StrongCounter#remove()} and {@link
* WeakCounter#remove()}.
*
* @since 14.0
*/
public class RemoveOperation extends BaseCounterOperation<Void> {
public RemoveOperation(OperationContext operationContext, String counterName, boolean useConsistentHash) {
super(operationContext, COUNTER_REMOVE_REQUEST, COUNTER_REMOVE_RESPONSE, counterName, useConsistentHash);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndCounterNameAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
complete(null);
}
}
| 1,122
| 32.029412
| 111
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/IsDefinedOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter operation for {@link CounterManager#isDefined(String)}.
*
* @since 14.0
*/
public class IsDefinedOperation extends BaseCounterOperation<Boolean> {
public IsDefinedOperation(OperationContext operationContext, String counterName) {
super(operationContext, COUNTER_IS_DEFINED_REQUEST, COUNTER_IS_DEFINED_RESPONSE, counterName, false);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndCounterNameAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
complete(status == NO_ERROR_STATUS);
}
}
| 934
| 29.16129
| 107
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/CompareAndSwapOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.StrongCounter;
import org.infinispan.counter.exception.CounterOutOfBoundsException;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A compare-and-set operation for {@link StrongCounter#compareAndSwap(long, long)} and {@link
* StrongCounter#compareAndSet(long, long)}.
*
* @since 14.0
*/
public class CompareAndSwapOperation extends BaseCounterOperation<Long> {
private static final Log commonsLog = LogFactory.getLog(CompareAndSwapOperation.class, Log.class);
private final long expect;
private final long update;
private final CounterConfiguration counterConfiguration;
public CompareAndSwapOperation(OperationContext operationContext, String counterName, long expect, long update, CounterConfiguration counterConfiguration) {
super(operationContext, COUNTER_CAS_REQUEST, COUNTER_CAS_RESPONSE, counterName, true);
this.expect = expect;
this.update = update;
this.counterConfiguration = counterConfiguration;
}
@Override
protected void executeOperation(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, 16);
buf.writeLong(expect);
buf.writeLong(update);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
assertBoundaries(status);
assert status == NO_ERROR_STATUS;
complete(buf.readLong());
}
private void assertBoundaries(short status) {
if (status == NOT_EXECUTED_WITH_PREVIOUS) {
if (update >= counterConfiguration.upperBound()) {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.UPPER_BOUND);
} else {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.LOWER_BOUND);
}
}
}
}
| 2,204
| 35.147541
| 159
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/SetOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.commons.logging.Log;
import org.infinispan.commons.logging.LogFactory;
import org.infinispan.counter.exception.CounterOutOfBoundsException;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Set operation.
* <p>
* Sets the {@code value} to the counter's value and returns the old value.
* <p>
* It can throw a {@link CounterOutOfBoundsException} if the counter is bounded and has been reached.
*
* @author Dipanshu Gupta
* @since 15.0
*/
public class SetOperation extends BaseCounterOperation<Long> {
private static final Log commonsLog = LogFactory.getLog(SetOperation.class, Log.class);
private final long value;
public SetOperation(OperationContext operationContext, String counterName, long value, boolean useConsistentHash) {
super(operationContext, COUNTER_GET_AND_SET_REQUEST, COUNTER_GET_AND_SET_RESPONSE, counterName, useConsistentHash);
this.value = value;
}
@Override
protected void executeOperation(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, 8);
buf.writeLong(value);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
assertBoundaries(status);
assert status == NO_ERROR_STATUS;
complete(buf.readLong());
}
private void assertBoundaries(short status) {
if (status == NOT_EXECUTED_WITH_PREVIOUS) {
if (value > 0) {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.UPPER_BOUND);
} else {
throw commonsLog.counterOurOfBounds(CounterOutOfBoundsException.LOWER_BOUND);
}
}
}
}
| 1,918
| 32.666667
| 121
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/BaseCounterOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import java.net.SocketAddress;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Set;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.commons.logging.Log;
import org.infinispan.counter.exception.CounterException;
import org.infinispan.hotrod.impl.counter.CounterOperationFactory;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.operations.RetryOnFailureOperation;
import org.infinispan.hotrod.impl.protocol.HotRodConstants;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A base operation class for the counter's operation.
*
* @since 14.0
*/
abstract class BaseCounterOperation<T> extends RetryOnFailureOperation<T> {
private static final Log commonsLog = LogFactory.getLog(BaseCounterOperation.class, Log.class);
private static final Charset CHARSET = StandardCharsets.UTF_8;
private final String counterName;
private final boolean useConsistentHash;
BaseCounterOperation(OperationContext operationContext, short requestCode, short responseCode, String counterName, boolean useConsistentHash) {
super(operationContext, requestCode, responseCode, CacheOptions.DEFAULT, null);
this.counterName = counterName;
this.useConsistentHash = useConsistentHash;
}
/**
* Writes the operation header followed by the counter's name.
*/
void sendHeaderAndCounterNameAndRead(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, 0);
channel.writeAndFlush(buf);
}
ByteBuf getHeaderAndCounterNameBufferAndRead(Channel channel, int extraBytes) {
scheduleRead(channel);
// counterName should never be null/empty
byte[] counterBytes = counterName.getBytes(HotRodConstants.HOTROD_STRING_CHARSET);
ByteBuf buf = channel.alloc().buffer(operationContext.getCodec().estimateHeaderSize(header) + ByteBufUtil.estimateArraySize(counterBytes) + extraBytes);
operationContext.getCodec().writeHeader(buf, header);
ByteBufUtil.writeString(buf, counterName);
setCacheName();
return buf;
}
/**
* If the status is {@link #KEY_DOES_NOT_EXIST_STATUS}, the counter is undefined and a {@link CounterException} is
* thrown.
*/
void checkStatus(short status) {
if (status == KEY_DOES_NOT_EXIST_STATUS) {
throw commonsLog.undefinedCounter(counterName);
}
}
void setCacheName() {
header.cacheName(CounterOperationFactory.COUNTER_CACHE_NAME);
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
if (retryCount == 0 && useConsistentHash) {
operationContext.getChannelFactory().fetchChannelAndInvoke(new ByteString(counterName), failedServers, CounterOperationFactory.COUNTER_CACHE_NAME, this);
} else {
operationContext.getChannelFactory().fetchChannelAndInvoke(failedServers, CounterOperationFactory.COUNTER_CACHE_NAME, this);
}
}
@Override
protected Throwable handleException(Throwable cause, Channel channel, SocketAddress address) {
cause = super.handleException(cause, channel, address);
if (cause instanceof CounterException) {
completeExceptionally(cause);
return null;
}
return cause;
}
@Override
protected void addParams(StringBuilder sb) {
sb.append("counter=").append(counterName);
}
private class ByteString {
private final int hash;
private final byte[] b;
private ByteString(String s) {
//copied from ByteString in core
this.b = s.getBytes(CHARSET);
this.hash = Arrays.hashCode(b);
}
@Override
public int hashCode() {
return hash;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ByteString that = (ByteString) o;
return Arrays.equals(b, that.b);
}
@Override
public String toString() {
return new String(b, CHARSET);
}
}
}
| 4,357
| 32.782946
| 162
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/GetValueOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter operation that returns the counter's value.
*
* @since 14.0
*/
public class GetValueOperation extends BaseCounterOperation<Long> {
public GetValueOperation(OperationContext operationContext, String counterName, boolean useConsistentHash) {
super(operationContext, COUNTER_GET_REQUEST, COUNTER_GET_RESPONSE, counterName, useConsistentHash);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndCounterNameAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
assert status == NO_ERROR_STATUS;
complete(buf.readLong());
}
}
| 948
| 28.65625
| 111
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/GetConfigurationOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import static org.infinispan.counter.util.EncodeUtil.decodeConfiguration;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter configuration for {@link CounterManager#getConfiguration(String)}.
*
* @since 14.0
*/
public class GetConfigurationOperation extends BaseCounterOperation<CounterConfiguration> {
public GetConfigurationOperation(OperationContext operationContext, String counterName) {
super(operationContext, COUNTER_GET_CONFIGURATION_REQUEST, COUNTER_GET_CONFIGURATION_RESPONSE, counterName, false);
}
@Override
protected void executeOperation(Channel channel) {
sendHeaderAndCounterNameAndRead(channel);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (status != NO_ERROR_STATUS) {
complete(null);
return;
}
complete(decodeConfiguration(buf::readByte, buf::readLong, () -> ByteBufUtil.readVInt(buf)));
}
}
| 1,327
| 32.2
| 121
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/DefineCounterOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import static org.infinispan.counter.util.EncodeUtil.encodeConfiguration;
import org.infinispan.counter.api.CounterConfiguration;
import org.infinispan.counter.api.CounterManager;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A counter define operation for {@link CounterManager#defineCounter(String, CounterConfiguration)}.
*
* @since 14.0
*/
public class DefineCounterOperation extends BaseCounterOperation<Boolean> {
private final CounterConfiguration configuration;
public DefineCounterOperation(OperationContext operationContext, String counterName, CounterConfiguration configuration) {
super(operationContext, COUNTER_CREATE_REQUEST, COUNTER_CREATE_RESPONSE, counterName, false);
this.configuration = configuration;
}
@Override
protected void executeOperation(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, 28);
encodeConfiguration(configuration, buf::writeByte, buf::writeLong, i -> ByteBufUtil.writeVInt(buf, i));
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
complete(status == NO_ERROR_STATUS);
}
}
| 1,487
| 35.292683
| 125
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/counter/operation/RemoveListenerOperation.java
|
package org.infinispan.hotrod.impl.counter.operation;
import java.net.SocketAddress;
import java.util.Set;
import org.infinispan.counter.api.Handle;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A remove listener operation for {@link Handle#remove()}.
*
* @since 14.0
*/
public class RemoveListenerOperation extends BaseCounterOperation<Boolean> {
private final byte[] listenerId;
private final SocketAddress server;
public RemoveListenerOperation(OperationContext operationContext, String counterName, byte[] listenerId, SocketAddress server) {
super(operationContext, COUNTER_REMOVE_LISTENER_REQUEST, COUNTER_REMOVE_LISTENER_RESPONSE, counterName, false);
this.listenerId = listenerId;
this.server = server;
}
@Override
protected void executeOperation(Channel channel) {
ByteBuf buf = getHeaderAndCounterNameBufferAndRead(channel, ByteBufUtil.estimateArraySize(listenerId));
ByteBufUtil.writeArray(buf, listenerId);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
checkStatus(status);
if (status == NO_ERROR_STATUS) {
decoder.removeListener(listenerId);
}
complete(status == NO_ERROR_STATUS);
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
if (server == null) {
super.fetchChannelAndInvoke(retryCount, failedServers);
} else {
operationContext.getChannelFactory().fetchChannelAndInvoke(server, this);
}
}
}
| 1,808
| 31.303571
| 131
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/XaModeTransactionTable.java
|
package org.infinispan.hotrod.impl.transaction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.commons.CacheException;
import org.infinispan.hotrod.impl.cache.RemoteCache;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.transaction.recovery.RecoveryIterator;
import org.infinispan.hotrod.impl.transaction.recovery.RecoveryManager;
/**
* A {@link TransactionTable} that registers the {@link RemoteCache} as a {@link XAResource} in the transaction.
* <p>
* Only a single {@link XAResource} is registered even if multiple {@link RemoteCache}s interact with the same
* transaction.
* <p>
* When more than one {@link RemoteCache} is involved in the {@link Transaction}, the prepare, commit and rollback
* requests are sent sequential and they are ordered by the {@link RemoteCache}'s name.
* <p>
* If a {@link RemoteCache} is read-only, the commit/rollback isn't invoked.
*
* @since 14.0
*/
public class XaModeTransactionTable extends AbstractTransactionTable {
private static final Log log = LogFactory.getLog(XaModeTransactionTable.class, Log.class);
private final Map<Transaction, XaAdapter> registeredTransactions = new ConcurrentHashMap<>();
private final RecoveryManager recoveryManager = new RecoveryManager();
private final Function<Transaction, XaAdapter> constructor = this::createTransactionData;
public XaModeTransactionTable(long timeout) {
super(timeout);
}
@Override
public <K, V> TransactionContext<K, V> enlist(TransactionalRemoteCacheImpl<K, V> txRemoteCache, Transaction tx) {
XaAdapter xaAdapter = registeredTransactions.computeIfAbsent(tx, constructor);
return xaAdapter.registerCache(txRemoteCache);
}
public XAResource getXaResource() {
return new XaAdapter(null, getTimeout());
}
private XaAdapter createTransactionData(Transaction transaction) {
XaAdapter xaAdapter = new XaAdapter(transaction, getTimeout());
try {
transaction.enlistResource(xaAdapter);
} catch (RollbackException | SystemException e) {
throw new CacheException(e);
}
return xaAdapter;
}
private class XaAdapter implements XAResource {
private final Transaction transaction;
private final Map<String, TransactionContext<?, ?>> registeredCaches;
private volatile Xid currentXid;
private volatile RecoveryIterator iterator;
private long timeoutMs;
private boolean needsRecovery = false;
private XaAdapter(Transaction transaction, long timeout) {
this.transaction = transaction;
this.timeoutMs = timeout;
this.registeredCaches = transaction == null ?
Collections.emptyMap() :
new ConcurrentSkipListMap<>();
}
@Override
public String toString() {
return "XaResource{" +
"transaction=" + transaction +
", caches=" + registeredCaches.keySet() +
'}';
}
@Override
public void start(Xid xid, int flags) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.start(%s, %s)", xid, flags);
}
switch (flags) {
case TMJOIN:
case TMRESUME: // means joining a previously seen transaction. it should exist otherwise throw XAER_NOTA
if (currentXid != null && !currentXid.equals(xid)) {
//we have a running tx but it isn't the same tx
throw new XAException(XAException.XAER_OUTSIDE);
}
assertStartInvoked();
break;
case TMNOFLAGS: //new transaction.
if (currentXid != null) {
//we have a running transaction already!
throw new XAException(XAException.XAER_RMERR);
}
currentXid = xid;
break;
default: //no other flag should be used.
throw new XAException(XAException.XAER_RMERR);
}
}
@Override
public void end(Xid xid, int flags) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.end(%s, %s)", xid, flags);
}
assertStartInvoked();
assertSameXid(xid, XAException.XAER_OUTSIDE);
}
@Override
public int prepare(Xid xid) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.prepare(%s)", xid);
}
assertStartInvoked();
assertSameXid(xid, XAException.XAER_INVAL);
return internalPrepare();
}
@Override
public void commit(Xid xid, boolean onePhaseCommit) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.commit(%s, %s)", xid, onePhaseCommit);
}
if (currentXid == null) {
//no transaction running. we are doing some recovery work
currentXid = xid;
} else {
assertSameXid(xid, XAException.XAER_INVAL);
}
try {
if (onePhaseCommit) {
onePhaseCommit();
} else {
internalCommit();
}
} finally {
cleanup();
}
}
@Override
public void rollback(Xid xid) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.rollback(%s)", xid);
}
boolean ignoreNoTx = true;
if (currentXid == null) {
//no transaction running. we are doing some recovery work
currentXid = xid;
ignoreNoTx = false;
} else {
assertSameXid(xid, XAException.XAER_INVAL);
}
try {
internalRollback(ignoreNoTx);
} finally {
cleanup();
}
}
@Override
public boolean isSameRM(XAResource xaResource) {
if (log.isTraceEnabled()) {
log.tracef("XaResource.isSameRM(%s)", xaResource);
}
return xaResource instanceof XaAdapter && Objects.equals(transaction, ((XaAdapter) xaResource).transaction);
}
@Override
public void forget(Xid xid) {
if (log.isTraceEnabled()) {
log.tracef("XaResource.forget(%s)", xid);
}
recoveryManager.forgetTransaction(xid);
forgetTransaction(xid);
}
@Override
public Xid[] recover(int flags) throws XAException {
if (log.isTraceEnabled()) {
log.tracef("XaResource.recover(%s)", flags);
}
RecoveryIterator it = this.iterator;
if ((flags & XAResource.TMSTARTRSCAN) != 0) {
if (it == null) {
it = recoveryManager.startScan(fetchPreparedTransactions());
iterator = it;
} else {
//we have an iteration in progress.
throw new XAException(XAException.XAER_INVAL);
}
}
if ((flags & XAResource.TMENDRSCAN) != 0) {
if (it == null) {
//we don't have an iteration in progress
throw new XAException(XAException.XAER_INVAL);
} else {
iterator.finish(timeoutMs);
iterator = null;
}
}
if (it == null) {
//we don't have an iteration in progress
throw new XAException(XAException.XAER_INVAL);
}
return it.next();
}
@Override
public boolean setTransactionTimeout(int timeoutSeconds) {
this.timeoutMs = TimeUnit.SECONDS.toMillis(timeoutSeconds);
return true;
}
@Override
public int getTransactionTimeout() {
return (int) TimeUnit.MILLISECONDS.toSeconds(timeoutMs);
}
private void assertStartInvoked() throws XAException {
if (currentXid == null) {
//we don't have a transaction
throw new XAException(XAException.XAER_NOTA);
}
}
private void assertSameXid(Xid otherXid, int xaErrorCode) throws XAException {
if (!currentXid.equals(otherXid)) {
//we have another tx running
throw new XAException(xaErrorCode);
}
}
private void internalRollback(boolean ignoreNoTx) throws XAException {
int xa_code = completeTransaction(currentXid, false);
switch (xa_code) {
case XAResource.XA_OK: //no issues
case XAResource.XA_RDONLY: //no issues
case XAException.XA_HEURRB: //heuristically rolled back
break;
case XAException.XAER_NOTA: //no transaction in server. already rolled-back or never reached the server
if (ignoreNoTx) {
break;
}
default:
throw new XAException(xa_code);
}
}
private void internalCommit() throws XAException {
int xa_code = completeTransaction(currentXid, true);
switch (xa_code) {
case XAResource.XA_OK: //no issues
case XAResource.XA_RDONLY: //no issues
case XAException.XA_HEURCOM: //heuristically committed
break;
default:
throw new XAException(xa_code);
}
}
private int internalPrepare() throws XAException {
boolean readOnly = true;
for (TransactionContext<?, ?> ctx : registeredCaches.values()) {
switch (ctx.prepareContext(currentXid, false, timeoutMs)) {
case XAResource.XA_OK:
readOnly = false;
break;
case XAResource.XA_RDONLY:
break; //read only tx.
case Integer.MIN_VALUE:
//signals a marshaller error of key or value. the server wasn't contacted
throw new XAException(XAException.XA_RBROLLBACK);
default:
//any other code we need to rollback
//we may need to send the rollback later
throw new XAException(XAException.XA_RBROLLBACK);
}
}
if (needsRecovery) {
recoveryManager.addTransaction(currentXid);
}
return readOnly ? XAResource.XA_RDONLY : XAResource.XA_OK;
}
private void onePhaseCommit() throws XAException {
//check only the write transaction to know who is the last cache to commit
List<TransactionContext<?, ?>> txCaches = registeredCaches.values().stream()
.filter(TransactionContext::isReadWrite)
.collect(Collectors.toList());
int size = txCaches.size();
if (size == 0) {
return;
}
boolean commit = true;
outer:
for (int i = 0; i < size - 1; ++i) {
TransactionContext<?, ?> ctx = txCaches.get(i);
switch (ctx.prepareContext(currentXid, false, timeoutMs)) {
case XAResource.XA_OK:
break;
case Integer.MIN_VALUE:
//signals a marshaller error of key or value. the server wasn't contacted
commit = false;
break outer;
default:
//any other code we need to rollback
//we may need to send the rollback later
commit = false;
break outer;
}
}
//last resource one phase commit!
if (commit && txCaches.get(size - 1).prepareContext(currentXid, true, timeoutMs) == XAResource.XA_OK) {
internalCommit(); //commit other caches
} else {
internalRollback(true);
throw new XAException(XAException.XA_RBROLLBACK); //tell TM to rollback
}
}
private <K, V> TransactionContext<K, V> registerCache(TransactionalRemoteCacheImpl<K, V> txRemoteCache) {
if (currentXid == null) {
throw new CacheException("XaResource wasn't invoked!");
}
needsRecovery |= txRemoteCache.isRecoveryEnabled();
//noinspection unchecked
return (TransactionContext<K, V>) registeredCaches
.computeIfAbsent(txRemoteCache.getName(), s -> createTxContext(txRemoteCache));
}
private <K, V> TransactionContext<K, V> createTxContext(TransactionalRemoteCacheImpl<K, V> remoteCache) {
if (log.isTraceEnabled()) {
log.tracef("Registering remote cache '%s' for transaction xid=%s", remoteCache.getName(), currentXid);
}
return new TransactionContext<>(remoteCache.keyMarshaller(), remoteCache.valueMarshaller(),
remoteCache.getOperationsFactory(), remoteCache.getName(), remoteCache.isRecoveryEnabled());
}
private void cleanup() {
//if null, it was created by RemoteCacheManager.getXaResource()
if (transaction != null) {
//enlisted with a cache
registeredTransactions.remove(transaction);
//this instance can be used for recovery. we need at least one cache registered in order to access
// the operation factory
registeredCaches.values().forEach(TransactionContext::cleanupEntries);
}
recoveryManager.forgetTransaction(currentXid); //transaction completed, we can remove it from recovery
currentXid = null;
}
}
}
| 14,095
| 36.192612
| 117
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/TransactionContext.java
|
package org.infinispan.hotrod.impl.transaction;
import static org.infinispan.commons.util.Util.toStr;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import jakarta.transaction.Transaction;
import javax.transaction.xa.XAException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.commons.util.ByRef;
import org.infinispan.commons.util.CloseableIterator;
import org.infinispan.commons.util.CloseableIteratorSet;
import org.infinispan.hotrod.impl.cache.MetadataValue;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.operations.CacheOperationsFactory;
import org.infinispan.hotrod.impl.transaction.entry.Modification;
import org.infinispan.hotrod.impl.transaction.entry.TransactionEntry;
import org.infinispan.hotrod.impl.transaction.operations.PrepareTransactionOperation;
/**
* A context with the keys involved in a {@link Transaction}.
* <p>
* There is a single context for each ({@link TransactionalRemoteCacheImpl}, {@link Transaction}) pair.
* <p>
* It keeps the keys read and written in order to maintain the transactions isolated.
*
* @since 14.0
*/
public class TransactionContext<K, V> {
private static final Log log = LogFactory.getLog(TransactionContext.class, Log.class);
private final Map<WrappedKey<K>, TransactionEntry<K, V>> entries;
private final Function<K, byte[]> keyMarshaller;
private final Function<V, byte[]> valueMarshaller;
private final CacheOperationsFactory cacheOperationsFactory;
private final String cacheName;
private final boolean recoverable;
TransactionContext(Function<K, byte[]> keyMarshaller, Function<V, byte[]> valueMarshaller,
CacheOperationsFactory cacheOperationsFactory, String cacheName, boolean recoveryEnabled) {
this.keyMarshaller = keyMarshaller;
this.valueMarshaller = valueMarshaller;
this.cacheOperationsFactory = cacheOperationsFactory;
this.cacheName = cacheName;
this.recoverable = recoveryEnabled;
entries = new ConcurrentHashMap<>();
}
@Override
public String toString() {
return "TransactionContext{" +
"cacheName='" + cacheName + '\'' +
", context-size=" + entries.size() + " (entries)" +
'}';
}
CompletableFuture<Boolean> containsKey(Object key, Function<K, MetadataValue<V>> remoteValueSupplier) {
CompletableFuture<Boolean> result = new CompletableFuture<>();
//noinspection unchecked
entries.compute(wrap((K) key), (wKey, entry) -> {
if (entry == null) {
entry = createEntryFromRemote(wKey.key, remoteValueSupplier);
}
result.complete(!entry.isNonExists());
return entry;
});
return result;
}
boolean containsValue(Object value, Supplier<CloseableIteratorSet<Map.Entry<K, V>>> iteratorSupplier,
Function<K, MetadataValue<V>> remoteValueSupplier) {
boolean found = entries.values().stream()
.map(TransactionEntry::getValue)
.filter(Objects::nonNull)
.anyMatch(v -> Objects.deepEquals(v, value));
return found || searchValue(value, iteratorSupplier.get(), remoteValueSupplier);
}
<T> CompletableFuture<T> compute(K key, Function<TransactionEntry<K, V>, T> function) {
CompletableFuture<T> future = new CompletableFuture<>();
entries.compute(wrap(key), (wKey, entry) -> {
if (entry == null) {
entry = TransactionEntry.notReadEntry(wKey.key);
}
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). Before=%s", wKey, entry);
}
T result = function.apply(entry);
future.complete(result);
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). After=%s (result=%s)", wKey, entry, result);
}
return entry;
});
return future;
}
<T> CompletableFuture<T> compute(K key, Function<TransactionEntry<K, V>, T> function,
Function<K, MetadataValue<V>> remoteValueSupplier) {
CompletableFuture<T> future = new CompletableFuture<>();
entries.compute(wrap(key), (wKey, entry) -> {
if (entry == null) {
entry = createEntryFromRemote(wKey.key, remoteValueSupplier);
if (log.isTraceEnabled()) {
log.tracef("Fetched key (%s) from remote. Entry=%s", wKey, entry);
}
}
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). Before=%s", wKey, entry);
}
T result = function.apply(entry);
future.complete(result);
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). After=%s (result=%s)", wKey, entry, result);
}
return entry;
});
return future;
}
boolean isReadWrite() {
return entries.values().stream().anyMatch(TransactionEntry::isModified);
}
<T> T computeSync(K key, Function<TransactionEntry<K, V>, T> function,
Function<K, MetadataValue<V>> remoteValueSupplier) {
ByRef<T> ref = new ByRef<>(null);
entries.compute(wrap(key), (wKey, entry) -> {
if (entry == null) {
entry = createEntryFromRemote(wKey.key, remoteValueSupplier);
if (log.isTraceEnabled()) {
log.tracef("Fetched key (%s) from remote. Entry=%s", wKey, entry);
}
}
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). Before=%s", wKey, entry);
}
T result = function.apply(entry);
ref.set(result);
if (log.isTraceEnabled()) {
log.tracef("Compute key (%s). After=%s (result=%s)", wKey, entry, result);
}
return entry;
});
return ref.get();
}
/**
* Prepares the {@link Transaction} in the server and returns the {@link XAResource} code.
* <p>
* A special value {@link Integer#MIN_VALUE} is used to signal an error before contacting the server (for example, it
* wasn't able to marshall the key/value)
*/
int prepareContext(Xid xid, boolean onePhaseCommit, long timeout) {
PrepareTransactionOperation operation;
List<Modification> modifications;
try {
modifications = toModification();
if (log.isTraceEnabled()) {
log.tracef("Preparing transaction xid=%s, remote-cache=%s, modification-size=%d", xid, cacheName,
modifications.size());
}
if (modifications.isEmpty()) {
return XAResource.XA_RDONLY;
}
} catch (Exception e) {
return Integer.MIN_VALUE;
}
try {
int xaReturnCode;
do {
operation = cacheOperationsFactory
.newPrepareTransactionOperation(xid, onePhaseCommit, modifications, recoverable, timeout);
xaReturnCode = operation.execute().toCompletableFuture().get();
} while (operation.shouldRetry());
return xaReturnCode;
} catch (Exception e) {
HOTROD.exceptionDuringPrepare(xid, e);
return XAException.XA_RBROLLBACK;
}
}
void cleanupEntries() {
entries.clear();
}
private List<Modification> toModification() {
return entries.values().stream()
.filter(TransactionEntry::isModified)
.map(entry -> entry.toModification(keyMarshaller, valueMarshaller))
.collect(Collectors.toList());
}
private TransactionEntry<K, V> createEntryFromRemote(K key, Function<K, MetadataValue<V>> remoteValueSupplier) {
MetadataValue<V> remoteValue = remoteValueSupplier.apply(key);
return remoteValue == null ? TransactionEntry.nonExistingEntry(key) : TransactionEntry.read(key, remoteValue);
}
private boolean searchValue(Object value, CloseableIteratorSet<Map.Entry<K, V>> iterator,
Function<K, MetadataValue<V>> remoteValueSupplier) {
try (CloseableIterator<Map.Entry<K, V>> it = iterator.iterator()) {
while (it.hasNext()) {
Map.Entry<K, V> entry = it.next();
if (!entries.containsKey(wrap(entry.getKey())) && Objects.deepEquals(entry.getValue(), value)) {
ByRef.Boolean result = new ByRef.Boolean(false);
entries.computeIfAbsent(wrap(entry.getKey()), wrappedKey -> {
MetadataValue<V> remoteValue = remoteValueSupplier.apply(wrappedKey.key);
if (Objects.deepEquals(remoteValue.getValue(), value)) {
//value didn't change. store it locally.
result.set(true);
return TransactionEntry.read(wrappedKey.key, remoteValue);
} else {
return null;
}
});
if (result.get()) {
return true;
}
}
}
}
//we iterated over all keys.
return false;
}
private WrappedKey<K> wrap(K key) {
return new WrappedKey<>(key);
}
private static class WrappedKey<K> {
private final K key;
private WrappedKey(K key) {
this.key = key;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
WrappedKey<?> that = (WrappedKey<?>) o;
return Objects.deepEquals(key, that.key);
}
@Override
public int hashCode() {
if (key instanceof Object[]) {
return Arrays.deepHashCode((Object[]) key);
} else if (key instanceof byte[]) {
return Arrays.hashCode((byte[]) key);
} else if (key instanceof short[]) {
return Arrays.hashCode((short[]) key);
} else if (key instanceof int[]) {
return Arrays.hashCode((int[]) key);
} else if (key instanceof long[]) {
return Arrays.hashCode((long[]) key);
} else if (key instanceof char[]) {
return Arrays.hashCode((char[]) key);
} else if (key instanceof float[]) {
return Arrays.hashCode((float[]) key);
} else if (key instanceof double[]) {
return Arrays.hashCode((double[]) key);
} else if (key instanceof boolean[]) {
return Arrays.hashCode((boolean[]) key);
} else {
return Objects.hashCode(key);
}
}
@Override
public String toString() {
return "WrappedKey{" +
"key=" + toStr(key) +
'}';
}
}
}
| 11,021
| 35.986577
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/TransactionalRemoteCacheImpl.java
|
package org.infinispan.hotrod.impl.transaction;
import java.util.function.Function;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.commons.time.TimeService;
import org.infinispan.hotrod.impl.HotRodTransport;
import org.infinispan.hotrod.impl.cache.RemoteCacheImpl;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
public class TransactionalRemoteCacheImpl<K, V> extends RemoteCacheImpl<K, V> {
private static final Log log = LogFactory.getLog(TransactionalRemoteCacheImpl.class, Log.class);
private final boolean recoveryEnabled;
private final TransactionManager transactionManager;
private final TransactionTable transactionTable;
private final Function<K, byte[]> keyMarshaller = this::keyToBytes;
private final Function<V, byte[]> valueMarshaller = this::valueToBytes;
public TransactionalRemoteCacheImpl(HotRodTransport hotRodTransport, String name,
boolean recoveryEnabled, TransactionManager transactionManager,
TransactionTable transactionTable, TimeService timeService) {
super(hotRodTransport, name, timeService, null);
this.recoveryEnabled = recoveryEnabled;
this.transactionManager = transactionManager;
this.transactionTable = transactionTable;
}
@Override
public TransactionManager getTransactionManager() {
return transactionManager;
}
@Override
public boolean isTransactional() {
return true;
}
boolean isRecoveryEnabled() {
return recoveryEnabled;
}
Function<K, byte[]> keyMarshaller() {
return keyMarshaller;
}
Function<V, byte[]> valueMarshaller() {
return valueMarshaller;
}
private TransactionContext<K, V> getTransactionContext() {
assertRemoteCacheManagerIsStarted();
Transaction tx = getRunningTransaction();
if (tx != null) {
return transactionTable.enlist(this, tx);
}
return null;
}
private Transaction getRunningTransaction() {
try {
return transactionManager.getTransaction();
} catch (SystemException e) {
log.debug("Exception in getRunningTransaction().", e);
return null;
}
}
}
| 2,370
| 30.613333
| 102
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/TransactionOperationFactory.java
|
package org.infinispan.hotrod.impl.transaction;
import javax.transaction.xa.Xid;
import org.infinispan.hotrod.configuration.HotRodConfiguration;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transaction.operations.CompleteTransactionOperation;
import org.infinispan.hotrod.impl.transaction.operations.ForgetTransactionOperation;
import org.infinispan.hotrod.impl.transaction.operations.RecoveryOperation;
import org.infinispan.hotrod.impl.transport.netty.ChannelFactory;
/**
* An operation factory that builds operations independent from the cache used.
* <p>
* This operations are the commit/rollback request, forget request and in-doubt transactions request.
* <p>
* This operation aren't associated to any cache, but they use the default cache topology to pick the server to
* contact.
*
* @since 14.0
*/
public class TransactionOperationFactory {
private final OperationContext operationContext;
public TransactionOperationFactory(HotRodConfiguration configuration, ChannelFactory channelFactory, Codec codec) {
this.operationContext = new OperationContext(channelFactory, codec, null, configuration, null, null, null);
}
CompleteTransactionOperation newCompleteTransactionOperation(Xid xid, boolean commit) {
return new CompleteTransactionOperation(operationContext, xid, commit);
}
ForgetTransactionOperation newForgetTransactionOperation(Xid xid) {
return new ForgetTransactionOperation(operationContext, xid);
}
RecoveryOperation newRecoveryOperation() {
return new RecoveryOperation(operationContext);
}
}
| 1,684
| 38.186047
| 118
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/TransactionTable.java
|
package org.infinispan.hotrod.impl.transaction;
import jakarta.transaction.Transaction;
/**
* A {@link Transaction} table that knows how to interact with the {@link Transaction} and how the {@link
* TransactionalRemoteCacheImpl} is enlisted.
*
* @since 14.0
*/
public interface TransactionTable {
<K, V> TransactionContext<K, V> enlist(TransactionalRemoteCacheImpl<K, V> txRemoteCache, Transaction tx);
/**
* It initializes the {@link TransactionTable} with the {@link TransactionOperationFactory} to use.
*
* @param operationFactory The {@link TransactionOperationFactory} to use.
*/
void start(TransactionOperationFactory operationFactory);
}
| 679
| 29.909091
| 108
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/AbstractTransactionTable.java
|
package org.infinispan.hotrod.impl.transaction;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.impl.transaction.operations.CompleteTransactionOperation;
import org.infinispan.hotrod.impl.transaction.operations.ForgetTransactionOperation;
/**
* A Base {@link TransactionTable} with common logic.
* <p>
* It contains the functions to handle server requests that don't depend of the cache. Such operations are the commit,
* rollback and forget request and the recovery process.
*
* @since 14.0
*/
abstract class AbstractTransactionTable implements TransactionTable {
private static final Log log = LogFactory.getLog(AbstractTransactionTable.class, Log.class);
private final long timeout;
private volatile TransactionOperationFactory operationFactory;
AbstractTransactionTable(long timeout) {
this.timeout = timeout;
}
@Override
public final void start(TransactionOperationFactory operationFactory) {
this.operationFactory = operationFactory;
}
/**
* Check this component has started (i.e. {@link TransactionOperationFactory} isn't null)
*
* @return the {@link TransactionOperationFactory} to use.
*/
TransactionOperationFactory assertStartedAndReturnFactory() {
TransactionOperationFactory tmp = operationFactory;
if (tmp == null) {
throw log.transactionTableNotStarted();
}
return tmp;
}
final long getTimeout() {
return timeout;
}
/**
* It completes the transaction with the commit or rollback request.
* <p>
* It can be a commit or rollback request.
*
* @param xid The transaction {@link Xid}.
* @param commit {@code True} to commit the transaction, {@link false} to rollback.
* @return The server's return code.
*/
int completeTransaction(Xid xid, boolean commit) {
try {
TransactionOperationFactory factory = assertStartedAndReturnFactory();
CompleteTransactionOperation operation = factory.newCompleteTransactionOperation(xid, commit);
return operation.execute().toCompletableFuture().get();
} catch (Exception e) {
log.debug("Exception while commit/rollback.", e);
return XAException.XA_HEURRB; //heuristically rolled-back
}
}
/**
* Tells the server to forget this transaction.
*
* @param xid The transaction {@link Xid}.
*/
void forgetTransaction(Xid xid) {
try {
TransactionOperationFactory factory = assertStartedAndReturnFactory();
ForgetTransactionOperation operation = factory.newForgetTransactionOperation(xid);
//async.
//we don't need the reply from server. If we can't forget for some reason (timeouts or other exception),
// the server reaper will cleanup the completed transactions after a while. (default 1 min)
operation.execute();
} catch (Exception e) {
if (log.isTraceEnabled()) {
log.tracef(e, "Exception in forget transaction xid=%s", xid);
}
}
}
/**
* It requests the server for all in-doubt prepared transactions, to be handled by the recovery process.
*
* @return A {@link CompletionStage} which is completed with a collections of transaction {@link Xid}.
*/
CompletionStage<Collection<Xid>> fetchPreparedTransactions() {
try {
TransactionOperationFactory factory = assertStartedAndReturnFactory();
return factory.newRecoveryOperation().execute();
} catch (Exception e) {
if (log.isTraceEnabled()) {
log.trace("Exception while fetching prepared transactions", e);
}
return CompletableFuture.completedFuture(Collections.emptyList());
}
}
}
| 4,033
| 34.385965
| 118
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/SyncModeTransactionTable.java
|
package org.infinispan.hotrod.impl.transaction;
import static org.infinispan.commons.tx.Util.transactionStatusToString;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.function.Consumer;
import java.util.function.Function;
import jakarta.transaction.RollbackException;
import jakarta.transaction.Status;
import jakarta.transaction.Synchronization;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import javax.transaction.xa.XAResource;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.hotrod.transaction.manager.RemoteXid;
/**
* A {@link TransactionTable} that registers the cache as a {@link Synchronization} in the transaction.
* <p>
* Only a single {@link Synchronization} is registered even if multiple caches interact with the same
* transaction.
* <p>
* When more than one cache is involved in the {@link Transaction}, the prepare, commit and rollback
* requests are sent sequential and they are ordered by the cache's name.
* <p>
* If a cache is read-only, the commit/rollback isn't invoked.
*
* @since 14.0
*/
public class SyncModeTransactionTable extends AbstractTransactionTable {
private static final Log log = LogFactory.getLog(SyncModeTransactionTable.class, Log.class);
private final Map<Transaction, SynchronizationAdapter> registeredTransactions = new ConcurrentHashMap<>();
private final UUID uuid = Util.threadLocalRandomUUID();
private final Consumer<Transaction> cleanup = registeredTransactions::remove;
private final Function<Transaction, SynchronizationAdapter> constructor = this::createSynchronizationAdapter;
public SyncModeTransactionTable(long timeout) {
super(timeout);
}
@Override
public <K, V> TransactionContext<K, V> enlist(TransactionalRemoteCacheImpl<K, V> txRemoteCache, Transaction tx) {
assertStartedAndReturnFactory();
//registers a synchronization if it isn't done yet.
SynchronizationAdapter adapter = registeredTransactions.computeIfAbsent(tx, constructor);
//registers the cache.
TransactionContext<K, V> context = adapter.registerCache(txRemoteCache);
if (log.isTraceEnabled()) {
log.tracef("Xid=%s retrieving context: %s", adapter.xid, context);
}
return context;
}
/**
* Creates and registers the {@link SynchronizationAdapter} in the {@link Transaction}.
*/
private SynchronizationAdapter createSynchronizationAdapter(Transaction transaction) {
SynchronizationAdapter adapter = new SynchronizationAdapter(transaction, RemoteXid.create(uuid));
try {
transaction.registerSynchronization(adapter);
} catch (RollbackException | SystemException e) {
throw new CacheException(e);
}
if (log.isTraceEnabled()) {
log.tracef("Registered synchronization for transaction %s. Sync=%s", transaction, adapter);
}
return adapter;
}
private class SynchronizationAdapter implements Synchronization {
private final Map<String, TransactionContext<?, ?>> registeredCaches = new ConcurrentSkipListMap<>();
private final Transaction transaction;
private final RemoteXid xid;
private SynchronizationAdapter(Transaction transaction, RemoteXid xid) {
this.transaction = transaction;
this.xid = xid;
}
@Override
public String toString() {
return "SynchronizationAdapter{" +
"registeredCaches=" + registeredCaches.keySet() +
", transaction=" + transaction +
", xid=" + xid +
'}';
}
@Override
public void beforeCompletion() {
if (log.isTraceEnabled()) {
log.tracef("BeforeCompletion(xid=%s, remote-caches=%s)", xid, registeredCaches.keySet());
}
if (isMarkedRollback()) {
return;
}
for (TransactionContext<?, ?> txContext : registeredCaches.values()) {
switch (txContext.prepareContext(xid, false, getTimeout())) {
case XAResource.XA_OK:
case XAResource.XA_RDONLY:
break; //read only tx.
case Integer.MIN_VALUE:
//signals a marshaller error of key or value. the server wasn't contacted
markAsRollback();
return;
default:
markAsRollback();
return;
}
}
}
@Override
public void afterCompletion(int status) {
if (log.isTraceEnabled()) {
log.tracef("AfterCompletion(xid=%s, status=%s, remote-caches=%s)", xid, transactionStatusToString(status),
registeredCaches.keySet());
}
//the server commits everything when the first request arrives.
try {
boolean commit = status == Status.STATUS_COMMITTED;
completeTransaction(xid, commit);
} finally {
forgetTransaction(xid);
cleanup.accept(transaction);
}
}
private void markAsRollback() {
try {
transaction.setRollbackOnly();
} catch (SystemException e) {
log.debug("Exception in markAsRollback", e);
}
}
private boolean isMarkedRollback() {
try {
return transaction.getStatus() == Status.STATUS_MARKED_ROLLBACK;
} catch (SystemException e) {
log.debug("Exception in isMarkedRollback", e);
//lets assume not.
return false;
}
}
private <K, V> TransactionContext<K, V> registerCache(TransactionalRemoteCacheImpl<K, V> txRemoteCache) {
//noinspection unchecked
return (TransactionContext<K, V>) registeredCaches
.computeIfAbsent(txRemoteCache.getName(), s -> createTxContext(txRemoteCache));
}
private <K, V> TransactionContext<K, V> createTxContext(TransactionalRemoteCacheImpl<K, V> remoteCache) {
if (log.isTraceEnabled()) {
log.tracef("Registering remote cache '%s' for transaction xid=%s", remoteCache.getName(), xid);
}
return new TransactionContext<>(remoteCache.keyMarshaller(), remoteCache.valueMarshaller(),
remoteCache.getOperationsFactory(), remoteCache.getName(), false);
}
}
}
| 6,607
| 37.418605
| 118
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/entry/Modification.java
|
package org.infinispan.hotrod.impl.transaction.entry;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.writeArray;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.hotrod.impl.protocol.Codec;
import io.netty.buffer.ByteBuf;
/**
* The final modification of a specific key.
*
* @since 14.0
*/
public class Modification {
private final byte[] key;
private final byte[] value;
private final long versionRead;
private final CacheEntryExpiration.Impl expiration;
private final byte control;
Modification(byte[] key, byte[] value, long versionRead, CacheEntryExpiration expiration, byte control) {
this.key = key;
this.value = value;
this.versionRead = versionRead;
this.expiration = (CacheEntryExpiration.Impl) expiration;
this.control = control;
}
/**
* Writes this modification to the {@link ByteBuf}.
*
* @param byteBuf the {@link ByteBuf} to write to.
* @param codec the {@link Codec} to use.
*/
public void writeTo(ByteBuf byteBuf, Codec codec) {
writeArray(byteBuf, key);
byteBuf.writeByte(control);
if (!ControlByte.NON_EXISTING.hasFlag(control) && !ControlByte.NOT_READ.hasFlag(control)) {
byteBuf.writeLong(versionRead);
}
if (ControlByte.REMOVE_OP.hasFlag(control)) {
return;
}
codec.writeExpirationParams(byteBuf, expiration);
writeArray(byteBuf, value);
}
/**
* The estimated size.
*
* @param codec the {@link Codec} to use for the size estimation.
* @return the estimated size.
*/
public int estimateSize(Codec codec) {
int size = key.length + 1; //key + control
if (!ControlByte.NON_EXISTING.hasFlag(control) && !ControlByte.NOT_READ.hasFlag(control)) {
size += 8; //long
}
if (!ControlByte.REMOVE_OP.hasFlag(control)) {
size += value.length;
size += codec.estimateExpirationSize(expiration);
}
return size;
}
/**
* @return The key changed by this modification.
*/
public byte[] getKey() {
return key;
}
}
| 2,140
| 26.805195
| 108
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/entry/ControlByte.java
|
package org.infinispan.hotrod.impl.transaction.entry;
/**
* A control byte used by each write operation to flag if the key was read or not, or if the write operation is a remove
* operation
*
* @since 14.0
*/
public enum ControlByte {
NOT_READ(0x1),
NON_EXISTING(0x2),
REMOVE_OP(0x4);
private final byte bitSet;
ControlByte(int bitSet) {
this.bitSet = (byte) bitSet;
}
public static String prettyPrint(byte bitSet) {
StringBuilder builder = new StringBuilder("[");
if (NOT_READ.hasFlag(bitSet)) {
builder.append("NOT_READ");
} else if (NON_EXISTING.hasFlag(bitSet)) {
builder.append("NON_EXISTING");
} else {
builder.append("READ");
}
if (REMOVE_OP.hasFlag(bitSet)) {
builder.append(", REMOVED");
}
return builder.append("]").toString();
}
/**
* Sets {@code this} flag to the {@code bitSet}.
*
* @return The new bit set.
*/
public byte set(byte bitSet) {
return (byte) (bitSet | this.bitSet);
}
/**
* @return {@code true} if {@code this} flag is set in the {@code bitSet}, {@code false} otherwise.
*/
public boolean hasFlag(byte bitSet) {
return (bitSet & this.bitSet) == this.bitSet;
}
/**
* @return The bit corresponding to {@code this} flag.
*/
public byte bit() {
return bitSet;
}
}
| 1,390
| 22.982759
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/entry/TransactionEntry.java
|
package org.infinispan.hotrod.impl.transaction.entry;
import static org.infinispan.commons.util.Util.toStr;
import java.time.Duration;
import java.util.function.Function;
import org.infinispan.api.common.CacheEntry;
import org.infinispan.api.common.CacheEntryExpiration;
import org.infinispan.api.common.CacheEntryMetadata;
import org.infinispan.hotrod.impl.cache.CacheEntryMetadataImpl;
import org.infinispan.hotrod.impl.cache.CacheEntryVersionImpl;
import org.infinispan.hotrod.impl.cache.MetadataValue;
import org.infinispan.hotrod.impl.transaction.TransactionContext;
/**
* An entry in the {@link TransactionContext}.
* <p>
* It represents a single key and contains its initial version (if it was read) and the most up-to-date value (can be
* null if the key was removed).
*
* @since 14.0
*/
public class TransactionEntry<K, V> {
private final K key;
private final long version; //version read. never changes during the transaction
private final byte readControl;
private V value; //null == removed
private CacheEntryMetadata metadata;
private boolean modified;
private TransactionEntry(K key, long version, byte readControl) {
this.key = key;
this.version = version;
this.readControl = readControl;
this.modified = false;
}
public static <K, V> TransactionEntry<K, V> nonExistingEntry(K key) {
return new TransactionEntry<>(key, 0, ControlByte.NON_EXISTING.bit());
}
public static <K, V> TransactionEntry<K, V> notReadEntry(K key) {
return new TransactionEntry<>(key, 0, ControlByte.NOT_READ.bit());
}
public static <K, V> TransactionEntry<K, V> read(K key, MetadataValue<V> value) {
TransactionEntry<K, V> txEntry = new TransactionEntry<>(key, value.getVersion(), (byte) 0);
txEntry.value = value.getValue();
CacheEntryExpiration expiration;
if (value.getLifespan() < 0) {
if (value.getMaxIdle() < 0) {
expiration = CacheEntryExpiration.IMMORTAL;
} else {
expiration = CacheEntryExpiration.withMaxIdle(Duration.ofSeconds(value.getMaxIdle()));
}
} else {
if (value.getMaxIdle() < 0) {
expiration = CacheEntryExpiration.withLifespan(Duration.ofSeconds(value.getLifespan()));
} else {
expiration = CacheEntryExpiration.withLifespanAndMaxIdle(Duration.ofSeconds(value.getLifespan()), Duration.ofSeconds(value.getMaxIdle()));
}
}
txEntry.metadata = new CacheEntryMetadataImpl(value.getCreated(), value.getLastUsed(), expiration, new CacheEntryVersionImpl(value.getVersion()));
return txEntry;
}
public long getVersion() {
return version;
}
public V getValue() {
return value;
}
public boolean isModified() {
return modified;
}
public boolean isNonExists() {
return value == null;
}
public boolean exists() {
return value != null;
}
public void set(CacheEntry<K, V> entry) {
this.value = entry.value();
this.metadata = entry.metadata();
this.modified = true;
}
public void remove() {
this.value = null;
this.modified = true;
}
public Modification toModification(Function<K, byte[]> keyMarshaller, Function<V, byte[]> valueMarshaller) {
if (value == null) {
//remove operation
return new Modification(keyMarshaller.apply(key), null, version, metadata.expiration(), ControlByte.REMOVE_OP.set(readControl));
} else {
return new Modification(keyMarshaller.apply(key), valueMarshaller.apply(value), version, metadata.expiration(), readControl);
}
}
@Override
public String toString() {
return "TransactionEntry{" +
"key=" + toStr(key) +
", version=" + version +
", readControl=" + ControlByte.prettyPrint(readControl) +
", value=" + toStr(value) +
", expiration=" + metadata.expiration() +
", modified=" + modified +
'}';
}
}
| 4,026
| 32.280992
| 152
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/recovery/RecoveryIterator.java
|
package org.infinispan.hotrod.impl.transaction.recovery;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
/**
* The iterator return when {@link XAResource#recover(int)} is invoked with {@link XAResource#TMSTARTRSCAN}.
* <p>
* Initially, it returns the in-doubt transaction stored locally while it sends the request to the server. When {@link
* XAResource#recover(int)} is invoked with {@link XAResource#TMENDRSCAN}, it waits for the server reply and return the
* remaining in-doubt transactions.
*
* @since 14.0
*/
public class RecoveryIterator {
private static final Log log = LogFactory.getLog(RecoveryIterator.class, Log.class);
private static final Xid[] NOTHING = new Xid[0];
private final Set<Xid> uniqueFilter = Collections.synchronizedSet(new HashSet<>());
private final BlockingDeque<Xid> inDoubtTransactions = new LinkedBlockingDeque<>();
private final CompletionStage<Void> remoteRequest;
RecoveryIterator(Collection<Xid> localTransactions, CompletionStage<Collection<Xid>> remoteRequest) {
add(localTransactions);
this.remoteRequest = remoteRequest.thenAccept(this::add);
}
public Xid[] next() {
if (inDoubtTransactions.isEmpty()) {
if (log.isTraceEnabled()) {
log.trace("RecoveryIterator.next() = []");
}
return NOTHING;
}
Collection<Xid> txs = new ArrayList<>(inDoubtTransactions.size());
inDoubtTransactions.drainTo(txs);
if (log.isTraceEnabled()) {
log.tracef("RecoveryIterator.next() = %s", txs);
}
return txs.toArray(NOTHING);
}
public void finish(long timeout) {
try {
remoteRequest.toCompletableFuture().get(timeout, TimeUnit.MILLISECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
if (log.isTraceEnabled()) {
log.trace("Exception while waiting for prepared transaction from server.", e);
}
}
}
private void add(Collection<Xid> transactions) {
for (Xid xid : transactions) {
if (uniqueFilter.add(xid)) {
if (log.isTraceEnabled()) {
log.tracef("RecoveryIterator new xid=%s", xid);
}
inDoubtTransactions.add(xid);
}
}
}
}
| 2,802
| 33.604938
| 119
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/recovery/RecoveryManager.java
|
package org.infinispan.hotrod.impl.transaction.recovery;
import java.util.Collection;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ConcurrentHashMap;
import javax.transaction.xa.Xid;
/**
* It keeps the local in-doubt transactions.
*
* @since 14.0
*/
//TODO merge with org.infinispan.hotrod.impl.transaction.XaModeTransactionTable ?
public class RecoveryManager {
private final Collection<Xid> preparedTransactions = ConcurrentHashMap.newKeySet();
public void addTransaction(Xid xid) {
preparedTransactions.add(xid);
}
public void forgetTransaction(Xid xid) {
preparedTransactions.remove(xid);
}
public RecoveryIterator startScan(CompletionStage<Collection<Xid>> requestFuture) {
return new RecoveryIterator(preparedTransactions, requestFuture);
}
}
| 827
| 25.709677
| 86
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/operations/CompleteTransactionOperation.java
|
package org.infinispan.hotrod.impl.transaction.operations;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.XAException;
import javax.transaction.xa.Xid;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.operations.RetryOnFailureOperation;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* Represents a commit or rollback request from the {@link TransactionManager}.
*
* @since 14.0
*/
public class CompleteTransactionOperation extends RetryOnFailureOperation<Integer> {
private final Xid xid;
public CompleteTransactionOperation(OperationContext operationContext, Xid xid, boolean commit) {
super(operationContext, commit ? COMMIT_REQUEST : ROLLBACK_REQUEST, commit ? COMMIT_RESPONSE : ROLLBACK_RESPONSE, CacheOptions.DEFAULT, null);
this.xid = xid;
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
ByteBuf buf = channel.alloc().buffer(estimateSize());
operationContext.getCodec().writeHeader(buf, header);
ByteBufUtil.writeXid(buf, xid);
channel.writeAndFlush(buf);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (status == NO_ERROR_STATUS) {
complete(buf.readInt());
} else {
complete(XAException.XA_HEURRB);
}
}
private int estimateSize() {
return operationContext.getCodec().estimateHeaderSize(header) + ByteBufUtil.estimateXidSize(xid);
}
}
| 1,731
| 31.679245
| 148
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/operations/ForgetTransactionOperation.java
|
package org.infinispan.hotrod.impl.transaction.operations;
import javax.transaction.xa.XAResource;
import javax.transaction.xa.Xid;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.operations.RetryOnFailureOperation;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* It forgets the transaction identified by {@link Xid} in the server.
* <p>
* It affects all caches involved in the transaction. It is requested from {@link XAResource#forget(Xid)}.
*
* @since 14.0
*/
public class ForgetTransactionOperation extends RetryOnFailureOperation<Void> {
private final Xid xid;
public ForgetTransactionOperation(OperationContext operationContext, Xid xid) {
super(operationContext, FORGET_TX_REQUEST, FORGET_TX_RESPONSE, CacheOptions.DEFAULT, null);
this.xid = xid;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
complete(null);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
ByteBuf buf = channel.alloc().buffer(estimateSize());
operationContext.getCodec().writeHeader(buf, header);
ByteBufUtil.writeXid(buf, xid);
channel.writeAndFlush(buf);
}
private int estimateSize() {
return operationContext.getCodec().estimateHeaderSize(header) + ByteBufUtil.estimateXidSize(xid);
}
}
| 1,597
| 31.612245
| 106
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/operations/RecoveryOperation.java
|
package org.infinispan.hotrod.impl.transaction.operations;
import static java.util.Collections.emptyList;
import java.util.ArrayList;
import java.util.Collection;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.Xid;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.commons.io.SignedNumeric;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.operations.RetryOnFailureOperation;
import org.infinispan.hotrod.impl.transport.netty.ByteBufUtil;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import org.infinispan.hotrod.transaction.manager.RemoteXid;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A recovery request from the {@link TransactionManager}.
* <p>
* It returns all in-doubt transactions seen by the server.
*
* @since 14.0
*/
public class RecoveryOperation extends RetryOnFailureOperation<Collection<Xid>> {
public RecoveryOperation(OperationContext operationContext) {
super(operationContext, FETCH_TX_RECOVERY_REQUEST, FETCH_TX_RECOVERY_RESPONSE, CacheOptions.DEFAULT, null);
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (status != NO_ERROR_STATUS) {
complete(emptyList());
return;
}
int size = ByteBufUtil.readVInt(buf);
if (size == 0) {
complete(emptyList());
return;
}
Collection<Xid> xids = new ArrayList<>(size);
for (int i = 0; i < size; ++i) {
int formatId = SignedNumeric.decode(ByteBufUtil.readVInt(buf));
byte[] globalId = ByteBufUtil.readArray(buf);
byte[] branchId = ByteBufUtil.readArray(buf);
//the Xid class does't matter since it only compares the format-id, global-id and branch-id
xids.add(RemoteXid.create(formatId, globalId, branchId));
}
complete(xids);
}
@Override
protected void executeOperation(Channel channel) {
scheduleRead(channel);
ByteBuf buf = channel.alloc().buffer(estimateSize());
operationContext.getCodec().writeHeader(buf, header);
channel.writeAndFlush(buf);
}
private int estimateSize() {
return operationContext.getCodec().estimateHeaderSize(header);
}
}
| 2,308
| 31.985714
| 113
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/impl/transaction/operations/PrepareTransactionOperation.java
|
package org.infinispan.hotrod.impl.transaction.operations;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.estimateVIntSize;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.estimateXidSize;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.writeVInt;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.writeXid;
import java.net.SocketAddress;
import java.util.List;
import java.util.Set;
import jakarta.transaction.TransactionManager;
import javax.transaction.xa.Xid;
import org.infinispan.api.common.CacheOptions;
import org.infinispan.hotrod.impl.operations.OperationContext;
import org.infinispan.hotrod.impl.operations.RetryOnFailureOperation;
import org.infinispan.hotrod.impl.protocol.Codec;
import org.infinispan.hotrod.impl.transaction.entry.Modification;
import org.infinispan.hotrod.impl.transport.netty.HeaderDecoder;
import io.netty.buffer.ByteBuf;
import io.netty.channel.Channel;
/**
* A prepare request from the {@link TransactionManager}.
* <p>
* It contains all the transaction modification to perform the validation.
*
* @since 14.0
*/
public class PrepareTransactionOperation extends RetryOnFailureOperation<Integer> {
private final Xid xid;
private final boolean onePhaseCommit;
private final List<Modification> modifications;
private final boolean recoverable;
private final long timeoutMs;
private boolean retry;
public PrepareTransactionOperation(OperationContext operationContext, Xid xid, boolean onePhaseCommit,
List<Modification> modifications, boolean recoverable, long timeoutMs) {
super(operationContext, PREPARE_TX_2_REQUEST, PREPARE_TX_2_RESPONSE, CacheOptions.DEFAULT, null);
this.xid = xid;
this.onePhaseCommit = onePhaseCommit;
this.modifications = modifications;
this.recoverable = recoverable;
this.timeoutMs = timeoutMs;
}
public boolean shouldRetry() {
return retry;
}
@Override
public void acceptResponse(ByteBuf buf, short status, HeaderDecoder decoder) {
if (status == NO_ERROR_STATUS) {
complete(buf.readInt());
} else {
retry = status == NOT_PUT_REMOVED_REPLACED_STATUS;
complete(0);
}
}
@Override
protected void executeOperation(Channel channel) {
retry = false;
scheduleRead(channel);
Codec codec = operationContext.getCodec();
ByteBuf buf = channel.alloc().buffer(estimateSize(codec));
codec.writeHeader(buf, header);
writeXid(buf, xid);
buf.writeBoolean(onePhaseCommit);
buf.writeBoolean(recoverable);
buf.writeLong(timeoutMs);
writeVInt(buf, modifications.size());
for (Modification m : modifications) {
m.writeTo(buf, codec);
}
channel.writeAndFlush(buf);
}
@Override
protected void fetchChannelAndInvoke(int retryCount, Set<SocketAddress> failedServers) {
if (modifications.isEmpty()) {
super.fetchChannelAndInvoke(retryCount, failedServers);
} else {
operationContext.getChannelFactory().fetchChannelAndInvoke(modifications.get(0).getKey(), failedServers, operationContext.getCacheNameBytes(), this);
}
}
private int estimateSize(Codec codec) {
int size = codec.estimateHeaderSize(header) + estimateXidSize(xid) + 1 + estimateVIntSize(modifications.size());
for (Modification modification : modifications) {
size += modification.estimateSize(codec);
}
return size;
}
}
| 3,563
| 34.64
| 158
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/marshall/BytesOnlyMarshaller.java
|
package org.infinispan.hotrod.marshall;
import java.util.Arrays;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.commons.io.ByteBuffer;
import org.infinispan.commons.io.ByteBufferImpl;
import org.infinispan.commons.marshall.BufferSizePredictor;
import org.infinispan.commons.marshall.Marshaller;
/**
* Marshaller that only supports byte[] instances writing them as is
*
* @since 14.0
*/
public class BytesOnlyMarshaller implements Marshaller {
private BytesOnlyMarshaller() { }
public static final BytesOnlyMarshaller INSTANCE = new BytesOnlyMarshaller();
private static final BufferSizePredictor predictor = new IdentityBufferSizePredictor();
private void checkByteArray(Object o) {
if (!(o instanceof byte[])) {
throw new IllegalArgumentException("Only byte[] instances are supported currently!");
}
}
@Override
public byte[] objectToByteBuffer(Object obj, int estimatedSize) {
checkByteArray(obj);
return (byte[]) obj;
}
@Override
public byte[] objectToByteBuffer(Object obj) {
checkByteArray(obj);
return (byte[]) obj;
}
@Override
public Object objectFromByteBuffer(byte[] buf) {
return buf;
}
@Override
public Object objectFromByteBuffer(byte[] buf, int offset, int length) {
if (offset == 0 && length == buf.length) {
return buf;
}
return Arrays.copyOfRange(buf, offset, offset + length);
}
@Override
public ByteBuffer objectToBuffer(Object o) {
checkByteArray(o);
return ByteBufferImpl.create((byte[]) o);
}
@Override
public boolean isMarshallable(Object o) {
return o instanceof byte[];
}
@Override
public BufferSizePredictor getBufferSizePredictor(Object o) {
return predictor;
}
@Override
public MediaType mediaType() {
return MediaType.APPLICATION_OCTET_STREAM;
}
private static final class IdentityBufferSizePredictor implements BufferSizePredictor {
@Override
public int nextSize(Object obj) {
return ((byte[]) obj).length;
}
@Override
public void recordSize(int previousSize) {
// NOOP
}
}
}
| 2,213
| 23.876404
| 94
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/marshall/MarshallerUtil.java
|
package org.infinispan.hotrod.marshall;
import static org.infinispan.hotrod.impl.logging.Log.HOTROD;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectStreamConstants;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.commons.marshall.BufferSizePredictor;
import org.infinispan.commons.marshall.CheckedInputStream;
import org.infinispan.commons.marshall.Marshaller;
import org.infinispan.commons.marshall.ProtoStreamMarshaller;
import org.infinispan.commons.util.Util;
import org.infinispan.hotrod.exceptions.HotRodClientException;
import org.infinispan.hotrod.impl.HotRodTransport;
import org.infinispan.hotrod.impl.logging.Log;
import org.infinispan.hotrod.impl.logging.LogFactory;
import org.infinispan.protostream.SerializationContext;
/**
*/
public final class MarshallerUtil {
private static final Log log = LogFactory.getLog(MarshallerUtil.class, Log.class);
private MarshallerUtil() {
}
/**
* A convenience method to return the {@link SerializationContext} associated with the configured {@link ProtoStreamMarshaller}
*
* @return the associated {@link SerializationContext}
* @throws HotRodClientException if the cache manager is not started or is not configured to use a {@link ProtoStreamMarshaller}
*/
public static SerializationContext getSerializationContext(HotRodTransport hotRodTransport) {
Marshaller marshaller = hotRodTransport.getMarshaller();
if (marshaller instanceof ProtoStreamMarshaller) {
return ((ProtoStreamMarshaller) marshaller).getSerializationContext();
}
if (marshaller == null) {
throw new HotRodClientException("The cache manager must be configured with a ProtoStreamMarshaller and must be started before attempting to retrieve the ProtoStream SerializationContext");
}
throw new HotRodClientException("The cache manager is not configured with a ProtoStreamMarshaller");
}
@SuppressWarnings("unchecked")
public static <T> T bytes2obj(Marshaller marshaller, byte[] bytes, boolean objectStorage, ClassAllowList allowList) {
if (bytes == null || bytes.length == 0) return null;
try {
Object ret = marshaller.objectFromByteBuffer(bytes);
if (objectStorage) {
// Server stores objects
// No extra configuration is required for client in this scenario,
// and no different marshaller should be required to deal with standard serialization.
// So, if the unmarshalled object is still a byte[], it could be a standard
// serialized object, so check for stream magic
if (ret instanceof byte[] && isJavaSerialized((byte[]) ret)) {
T ois = tryJavaDeserialize(bytes, (byte[]) ret, allowList);
if (ois != null)
return ois;
}
}
return (T) ret;
} catch (Exception e) {
throw HOTROD.unableToUnmarshallBytes(Util.toHexString(bytes), e);
}
}
public static <T> T tryJavaDeserialize(byte[] bytes, byte[] ret, ClassAllowList allowList) {
try (ObjectInputStream ois = new CheckedInputStream(new ByteArrayInputStream(ret), allowList)) {
return (T) ois.readObject();
} catch (CacheException ce) {
throw ce;
} catch (Exception ee) {
if (log.isDebugEnabled())
log.debugf("Standard deserialization not in use for %s", Util.printArray(bytes));
}
return null;
}
private static boolean isJavaSerialized(byte[] bytes) {
if (bytes.length > 2) {
short magic = (short) ((bytes[1] & 0xFF) + (bytes[0] << 8));
return magic == ObjectStreamConstants.STREAM_MAGIC;
}
return false;
}
public static byte[] obj2bytes(Marshaller marshaller, Object o, BufferSizePredictor sizePredictor) {
try {
byte[] bytes = marshaller.objectToByteBuffer(o, sizePredictor.nextSize(o));
sizePredictor.recordSize(bytes.length);
return bytes;
} catch (IOException ioe) {
throw new HotRodClientException(
"Unable to marshall object of type [" + o.getClass().getName() + "]", ioe);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
return null;
}
}
}
| 4,427
| 38.891892
| 197
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/transaction/manager/RemoteTransactionManager.java
|
package org.infinispan.hotrod.transaction.manager;
import java.util.UUID;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.commons.tx.TransactionManagerImpl;
/**
* A simple {@link TransactionManager} implementation.
* <p>
* It provides the basic to handle {@link Transaction}s and supports any {@link javax.transaction.xa.XAResource}.
* <p>
* Implementation notes: <ul> <li>The state is kept in memory only.</li> <li>Does not support recover.</li> <li>Does not
* support multi-thread transactions. Although it is possible to execute the transactions in multiple threads, this
* transaction manager does not wait for them to complete. It is the application responsibility to wait before invoking
* {@link #commit()} or {@link #rollback()}</li> <li>The transaction should not block. It is no possible to {@link
* #setTransactionTimeout(int)} and this transaction manager won't rollback the transaction if it takes too long.</li>
* </ul>
* <p>
* If you need any of the requirements above, please consider use another implementation.
* <p>
* Also, it does not implement any 1-phase-commit optimization.
*
* @since 14.0
*/
public final class RemoteTransactionManager extends TransactionManagerImpl {
private RemoteTransactionManager() {
super();
}
public static RemoteTransactionManager getInstance() {
return LazyInitializeHolder.INSTANCE;
}
@Override
protected Transaction createTransaction() {
return new RemoteTransaction(this);
}
UUID getTransactionManagerId() {
return transactionManagerId;
}
private static class LazyInitializeHolder {
static final RemoteTransactionManager INSTANCE = new RemoteTransactionManager();
}
}
| 1,772
| 33.764706
| 120
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/transaction/manager/RemoteXid.java
|
package org.infinispan.hotrod.transaction.manager;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.estimateVIntSize;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.writeArray;
import static org.infinispan.hotrod.impl.transport.netty.ByteBufUtil.writeSignedVInt;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import javax.transaction.xa.Xid;
import org.infinispan.commons.tx.XidImpl;
import io.netty.buffer.ByteBuf;
/**
* Implementation of {@link Xid} used by {@link RemoteTransactionManager}.
*
* @since 14.0
*/
public final class RemoteXid extends XidImpl {
//HRTX in hex
private static final int FORMAT_ID = 0x48525458;
private static final AtomicLong GLOBAL_ID_GENERATOR = new AtomicLong(1);
private static final AtomicLong BRANCH_QUALIFIER_GENERATOR = new AtomicLong(1);
private RemoteXid(int formatId, byte[] globalTransactionId, byte[] branchQualifier) {
super(formatId, globalTransactionId, branchQualifier);
}
public static RemoteXid create(UUID tmId) {
long creationTime = System.currentTimeMillis();
byte[] gid = create(tmId, creationTime, GLOBAL_ID_GENERATOR);
byte[] bid = create(tmId, creationTime, BRANCH_QUALIFIER_GENERATOR);
return new RemoteXid(FORMAT_ID, gid, bid);
}
private static void longToBytes(long val, byte[] array, int offset) {
for (int i = 7; i > 0; i--) {
array[offset + i] = (byte) val;
val >>>= 8;
}
array[offset] = (byte) val;
}
private static byte[] create(UUID transactionManagerId, long creatingTime, AtomicLong generator) {
byte[] field = new byte[32]; //size of 4 longs
longToBytes(transactionManagerId.getLeastSignificantBits(), field, 0);
longToBytes(transactionManagerId.getMostSignificantBits(), field, 8);
longToBytes(creatingTime, field, 16);
longToBytes(generator.getAndIncrement(), field, 24);
return field;
}
public void writeTo(ByteBuf byteBuf) {
writeSignedVInt(byteBuf, FORMAT_ID);
byte[] rawData = rawData();
writeArray(byteBuf, rawData, globalIdOffset(), globalIdLength());
writeArray(byteBuf, rawData, branchQualifierOffset(), branchQualifierLength());
}
public int estimateSize() {
return estimateVIntSize(FORMAT_ID) + globalIdLength() + branchQualifierLength();
}
}
| 2,386
| 34.102941
| 101
|
java
|
null |
infinispan-main/client/hotrod/src/main/java/org/infinispan/hotrod/transaction/manager/RemoteTransaction.java
|
package org.infinispan.hotrod.transaction.manager;
import jakarta.transaction.Transaction;
import org.infinispan.commons.tx.TransactionImpl;
/**
* A {@link Transaction} implementation used by {@link RemoteTransactionManager}.
*
* @since 14.0
* @see RemoteTransactionManager
*/
final class RemoteTransaction extends TransactionImpl {
RemoteTransaction(RemoteTransactionManager transactionManager) {
super();
setXid(RemoteXid.create(transactionManager.getTransactionManagerId()));
}
}
| 511
| 23.380952
| 81
|
java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.