| repo (string, 1-191 chars, nullable) | file (string, 23-351 chars) | code (string, 0-5.32M chars) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
null | infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/SleepingCacheRpcCommand.java |
package org.infinispan.remoting.rpc;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
import org.infinispan.commons.util.concurrent.CompletableFutures;
/**
* @author Pedro Ruivo
* @since 5.3
*/
public class SleepingCacheRpcCommand extends BaseRpcCommand {
public static final byte COMMAND_ID = 125;
private long sleepTime;
public SleepingCacheRpcCommand() {
super(null);
}
public SleepingCacheRpcCommand(ByteString cacheName) {
super(cacheName);
}
public SleepingCacheRpcCommand(ByteString cacheName, long sleepTime) {
super(cacheName);
this.sleepTime = sleepTime;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
Thread.sleep(sleepTime);
return CompletableFutures.completedNull();
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeLong(sleepTime);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
sleepTime = input.readLong();
}
@Override
public boolean isReturnValueExpected() {
return true;
}
}
| 1,459 | 22.934426 | 87 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/CustomCacheRpcCommand.java |
package org.infinispan.remoting.rpc;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.Visitor;
import org.infinispan.commands.remote.BaseRpcCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.util.ByteString;
/**
* @author anistor@redhat.com
* @since 5.3
*/
public class CustomCacheRpcCommand extends BaseRpcCommand implements VisitableCommand, Serializable {
public static final byte COMMAND_ID = 126;
private static final long serialVersionUID = -1L;
private Object arg;
public CustomCacheRpcCommand() {
super(null); // For command id uniqueness test
}
public CustomCacheRpcCommand(ByteString cacheName) {
super(cacheName);
}
public CustomCacheRpcCommand(ByteString cacheName, Object arg) {
this(cacheName);
this.arg = arg;
}
@Override
public CompletionStage<?> invokeAsync(ComponentRegistry registry) throws Throwable {
if (arg instanceof Throwable) {
throw (Throwable) arg;
}
// echo the arg back to the caller
return CompletableFuture.completedFuture(arg);
}
@Override
public byte getCommandId() {
return COMMAND_ID;
}
@Override
public void writeTo(ObjectOutput output) throws IOException {
output.writeObject(arg);
}
@Override
public void readFrom(ObjectInput input) throws IOException, ClassNotFoundException {
arg = input.readObject();
}
@Override
public boolean isReturnValueExpected() {
return true;
}
@Override
public Object acceptVisitor(InvocationContext ctx, Visitor visitor) throws Throwable {
return visitor.visitUnknownCommand(ctx, this);
}
@Override
public LoadType loadType() {
throw new UnsupportedOperationException();
}
}
| 2,064 | 23.879518 | 101 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/RpcManagerTest.java |
package org.infinispan.remoting.rpc;
import static org.infinispan.remoting.responses.SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionStage;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHashFactory;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.remoting.transport.jgroups.SuspectException;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.jgroups.util.NameCache;
import org.jgroups.util.UUID;
import org.testng.annotations.Test;
/**
* @author Dan Berindei
* @since 9.2
*/
@Test(groups = "functional", testName = "remoting.rpc.RpcManagerTest")
public class RpcManagerTest extends MultipleCacheManagersTest {
private static final JGroupsAddress SUSPECT = new JGroupsAddress(UUID.randomUUID());
@Override
protected void createCacheManagers() throws Throwable {
NameCache.add(SUSPECT.getJGroupsAddress(), "SUSPECT");
ConfigurationBuilder builder = new ConfigurationBuilder();
builder.clustering().cacheMode(CacheMode.REPL_SYNC);
createCluster(builder, 3);
waitForClusterToForm();
}
public void testInvokeCommand1() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
Exceptions.expectException(IllegalArgumentException.class, () ->
rpcManager0.invokeCommand(address(0), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions()));
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<ValidResponse> stage1 =
rpcManager0.invokeCommand(address(0), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(null, stage1);
CompletionStage<ValidResponse> stage2 =
rpcManager0.invokeCommand(address(1), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(SUCCESSFUL_EMPTY_RESPONSE, stage2);
CompletionStage<ValidResponse> stage3 =
rpcManager0.invokeCommand(SUSPECT, command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
Exceptions.expectExecutionException(SuspectException.class, stage3.toCompletableFuture());
}
public void testInvokeCommandCollection() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
Exceptions.expectException(IllegalArgumentException.class, () ->
rpcManager0.invokeCommand(Arrays.asList(address(0)), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions()));
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<Map<Address, Response>> stage1 =
rpcManager0.invokeCommand(Arrays.asList(address(0)), command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(Collections.emptyMap(), stage1);
CompletionStage<Map<Address, Response>> stage2 =
rpcManager0.invokeCommand(Arrays.asList(address(1)), command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), stage2);
CompletionStage<Map<Address, Response>> stage3 =
rpcManager0.invokeCommand(Arrays.asList(address(0), address(1)), command,
MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), stage3);
}
public void testInvokeCommandCollectionSuspect() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<Map<Address, Response>> stage1 =
rpcManager0.invokeCommand(Arrays.asList(SUSPECT), command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
Exceptions.expectExecutionException(SuspectException.class, stage1.toCompletableFuture());
CompletionStage<Map<Address, Response>> stage2 =
rpcManager0.invokeCommand(Arrays.asList(address(0), SUSPECT), command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
Exceptions.expectExecutionException(SuspectException.class, stage2.toCompletableFuture());
CompletionStage<Map<Address, Response>> stage3 =
rpcManager0.invokeCommand(Arrays.asList(address(0), address(1), SUSPECT), command,
MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
Exceptions.expectExecutionException(SuspectException.class, stage3.toCompletableFuture());
}
public void testInvokeCommandOnAll() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
Exceptions.expectException(IllegalArgumentException.class, () ->
rpcManager0.invokeCommandOnAll(command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions()));
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<Map<Address, Response>> stage1 =
rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(makeMap(address(1), SUCCESSFUL_EMPTY_RESPONSE, address(2), SUCCESSFUL_EMPTY_RESPONSE), stage1);
}
public void testInvokeCommandOnAllSuspect() throws Exception {
DistributionManager distributionManager = cache(0).getAdvancedCache().getDistributionManager();
CacheTopology initialTopology = distributionManager.getCacheTopology();
assertEquals(CacheTopology.Phase.NO_REBALANCE, initialTopology.getPhase());
try {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
// Add a node to the cache topology, but not to the JGroups cluster view
List<Address> newMembers = new ArrayList<>(initialTopology.getMembers());
newMembers.add(SUSPECT);
ConsistentHash newCH = new ReplicatedConsistentHashFactory().create(1, 1,
newMembers, null);
CacheTopology suspectTopology =
new CacheTopology(initialTopology.getTopologyId(), initialTopology.getRebalanceId(), newCH, null, null,
CacheTopology.Phase.NO_REBALANCE, newCH.getMembers(), null);
distributionManager.setCacheTopology(suspectTopology);
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<Map<Address, Response>> stage1 =
rpcManager0.invokeCommandOnAll(command, MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
Exceptions.expectExecutionException(SuspectException.class, stage1.toCompletableFuture());
} finally {
distributionManager.setCacheTopology(initialTopology);
}
}
public void testInvokeCommandStaggered() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
Exceptions.expectException(IllegalArgumentException.class, () ->
rpcManager0.invokeCommandStaggered(Arrays.asList(address(0)), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions()));
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<ValidResponse> stage1 =
rpcManager0.invokeCommandStaggered(Arrays.asList(address(0)), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(null, stage1);
CompletionStage<ValidResponse> stage2 =
rpcManager0.invokeCommandStaggered(Arrays.asList(address(1)), command, SingleResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(SUCCESSFUL_EMPTY_RESPONSE, stage2);
CompletionStage<ValidResponse> stage3 =
rpcManager0.invokeCommandStaggered(Arrays.asList(address(0), address(1)), command,
SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
assertResponse(SUCCESSFUL_EMPTY_RESPONSE, stage3);
CompletionStage<ValidResponse> stage4 =
rpcManager0.invokeCommandStaggered(Arrays.asList(address(0), address(1), address(2)), command,
SingleResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
assertResponse(SUCCESSFUL_EMPTY_RESPONSE, stage4);
}
public void testInvokeCommands() throws Exception {
ClusteredGetCommand command =
TestingUtil.extractCommandsFactory(cache(0)).buildClusteredGetCommand("key", 0, 0L);
RpcManager rpcManager0 = cache(0).getAdvancedCache().getRpcManager();
Exceptions.expectException(IllegalArgumentException.class, () -> {
rpcManager0.invokeCommands(Arrays.asList(address(1)), a -> command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
});
command.setTopologyId(rpcManager0.getTopologyId());
CompletionStage<Map<Address, Response>> stage1 =
rpcManager0.invokeCommands(Arrays.asList(address(0)), a -> command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(Collections.emptyMap(), stage1);
CompletionStage<Map<Address, Response>> stage2 =
rpcManager0.invokeCommands(Arrays.asList(address(1)), a -> command, MapResponseCollector.validOnly(),
rpcManager0.getSyncRpcOptions());
assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), stage2);
CompletionStage<Map<Address, Response>> stage3 =
rpcManager0.invokeCommands(Arrays.asList(address(0), address(1)), a -> command,
MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
assertResponse(Collections.singletonMap(address(1), SUCCESSFUL_EMPTY_RESPONSE), stage3);
CompletionStage<Map<Address, Response>> stage4 =
rpcManager0.invokeCommands(Arrays.asList(address(0), address(1), address(2)), a -> command,
MapResponseCollector.validOnly(), rpcManager0.getSyncRpcOptions());
assertResponse(makeMap(address(1), SUCCESSFUL_EMPTY_RESPONSE, address(2), SUCCESSFUL_EMPTY_RESPONSE), stage4);
}
private <T> void assertResponse(T expected, CompletionStage<T> stage) {
assertEquals(expected, stage.toCompletableFuture().join());
}
private <T, U> Map<T, U> makeMap(T a1, U r1, T a2, U r2) {
Map<T, U> map = new HashMap<>();
map.put(a1, r1);
map.put(a2, r2);
return map;
}
}
| 12,959 | 51.682927 | 119 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/rpc/RpcManagerTimeoutTest.java |
package org.infinispan.remoting.rpc;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.impl.FilterMapResponseCollector;
import org.infinispan.remoting.transport.impl.VoidResponseCollector;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.util.ByteString;
import org.infinispan.util.concurrent.TimeoutException;
import org.testng.Assert;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 5.3
*/
@Test(groups = "functional", testName = "remoting.rpc.RpcManagerTimeoutTest")
public class RpcManagerTimeoutTest extends MultipleCacheManagersTest {
private static final String CACHE_NAME = "_cache_name_";
@Test(expectedExceptions = TimeoutException.class)
public void testTimeoutWithResponseFilter() {
RpcManager rpcManager = advancedCache(0, CACHE_NAME).getRpcManager();
final List<Address> members = rpcManager.getMembers();
//wait for the responses from the last two members.
ResponseFilter filter = new ResponseFilter() {
private int expectedResponses = 2;
@Override
public boolean isAcceptable(Response response, Address sender) {
if (sender.equals(members.get(2)) || sender.equals(members.get(3))) {
expectedResponses--;
}
return true;
}
@Override
public boolean needMoreResponses() {
return expectedResponses > 0;
}
};
doTest(new FilterMapResponseCollector(filter, true, 2), false);
}
@Test(expectedExceptions = TimeoutException.class)
public void testTimeoutWithoutFilter() {
doTest(null, false);
}
@Test(expectedExceptions = TimeoutException.class)
public void testTimeoutWithBroadcast() {
doTest(null, true);
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
createClusteredCaches(4, CACHE_NAME, builder);
waitForClusterToForm(CACHE_NAME);
}
private void doTest(ResponseCollector<?> collector, boolean broadcast) {
if (collector == null)
collector = VoidResponseCollector.ignoreLeavers();
RpcManager rpcManager = advancedCache(0, CACHE_NAME).getRpcManager();
RpcOptions rpcOptions = new RpcOptions(DeliverOrder.NONE, 1000, TimeUnit.MILLISECONDS);
CacheRpcCommand command = new SleepingCacheRpcCommand(ByteString.fromString(CACHE_NAME), 5000);
if (broadcast) {
rpcManager.blocking(rpcManager.invokeCommandOnAll(command, collector, rpcOptions));
} else {
List<Address> members = rpcManager.getMembers();
ArrayList<Address> recipients = new ArrayList<>(2);
recipients.add(members.get(2));
recipients.add(members.get(3));
rpcManager.blocking(rpcManager.invokeCommand(recipients, command, collector, rpcOptions));
}
Assert.fail("Timeout exception wasn't thrown");
}
}
| 3,464 | 35.09375 | 101 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/transport/InitialClusterSizeTest.java |
package org.infinispan.remoting.transport;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.EmbeddedCacheManagerStartupException;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
@Test(testName = "transport.InitialClusterSizeTest", groups = "functional")
@CleanupAfterMethod
public class InitialClusterSizeTest extends MultipleCacheManagersTest {
public static final int CLUSTER_SIZE = 4;
public static final int CLUSTER_TIMEOUT_SECONDS = 5;
@Override
protected void createCacheManagers() throws Throwable {
for (int i = 0; i < CLUSTER_SIZE; i++) {
GlobalConfigurationBuilder gc = GlobalConfigurationBuilder.defaultClusteredBuilder();
gc.transport().initialClusterSize(CLUSTER_SIZE).initialClusterTimeout(CLUSTER_TIMEOUT_SECONDS, TimeUnit.SECONDS);
cacheManagers.add(TestCacheManagerFactory.createClusteredCacheManager(false, gc,
getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC), new TransportFlags()));
}
}
public void testInitialClusterSize() throws ExecutionException, InterruptedException, TimeoutException {
Future<?>[] threads = new Future[CLUSTER_SIZE];
for (int i = 0; i < CLUSTER_SIZE; i++) {
final int index = i;
threads[i] = fork(() -> {
manager(index).start();
});
}
for(Future<?> f : threads) {
f.get(15, TimeUnit.SECONDS);
}
assertEquals(CLUSTER_SIZE, manager(0).getMembers().size());
}
public void testInitialClusterSizeFail() throws Throwable {
List<Future<Void>> futures = new ArrayList<>();
for (int i = 0; i < CLUSTER_SIZE - 1; i++) {
EmbeddedCacheManager manager = manager(i);
futures.add(fork(() -> {
manager.start();
return null;
}));
}
for (Future<Void> future : futures) {
try {
// JGroupsTransport only starts counting down on initialClusterTimeout *after* it connects.
// The initial connection may take 2 seconds (GMS.join_timeout) because of JGRP-2028
// Shutdown may also take 2 seconds (GMS.view_ack_collection_timeout) because of JGRP-2030
future.get(CLUSTER_TIMEOUT_SECONDS + 2 + 2, TimeUnit.SECONDS);
fail("Should have thrown an exception");
} catch (ExecutionException ee) {
Exceptions.assertException(EmbeddedCacheManagerStartupException.class,
org.infinispan.util.concurrent.TimeoutException.class,
"ISPN000399:.*", ee.getCause());
}
}
}
}
| 3,401 | 41 | 122 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/transport/MockTransport.java |
package org.infinispan.remoting.transport;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commons.CacheConfigurationException;
import org.infinispan.commons.util.Util;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.ResponseFilter;
import org.infinispan.remoting.rpc.ResponseMode;
import org.infinispan.remoting.transport.impl.EmptyRaftManager;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.remoting.transport.raft.RaftManager;
import org.infinispan.topology.HeartBeatCommand;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.XSiteBackup;
import org.infinispan.xsite.XSiteReplicateCommand;
/**
* Mock implementation of {@link Transport} that allows intercepting remote calls and replying asynchronously.
* <p>
* TODO Allow blocking invocations until the test explicitly unblocks them
*
* @author Dan Berindei
* @since 9.2
*/
@Scope(Scopes.GLOBAL)
public class MockTransport implements Transport {
private static final Log log = LogFactory.getLog(MockTransport.class);
private final Address localAddress;
private final BlockingQueue<BlockedRequest> blockedRequests = new LinkedBlockingDeque<>();
private int viewId;
private List<Address> members;
private CompletableFuture<Void> nextViewFuture;
public MockTransport(Address localAddress) {
this.localAddress = localAddress;
}
public void init(int viewId, List<Address> members) {
this.viewId = viewId;
this.members = members;
this.nextViewFuture = new CompletableFuture<>();
}
public void updateView(int viewId, List<Address> members) {
log.debugf("Installing view %d %s", viewId, members);
this.viewId = viewId;
this.members = members;
CompletableFuture<Void> nextViewFuture = this.nextViewFuture;
this.nextViewFuture = new CompletableFuture<>();
nextViewFuture.complete(null);
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand> BlockedRequest expectCommand(Class<T> expectedCommandClass)
throws InterruptedException {
return expectCommand(expectedCommandClass, c -> {});
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand> BlockedRequest expectCommand(Class<T> expectedCommandClass,
Consumer<T> checker)
throws InterruptedException {
BlockedRequest request = blockedRequests.poll(10, TimeUnit.SECONDS);
assertNotNull("Timed out waiting for invocation", request);
T command = expectedCommandClass.cast(request.getCommand());
checker.accept(command);
return request;
}
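// Hedged usage sketch (not part of the original class): a test could pair an asynchronous invocation
// with expectCommand() and the returned BlockedRequest roughly as below; addr0, addr1 and command
// (a ClusteredGetCommand instance) are hypothetical test fixtures.
//
//   MockTransport transport = new MockTransport(addr0);
//   transport.init(1, Arrays.asList(addr0, addr1));
//   CompletionStage<Map<Address, Response>> rpc =
//         transport.invokeCommandOnAll(command, MapResponseCollector.ignoreLeavers(),
//                                      DeliverOrder.NONE, 10, TimeUnit.SECONDS);
//   transport.expectCommand(ClusteredGetCommand.class)
//            .addResponse(addr1, SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE)
//            .finish();                      // completes the rpc future via the collector
//   transport.verifyNoErrors();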
public BlockedRequest expectHeartBeatCommand() throws InterruptedException {
return expectCommand(HeartBeatCommand.class);
}
/**
* Expect a non-{@link CacheRpcCommand} wrapped in a {@link SingleRpcCommand}.
*/
public BlockedRequest expectSingleRpcCommand(Class<? extends ReplicableCommand> wrappedCommand) throws InterruptedException {
assertFalse(CacheRpcCommand.class.isAssignableFrom(wrappedCommand));
return expectCommand(SingleRpcCommand.class, c -> assertTrue(wrappedCommand.isInstance(c.getCommand())));
}
/**
* Assert that all the commands already invoked remotely have been verified and there were no errors.
*/
public void verifyNoErrors() {
assertTrue("Unexpected remote invocations: " +
blockedRequests.stream().map(i -> i.getCommand().toString()).collect(Collectors.joining(", ")),
blockedRequests.isEmpty());
}
@Deprecated
@Override
public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpcCommand,
ResponseMode mode, long timeout, ResponseFilter responseFilter,
DeliverOrder deliverOrder, boolean anycast) throws Exception {
Collection<Address> targets = recipients != null ? recipients : members;
MapResponseCollector collector = MapResponseCollector.ignoreLeavers(shouldIgnoreLeavers(mode), targets.size());
CompletableFuture<Map<Address, Response>> rpcFuture = blockRequest(recipients, rpcCommand, collector);
if (mode.isAsynchronous()) {
return Collections.emptyMap();
} else {
try {
return rpcFuture.get(10, TimeUnit.SECONDS);
} catch (ExecutionException e) {
throw Util.rewrapAsCacheException(e.getCause());
}
}
}
@Override
public CompletableFuture<Map<Address, Response>> invokeRemotelyAsync(Collection<Address> recipients,
ReplicableCommand rpcCommand, ResponseMode mode,
long timeout, ResponseFilter responseFilter,
DeliverOrder deliverOrder, boolean anycast) {
Collection<Address> targets = recipients != null ? recipients : members;
MapResponseCollector collector =
mode.isSynchronous() ? MapResponseCollector.ignoreLeavers(shouldIgnoreLeavers(mode), targets.size()) : null;
return blockRequest(recipients, rpcCommand, collector);
}
@Override
public void sendTo(Address destination, ReplicableCommand rpcCommand, DeliverOrder deliverOrder) {
blockRequest(Collections.singleton(destination), rpcCommand, null);
}
@Override
public void sendToMany(Collection<Address> destinations, ReplicableCommand rpcCommand, DeliverOrder deliverOrder) {
blockRequest(destinations, rpcCommand, null);
}
@Override
public void sendToAll(ReplicableCommand rpcCommand, DeliverOrder deliverOrder) {
blockRequest(members, rpcCommand, null);
}
@Deprecated
@Override
public Map<Address, Response> invokeRemotely(Map<Address, ReplicableCommand> rpcCommands, ResponseMode mode, long
timeout, boolean usePriorityQueue, ResponseFilter responseFilter, boolean totalOrder, boolean anycast) {
throw new UnsupportedOperationException();
}
@Deprecated
@Override
public Map<Address, Response> invokeRemotely(Map<Address, ReplicableCommand> rpcCommands, ResponseMode mode, long
timeout, ResponseFilter responseFilter, DeliverOrder deliverOrder, boolean anycast) {
throw new UnsupportedOperationException();
}
@Deprecated
@Override
public BackupResponse backupRemotely(Collection<XSiteBackup> backups, XSiteReplicateCommand rpcCommand) {
throw new UnsupportedOperationException();
}
@Override
public <O> XSiteResponse<O> backupRemotely(XSiteBackup backup, XSiteReplicateCommand<O> rpcCommand) {
throw new UnsupportedOperationException();
}
@Override
public boolean isCoordinator() {
return localAddress.equals(members.get(0));
}
@Override
public Address getCoordinator() {
return members.get(0);
}
@Override
public Address getAddress() {
return localAddress;
}
@Override
public List<Address> getPhysicalAddresses() {
throw new UnsupportedOperationException();
}
@Override
public List<Address> getMembers() {
return members;
}
@Override
public List<Address> getMembersPhysicalAddresses() {
throw new UnsupportedOperationException();
}
@Override
public boolean isMulticastCapable() {
return true;
}
@Override
public void checkCrossSiteAvailable() throws CacheConfigurationException {
}
@Override
public String localSiteName() {
return null;
}
@Start
@Override
public void start() {
}
@Stop
@Override
public void stop() {
}
@Override
public int getViewId() {
return viewId;
}
@Override
public CompletableFuture<Void> withView(int expectedViewId) {
if (viewId <= expectedViewId) {
return CompletableFutures.completedNull();
}
return nextViewFuture.thenCompose(v -> withView(expectedViewId));
}
@Override
public void waitForView(int viewId) throws InterruptedException {
try {
withView(viewId).get();
} catch (ExecutionException e) {
throw new AssertionError(e);
}
}
@Override
public Log getLog() {
throw new UnsupportedOperationException();
}
@Deprecated
@Override
public void checkTotalOrderSupported() {
}
@Override
public Set<String> getSitesView() {
return null;
}
@Override
public boolean isSiteCoordinator() {
return false;
}
@Override
public Collection<Address> getRelayNodesAddress() {
return Collections.emptyList();
}
@Override
public <T> CompletionStage<T> invokeCommand(Address target, ReplicableCommand command, ResponseCollector<T>
collector, DeliverOrder deliverOrder, long timeout, TimeUnit unit) {
return blockRequest(Collections.singleton(target), command, collector);
}
@Override
public <T> CompletionStage<T> invokeCommand(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder, long
timeout, TimeUnit unit) {
return blockRequest(targets, command, collector);
}
@Override
public <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
DeliverOrder deliverOrder, long timeout, TimeUnit unit) {
return blockRequest(members, command, collector);
}
@Override
public <T> CompletableFuture<T> invokeCommandOnAll(Collection<Address> requiredTargets, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder,
long timeout, TimeUnit unit) {
return blockRequest(requiredTargets, command, collector);
}
@Override
public <T> CompletionStage<T> invokeCommandStaggered(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder,
long timeout, TimeUnit unit) {
return blockRequest(targets, command, collector);
}
@Override
public <T> CompletionStage<T> invokeCommands(Collection<Address> targets, Function<Address, ReplicableCommand>
commandGenerator, ResponseCollector<T> responseCollector, DeliverOrder deliverOrder, long timeout, TimeUnit unit) {
throw new UnsupportedOperationException();
}
@Override
public RaftManager raftManager() {
return EmptyRaftManager.INSTANCE;
}
private <T> CompletableFuture<T> blockRequest(Collection<Address> targets, ReplicableCommand command, ResponseCollector<T> collector) {
log.debugf("Intercepted command %s to %s", command, targets);
BlockedRequest request = new BlockedRequest(command, collector);
blockedRequests.add(request);
return request.getResultFuture();
}
private boolean shouldIgnoreLeavers(ResponseMode mode) {
return mode != ResponseMode.SYNCHRONOUS;
}
/**
* Receive responses for a blocked remote invocation.
* <p>
* For example, {@code remoteInvocation.addResponse(a1, r1).addResponse(a2, r2).finish()},
* or {@code remoteInvocation.singleResponse(a, r)}
*/
public static class BlockedRequest {
private final ReplicableCommand command;
private final ResponseCollector<?> collector;
private final CompletableFuture<Object> resultFuture = new CompletableFuture<>();
private BlockedRequest(ReplicableCommand command, ResponseCollector<?> collector) {
this.command = command;
this.collector = collector;
}
public BlockedRequest addResponse(Address sender, Response response) {
assertFalse(isDone());
log.debugf("Replying to remote invocation %s with %s from %s", getCommand(), response, sender);
Object result = collector.addResponse(sender, response);
if (result != null) {
resultFuture.complete(result);
}
return this;
}
public BlockedRequest addLeaver(Address a) {
return addResponse(a, CacheNotFoundResponse.INSTANCE);
}
public BlockedRequest addException(Address a, Exception e) {
return addResponse(a, new ExceptionResponse(e));
}
public void throwException(Exception e) {
resultFuture.completeExceptionally(e);
}
public void finish() {
if (collector == null) {
// sendToX methods do not need a finish() call, ignoring it
return;
}
try {
Object result = collector.finish();
resultFuture.complete(result);
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
}
}
public void singleResponse(Address sender, Response response) {
addResponse(sender, response);
if (!isDone()) {
finish();
}
}
public ReplicableCommand getCommand() {
return command;
}
boolean isDone() {
return resultFuture.isDone();
}
@SuppressWarnings("unchecked")
<U> CompletableFuture<U> getResultFuture() {
return (CompletableFuture<U>) resultFuture;
}
}
}
| 15,110 | 33.898383 | 138 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/transport/jgroups/JGroupsBackupResponseUnitTest.java |
package org.infinispan.remoting.transport.jgroups;
import static org.infinispan.commons.test.Exceptions.assertException;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingDeque;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.LongConsumer;
import org.infinispan.commons.CacheException;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.transport.BackupResponse;
import org.infinispan.remoting.transport.XSiteAsyncAckListener;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.ControlledTimeService;
import org.infinispan.xsite.XSiteBackup;
import org.testng.annotations.Test;
/**
* Unit test for the methods of {@link BackupResponse}.
*
* @author Pedro Ruivo
* @since 10.0
*/
@Test(groups = "unit", testName = "remoting.transport.jgroups.JGroupsBackupResponseUnitTest")
public class JGroupsBackupResponseUnitTest extends AbstractInfinispanTest {
private final ControlledTimeService timeService = new ControlledTimeService();
private static Map<XSiteBackup, CompletableFuture<ValidResponse>> createResponseMap(
Collection<XSiteBackup> backups) {
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = new HashMap<>(backups.size());
for (XSiteBackup backup : backups) {
responses.put(backup, new CompletableFuture<>());
}
return responses;
}
private static XSiteBackup createSyncBackup(String siteName, long timeoutMs) {
return new XSiteBackup(siteName, true, timeoutMs);
}
private static XSiteBackup createAsyncBackup(String siteName) {
return new XSiteBackup(siteName, false, 15000);
}
public void testNoWaitForAsyncWithMix() {
List<XSiteBackup> backups = new ArrayList<>(2);
backups.add(createSyncBackup("sync", 10000));
backups.add(createAsyncBackup("async"));
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = createResponseMap(backups);
BackupResponse response = newBackupResponse(responses);
Future<Void> waiting = waitBackupResponse(response);
assertNotCompleted(waiting);
//complete the sync request
responses.get(backups.get(0)).complete(null);
//it shouldn't wait for the async request
assertCompleted(waiting);
}
public void testNoWaitForAsyncWith() {
List<XSiteBackup> backups = Collections.singletonList(createAsyncBackup("async-only"));
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = createResponseMap(backups);
BackupResponse response = newBackupResponse(responses);
Future<Void> waiting = waitBackupResponse(response);
//we only have async. it should be completed
assertCompleted(waiting);
}
public void testAsyncListener() {
Listener listener = new Listener();
long sendTimestamp = timeService.time();
List<XSiteBackup> backups = new ArrayList<>(2);
backups.add(createAsyncBackup("async-1"));
backups.add(createAsyncBackup("async-2"));
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = createResponseMap(backups);
BackupResponse backupResponse = newBackupResponse(responses);
backupResponse.notifyAsyncAck(listener);
assertTrue(listener.queue.isEmpty());
timeService.advance(10);
responses.get(backups.get(0)).complete(null);
assertListenerData(listener, sendTimestamp, "async-1", null);
timeService.advance(10);
CacheException exception = new CacheException("Test-Exception");
responses.get(backups.get(1)).completeExceptionally(exception);
assertListenerData(listener, sendTimestamp, "async-2", exception);
assertTrue(listener.queue.isEmpty());
assertEquals(TimeUnit.NANOSECONDS.toMillis(sendTimestamp), backupResponse.getSendTimeMillis());
}
public void testSyncListener() {
Listener listener = new Listener();
List<XSiteBackup> backups = new ArrayList<>(2);
backups.add(createSyncBackup("sync-1", 10000));
backups.add(createAsyncBackup("async-2"));
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = createResponseMap(backups);
BackupResponse backupResponse = newBackupResponse(responses);
backupResponse.notifyFinish(listener);
assertTrue(listener.queue.isEmpty());
Future<Void> waiting = waitBackupResponse(backupResponse);
timeService.advance(10);
responses.get(backups.get(1)).complete(null);
assertNotCompleted(waiting);
assertTrue(listener.queue.isEmpty());
timeService.advance(10);
responses.get(backups.get(0)).complete(null);
assertCompleted(waiting);
assertListenerData(listener, 20, null, null);
assertTrue(listener.queue.isEmpty());
}
public void testNoErrorsFromAsync() {
//tests that JGroupsBackupResponse neither waits for the async request nor reports its failure
long timeoutMs = 10000;
List<XSiteBackup> backups = new ArrayList<>(3);
backups.add(createSyncBackup("sync-1", timeoutMs));
backups.add(createSyncBackup("sync-2", 2 * timeoutMs));
backups.add(createAsyncBackup("async"));
Map<XSiteBackup, CompletableFuture<ValidResponse>> responses = createResponseMap(backups);
BackupResponse response = newBackupResponse(responses);
timeService.advance(timeoutMs + 1); //this will trigger a timeout for sync-1
Future<Void> waiting = waitBackupResponse(response);
assertNotCompleted(waiting);
//complete the async request
CacheException exception = new CacheException("Test-Exception");
responses.get(backups.get(1)).complete(null);
responses.get(backups.get(2)).completeExceptionally(exception);
assertCompleted(waiting);
assertEquals(1, response.getCommunicationErrors().size());
assertEquals(1, response.getFailedBackups().size());
assertTrue(response.getCommunicationErrors().contains("sync-1"));
assertTrue(response.getFailedBackups().containsKey("sync-1"));
assertException(org.infinispan.util.concurrent.TimeoutException.class, response.getFailedBackups().get("sync-1"));
}
public void testEmpty() {
List<XSiteBackup> backups = new ArrayList<>(1);
backups.add(createAsyncBackup("async"));
BackupResponse response = newBackupResponse(createResponseMap(backups));
assertTrue(response.isEmpty());
backups.add(createSyncBackup("sync", 10000));
response = newBackupResponse(createResponseMap(backups));
assertFalse(response.isEmpty());
}
private void assertListenerData(Listener listener, long sendTimestamp, String siteName, Throwable throwable) {
try {
ListenerData data = listener.queue.poll(10, TimeUnit.SECONDS);
assertNotNull("Failed to get event for site " + siteName, data);
assertEquals(siteName, data.siteName);
assertEquals(sendTimestamp, data.time);
assertEquals(throwable, data.throwable);
} catch (InterruptedException e) {
fail("Interrupted while waiting for event for site " + siteName);
}
}
private void assertNotCompleted(Future<Void> future) {
expectException(TimeoutException.class, () -> future.get(1, TimeUnit.SECONDS));
}
private void assertCompleted(Future<Void> future) {
try {
future.get(1, TimeUnit.SECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
fail("Backup Response must be completed by now!");
}
}
private Future<Void> waitBackupResponse(BackupResponse response) {
return fork(response::waitForBackupToFinish);
}
private BackupResponse newBackupResponse(Map<XSiteBackup, CompletableFuture<ValidResponse>> responses) {
return new JGroupsBackupResponse(responses, timeService);
}
private static class Listener implements XSiteAsyncAckListener, LongConsumer {
private final BlockingDeque<ListenerData> queue = new LinkedBlockingDeque<>();
@Override
public void onAckReceived(long sendTimestampNanos, String siteName, Throwable throwable) {
queue.add(new ListenerData(sendTimestampNanos, siteName, throwable));
}
@Override
public void accept(long value) {
//well, just lazy to create another listener data
queue.add(new ListenerData(value, null, null));
}
}
private static class ListenerData {
private final long time;
private final String siteName;
private final Throwable throwable;
private ListenerData(long time, String siteName, Throwable throwable) {
this.time = time;
this.siteName = siteName;
this.throwable = throwable;
}
}
}
| 9,396 | 37.044534 | 120 | java |
null | infinispan-main/core/src/test/java/org/infinispan/remoting/transport/jgroups/JGroupsTransportTest.java |
package org.infinispan.remoting.transport.jgroups;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.InboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.ResponseMode;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.impl.MapResponseCollector;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.ByteString;
import org.infinispan.xsite.XSiteReplicateCommand;
import org.jgroups.util.UUID;
import org.testng.annotations.Test;
/**
* @author Dan Berindei
* @since 9.0
*/
@Test(groups = "unit", testName = "remoting.transport.jgroups.JGroupsTransportTest")
public class JGroupsTransportTest extends MultipleCacheManagersTest {
public static final ByteString CACHE_NAME = ByteString.fromString("cache");
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder configurationBuilder = new ConfigurationBuilder();
configurationBuilder.clustering().cacheMode(CacheMode.REPL_SYNC);
createCluster(configurationBuilder, 2);
}
public void testSynchronousIgnoreLeaversInvocationToNonMembers() throws Exception {
UUID randomUuid = UUID.randomUUID();
Address randomAddress = JGroupsAddressCache.fromJGroupsAddress(randomUuid);
JGroupsTransport transport = (JGroupsTransport) manager(0).getTransport();
long initialMessages = transport.getChannel().getProtocolStack().getTransport().getMessageStats().getNumMsgsSent();
ReplicableCommand command = new ClusteredGetCommand("key", CACHE_NAME, 0, 0);
CompletableFuture<Map<Address, Response>> future = transport
.invokeRemotelyAsync(Collections.singletonList(randomAddress), command,
ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS, 1, null, DeliverOrder.NONE, true);
assertEquals(CacheNotFoundResponse.INSTANCE, future.get().get(randomAddress));
assertEquals(initialMessages, transport.getChannel().getProtocolStack().getTransport().getMessageStats().getNumMsgsSent());
}
public void testInvokeCommandStaggeredToNonMember() throws Exception {
UUID randomUuid = UUID.randomUUID();
Address randomAddress = JGroupsAddressCache.fromJGroupsAddress(randomUuid);
// Send message only to non-member
JGroupsTransport transport = (JGroupsTransport) manager(0).getTransport();
ReplicableCommand command = new ClusteredGetCommand("key", CACHE_NAME, 0, 0);
CompletionStage<Map<Address, Response>> future =
transport.invokeCommandStaggered(Collections.singletonList(randomAddress), command,
MapResponseCollector.ignoreLeavers(), DeliverOrder.NONE, 5,
TimeUnit.SECONDS);
assertEquals(Collections.singletonMap(randomAddress, CacheNotFoundResponse.INSTANCE),
future.toCompletableFuture().get());
// Send message to view member that doesn't have the cache and to non-member
CompletionStage<Map<Address, Response>> future2 =
transport.invokeCommandStaggered(Arrays.asList(address(1), randomAddress), command,
MapResponseCollector.ignoreLeavers(), DeliverOrder.NONE, 5,
TimeUnit.SECONDS);
Map<Object, Object> expected = TestingUtil.mapOf(address(1), CacheNotFoundResponse.INSTANCE,
randomAddress, CacheNotFoundResponse.INSTANCE);
assertEquals(expected, future2.toCompletableFuture().get());
// Send message to view member that doesn't have the cache and to non-member
// and block the response from the view member
CompletableFuture<Void> blocker = blockRemoteGets();
try {
CompletionStage<Map<Address, Response>> future3 =
transport.invokeCommandStaggered(Arrays.asList(address(1), randomAddress), command,
MapResponseCollector.ignoreLeavers(), DeliverOrder.NONE, 5,
TimeUnit.SECONDS);
// Wait for the stagger timeout (5s / 10 / 2) to expire before sending a reply back
Thread.sleep(500);
blocker.complete(null);
assertEquals(expected, future3.toCompletableFuture().get());
} finally {
blocker.complete(null);
}
}
private CompletableFuture<Void> blockRemoteGets() {
CompletableFuture<Void> blocker = new CompletableFuture<>();
InboundInvocationHandler oldInvocationHandler = TestingUtil.extractGlobalComponent(manager(1),
InboundInvocationHandler
.class);
InboundInvocationHandler blockingInvocationHandler = new InboundInvocationHandler() {
@Override
public void handleFromCluster(Address origin, ReplicableCommand command, Reply reply, DeliverOrder order) {
if (command instanceof ClusteredGetCommand) {
log.tracef("Blocking clustered get");
blocker.thenRun(() -> oldInvocationHandler.handleFromCluster(origin, command, reply, order));
} else {
oldInvocationHandler.handleFromCluster(origin, command, reply, order);
}
}
@Override
public void handleFromRemoteSite(String origin, XSiteReplicateCommand command, Reply reply,
DeliverOrder order) {
oldInvocationHandler.handleFromRemoteSite(origin, command, reply, order);
}
};
TestingUtil.replaceComponent(manager(1), InboundInvocationHandler.class, blockingInvocationHandler, true);
return blocker;
}
}
| 6,595 | 50.937008 | 129 | java |
null | infinispan-main/core/src/test/java/org/infinispan/filter/CompositeKeyValueFilterConverter.java |
package org.infinispan.filter;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.metadata.Metadata;
/**
* Allows composing a KeyValueFilter and a Converter into a single KeyValueFilterConverter. There is no
* performance gain in doing so, since
* {@link org.infinispan.filter.CompositeKeyValueFilterConverter#filterAndConvert(Object, Object, org.infinispan.metadata.Metadata)}
* simply calls the filter and then the converter, defeating the purpose of a combined filter-converter. This is
* intended for testing, where performance is not a concern.
*
* @author wburns
* @since 7.0
*/
@SerializeWith(CompositeKeyValueFilterConverter.Externalizer.class)
public class CompositeKeyValueFilterConverter<K, V, C> implements KeyValueFilterConverter<K, V, C> {
private final KeyValueFilter<? super K, ? super V> filter;
private final Converter<? super K, ? super V, ? extends C> converter;
public CompositeKeyValueFilterConverter(KeyValueFilter<? super K, ? super V> filter,
Converter<? super K, ? super V, ? extends C> converter) {
this.filter = filter;
this.converter = converter;
}
@Override
public C filterAndConvert(K key, V value, Metadata metadata) {
if (accept(key, value, metadata)) {
return convert(key, value, metadata);
}
else {
return null;
}
}
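// Hedged example (not part of the original class): composing two hypothetical lambdas into a single
// filter-converter; assumes KeyValueFilter and Converter can be written as lambdas here.
//
//   KeyValueFilter<String, Integer> evenValues = (k, v, m) -> v % 2 == 0;
//   Converter<String, Integer, String> render = (k, v, m) -> k + "=" + v;
//   KeyValueFilterConverter<String, Integer, String> composite =
//         new CompositeKeyValueFilterConverter<>(evenValues, render);
//   composite.filterAndConvert("a", 2, null);   // "a=2"
//   composite.filterAndConvert("b", 3, null);   // null, rejected by the filter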
@Override
public C convert(K key, V value, Metadata metadata) {
return converter.convert(key, value, metadata);
}
@Override
public boolean accept(K key, V value, Metadata metadata) {
return filter.accept(key, value, metadata);
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<CompositeKeyValueFilterConverter> {
@Override
public void writeObject(ObjectOutput output, CompositeKeyValueFilterConverter object) throws IOException {
output.writeObject(object.filter);
output.writeObject(object.converter);
}
@Override
public CompositeKeyValueFilterConverter readObject(ObjectInput input) throws IOException, ClassNotFoundException {
KeyValueFilter kvf = (KeyValueFilter) input.readObject();
Converter converter = (Converter) input.readObject();
return new CompositeKeyValueFilterConverter<>(kvf, converter);
}
}
}
| 2,487 | 36.69697 | 132 | java |
null | infinispan-main/core/src/test/java/org/infinispan/replication/ForceSyncAsyncFlagsTest.java |
package org.infinispan.replication;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcManagerImpl;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.util.ReplicatedControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* Tests for FORCE_ASYNCHRONOUS and FORCE_SYNCHRONOUS flags.
*
* @author Tomas Sykora
* @author anistor@redhat.com
*/
@Test(testName = "replication.ForceSyncAsyncFlagsTest", groups = "functional")
@CleanupAfterMethod
public class ForceSyncAsyncFlagsTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() {
// each test will create the needed caches
}
public void testForceAsyncFlagUsage() throws Exception {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
builder.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
createClusteredCaches(2, "replSync", builder);
AdvancedCache<String, String> cache1 = this.<String, String>cache(0, "replSync").getAdvancedCache();
cache(1, "replSync").getAdvancedCache();
Transport originalTransport = TestingUtil.extractGlobalComponent(cache1.getCacheManager(), Transport.class);
RpcManagerImpl rpcManager = (RpcManagerImpl) TestingUtil.extractComponent(cache1, RpcManager.class);
Transport mockTransport = spy(originalTransport);
rpcManager.setTransport(mockTransport);
// check that the replication call was sync
cache1.put("k", "v");
verify(mockTransport).invokeCommandOnAll(any(), any(), any(), any(), anyLong(), any());
reset(mockTransport);
// verify FORCE_ASYNCHRONOUS flag on SYNC cache
cache1.withFlags(Flag.FORCE_ASYNCHRONOUS).put("k", "v");
verify(mockTransport)
.sendToAll(any(ReplicableCommand.class), any(DeliverOrder.class));
}
public void testForceSyncFlagUsage() throws Exception {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.REPL_ASYNC, false);
builder.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
createClusteredCaches(2, "replAsync", builder);
AdvancedCache<String, String> cache1 = this.<String, String>cache(0, "replAsync").getAdvancedCache();
cache(1, "replAsync").getAdvancedCache();
Transport originalTransport = TestingUtil.extractGlobalComponent(cache1.getCacheManager(), Transport.class);
RpcManagerImpl rpcManager = (RpcManagerImpl) TestingUtil.extractComponent(cache1, RpcManager.class);
Transport mockTransport = spy(originalTransport);
rpcManager.setTransport(mockTransport);
cache1.put("k", "v");
verify(mockTransport)
.sendToAll(any(ReplicableCommand.class), any(DeliverOrder.class));
reset(mockTransport);
// verify FORCE_SYNCHRONOUS flag on ASYNC cache
cache1.withFlags(Flag.FORCE_SYNCHRONOUS).put("k", "v");
verify(mockTransport).invokeCommandOnAll(any(), any(), any(), any(), anyLong(), any());
}
}
| 3,806 | 41.775281 | 121 | java |
null | infinispan-main/core/src/test/java/org/infinispan/replication/ReplicationExceptionTest.java |
package org.infinispan.replication;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.io.Serializable;
import jakarta.transaction.NotSupportedException;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.TransactionManager;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.marshall.MarshallingException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.BaseAsyncInterceptor;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.infinispan.util.concurrent.IsolationLevel;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "replication.ReplicationExceptionTest")
public class ReplicationExceptionTest extends MultipleCacheManagersTest {
@Override
protected void createCacheManagers() {
ConfigurationBuilder configuration = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
configuration.locking()
.isolationLevel(IsolationLevel.REPEATABLE_READ)
.lockAcquisitionTimeout(60000L)
.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup());
createClusteredCaches(2, "syncReplCache", configuration);
waitForClusterToForm("syncReplCache");
ConfigurationBuilder noTx = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
noTx.locking()
.isolationLevel(IsolationLevel.REPEATABLE_READ)
.lockAcquisitionTimeout(60000L);
defineConfigurationOnAllManagers("syncReplCacheNoTx", noTx);
ConfigurationBuilder replAsyncNoTx = getDefaultClusteredCacheConfig(CacheMode.REPL_ASYNC, false);
defineConfigurationOnAllManagers("asyncReplCacheNoTx", replAsyncNoTx);
}
private TransactionManager beginTransaction() throws SystemException, NotSupportedException {
AdvancedCache cache1 = cache(0, "syncReplCache").getAdvancedCache();
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
return mgr;
}
@Test(expectedExceptions = MarshallingException.class)
public void testNonMarshallableRepl() {
doNonMarshallableReplTest("syncReplCacheNoTx");
}
@Test(expectedExceptions = MarshallingException.class)
public void testNonMarshallableAsyncRepl() {
doNonMarshallableReplTest("asyncReplCacheNoTx");
}
private void doNonMarshallableReplTest(String cacheName) {
AdvancedCache<Object, Object> cache1 = advancedCache(0, cacheName);
AdvancedCache<Object, Object> cache2 = advancedCache(1, cacheName);
cache1.put("test", new ContainerData());
}
public void testNonSerializableReplWithTx() throws Exception {
AdvancedCache<Object, Object> cache1 = advancedCache(0, "syncReplCache");
AdvancedCache<Object, Object> cache2 = advancedCache(1, "syncReplCache");
TransactionManager tm;
try {
tm = beginTransaction();
cache1.put("test", new ContainerData());
tm.commit();
// We should not come here.
assertNotNull("NonSerializableData should not be null on cache2", cache2.get("test"));
} catch (RollbackException rollback) {
log.trace("received RollbackException - as expected");
} catch (Exception e) {
         // We should also check that this is indeed a non-serializable (marshalling) failure.
fail(e.toString());
}
}
@Test(groups = "functional", expectedExceptions = { CacheException.class })
public void testSyncReplTimeout() {
AdvancedCache<Object, Object> cache1 = advancedCache(0, "syncReplCache");
AdvancedCache<Object, Object> cache2 = advancedCache(1, "syncReplCache");
extractInterceptorChain(cache2).addInterceptor(new DelayInterceptor(), 0);
cache1.getCacheConfiguration().clustering().remoteTimeout(10);
cache2.getCacheConfiguration().clustering().remoteTimeout(10);
TestingUtil.blockUntilViewsReceived(10000, cache1, cache2);
cache1.put("k", "v");
}
@Test(groups = "functional", expectedExceptions = { CacheException.class })
public void testLockAcquisitionTimeout() throws Exception {
AdvancedCache<Object, Object> cache1 = advancedCache(0, "syncReplCache");
AdvancedCache<Object, Object> cache2 = advancedCache(1, "syncReplCache");
cache1.getCacheConfiguration().locking().lockAcquisitionTimeout(10);
cache2.getCacheConfiguration().locking().lockAcquisitionTimeout(10);
TestingUtil.blockUntilViewsReceived(10000, cache1, cache2);
// get a lock on cache 2 and hold on to it.
EmbeddedTransactionManager tm = (EmbeddedTransactionManager) TestingUtil.getTransactionManager(cache2);
tm.begin();
cache2.put("block", "block");
assertTrue(tm.getTransaction().runPrepare());
tm.suspend();
cache1.put("block", "v");
}
   static class NonSerializableData {
int i;
}
public static class ContainerData implements Serializable {
int i;
      NonSerializableData non_serializable_data;
private static final long serialVersionUID = -8322197791060897247L;
public ContainerData() {
i = 99;
         non_serializable_data = new NonSerializableData();
}
}
static class DelayInterceptor extends BaseAsyncInterceptor {
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
// Add a delay
Thread.sleep(100);
return invokeNext(ctx, command);
}
}
}
| 6,106
| 39.443709
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/FlagsReplicationTest.java
|
package org.infinispan.replication;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.fail;
import org.infinispan.AdvancedCache;
import org.infinispan.context.Flag;
import org.infinispan.distribution.BaseDistFunctionalTest;
import org.infinispan.transaction.LockingMode;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import jakarta.transaction.Transaction;
/**
* Verifies the Flags affect both local and remote nodes.
*
* @author Sanne Grinovero <sanne@hibernate.org> (C) 2011 Red Hat Inc.
* @since 4.2.1
*/
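// A minimal sketch (not taken from this test) of the flag combination exercised below, assuming
// an AdvancedCache backed by a pessimistic transactional cache: ZERO_LOCK_ACQUISITION_TIMEOUT
// makes lock() give up immediately, and FAIL_SILENTLY turns the failure into a plain false
// return value instead of an exception.
//
//    cache.getTransactionManager().begin();
//    boolean locked = cache.withFlags(Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.FAIL_SILENTLY)
//          .lock(key);   // false, not an exception, when another transaction holds the lock
//    cache.getTransactionManager().rollback();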
@Test(groups = "functional", testName = FlagsReplicationTest.TEST_NAME)
public class FlagsReplicationTest extends BaseDistFunctionalTest<Object, String> {
static final String TEST_NAME = "replication.FlagsReplicationTest";
static final String DATA_PROVIDER = TEST_NAME + ".dataprovider";
private final Integer one = 1;
private final String key = TEST_NAME;
public FlagsReplicationTest() {
transactional = true;
cacheName = TEST_NAME;
cleanup = CleanupPhase.AFTER_METHOD;
lockingMode = LockingMode.PESSIMISTIC;
lockTimeout = 1;
}
@DataProvider(name = DATA_PROVIDER)
public Object[][] createTestConfigurations() {
return new Object[][] {
{ true, true },
{ false, false },
{ false, true },
{ true, false },
};
}
@Test(dataProvider = DATA_PROVIDER)
public void testScenario(boolean cache1IsOwner, boolean cache2IsOwner) throws Throwable {
log.tracef("Start cache1IsOwner = %s, cache2IsOwner %s", cache1IsOwner, cache2IsOwner);
AdvancedCache cache1 = (cache1IsOwner ? getFirstOwner(key) : getFirstNonOwner(key)).getAdvancedCache();
AdvancedCache cache2 = (cache2IsOwner ? getFirstOwner(key) : getFirstNonOwner(key)).getAdvancedCache();
assertNull(cache1.put(key, one));
log.trace("About to try to acquire a lock.");
cache2.getTransactionManager().begin();
if (! cache2.lock(key)) {
fail("Could not acquire lock");
}
Transaction tx2 = cache2.getTransactionManager().suspend();
cache1.getTransactionManager().begin();
boolean locked = cache1.withFlags(Flag.ZERO_LOCK_ACQUISITION_TIMEOUT, Flag.FAIL_SILENTLY).lock(key);
assertFalse(locked);
Object removed = cache1.withFlags(Flag.SKIP_LOCKING).remove(key);
assertEquals(one, removed);
Transaction tx1 = cache1.getTransactionManager().suspend();
cache2.getTransactionManager().resume(tx2);
cache2.getTransactionManager().commit();
cache1.getTransactionManager().resume(tx1);
cache1.getTransactionManager().commit();
assertNull(cache2.get(key));
log.tracef("End cache1IsOwner = %s, cache2IsOwner %s", cache1IsOwner, cache2IsOwner);
}
}
| 2,960
| 35.555556
| 109
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/SyncReplTest.java
|
package org.infinispan.replication;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcManagerImpl;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.ReplicatedControlledConsistentHashFactory;
import org.testng.annotations.Test;
/**
* @author <a href="mailto:manik@jboss.org">Manik Surtani (manik@jboss.org)</a>
*/
@Test(groups = "functional", testName = "replication.SyncReplTest")
public class SyncReplTest extends MultipleCacheManagersTest {
private String k = "key", v = "value";
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder replSync = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
replSync.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
createClusteredCaches(2, "replSync", replSync);
}
public void testBasicOperation() {
Cache<String, String> cache1 = cache(0, "replSync");
Cache cache2 = cache(1, "replSync");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
cache1.put(k, v);
assertEquals(v, cache1.get(k));
assertEquals("Should have replicated", v, cache2.get(k));
cache2.remove(k);
assert cache1.isEmpty();
assert cache2.isEmpty();
}
   public void testMultipleCachesOnSharedTransport() {
Cache<String, String> cache1 = cache(0, "replSync");
Cache cache2 = cache(1, "replSync");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assert cache1.isEmpty();
assert cache2.isEmpty();
ConfigurationBuilder newConf = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
defineConfigurationOnAllManagers("newCache", newConf);
Cache<String, String> altCache1 = manager(0).getCache("newCache");
Cache altCache2 = manager(1).getCache("newCache");
try {
assert altCache1.isEmpty();
assert altCache2.isEmpty();
cache1.put(k, v);
assert cache1.get(k).equals(v);
assert cache2.get(k).equals(v);
assert altCache1.isEmpty();
assert altCache2.isEmpty();
altCache1.put(k, "value2");
assert altCache1.get(k).equals("value2");
assert altCache2.get(k).equals("value2");
assert cache1.get(k).equals(v);
assert cache2.get(k).equals(v);
} finally {
removeCacheFromCluster("newCache");
}
}
public void testReplicateToNonExistentCache() {
// strictPeerToPeer is now disabled by default
boolean strictPeerToPeer = false;
Cache<String, String> cache1 = cache(0, "replSync");
Cache cache2 = cache(1, "replSync");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assert cache1.isEmpty();
assert cache2.isEmpty();
ConfigurationBuilder newConf = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, false);
defineConfigurationOnAllManagers("newCache2", newConf);
Cache<String, String> altCache1 = manager(0).getCache("newCache2");
try {
assert altCache1.isEmpty();
cache1.put(k, v);
assert cache1.get(k).equals(v);
assert cache2.get(k).equals(v);
assert altCache1.isEmpty();
altCache1.put(k, "value2");
assert altCache1.get(k).equals("value2");
assert cache1.get(k).equals(v);
assert cache2.get(k).equals(v);
assert manager(0).getCache("newCache2").get(k).equals("value2");
} finally {
removeCacheFromCluster("newCache2");
}
}
public void testMixingSyncAndAsyncOnSameTransport() throws Exception {
Cache<String, String> cache1 = cache(0, "replSync");
cache(1, "replSync");
waitForClusterToForm("replSync");
Transport originalTransport = null;
RpcManagerImpl rpcManager = null;
RpcManagerImpl asyncRpcManager = null;
try {
ConfigurationBuilder asyncCache = getDefaultClusteredCacheConfig(CacheMode.REPL_ASYNC, false);
asyncCache.clustering().hash().numSegments(1).consistentHashFactory(new ReplicatedControlledConsistentHashFactory(0));
defineConfigurationOnAllManagers("asyncCache", asyncCache);
Cache<String, String> asyncCache1 = manager(0).getCache("asyncCache");
manager(1).getCache("asyncCache");
waitForClusterToForm("asyncCache");
// this is shared by all caches managed by the cache manager
originalTransport = TestingUtil.extractGlobalComponent(cache1.getCacheManager(), Transport.class);
Transport mockTransport = spy(originalTransport);
// replace the transport with a mock object
rpcManager = (RpcManagerImpl) TestingUtil.extractComponent(cache1, RpcManager.class);
rpcManager.setTransport(mockTransport);
// check that the replication call was sync
cache1.put("k", "v");
verify(mockTransport)
.invokeCommandOnAll(any(), any(), any(), any(), anyLong(), any());
// resume to test for async
asyncRpcManager = (RpcManagerImpl) TestingUtil.extractComponent(asyncCache1, RpcManager.class);
asyncRpcManager.setTransport(mockTransport);
reset(mockTransport);
asyncCache1.put("k", "v");
verify(mockTransport).sendToAll(any(ReplicableCommand.class), any(DeliverOrder.class));
} finally {
// replace original transport
if (rpcManager != null)
rpcManager.setTransport(originalTransport);
if (asyncRpcManager != null)
asyncRpcManager.setTransport(originalTransport);
}
}
}
| 6,472
| 36.633721
| 127
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/AsyncAPITxSyncReplTest.java
|
package org.infinispan.replication;
import static org.testng.AssertJUnit.assertEquals;
import java.util.Collections;
import java.util.concurrent.Future;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.data.Key;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "replication.AsyncAPITxSyncReplTest")
public class AsyncAPITxSyncReplTest extends MultipleCacheManagersTest {
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder c = getConfig();
c.transaction().autoCommit(false);
createClusteredCaches(2, TestDataSCI.INSTANCE, c);
}
protected ConfigurationBuilder getConfig() {
return getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
}
protected void assertOnAllCaches(Key k, String v, Cache c1, Cache c2) {
assertEquals("Error in cache 1.", v, c1.get(k));
assertEquals("Error in cache 2,", v, c2.get(k));
}
public void testAsyncTxMethods() throws Exception {
Cache<Object, String> c1 = cache(0);
Cache c2 = cache(1);
String v = "v";
String v2 = "v2";
String v3 = "v3";
String v4 = "v4";
String v5 = "v5";
String v6 = "v6";
String v_null = "v_nonexistent";
Key key = new Key("k");
TransactionManager tm = TestingUtil.getTransactionManager(c1);
// put
tm.begin();
Future<String> f = c1.putAsync(key, v);
assert f != null;
Transaction t = tm.suspend();
assert c2.get(key) == null;
tm.resume(t);
assert f.get() == null;
tm.commit();
assertOnAllCaches(key, v, c1, c2);
tm.begin();
f = c1.putAsync(key, v2);
assert f != null;
t = tm.suspend();
assert c2.get(key).equals(v);
tm.resume(t);
assert !f.isCancelled();
assert f.get().equals(v);
tm.commit();
assertOnAllCaches(key, v2, c1, c2);
// putAll
tm.begin();
final Future<Void> f2 = c1.putAllAsync(Collections.singletonMap(key, v3));
assert f2 != null;
eventually(f2::isDone);
t = tm.suspend();
assert c2.get(key).equals(v2);
tm.resume(t);
assert !f2.isCancelled();
assert f2.get() == null;
tm.commit();
assertOnAllCaches(key, v3, c1, c2);
// putIfAbsent
tm.begin();
final Future f1 = c1.putIfAbsentAsync(key, v4);
assert f1 != null;
eventually(f1::isDone);
t = tm.suspend();
assert c2.get(key).equals(v3);
tm.resume(t);
assert !f1.isCancelled();
assert f1.get().equals(v3);
tm.commit();
assertOnAllCaches(key, v3, c1, c2);
// remove
tm.begin();
final Future f3 = c1.removeAsync(key);
assert f3 != null;
eventually(f3::isDone);
t = tm.suspend();
assert c2.get(key).equals(v3);
tm.resume(t);
assert !f3.isCancelled();
assert f3.get().equals(v3);
tm.commit();
assertOnAllCaches(key, null, c1, c2);
// putIfAbsent again
tm.begin();
final Future f4 = c1.putIfAbsentAsync(key, v4);
assert f4 != null;
eventually(f4::isDone);
assert f4.isDone();
t = tm.suspend();
assert c2.get(key) == null;
tm.resume(t);
assert !f4.isCancelled();
assert f4.get() == null;
tm.commit();
assertOnAllCaches(key, v4, c1, c2);
      // conditional remove
tm.begin();
Future<Boolean> f5 = c1.removeAsync(key, v_null);
assert f5 != null;
assert !f5.isCancelled();
assert f5.get().equals(false);
assert f5.isDone();
tm.commit();
assertOnAllCaches(key, v4, c1, c2);
tm.begin();
final Future f6 = c1.removeAsync(key, v4);
assert f6 != null;
eventually(f6::isDone);
assert f6.isDone();
t = tm.suspend();
assert c2.get(key).equals(v4);
tm.resume(t);
assert !f6.isCancelled();
assert f6.get().equals(true);
tm.commit();
assertOnAllCaches(key, null, c1, c2);
// replace
tm.begin();
final Future f7 = c1.replaceAsync(key, v5);
assert f7 != null;
assert !f7.isCancelled();
assert f7.get() == null;
assert f7.isDone();
tm.commit();
assertOnAllCaches(key, null, c1, c2);
tm.begin();
c1.put(key, v);
tm.commit();
tm.begin();
final Future f8 = c1.replaceAsync(key, v5);
assert f8 != null;
eventually(f8::isDone);
t = tm.suspend();
assert c2.get(key).equals(v);
tm.resume(t);
assert !f8.isCancelled();
assert f8.get().equals(v);
tm.commit();
assertOnAllCaches(key, v5, c1, c2);
      // conditional replace (with an expected previous value)
tm.begin();
final Future f9 = c1.replaceAsync(key, v_null, v6);
assert f9 != null;
assert !f9.isCancelled();
assert f9.get().equals(false);
assert f9.isDone();
tm.commit();
assertOnAllCaches(key, v5, c1, c2);
tm.begin();
final Future f10 = c1.replaceAsync(key, v5, v6);
assert f10 != null;
eventually(f10::isDone);
t = tm.suspend();
assert c2.get(key).equals(v5);
tm.resume(t);
assert !f10.isCancelled();
assert f10.get().equals(true);
tm.commit();
assertOnAllCaches(key, v6, c1, c2);
}
}
| 5,617
| 27.09
| 80
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/SyncPessimisticLockingTest.java
|
package org.infinispan.replication;
import static org.testng.Assert.assertTrue;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestDataSCI;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.impl.LocalTransaction;
import org.infinispan.transaction.impl.RemoteTransaction;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.testng.annotations.Test;
/**
* Tests for implicit locking
* <p/>
* Transparent eager locking for transactions https://jira.jboss.org/jira/browse/ISPN-70
*
* @author Vladimir Blagojevic
*/
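// A minimal configuration sketch of the pessimistic locking mode this test relies on (assumed to
// be equivalent to what createCacheManagers() builds): with LockingMode.PESSIMISTIC each write
// eagerly acquires the cluster-wide lock at the time of the write rather than at prepare time.
//
//    ConfigurationBuilder cfg = new ConfigurationBuilder();
//    cfg.clustering().cacheMode(CacheMode.DIST_SYNC)
//       .transaction().transactionMode(TransactionMode.TRANSACTIONAL)
//       .lockingMode(LockingMode.PESSIMISTIC);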
@Test(groups = "functional", testName = "replication.SyncPessimisticLockingTest")
@InCacheMode({ CacheMode.DIST_SYNC, CacheMode.REPL_SYNC})
public class SyncPessimisticLockingTest extends MultipleCacheManagersTest {
private String k = "key", v = "value";
public SyncPessimisticLockingTest() {
cleanup = CleanupPhase.AFTER_METHOD;
}
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder cfg = getDefaultClusteredCacheConfig(cacheMode, true);
cfg.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup())
.lockingMode(LockingMode.PESSIMISTIC)
.locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
createClusteredCaches(2, "testcache", TestDataSCI.INSTANCE, cfg);
}
public void testBasicOperation() throws Exception {
testBasicOperationHelper(false);
testBasicOperationHelper(true);
}
public void testLocksReleasedWithNoMods() throws Exception {
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
// do a dummy read
cache1.get(k);
mgr.commit();
assertNotLocked(cache1.getName(), k);
assertNotLocked(cache2.getName(), k);
cache1.clear();
cache2.clear();
}
public void testReplaceNonExistentKey() throws Exception {
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
// do a replace on empty key
// https://jira.jboss.org/browse/ISPN-514
Object old = cache1.replace(k, "blah");
boolean replaced = cache1.replace(k, "Vladimir", "Blagojevic");
assert !replaced;
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
mgr.commit();
assertNotLocked(cache1.getName(), k);
assertNotLocked(cache2.getName(), k);
cache1.clear();
cache2.clear();
}
private void testBasicOperationHelper(boolean useCommit) throws Exception {
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
String name = "Infinispan";
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
cache1.put(k, name);
//automatically locked on another cache node
assertKeyLockedCorrectly(k, "testcache");
String key2 = "name";
cache1.put(key2, "Vladimir");
//automatically locked on another cache node
assertKeyLockedCorrectly(key2, "testcache");
String key3 = "product";
String key4 = "org";
Map<String, String> newMap = new HashMap<String, String>();
newMap.put(key3, "Infinispan");
newMap.put(key4, "JBoss");
cache1.putAll(newMap);
//automatically locked on another cache node
assertLocked(getLockOwner(key3, "testcache"), key3);
assertLocked(getLockOwner(key4, "testcache"), key4);
if (useCommit)
mgr.commit();
else
mgr.rollback();
if (useCommit) {
assertEquals(name, cache1.get(k));
assertEquals("Should have replicated", name, cache2.get(k));
} else {
assertEquals(null, cache1.get(k));
assertEquals("Should not have replicated", null, cache2.get(k));
}
cache2.remove(k);
cache2.remove(key2);
cache2.remove(key3);
cache2.remove(key4);
}
public void testSimpleCommit() throws Throwable {
tm(0, "testcache").begin();
cache(0, "testcache").put("k", "v");
tm(0, "testcache").commit();
assertEquals(cache(0, "testcache").get("k"), "v");
assertEquals(cache(1, "testcache").get("k"), "v");
assertNotLocked("testcache", "k");
tm(0, "testcache").begin();
cache(0, "testcache").put("k", "v");
cache(0, "testcache").remove("k");
tm(0, "testcache").commit();
assertEquals(cache(0, "testcache").get("k"), null);
assertEquals(cache(1, "testcache").get("k"), null);
assertNotLocked("testcache", "k");
}
   public void testSimpleRollback() throws Throwable {
tm(0, "testcache").begin();
cache(0, "testcache").put("k", "v");
tm(0, "testcache").rollback();
assert !lockManager(1, "testcache").isLocked("k");
assertEquals(cache(0, "testcache").get("k"), null);
assertEquals(cache(1, "testcache").get("k"), null);
assert !lockManager(0, "testcache").isLocked("k");
}
@Test
public void testRemoteLocksReleasedWhenReadTransactionCommitted() throws Exception {
testRemoteLocksReleased(false, true);
}
@Test
public void testRemoteLocksReleasedWhenReadTransactionRolledBack() throws Exception {
testRemoteLocksReleased(false, false);
}
@Test
public void testRemoteLocksReleasedWhenWriteTransactionCommitted() throws Exception {
testRemoteLocksReleased(true, true);
}
@Test
public void testRemoteLocksReleasedWhenWriteTransactionRolledBack() throws Exception {
testRemoteLocksReleased(true, false);
}
private void testRemoteLocksReleased(boolean write, boolean commit) throws Exception {
final MagicKey key = new MagicKey(cache(0, "testcache"));
tm(1, "testcache").begin();
if (write) {
cache(1, "testcache").put(key, "somevalue");
} else {
cache(1, "testcache").getAdvancedCache().withFlags(Flag.FORCE_WRITE_LOCK).get(key);
}
Collection<LocalTransaction> localTxs = TestingUtil.getTransactionTable(cache(1, "testcache")).getLocalTransactions();
assertEquals(1, localTxs.size());
LocalTransaction localTx = localTxs.iterator().next();
if (write) {
assertFalse(localTx.isReadOnly());
} else {
assertTrue(localTx.isReadOnly());
}
final Collection<RemoteTransaction> remoteTxs = TestingUtil.getTransactionTable(cache(0, "testcache")).getRemoteTransactions();
assertEquals(1, remoteTxs.size());
RemoteTransaction remoteTx = remoteTxs.iterator().next();
assertTrue(remoteTx.getLockedKeys().contains(key));
assertTrue(TestingUtil.extractLockManager(cache(0, "testcache")).isLocked(key));
if (commit) {
tm(1, "testcache").commit();
} else {
tm(1, "testcache").rollback();
}
eventually(new Condition() {
@Override
public boolean isSatisfied() throws Exception {
return remoteTxs.isEmpty();
}
});
eventually(new Condition() {
@Override
public boolean isSatisfied() throws Exception {
return !TestingUtil.extractLockManager(cache(0, "testcache")).isLocked(key);
}
});
}
}
| 8,481
| 32.525692
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/SyncLockingTest.java
|
package org.infinispan.replication;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.lookup.EmbeddedTransactionManagerLookup;
import org.infinispan.transaction.tm.EmbeddedTransactionManager;
import org.testng.annotations.Test;
/**
* Tests for lock API
* <p/>
* Introduce lock() API methods https://jira.jboss.org/jira/browse/ISPN-48
*
* @author Manik Surtani
* @author Vladimir Blagojevic
*/
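// A minimal sketch (not part of this test) of the explicit lock API exercised below, assuming a
// pessimistic transactional cache: lock() must be called inside a transaction, and the lock is
// released automatically on commit or rollback, so there is no explicit unlock.
//
//    TransactionManager tm = cache.getAdvancedCache().getTransactionManager();
//    tm.begin();
//    cache.getAdvancedCache().lock("key");   // eagerly acquires the cluster-wide lock
//    cache.put("key", "value");
//    tm.commit();                            // lock released here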
@Test(groups = "functional", testName = "replication.SyncLockingTest")
@InCacheMode({ CacheMode.DIST_SYNC, CacheMode.REPL_SYNC })
public class SyncLockingTest extends MultipleCacheManagersTest {
private String k = "key", v = "value";
public SyncLockingTest() {
cleanup = CleanupPhase.AFTER_METHOD;
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder cfg = getDefaultClusteredCacheConfig(cacheMode, true);
cfg.transaction().transactionManagerLookup(new EmbeddedTransactionManagerLookup())
.lockingMode(LockingMode.PESSIMISTIC)
.locking().lockAcquisitionTimeout(TestingUtil.shortTimeoutMillis());
createClusteredCaches(2, "testcache", cfg);
waitForClusterToForm("testcache");
}
public void testLocksReleasedWithoutExplicitUnlock() throws Exception {
locksReleasedWithoutExplicitUnlockHelper(false, false);
locksReleasedWithoutExplicitUnlockHelper(true, false);
locksReleasedWithoutExplicitUnlockHelper(false, true);
locksReleasedWithoutExplicitUnlockHelper(true, true);
}
public void testConcurrentNonTxLocking() throws Exception {
concurrentLockingHelper(false, false);
concurrentLockingHelper(true, false);
}
public void testConcurrentTxLocking() throws Exception {
concurrentLockingHelper(false, true);
concurrentLockingHelper(true, true);
}
public void testLocksReleasedWithNoMods() throws Exception {
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
cache1.getAdvancedCache().lock(k);
//do a dummy read
cache1.get(k);
mgr.commit();
      assertEventuallyNotLocked(cache1, k);
      assertEventuallyNotLocked(cache2, k);
assert cache1.isEmpty();
assert cache2.isEmpty();
cache1.clear();
cache2.clear();
}
public void testReplaceNonExistentKey() throws Exception {
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
cache1.getAdvancedCache().lock(k);
// do a replace on empty key
// https://jira.jboss.org/browse/ISPN-514
Object old = cache1.replace(k, "blah");
assertNull("Should be null", cache1.get(k));
boolean replaced = cache1.replace(k, "Vladimir", "Blagojevic");
assert !replaced;
assertNull("Should be null", cache1.get(k));
mgr.commit();
      assertEventuallyNotLocked(cache1, k);
      assertEventuallyNotLocked(cache2, k);
assert cache1.isEmpty();
assert cache2.isEmpty();
cache1.clear();
cache2.clear();
}
private void concurrentLockingHelper(final boolean sameNode, final boolean useTx) throws Exception {
log.debugf("sameNode=%s, useTx=%s", sameNode, useTx);
final Cache cache1 = cache(0, "testcache");
final Cache cache2 = cache(1, "testcache");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
final CountDownLatch latch = new CountDownLatch(1);
Thread t = getTestThreadFactory("Worker").newThread(new Runnable() {
@Override
public void run() {
log.info("Concurrent " + (useTx ? "tx" : "non-tx") + " write started "
+ (sameNode ? "on same node..." : "on a different node..."));
EmbeddedTransactionManager mgr = null;
try {
if (useTx) {
mgr = (EmbeddedTransactionManager) TestingUtil.getTransactionManager(sameNode ? cache1 : cache2);
mgr.begin();
}
if (sameNode) {
cache1.put(k, "JBC");
} else {
cache2.put(k, "JBC");
}
if (useTx) {
if (!mgr.getTransaction().runPrepare()) { //couldn't prepare
latch.countDown();
mgr.rollback();
}
}
} catch (Exception e) {
if (useTx) {
try {
mgr.commit();
} catch (Exception e1) {
}
}
latch.countDown();
}
}
});
String name = "Infinispan";
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
log.trace("Here is where the fun starts...Here is where the fun starts...");
// lock node and start other thread whose write should now block
cache1.getAdvancedCache().lock(k);
t.start();
// wait till the put in thread t times out
assert latch.await(10, TimeUnit.SECONDS) : "Concurrent put didn't time out!";
cache1.put(k, name);
mgr.commit();
assertNotLocked("testcache", k);
t.join();
cache2.remove(k);
assert cache1.isEmpty();
assert cache2.isEmpty();
cache1.clear();
cache2.clear();
}
private void locksReleasedWithoutExplicitUnlockHelper(boolean lockPriorToPut, boolean useCommit)
throws Exception {
log.debugf("lockPriorToPut=%s, useCommit=%s", lockPriorToPut, useCommit);
Cache cache1 = cache(0, "testcache");
Cache cache2 = cache(1, "testcache");
assertClusterSize("Should only be 2 caches in the cluster!!!", 2);
assertNull("Should be null", cache1.get(k));
assertNull("Should be null", cache2.get(k));
String name = "Infinispan";
TransactionManager mgr = TestingUtil.getTransactionManager(cache1);
mgr.begin();
if (lockPriorToPut)
cache1.getAdvancedCache().lock(k);
cache1.put(k, name);
if (!lockPriorToPut)
cache1.getAdvancedCache().lock(k);
if (useCommit)
mgr.commit();
else
mgr.rollback();
if (useCommit) {
assertEquals(name, cache1.get(k));
assertEquals("Should have replicated", name, cache2.get(k));
} else {
assertEquals(null, cache1.get(k));
assertEquals("Should not have replicated", null, cache2.get(k));
}
cache2.remove(k);
assert cache1.isEmpty();
assert cache2.isEmpty();
cache1.clear();
cache2.clear();
}
}
| 7,737
| 31.788136
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/SyncCacheListenerTest.java
|
package org.infinispan.replication;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.fail;
import java.util.Map;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryModified;
import org.infinispan.notifications.cachelistener.annotation.CacheEntryRemoved;
import org.infinispan.notifications.cachelistener.event.Event;
import org.infinispan.notifications.cachelistener.event.TransactionalEvent;
import org.infinispan.test.MultipleCacheManagersTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* Test out the CacheListener
*/
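// A minimal listener sketch of the notification API this test exercises; the class and method
// names below are illustrative only and do not appear in this test.
//
//    @Listener
//    public class ModificationLogger {
//       @CacheEntryModified
//       public void onModified(CacheEntryModifiedEvent<Object, Object> e) {
//          if (!e.isPre()) log.tracef("key %s now maps to %s", e.getKey(), e.getValue());
//       }
//    }
//    cache.addListener(new ModificationLogger());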
@Test(groups = "functional", testName = "replication.SyncCacheListenerTest")
public class SyncCacheListenerTest extends MultipleCacheManagersTest {
private static final Log log = LogFactory.getLog(SyncCacheListenerTest.class);
private Cache<Object, Object> cache1, cache2;
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder builder =
getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
builder.locking().isolationLevel(IsolationLevel.SERIALIZABLE)
// TODO: Another case of default values changed (see ISPN-2651)
.transaction().useSynchronization(false);
createClusteredCaches(2, "cache", builder);
cache1 = cache(0, "cache");
cache2 = cache(1, "cache");
}
public void testSyncTxRepl() throws Exception {
Integer age;
TransactionManager tm = TestingUtil.getTransactionManager(cache1);
tm.begin();
Transaction tx = tm.getTransaction();
LocalListener lis = new LocalListener();
cache1.addListener(lis);
try {
lis.put("age", 38);
} finally {
cache1.removeListener(lis);
}
tm.suspend();
assertNull("age on cache2 must be null as the TX has not yet been committed", cache2.get("age"));
tm.resume(tx);
tm.commit();
// value on cache2 must be 38
age = (Integer) cache2.get("age");
assertNotNull("\"age\" obtained from cache2 must be non-null ", age);
assertEquals("\"age\" must be 38", (int) age, 38);
}
public void testRemoteCacheListener() throws Exception {
Integer age;
RemoteListener lis = new RemoteListener();
cache2.addListener(lis);
try {
cache1.put("age", 38);
// value on cache2 must be 38
age = (Integer) cache2.get("age");
assertNotNull("\"age\" obtained from cache2 must be non-null ", age);
assertEquals("\"age\" must be 38", (int) age, 38);
cache1.remove("age");
} finally {
cache2.removeListener(lis);
}
}
public void testSyncRepl() throws Exception {
Integer age;
LocalListener lis = new LocalListener();
cache1.addListener(lis);
try {
lis.put("age", 38);
} finally {
         cache1.removeListener(lis);
}
// value on cache2 must be 38
age = (Integer) cache2.get("age");
assertNotNull("\"age\" obtained from cache2 must be non-null ", age);
assertEquals("\"age\" must be 38", (int) age, 38);
}
public void simpleReplicationTest() throws Exception {
TransactionManager tm = TestingUtil.getTransactionManager(cache1);
tm.begin();
cache1.put("key", "value");
tm.commit();
assertEquals("value", cache2.get("key"));
}
public void testSyncTxReplMap() throws Exception {
Integer age;
TransactionManager tm = TestingUtil.getTransactionManager(cache1);
tm.begin();
Transaction tx = tm.getTransaction();
      LocalListener lis = new LocalListener();
      cache1.addListener(lis);
      try {
cache1.put("age", 38);
lis.put("name", "Ben");
} finally {
cache1.removeListener(lis);
}
assertEquals(38, cache1.get("age"));
tm.suspend();
assertNull("age on cache2 must be null as the TX has not yet been committed", cache2.get("age"));
assertNull("age on cache1 must be null as the TX has been resumed", cache1.get("age"));
tm.resume(tx);
assertNotNull("age on cache1 must be not be null", cache1.get("age"));
tm.commit();
assertNotNull("age on cache1 must be not be null", cache1.get("age"));
log.trace(" ********************** ");
// value on cache2 must be 38
age = (Integer) cache2.get("age");
assertNotNull("\"age\" obtained from cache2 must be non-null ", age);
assertEquals("\"age\" must be 38", (int) age, 38);
}
public void testSyncReplMap() throws Exception {
Integer age;
LockManager lm1 = TestingUtil.extractComponent(cache1, LockManager.class);
assertNull("lock info is " + lm1.printLockInfo(), lm1.getOwner("age"));
LocalListener lis = new LocalListener();
cache1.addListener(lis);
try {
lis.put("age", 38);
cache1.put("name", "Ben");
} finally {
cache1.removeListener(lis);
}
assertNull("lock info is " + lm1.printLockInfo(), lm1.getOwner("age"));
// value on cache2 must be 38
age = (Integer) cache2.get("age");
assertNotNull("\"age\" obtained from cache2 must be non-null ", age);
assertEquals("\"age\" must be 38", (int) age, 38);
assertNull("lock info is " + lm1.printLockInfo(), lm1.getOwner("age"));
}
@Listener
public class LocalListener {
Object key = null;
public void put(Object key, Object val) {
this.key = key;
cache1.put(key, val);
}
public void put(Map<?, ?> map) {
if (map.isEmpty()) fail("put(): map size can't be 0");
cache1.putAll(map);
}
@CacheEntryModified
public void modified(Event<Object, Object> ne) {
if (!ne.isPre()) {
log.debug("modified visited with key: " + key);
try {
// test out if we can get the read lock since there is a write lock going as well.
cache1.get(key);
}
catch (CacheException e) {
log.error("Error reading the cache", e);
throw e;
}
}
}
}
@Listener
static public class RemoteListener {
@CacheEntryRemoved
@CacheEntryModified
public void callback(TransactionalEvent e) {
log.trace("Callback got event " + e);
log.debug("Callback got event " + e);
assertFalse("entry was removed on remote cache so isLocal should be false", e.isOriginLocal());
}
}
}
| 7,216
| 32.105505
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/replication/ReplicatedAPITest.java
|
package org.infinispan.replication;
import static org.infinispan.context.Flag.CACHE_MODE_LOCAL;
import static org.testng.AssertJUnit.assertEquals;
import java.util.HashMap;
import java.util.Map;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.test.MultipleCacheManagersTest;
import org.testng.annotations.Test;
@Test(groups = {"functional", "smoke"}, testName = "replication.ReplicatedAPITest")
public class ReplicatedAPITest extends MultipleCacheManagersTest {
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder build = getDefaultClusteredCacheConfig(CacheMode.REPL_SYNC, true);
build.clustering().stateTransfer().timeout(30000);
createClusteredCaches(2, "replication", build);
}
public void put() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
// test a simple put!
assert cache1.get("key") == null;
assert cache2.get("key") == null;
cache1.put("key", "value");
assert cache1.get("key").equals("value");
assert cache2.get("key").equals("value");
Map<String, String> map = new HashMap<>();
map.put("key2", "value2");
map.put("key3", "value3");
cache1.putAll(map);
assert cache1.get("key").equals("value");
assert cache2.get("key").equals("value");
assert cache1.get("key2").equals("value2");
assert cache2.get("key2").equals("value2");
assert cache1.get("key3").equals("value3");
assert cache2.get("key3").equals("value3");
}
public void remove() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value");
assert cache2.get("key").equals("value");
assert cache1.get("key") == null;
cache1.remove("key");
assert cache1.get("key") == null;
assert cache2.get("key") == null;
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "value");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value");
assert cache1.get("key").equals("value");
assert cache2.get("key").equals("value");
cache1.remove("key");
assert cache1.get("key") == null;
assert cache2.get("key") == null;
}
public void testPutIfAbsent() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "valueOld");
assert cache2.get("key").equals("valueOld");
assert cache1.get("key") == null;
cache1.putIfAbsent("key", "value");
assertEquals("value", cache1.get("key"));
assertEquals("value", cache2.get("key"));
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value3");
assert cache1.get("key").equals("value");
assert cache2.get("key").equals("value3");
cache1.putIfAbsent("key", "value4");
assert cache1.get("key").equals("value");
assert cache2.get("key").equals("value3"); // should not invalidate cache2!!
}
public void testRemoveIfPresent() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "value1");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value2");
assert cache1.get("key").equals("value1");
assert cache2.get("key").equals("value2");
cache1.remove("key", "value");
assert cache1.get("key").equals("value1") : "Should not remove";
assert cache2.get("key").equals("value2") : "Should not remove";
cache1.remove("key", "value1");
assert cache1.get("key") == null;
assert cache2.get("key") == null;
}
public void testClear() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "value1");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value2");
assert cache1.get("key").equals("value1");
assert cache2.get("key").equals("value2");
cache1.clear();
assert cache1.get("key") == null;
assert cache2.get("key") == null;
}
public void testReplace() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value2");
assert cache1.get("key") == null;
assert cache2.get("key").equals("value2");
cache1.replace("key", "value1"); // should do nothing since there is nothing to replace on cache1
assert cache1.get("key") == null;
assert cache2.get("key").equals("value2");
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "valueN");
cache1.replace("key", "value1");
assert cache1.get("key").equals("value1");
assert cache2.get("key").equals("value1");
}
public void testReplaceWithOldVal() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value2");
assert cache1.get("key") == null;
assert cache2.get("key").equals("value2");
cache1.replace("key", "valueOld", "value1"); // should do nothing since there is nothing to replace on cache1
assert cache1.get("key") == null;
assert cache2.get("key").equals("value2");
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "valueN");
cache1.replace("key", "valueOld", "value1"); // should do nothing since there is nothing to replace on cache1
assert cache1.get("key").equals("valueN");
assert cache2.get("key").equals("value2");
cache1.replace("key", "valueN", "value1");
// the replace executed identically on both of them
assertEquals("value1", cache1.get("key"));
assertEquals("value1", cache2.get("key"));
}
public void testLocalOnlyClear() {
AdvancedCache<String, String> cache1 = advancedCache(0,"replication");
AdvancedCache<String, String> cache2 = advancedCache(1,"replication");
cache1.withFlags(CACHE_MODE_LOCAL).put("key", "value1");
cache2.withFlags(CACHE_MODE_LOCAL).put("key", "value2");
assert cache1.get("key").equals("value1");
assert cache2.get("key").equals("value2");
cache1.withFlags(CACHE_MODE_LOCAL).clear();
assert cache1.get("key") == null;
assert cache2.get("key") != null;
assert cache2.get("key").equals("value2");
}
}
| 6,967
| 35.673684
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/TxConflictResolutionTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.distribution.MagicKey;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.testng.annotations.Test;
/**
* Check that conflict resolution completes successfully in transactional caches with autocommit disabled.
*
* <ol>
* <li>do a split, and let k -> A, k -> null in the two partitions</li>
* <li>merge partitions</li>
* <li>check that all members read A</li>
* </ol>
*
* <p>See ISPN-12725.</p>
*
* @author Dan Berindei
* @since 12.1
*/
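// A minimal sketch of how the merge policy used by this test is typically configured, assuming
// partition handling is enabled on the cache (presumably applied by the base class from the
// mergePolicy field set in the constructor):
//
//    ConfigurationBuilder builder = new ConfigurationBuilder();
//    builder.clustering().cacheMode(CacheMode.DIST_SYNC)
//           .partitionHandling()
//           .whenSplit(PartitionHandling.ALLOW_READ_WRITES)
//           .mergePolicy(MergePolicy.PREFERRED_NON_NULL);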
@Test(groups = "functional", testName = "conflict.impl.TxConflictResolutionTest")
public class TxConflictResolutionTest extends BaseMergePolicyTest {
private static final String VAL = "A";
private boolean autoCommit;
@Override
public Object[] factory() {
return new Object[] {
new TxConflictResolutionTest().autoCommit(true).lockingMode(LockingMode.PESSIMISTIC),
new TxConflictResolutionTest().autoCommit(false).lockingMode(LockingMode.PESSIMISTIC),
new TxConflictResolutionTest().autoCommit(true).lockingMode(LockingMode.OPTIMISTIC),
new TxConflictResolutionTest().autoCommit(false).lockingMode(LockingMode.OPTIMISTIC),
};
}
public TxConflictResolutionTest() {
super(DIST_SYNC, null, new int[]{0,1}, new int[]{2,3});
this.mergePolicy = MergePolicy.PREFERRED_NON_NULL;
this.valueAfterMerge = VAL;
}
TxConflictResolutionTest autoCommit(boolean autoCommit) {
this.autoCommit = autoCommit;
return this;
}
@Override
protected String[] parameterNames() {
return concat(super.parameterNames(), new String[]{"autoCommit"});
}
@Override
protected Object[] parameterValues() {
return concat(super.parameterValues(), autoCommit);
}
@Override
protected void customizeCacheConfiguration(ConfigurationBuilder dcc) {
dcc.transaction()
.transactionMode(TransactionMode.TRANSACTIONAL)
.lockingMode(lockingMode)
.autoCommit(autoCommit);
}
@Override
protected void beforeSplit() {
conflictKey = new MagicKey(cache(p0.node(0)), cache(p1.node(0)));
}
@Override
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache) throws Exception {
try {
tm(p0.node(0)).begin();
cache(p0.node(0)).put(conflictKey, VAL);
} finally {
tm(p0.node(0)).commit();
}
assertCacheGet(conflictKey, VAL, p0.getNodes());
assertCacheGet(conflictKey, null, p1.getNodes());
}
@Override
protected void performMerge() {
assertCacheGet(conflictKey, VAL, p0.getNodes());
assertCacheGet(conflictKey, null, p1.getNodes());
partition(0).merge(partition(1), false);
TestingUtil.waitForNoRebalance(caches());
assertCacheGet(conflictKey, VAL, cacheIndexes());
}
}
| 3,186
| 30.554455
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/MergePolicyPreferredNonNullTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "partitionhandling.MergePolicyPreferredNonNullTest")
public class MergePolicyPreferredNonNullTest extends BaseMergePolicyTest {
@Override
public Object[] factory() {
return new Object[] {
new MergePolicyPreferredNonNullTest(REPL_SYNC, 2, "5N", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredNonNullTest(REPL_SYNC, 2, "4N", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredNonNullTest(REPL_SYNC, 2, "3N", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredNonNullTest(REPL_SYNC, 2, "2N", new int[]{0}, new int[]{1}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 1, "5N-1", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 1, "4N-1", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 1, "3N-1", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 1, "2N-1", new int[]{0}, new int[]{1}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 2, "5N-2", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 2, "4N-2", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 2, "3N-2", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredNonNullTest(DIST_SYNC, 2, "2N-2", new int[]{0}, new int[]{1})
};
}
public MergePolicyPreferredNonNullTest(){}
public MergePolicyPreferredNonNullTest(CacheMode cacheMode, int owners, String description, int[] partition1, int[] partition2) {
super(cacheMode, owners, description, AvailabilityMode.AVAILABLE, partition1, partition2);
this.mergePolicy = MergePolicy.PREFERRED_NON_NULL;
}
@Override
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache) {
preferredPartitionCache.remove(conflictKey);
otherCache.put(conflictKey, "DURING SPLIT");
}
}
| 2,441
| 49.875
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/OperationsDuringMergeConflictTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.infinispan.test.TestingUtil.replaceComponent;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import static org.infinispan.topology.CacheTopology.Phase.READ_OLD_WRITE_ALL;
import static org.testng.AssertJUnit.fail;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.topology.TopologyUpdateCommand;
import org.infinispan.distribution.MagicKey;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.InboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.transport.Address;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TestInternalCacheEntryFactory;
import org.infinispan.topology.CacheTopology;
import org.infinispan.xsite.XSiteReplicateCommand;
import org.testng.annotations.Test;
/**
 * 1. do a split, and let k -> A, k -> B in the two partitions
 * 2. initiate a conflict resolution, with a merge policy saying that merge(A, B) = C
 * 3. check that members from each partition read A (in p1) or B (in p2)
 * 4. let someone from p1 issue a write k -> D, and check that both p1 and p2 now read D
 * 5. let the actual merge proceed (and be ignored)
 * 6. check that all nodes still read D
 * 7. let state transfer proceed and check that D is still there
 * <p>
 * For a sanity check, you should be able to disable the write of D and see C everywhere instead.
 * The same should work for removal as well (the merge should not overwrite the removal), though CommitManager is expected to behave the same way in that case.
* @author Ryan Emerson
* @since 9.1
*/
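// A minimal sketch of the kind of custom EntryMergePolicy this test installs; the constant names
// mirror the ones defined in this class, and the builder call is assumed to match what the base
// class does with the mergePolicy field:
//
//    EntryMergePolicy<Object, Object> mergeToC =
//          (preferredEntry, otherEntries) ->
//                TestInternalCacheEntryFactory.create(conflictKey, MERGE_RESULT);
//    builder.clustering().partitionHandling().mergePolicy(mergeToC);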
@Test(groups = "functional", testName = "conflict.impl.OperationsDuringMergeConflictTest")
public class OperationsDuringMergeConflictTest extends BaseMergePolicyTest {
private static final String PARTITION_0_VAL = "A";
private static final String PARTITION_1_VAL = "B";
private static final String MERGE_RESULT = "C";
private static final String PUT_RESULT = "D";
private enum MergeAction {
PUT(PUT_RESULT),
REMOVE(null),
NONE(MERGE_RESULT);
String value;
MergeAction(String value) {
this.value = value;
}
}
@Override
public Object[] factory() {
return new Object[] {
new OperationsDuringMergeConflictTest(MergeAction.NONE),
new OperationsDuringMergeConflictTest(MergeAction.PUT),
new OperationsDuringMergeConflictTest(MergeAction.REMOVE)
};
}
private MergeAction mergeAction;
public OperationsDuringMergeConflictTest(){}
public OperationsDuringMergeConflictTest(MergeAction mergeAction) {
super(DIST_SYNC, null, new int[]{0,1}, new int[]{2,3});
this.mergePolicy = ((preferredEntry, otherEntries) -> TestInternalCacheEntryFactory.create(conflictKey, MERGE_RESULT));
this.description = mergeAction.toString();
this.mergeAction = mergeAction;
this.valueAfterMerge = mergeAction.value;
}
@Override
protected void beforeSplit() {
conflictKey = new MagicKey(cache(p0.node(0)), cache(p1.node(0)));
}
@Override
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache) {
cache(p0.node(0)).put(conflictKey, PARTITION_0_VAL);
cache(p1.node(0)).put(conflictKey, PARTITION_1_VAL);
assertCacheGet(conflictKey, PARTITION_0_VAL, p0.getNodes());
assertCacheGet(conflictKey, PARTITION_1_VAL, p1.getNodes());
}
@Override
protected void performMerge() {
boolean modifyDuringMerge = mergeAction != MergeAction.NONE;
CountDownLatch conflictLatch = new CountDownLatch(1);
CountDownLatch stateTransferLatch = new CountDownLatch(1);
try {
IntStream.range(0, numMembersInCluster).forEach(i -> {
wrapInboundInvocationHandler(cache(i), handler -> new BlockStateResponseCommandHandler(handler, conflictLatch));
EmbeddedCacheManager manager = manager(i);
InboundInvocationHandler handler = extractGlobalComponent(manager, InboundInvocationHandler.class);
BlockingInboundInvocationHandler ourHandler = new BlockingInboundInvocationHandler(handler, stateTransferLatch);
replaceComponent(manager, InboundInvocationHandler.class, ourHandler, true);
});
assertCacheGet(conflictKey, PARTITION_0_VAL, p0.getNodes());
assertCacheGet(conflictKey, PARTITION_1_VAL, p1.getNodes());
partition(0).merge(partition(1), false);
assertCacheGet(conflictKey, PARTITION_0_VAL, p0.getNodes());
assertCacheGet(conflictKey, PARTITION_1_VAL, p1.getNodes());
if (modifyDuringMerge) {
// Wait for CONFLICT_RESOLUTION topology to have been installed by the coordinator and then proceed
List<Address> allMembers = caches().stream().map(cache -> cache.getCacheManager().getAddress()).collect(Collectors.toList());
TestingUtil.waitForTopologyPhase(allMembers, CacheTopology.Phase.CONFLICT_RESOLUTION, caches().toArray(new Cache[numMembersInCluster]));
if (mergeAction == MergeAction.PUT) {
cache(0).put(conflictKey, mergeAction.value);
} else {
cache(0).remove(conflictKey);
}
}
conflictLatch.countDown();
stateTransferLatch.countDown();
TestingUtil.waitForNoRebalance(caches());
assertCacheGetValAllCaches(mergeAction);
} catch (Throwable t) {
conflictLatch.countDown();
stateTransferLatch.countDown();
throw t;
}
}
private void assertCacheGetValAllCaches(MergeAction action) {
assertCacheGet(conflictKey, action.value, cacheIndexes());
}
private class BlockingInboundInvocationHandler implements InboundInvocationHandler {
final InboundInvocationHandler delegate;
final CountDownLatch latch;
BlockingInboundInvocationHandler(InboundInvocationHandler delegate, CountDownLatch latch) {
this.delegate = delegate;
this.latch = latch;
}
@Override
public void handleFromCluster(Address origin, ReplicableCommand command, Reply reply, DeliverOrder order) {
if (command instanceof TopologyUpdateCommand &&
((TopologyUpdateCommand) command).getPhase() == READ_OLD_WRITE_ALL) {
awaitLatch(latch);
}
delegate.handleFromCluster(origin, command, reply, order);
}
@Override
public void handleFromRemoteSite(String origin, XSiteReplicateCommand command, Reply reply, DeliverOrder order) {
delegate.handleFromRemoteSite(origin, command, reply, order);
}
}
private class BlockStateResponseCommandHandler extends AbstractDelegatingHandler {
final CountDownLatch latch;
BlockStateResponseCommandHandler(PerCacheInboundInvocationHandler delegate, CountDownLatch latch) {
super(delegate);
this.latch = latch;
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
if (command instanceof StateResponseCommand)
awaitLatch(latch);
delegate.handle(command, reply, order);
}
}
private void awaitLatch(CountDownLatch latch) {
try {
         // The timeout has to be large enough to allow for the rebalance and subsequent operations,
         // so we use double the rebalance timeout. The timeout is necessary because the latch is not
         // always counted down in the handler.
         if (!latch.await(120, TimeUnit.SECONDS))
            fail("CountDownLatch await timed out");
} catch (InterruptedException ignore) {
fail("CountDownLatch Interrupted");
}
}
}
| 8,465
| 41.33
| 148
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/ConflictManagerTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotSame;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.IntConsumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.statetransfer.ConflictResolutionStartCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.conflict.ConflictManager;
import org.infinispan.conflict.ConflictManagerFactory;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.container.entries.NullCacheEntry;
import org.infinispan.context.Flag;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.MagicKey;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.DataRehashed;
import org.infinispan.notifications.cachelistener.event.DataRehashedEvent;
import org.infinispan.partitionhandling.BasePartitionHandlingTest;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.InboundTransferTask;
import org.infinispan.statetransfer.StateChunk;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "conflict.resolution.ConflictManagerTest")
public class ConflictManagerTest extends BasePartitionHandlingTest {
private static final String CACHE_NAME = "conflict-cache";
private static final int NUMBER_OF_OWNERS = 2;
private static final int NUMBER_OF_CACHE_ENTRIES = 100;
private static final int INCONSISTENT_VALUE_INCREMENT = 10;
private static final int NULL_VALUE_FREQUENCY = 20;
public ConflictManagerTest() {
this.cacheMode = CacheMode.DIST_SYNC;
this.partitionHandling = PartitionHandling.ALLOW_READ_WRITES;
}
@Override
protected void createCacheManagers() throws Throwable {
super.createCacheManagers();
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
builder.clustering().partitionHandling().whenSplit(partitionHandling).mergePolicy(null).stateTransfer().fetchInMemoryState(true);
defineConfigurationOnAllManagers(CACHE_NAME, builder);
}
public void testGetAllVersionsDuringStateTransfer() throws Exception {
final int key = 1;
final int value = 1;
createCluster();
getCache(2).put(key, value);
splitCluster();
RehashListener listener = new RehashListener();
getCache(0).addListener(listener);
CountDownLatch latch = new CountDownLatch(1);
delayStateTransferCompletion(latch);
// Trigger the merge and wait for state transfer to begin
Future<?> mergeFuture = fork(() -> partition(0).merge(partition(1)));
assertTrue(listener.latch.await(10, TimeUnit.SECONDS));
Future<Map<Address, InternalCacheValue<Object>>> versionFuture = fork(() -> getAllVersions(0, key));
// Check that getAllVersions doesn't return while state transfer is in progress
TestingUtil.assertNotDone(versionFuture);
// Allow and wait for state transfer to finish
latch.countDown();
mergeFuture.get(30, TimeUnit.SECONDS);
// Check the results
Map<Address, InternalCacheValue<Object>> versionMap = versionFuture.get(60, TimeUnit.SECONDS);
assertTrue(versionMap != null);
assertTrue(!versionMap.isEmpty());
      // mergePolicy == null, so no conflict resolution occurs; therefore the versionMap may contain null entries
      assertEquals(String.format("Returned versionMap %s", versionMap), 2, versionMap.size());
}
public void testGetAllVersionsTimeout() throws Throwable {
ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC);
builder.clustering().remoteTimeout(5000).stateTransfer().fetchInMemoryState(true);
String cacheName = CACHE_NAME + "2";
defineConfigurationOnAllManagers(cacheName, builder);
waitForClusterToForm(cacheName);
dropClusteredGetCommands();
Exceptions.expectException(CacheException.class, ".* encountered when attempting '.*.' on cache '.*.'", () -> getAllVersions(0, "Test"));
}
public void testGetConflictsDuringStateTransfer() throws Throwable {
createCluster();
splitCluster();
RehashListener listener = new RehashListener();
getCache(0).addListener(listener);
CountDownLatch latch = new CountDownLatch(1);
delayStateTransferCompletion(latch);
fork(() -> partition(0).merge(partition(1), false));
listener.latch.await();
Exceptions.expectException(IllegalStateException.class, ".* Unable to retrieve conflicts as StateTransfer is currently in progress for cache .*", () -> getConflicts(0));
latch.countDown();
}
public void testGetConflictAfterCancellation() throws Exception {
waitForClusterToForm(CACHE_NAME);
CountDownLatch latch = new CountDownLatch(1);
cancelStateTransfer(latch);
Future<Long> f = fork(() -> getConflicts(0).count());
if (!latch.await(10, TimeUnit.SECONDS)) {
throw new TestException("No state transfer cancelled");
}
assertEquals(0, (long) f.get(10, TimeUnit.SECONDS));
}
public void testAllVersionsOfKeyReturned() {
// Test with and without conflicts
waitForClusterToForm(CACHE_NAME);
IntStream.range(0, NUMBER_OF_CACHE_ENTRIES).forEach(i -> getCache(0).put(i, "v" + i));
compareCacheValuesForKey(INCONSISTENT_VALUE_INCREMENT, true);
introduceCacheConflicts();
compareCacheValuesForKey(INCONSISTENT_VALUE_INCREMENT, false);
compareCacheValuesForKey(NULL_VALUE_FREQUENCY, false);
}
public void testConsecutiveInvocationOfAllVersionsForKey() throws Exception {
waitForClusterToForm(CACHE_NAME);
int key = 1;
Map<Address, InternalCacheValue<Object>> result1 = getAllVersions(0, key);
Map<Address, InternalCacheValue<Object>> result2 = getAllVersions(0, key);
assertNotSame(result1, result2); // Assert that a different map is returned, i.e. a new CompletableFuture was created
assertEquals(result1, result2); // Assert that returned values are still logically equivalent
}
public void testConflictsDetected() {
// Test that no conflicts are detected at the start
// Deliberately introduce conflicts and make sure they are detected
waitForClusterToForm(CACHE_NAME);
IntStream.range(0, NUMBER_OF_CACHE_ENTRIES).forEach(i -> getCache(0).put(i, "v" + i));
final int cacheIndex = numMembersInCluster - 1;
assertEquals(0, getConflicts(cacheIndex).count());
introduceCacheConflicts();
List<Map<Address, CacheEntry<Object, Object>>> conflicts = getConflicts(cacheIndex).collect(Collectors.toList());
assertEquals(INCONSISTENT_VALUE_INCREMENT, conflicts.size());
for (Map<Address, CacheEntry<Object, Object>> map : conflicts) {
assertEquals(NUMBER_OF_OWNERS, map.keySet().size());
Collection<CacheEntry<Object, Object>> mapValues = map.values();
int key = mapValues.stream().filter(e -> !(e instanceof NullCacheEntry)).mapToInt(e -> (Integer) e.getKey()).findAny().orElse(-1);
assertTrue(key > -1);
if (key % NULL_VALUE_FREQUENCY == 0) {
assertTrue(map.values().stream().anyMatch(NullCacheEntry.class::isInstance));
} else {
List<Object> icvs = map.values().stream().map(CacheEntry::getValue).distinct().collect(Collectors.toList());
assertEquals(NUMBER_OF_OWNERS, icvs.size());
assertTrue("Expected one of the conflicting string values to be 'INCONSISTENT'", icvs.contains("INCONSISTENT"));
}
}
}
public void testConflictsResolvedWithProvidedMergePolicy() {
createCluster();
AdvancedCache<Object, Object> cache = getCache(0);
ConflictManager<Object, Object> cm = ConflictManagerFactory.get(cache);
MagicKey key = new MagicKey(cache(0), cache(1));
cache.put(key, 1);
cache.withFlags(Flag.CACHE_MODE_LOCAL).put(key, 2);
assertEquals(1, getConflicts(0).count());
cm.resolveConflicts(((preferredEntry, otherEntries) -> preferredEntry));
assertEquals(0, getConflicts(0).count());
}
public void testCacheOperationOnConflictStream() {
createCluster();
AdvancedCache<Object, Object> cache = getCache(0);
ConflictManager<Object, Object> cm = ConflictManagerFactory.get(cache);
MagicKey key = new MagicKey(cache(0), cache(1));
cache.put(key, 1);
cache.withFlags(Flag.CACHE_MODE_LOCAL).put(key, 2);
cm.getConflicts().forEach(map -> {
CacheEntry<Object, Object> entry = map.values().iterator().next();
Object conflictKey = entry.getKey();
cache.remove(conflictKey);
});
assertTrue(cache.isEmpty());
}
public void testNoEntryMergePolicyConfigured() {
Exceptions.expectException(CacheException.class, () -> ConflictManagerFactory.get(getCache(0)).resolveConflicts());
}
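   // Writes only to each key's primary owner (CACHE_MODE_LOCAL) so backups keep the original value: every
   // INCONSISTENT_VALUE_INCREMENT-th key gets a conflicting value and every NULL_VALUE_FREQUENCY-th key is
   // removed from the primary, producing a "missing entry" conflict.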
private void introduceCacheConflicts() {
LocalizedCacheTopology topology = getCache(0).getDistributionManager().getCacheTopology();
for (int i = 0; i < NUMBER_OF_CACHE_ENTRIES; i += INCONSISTENT_VALUE_INCREMENT) {
Address primary = topology.getDistribution(i).primary();
AdvancedCache<Object, Object> primaryCache = manager(primary).getCache(CACHE_NAME).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL);
if (i % NULL_VALUE_FREQUENCY == 0)
primaryCache.remove(i);
else
primaryCache.put(i, "INCONSISTENT");
}
}
private void compareCacheValuesForKey(int key, boolean expectEquality) {
List<Map<Address, InternalCacheValue<Object>>> cacheVersions = new ArrayList<>();
for (int i = 0; i < numMembersInCluster; i++)
cacheVersions.add(getAllVersions(i, key));
boolean allowNullValues = key % NULL_VALUE_FREQUENCY == 0;
int expectedValues = allowNullValues ? NUMBER_OF_OWNERS - 1 : NUMBER_OF_OWNERS;
for (Map<Address, InternalCacheValue<Object>> map : cacheVersions) {
assertEquals(map.toString(), NUMBER_OF_OWNERS, map.keySet().size());
if (!allowNullValues)
assertTrue("Version map contains null entries.", !map.values().contains(null));
List<Object> values = map.values().stream()
.filter(Objects::nonNull)
.map(InternalCacheValue::getValue)
.collect(Collectors.toList());
assertEquals(values.toString(), expectedValues, values.size());
if (expectEquality) {
assertTrue("Inconsistent values returned, they should be the same", values.stream().allMatch(v -> v.equals(values.get(0))));
} else {
assertTrue("Expected inconsistent values, but all values were equal", map.values().stream().distinct().count() > 1);
}
}
}
private void createCluster() {
waitForClusterToForm(CACHE_NAME);
List<Address> members = getCache(0).getRpcManager().getMembers();
TestingUtil.waitForNoRebalance(caches());
      assertEquals(4, members.size());
}
private void splitCluster() {
splitCluster(new int[]{0, 1}, new int[]{2, 3});
TestingUtil.blockUntilViewsChanged(10000, 2, getCache(0), getCache(1), getCache(2), getCache(3));
TestingUtil.waitForNoRebalance(getCache(0), getCache(1));
TestingUtil.waitForNoRebalance(getCache(2), getCache(3));
}
private AdvancedCache<Object, Object> getCache(int index) {
return advancedCache(index, CACHE_NAME);
}
private Stream<Map<Address, CacheEntry<Object, Object>>> getConflicts(int index) {
return ConflictManagerFactory.get(getCache(index)).getConflicts();
}
private Map<Address, InternalCacheValue<Object>> getAllVersions(int index, Object key) {
return ConflictManagerFactory.get(getCache(index)).getAllVersions(key);
}
private void dropClusteredGetCommands() {
IntStream.range(0, numMembersInCluster).forEach(i -> wrapInboundInvocationHandler(getCache(i), DropClusteredGetCommandHandler::new));
}
private void delayStateTransferCompletion(CountDownLatch latch) {
IntStream.range(0, numMembersInCluster).forEach(i -> wrapInboundInvocationHandler(getCache(i), delegate -> new DelayStateResponseCommandHandler(latch, delegate)));
}
private void cancelStateTransfer(CountDownLatch latch) {
IntStream.range(0, numMembersInCluster).forEach(i -> wrapInboundInvocationHandler(getCache(i), delegate -> new StateTransferCancellation(latch, delegate)));
}
public class DelayStateResponseCommandHandler extends AbstractDelegatingHandler {
final CountDownLatch latch;
DelayStateResponseCommandHandler(CountDownLatch latch, PerCacheInboundInvocationHandler delegate) {
super(delegate);
this.latch = latch;
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
if (command instanceof StateResponseCommand) {
StateResponseCommand stc = (StateResponseCommand) command;
boolean isLastChunk = stc.getStateChunks().stream().anyMatch(StateChunk::isLastChunk);
if (isLastChunk) {
try {
                  // hold back the last state chunk until the test counts the latch down
                  latch.await(60, TimeUnit.SECONDS);
} catch (InterruptedException ignore) {
}
}
}
delegate.handle(command, reply, order);
}
}
public class StateTransferCancellation extends AbstractDelegatingHandler {
private final CountDownLatch latch;
protected StateTransferCancellation(CountDownLatch latch, PerCacheInboundInvocationHandler delegate) {
super(delegate);
this.latch = latch;
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
// ISPN-14084
// Simulate the condition where the InboundTransferTask is cancelled before the SegmentRequest future is done.
if (command instanceof ConflictResolutionStartCommand) {
StateReceiverImpl<?, ?> sr = (StateReceiverImpl<?, ?>) TestingUtil.extractComponent(cache(0, command.getCacheName().toString()), StateReceiver.class);
Map<Address, InboundTransferTask> tasks = new HashMap<>();
((ConflictResolutionStartCommand) command).getSegments().forEach((IntConsumer) value -> tasks.putAll(sr.getTransferTaskMap(value)));
sr.nonBlockingExecutor.execute(() -> {
tasks.forEach((k, v) -> v.cancel());
delegate.handle(command, reply, order);
latch.countDown();
});
return;
}
delegate.handle(command, reply, order);
}
}
private class DropClusteredGetCommandHandler extends AbstractDelegatingHandler {
DropClusteredGetCommandHandler(PerCacheInboundInvocationHandler delegate) {
super(delegate);
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
if (!(command instanceof ClusteredGetCommand)) {
delegate.handle(command, reply, order);
}
}
}
@Listener
private class RehashListener {
final CountDownLatch latch = new CountDownLatch(1);
@DataRehashed
@SuppressWarnings("unused")
public void onDataRehashed(DataRehashedEvent event) {
if (event.isPre())
latch.countDown();
}
}
}
| 16,772
| 43.847594
| 175
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/MergePolicyCustomTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.test.fwk.TestInternalCacheEntryFactory;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "partitionhandling.MergePolicyCustomTest")
public class MergePolicyCustomTest extends BaseMergePolicyTest {
@Override
public Object[] factory() {
return new Object[] {
new MergePolicyCustomTest(REPL_SYNC, 2, "5N", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyCustomTest(REPL_SYNC, 2, "4N", new int[]{0,1}, new int[]{2,3}),
new MergePolicyCustomTest(REPL_SYNC, 2, "3N", new int[]{0,1}, new int[]{2}),
new MergePolicyCustomTest(REPL_SYNC, 2, "2N", new int[]{0}, new int[]{1}),
new MergePolicyCustomTest(DIST_SYNC, 1, "5N-1", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyCustomTest(DIST_SYNC, 1, "4N-1", new int[]{0,1}, new int[]{2,3}),
new MergePolicyCustomTest(DIST_SYNC, 1, "3N-1", new int[]{0,1}, new int[]{2}),
new MergePolicyCustomTest(DIST_SYNC, 1, "2N-1", new int[]{0}, new int[]{1}),
new MergePolicyCustomTest(DIST_SYNC, 2, "5N-2", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyCustomTest(DIST_SYNC, 2, "4N-2", new int[]{0,1}, new int[]{2,3}),
new MergePolicyCustomTest(DIST_SYNC, 2, "3N-2", new int[]{0,1}, new int[]{2}),
new MergePolicyCustomTest(DIST_SYNC, 2, "2N-2", new int[]{0}, new int[]{1})
};
}
public MergePolicyCustomTest(){}
public MergePolicyCustomTest(CacheMode cacheMode, int owners, String description, int[] partition1, int[] partition2) {
super(cacheMode, owners, description, AvailabilityMode.AVAILABLE, partition1, partition2);
this.mergePolicy = ((preferredEntry, otherEntries) -> TestInternalCacheEntryFactory.create(conflictKey, "Custom"));
this.valueAfterMerge = "Custom";
}
}
| 2,148
| 50.166667
| 122
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/BaseMergePolicyTest.java
|
package org.infinispan.conflict.impl;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.conflict.ConflictManager;
import org.infinispan.conflict.ConflictManagerFactory;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.distribution.MagicKey;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.BasePartitionHandlingTest;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.partitionhandling.impl.LostDataCheck;
import org.infinispan.partitionhandling.impl.PreferAvailabilityStrategy;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheStatusResponse;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.ClusterTopologyManagerImpl;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.topology.ManagerStatusResponse;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
public abstract class BaseMergePolicyTest extends BasePartitionHandlingTest {
private static Log log = LogFactory.getLog(BaseMergePolicyTest.class);
protected MagicKey conflictKey;
protected Object valueAfterMerge;
protected PartitionDescriptor p0;
protected PartitionDescriptor p1;
protected String description;
protected BaseMergePolicyTest() {
this.partitionHandling = PartitionHandling.ALLOW_READ_WRITES;
this.valueAfterMerge = "DURING SPLIT";
}
protected BaseMergePolicyTest(CacheMode cacheMode, String description, int[] partition1, int[] partition2) {
this(cacheMode, 2, description, null, partition1, partition2);
}
protected BaseMergePolicyTest(CacheMode cacheMode, String description, AvailabilityMode availabilityMode,
int[] partition1, int[] partition2) {
this(cacheMode, 2, description, availabilityMode, partition1, partition2);
}
protected BaseMergePolicyTest(CacheMode cacheMode, int numOwners, String description, AvailabilityMode availabilityMode,
int[] partition1, int[] partition2) {
this();
this.cacheMode = cacheMode;
this.description = description;
p0 = new PartitionDescriptor(availabilityMode, partition1);
p1 = new PartitionDescriptor(availabilityMode, partition2);
numMembersInCluster = p0.getNodes().length + p1.getNodes().length;
if (cacheMode == CacheMode.REPL_SYNC) {
numberOfOwners = numMembersInCluster;
} else {
this.numberOfOwners = numOwners;
}
}
@Override
protected String[] parameterNames() {
return concat(super.parameterNames(), new String[]{null});
}
@Override
protected Object[] parameterValues() {
return concat(super.parameterValues(), description);
}
protected void beforeSplit() {
conflictKey = numberOfOwners > 1 ? new MagicKey(cache(p0.node(0)), cache(p1.node(0))) : new MagicKey(cache(p0.node(0)));
cache(p0.node(0)).put(conflictKey, "BEFORE SPLIT");
}
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache)
throws Exception {
preferredPartitionCache.put(conflictKey, "DURING SPLIT");
}
protected void splitCluster() {
splitCluster(p0.getNodes(), p1.getNodes());
TestingUtil.waitForNoRebalance(getPartitionCaches(p0));
TestingUtil.waitForNoRebalance(getPartitionCaches(p1));
}
protected void performMerge() throws Exception {
partition(0).merge(partition(1));
}
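   // Post-merge check: each of the numberOfOwners copies must hold the merge policy's resolved value, or no
   // value at all when valueAfterMerge is null (as with MergePolicy.REMOVE_ALL), and no conflicts may remain.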
protected void afterConflictResolutionAndMerge() {
ConflictManager cm = conflictManager(0);
assert !cm.isConflictResolutionInProgress();
Map<Address, InternalCacheValue> versionMap = cm.getAllVersions(conflictKey);
assertNotNull(versionMap);
assertEquals("Versions: " + versionMap, numberOfOwners, versionMap.size());
String message = String.format("Key=%s. VersionMap: %s", conflictKey, versionMap);
for (InternalCacheValue icv : versionMap.values()) {
if (valueAfterMerge != null) {
assertNotNull(message, icv);
assertNotNull(message, icv.getValue());
assertEquals(message, valueAfterMerge, icv.getValue());
} else {
assertNull(message, icv);
}
}
assertEquals(0, cm.getConflicts().count());
}
public void testPartitionMergePolicy() throws Exception {
log.tracef("beforeSplit()");
beforeSplit();
log.tracef("splitCluster");
splitCluster();
log.tracef("duringSplit()");
AdvancedCache preferredPartitionCache = getCacheFromPreferredPartition();
duringSplit(preferredPartitionCache, getCacheFromNonPreferredPartition(preferredPartitionCache));
log.tracef("performMerge()");
performMerge();
log.tracef("afterConflictResolutionAndMerge()");
afterConflictResolutionAndMerge();
}
protected <K, V> AdvancedCache<K, V> getCacheFromNonPreferredPartition(AdvancedCache preferredCache) {
for (Cache c : caches()) {
AdvancedCache cache = (AdvancedCache) c;
if (!cache.getDistributionManager().getWriteConsistentHash().equals(preferredCache.getDistributionManager().getWriteConsistentHash()))
return cache;
}
return null;
}
protected <K, V> AdvancedCache<K, V> getCacheFromPreferredPartition() {
AdvancedCache[] caches = caches().stream().map(Cache::getAdvancedCache).toArray(AdvancedCache[]::new);
return getCacheFromPreferredPartition(caches);
}
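   // Collects each node's CacheStatusResponse and asks PreferAvailabilityStrategy which topology the coordinator
   // would keep after the merge, then returns the cache belonging to that topology's first member.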
protected <K, V> AdvancedCache<K, V> getCacheFromPreferredPartition(AdvancedCache... caches) {
Map<Address, CacheStatusResponse> statusResponses =
Arrays.stream(caches).collect(Collectors.toMap(this::address, this::getCacheStatus));
LostDataCheck lostDataCheck = ClusterTopologyManagerImpl::distLostDataCheck;
CacheTopology preferredTopology = new PreferAvailabilityStrategy(null, null, lostDataCheck)
.computePreferredTopology(statusResponses);
log.tracef("getCacheFromPreferredPartition: partition=%s", preferredTopology.getMembers());
return Arrays.stream(caches)
.filter(c -> address(c).equals(preferredTopology.getMembers().get(0)))
.findFirst().get();
}
private CacheStatusResponse getCacheStatus(AdvancedCache cache) {
LocalTopologyManager localTopologyManager = cache.getComponentRegistry().getComponent(LocalTopologyManager.class);
int viewId = cache.getRpcManager().getTransport().getViewId();
ManagerStatusResponse statusResponse = CompletionStages.join(localTopologyManager.handleStatusRequest(viewId));
return statusResponse.getCaches().get(cache.getName());
}
protected void assertCacheGet(Object key, Object value, int... caches) {
for (int index : caches) {
AdvancedCache cache = advancedCache(index);
String message = String.format("Key=%s, Value=%s, Cache Index=%s, Topology=%s", key, value, index, cache.getDistributionManager().getCacheTopology());
assertEquals(message, value, cache.get(key));
}
}
protected ConflictManager conflictManager(int index) {
return ConflictManagerFactory.get(advancedCache(index));
}
protected int[] cacheIndexes() {
int[] indexes = new int[numMembersInCluster];
int count = 0;
      for (int i : p0.getNodes())
         indexes[count++] = i;
      for (int i : p1.getNodes())
         indexes[count++] = i;
      return indexes;
}
}
| 7,957
| 39.395939
| 159
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/DenyReadWriteRemoveAllTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import static org.infinispan.partitionhandling.AvailabilityMode.AVAILABLE;
import static org.infinispan.partitionhandling.AvailabilityMode.DEGRADED_MODE;
import static org.testng.Assert.assertEquals;
import static org.testng.AssertJUnit.assertNull;
import java.util.HashSet;
import java.util.List;
import org.infinispan.AdvancedCache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.context.Flag;
import org.infinispan.distribution.MagicKey;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "partitionhandling.DenyReadWriteRemoveAllTest")
public class DenyReadWriteRemoveAllTest extends BaseMergePolicyTest {
@Override
public Object[] factory() {
/*
* AD => All partitions are degraded during split
* PD => Only the minority partition is degraded during split
* N => total number of nodes
*/
return new Object[] {
new DenyReadWriteRemoveAllTest(REPL_SYNC, "PD-5N", new int[]{0,1,2}, new int[]{3,4}),
new DenyReadWriteRemoveAllTest(REPL_SYNC, "AD-4N", DEGRADED_MODE, new int[]{0,1}, new int[]{2,3}),
new DenyReadWriteRemoveAllTest(REPL_SYNC, "PD-4N", new int[]{0,1,2}, new int[]{3}),
new DenyReadWriteRemoveAllTest(REPL_SYNC, "PD-3N", new int[]{0,1}, new int[]{2}),
new DenyReadWriteRemoveAllTest(REPL_SYNC, "AD-2N", DEGRADED_MODE, new int[]{0}, new int[]{1}),
new DenyReadWriteRemoveAllTest(DIST_SYNC, "AD-5N", DEGRADED_MODE, new int[]{0,1,2}, new int[]{3,4}),
new DenyReadWriteRemoveAllTest(DIST_SYNC, "AD-4N", DEGRADED_MODE, new int[]{0,1}, new int[]{2,3}),
new DenyReadWriteRemoveAllTest(DIST_SYNC, "PD-4N", new int[]{0,1,2}, new int[]{3}),
new DenyReadWriteRemoveAllTest(DIST_SYNC, "PD-3N", new int[]{0,1}, new int[]{2}),
new DenyReadWriteRemoveAllTest(DIST_SYNC, "AD-2N", DEGRADED_MODE, new int[]{0}, new int[]{1}),
};
}
public DenyReadWriteRemoveAllTest(){}
public DenyReadWriteRemoveAllTest(CacheMode cacheMode, String description, int[] partition1, int[] partition2) {
this(cacheMode, description, null, partition1, partition2);
}
public DenyReadWriteRemoveAllTest(CacheMode cacheMode, String description, AvailabilityMode availabilityMode,
int[] partition1, int[] partition2) {
super(cacheMode, description, availabilityMode, partition1, partition2);
this.mergePolicy = MergePolicy.REMOVE_ALL;
this.partitionHandling = PartitionHandling.DENY_READ_WRITES;
this.valueAfterMerge = null;
}
@Override
protected void beforeSplit() {
      // Put values locally before the split as an AvailabilityException will be thrown during the split
conflictKey = new MagicKey(cache(p0.node(0)), cache(p1.node(0)));
cache(p0.node(0)).put(conflictKey, "V1");
cache(p1.node(0)).getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).put(conflictKey, "V2");
}
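   // With DENY_READ_WRITES the minority partition always degrades after the split, and in the "AD" scenarios
   // both partitions do, so the conflicting key must not be readable there until conflict resolution completes.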
@Override
protected void splitCluster() {
List<Address> allMembers = advancedCache(0).getRpcManager().getMembers();
for (int i = 0; i < numMembersInCluster; i++)
assertEquals(new HashSet<>(partitionHandlingManager(i).getLastStableTopology().getMembers()), new HashSet<>(allMembers));
if (p0.getExpectedMode() == DEGRADED_MODE) {
eventually(() -> {
for (int i = 0; i < numMembersInCluster; i++)
if (partitionHandlingManager(i).getAvailabilityMode() != AVAILABLE)
return false;
return true;
});
}
splitCluster(p0.getNodes(), p1.getNodes());
if (p0.getExpectedMode() != DEGRADED_MODE) {
TestingUtil.waitForNoRebalance(getPartitionCaches(p0));
} else {
for (int i = 0; i < numMembersInCluster; i++)
assertEquals(new HashSet<>(partitionHandlingManager(i).getLastStableTopology().getMembers()), new HashSet<>(allMembers));
partition(0).assertDegradedMode();
partition(0).assertKeyNotAvailableForRead(conflictKey);
}
partition(1).assertDegradedMode();
partition(1).assertKeyNotAvailableForRead(conflictKey);
}
@Override
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache) {
}
@Override
protected void afterConflictResolutionAndMerge() {
partition(0).assertAvailabilityMode(AVAILABLE);
super.afterConflictResolutionAndMerge();
assertNull(cache(p0.node(0)).get(conflictKey));
}
}
| 4,978
| 43.061947
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/MergePolicyPreferredAlwaysTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "partitionhandling.MergePolicyPreferredAlwaysTest")
public class MergePolicyPreferredAlwaysTest extends BaseMergePolicyTest {
@Override
public Object[] factory() {
return new Object[] {
new MergePolicyPreferredAlwaysTest(REPL_SYNC, 2, "5N", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredAlwaysTest(REPL_SYNC, 2, "4N", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredAlwaysTest(REPL_SYNC, 2, "3N", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredAlwaysTest(REPL_SYNC, 2, "2N", new int[]{0}, new int[]{1}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 1, "5N-1", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 1, "4N-1", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 1, "3N-1", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 1, "2N-1", new int[]{0}, new int[]{1}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 2, "5N-2", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 2, "4N-2", new int[]{0,1}, new int[]{2,3}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 2, "3N-2", new int[]{0,1}, new int[]{2}),
new MergePolicyPreferredAlwaysTest(DIST_SYNC, 2, "2N-2", new int[]{0}, new int[]{1})
};
}
public MergePolicyPreferredAlwaysTest(){}
public MergePolicyPreferredAlwaysTest(CacheMode cacheMode, int owners, String description, int[] partition1, int[] partition2) {
super(cacheMode, owners, description, AvailabilityMode.AVAILABLE, partition1, partition2);
this.mergePolicy = MergePolicy.PREFERRED_ALWAYS;
}
}
| 2,168
| 51.902439
| 131
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/MultipleCachesDuringConflictResolutionTest.java
|
package org.infinispan.conflict.impl;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.partitionhandling.BasePartitionHandlingTest;
import org.infinispan.partitionhandling.PartitionHandling;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.testng.annotations.Test;
/**
* ISPN-8925 This test creates several caches and then initiates a split-brain followed by a merge. The purpose of this
* test is to ensure that when many caches exist, it's still possible for conflict resolution and the rebalance to proceed.
* Previously the executor in the ClusterTopologyManagerImpl would be blocked indefinitely if the number of caches was
* >= ProcessorInfo.availableProcessors() / 2 + 1
*
* @author Ryan Emerson
*/
@Test(groups = "functional", testName = "conflict.impl.MultipleCachesDuringConflictResolutionTest")
public class MultipleCachesDuringConflictResolutionTest extends BasePartitionHandlingTest {
// Does not include the org.infinispan.CONFIG and ___defaultCache, so total caches = numberOfCaches + 2
private int numberOfCaches = 10;
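   // Rough illustration (an assumption based on the class javadoc, not part of the original test): the old
   // deadlock required caches >= availableProcessors() / 2 + 1, e.g. 7 caches on a 12-core machine, so 10 user
   // caches comfortably exceed that threshold on typical CI hardware.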
private int numMembersInCluster;
private PartitionDescriptor p0;
private PartitionDescriptor p1;
public MultipleCachesDuringConflictResolutionTest() {
this.p0 = new PartitionDescriptor(0);
this.p1 = new PartitionDescriptor(1);
this.numMembersInCluster = 2;
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder dcc = cacheConfiguration();
dcc.clustering().cacheMode(cacheMode)
.partitionHandling().whenSplit(PartitionHandling.ALLOW_READ_WRITES).mergePolicy(MergePolicy.PREFERRED_ALWAYS);
String[] cacheNames = getCacheNames();
// Create a default cache because waitForPartitionToForm() needs it
GlobalConfigurationBuilder gc = GlobalConfigurationBuilder.defaultClusteredBuilder();
gc.defaultCacheName(cacheNames[0]);
createClusteredCaches(numMembersInCluster, gc, dcc, false, new TransportFlags().withFD(true).withMerge(true),
cacheNames);
waitForClusterToForm(cacheNames);
}
private String[] getCacheNames() {
String[] cacheNames = new String[numberOfCaches];
for (int i = 0; i < numberOfCaches; i++)
cacheNames[i] = "cache" + i;
return cacheNames;
}
public void testPartitionMergePolicy() {
TestingUtil.waitForNoRebalanceAcrossManagers(managers());
log.tracef("split test");
splitCluster(p0.getNodes(), p1.getNodes());
TestingUtil.waitForNoRebalanceAcrossManagers(manager(0));
TestingUtil.waitForNoRebalanceAcrossManagers(manager(1));
log.tracef("performMerge");
partition(0).merge(partition(1));
TestingUtil.waitForNoRebalanceAcrossManagers(managers());
}
}
| 2,969
| 41.428571
| 123
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/CrashedNodeDuringConflictResolutionTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.infinispan.AdvancedCache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.statetransfer.ConflictResolutionStartCommand;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.HashConfiguration;
import org.infinispan.conflict.ConflictManager;
import org.infinispan.conflict.EntryMergePolicy;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheValue;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.testng.annotations.Test;
/**
* 1. Partition cluster
* 2. When coordinator sends InboundTransferTask with segment for DURING_CR_CRASH_KEY crash the node
* 3. CR should hang until a new view is received
* 4. The previous CR should be cancelled and restarted with the crashed node removed
* 5. All keys should have the resolved value from the EntryMergePolicy
*/
@Test(groups = "functional", testName = "org.infinispan.conflict.impl.CrashedNodeDuringConflictResolutionTest")
public class CrashedNodeDuringConflictResolutionTest extends BaseMergePolicyTest {
private static final Log log = LogFactory.getLog(CrashedNodeDuringConflictResolutionTest.class);
private static final String PARTITION_0_VAL = "A";
private static final String PARTITION_1_VAL = "B";
private static final String BEFORE_CR_CRASH_KEY = "BEFORE_CR_CRASH";
private static final String DURING_CR_CRASH_KEY = "DURING_CR_CRASH";
private static final String AFTER_CR_RESTART_KEY = "AFTER_CR_CRASH";
private static final String RESOLVED_VALUE = "RESOLVED";
private static final String[] ALL_KEYS = new String[] {BEFORE_CR_CRASH_KEY, DURING_CR_CRASH_KEY, AFTER_CR_RESTART_KEY};
private static final EntryMergePolicy POLICY = (preferredEntry, otherEntries) -> {
Object key = preferredEntry != null ? preferredEntry.getKey() : ((CacheEntry)otherEntries.get(0)).getKey();
return new ImmortalCacheEntry(key, RESOLVED_VALUE);
};
private static final KeyPartitioner PARTITIONER = new TestKeyPartioner();
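   // The stub partitioner pins each test key to a fixed segment, so createStateRequestFuture() can deterministically
   // intercept the conflict-resolution state request that covers DURING_CR_CRASH_KEY before crashing manager(2).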
public CrashedNodeDuringConflictResolutionTest() {
super(DIST_SYNC, null, new int[]{0, 1}, new int[]{2, 3});
this.mergePolicy = POLICY;
this.valueAfterMerge = RESOLVED_VALUE;
}
@Override
protected void createCacheManagers() throws Throwable {
ConfigurationBuilder dcc = cacheConfiguration();
dcc.clustering()
.cacheMode(cacheMode).partitionHandling().whenSplit(partitionHandling).mergePolicy(mergePolicy)
.hash().keyPartitioner(PARTITIONER);
createClusteredCaches(numMembersInCluster, dcc, new TransportFlags().withFD(true).withMerge(true));
waitForClusterToForm();
}
@Override
protected void beforeSplit() {
// Ignore
}
@Override
protected void duringSplit(AdvancedCache preferredPartitionCache, AdvancedCache otherCache) {
for (String key : ALL_KEYS) {
cache(p0.node(0)).put(key, PARTITION_0_VAL);
cache(p1.node(0)).put(key, PARTITION_1_VAL);
}
for (String key : ALL_KEYS) {
assertCacheGet(key, PARTITION_0_VAL, p0.getNodes());
assertCacheGet(key, PARTITION_1_VAL, p1.getNodes());
}
}
@Override
protected void performMerge() throws Exception {
CompletableFuture<ConflictResolutionStartCommand> blockedStateRequest = createStateRequestFuture();
for (String key : ALL_KEYS) {
assertCacheGet(key, PARTITION_0_VAL, p0.getNodes());
assertCacheGet(key, PARTITION_1_VAL, p1.getNodes());
}
partition(0).merge(partition(1), false);
blockedStateRequest.get(60, TimeUnit.SECONDS);
if (log.isTraceEnabled()) log.trace("crashCacheManager(2)");
TestingUtil.crashCacheManagers(manager(2));
// Once the JGroups view has been updated to remove manager(index), then the CR should be restarted when the
// coordinator continues to recover the cluster state
TestingUtil.waitForNoRebalance(cache(0), cache(1), cache(3));
}
@Override
protected void afterConflictResolutionAndMerge() {
ConflictManager cm = conflictManager(0);
assertFalse(cm.isConflictResolutionInProgress());
for (String key : ALL_KEYS) {
Map<Address, InternalCacheValue> versionMap = cm.getAllVersions(key);
assertNotNull(versionMap);
assertEquals("Versions: " + versionMap, numberOfOwners, versionMap.size());
String message = String.format("Key=%s. VersionMap: %s", key, versionMap);
for (InternalCacheValue icv : versionMap.values()) {
assertNotNull(message, icv);
assertNotNull(message, icv.getValue());
assertEquals(message, valueAfterMerge, icv.getValue());
}
}
      assertEquals(0, cm.getConflicts().peek(m -> log.errorf("Conflict: %s", m)).count());
}
private CompletableFuture<ConflictResolutionStartCommand> createStateRequestFuture() {
int segment = PARTITIONER.getSegment(DURING_CR_CRASH_KEY);
CompletableFuture<ConflictResolutionStartCommand> future = new CompletableFuture<>();
wrapInboundInvocationHandler(cache(2), handler -> new CompleteFutureOnStateRequestHandler(handler, segment, manager(2), future));
return future;
}
private class CompleteFutureOnStateRequestHandler extends AbstractDelegatingHandler {
final int segment;
final EmbeddedCacheManager manager;
final CompletableFuture<ConflictResolutionStartCommand> future;
CompleteFutureOnStateRequestHandler(PerCacheInboundInvocationHandler delegate, int segment, EmbeddedCacheManager manager,
CompletableFuture<ConflictResolutionStartCommand> future) {
super(delegate);
this.segment = segment;
this.manager = manager;
this.future = future;
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
if (command instanceof ConflictResolutionStartCommand) {
ConflictResolutionStartCommand src = (ConflictResolutionStartCommand) command;
if (src.getSegments().contains(segment)) {
log.debugf("Completing future and ignoring state request %s", command);
future.complete(src);
return;
}
}
delegate.handle(command, reply, order);
}
}
public static class TestKeyPartioner implements KeyPartitioner {
private KeyPartitioner delegate = new HashFunctionPartitioner();
@Override
public void init(HashConfiguration configuration) {
delegate.init(configuration);
}
@Override
public int getSegment(Object key) {
if (key instanceof String) {
String keyString = (String) key;
switch (keyString) {
case BEFORE_CR_CRASH_KEY:
return 10;
case DURING_CR_CRASH_KEY:
return 20;
case AFTER_CR_RESTART_KEY:
return 30;
}
}
return delegate.getSegment(key);
}
}
}
| 8,269
| 41.628866
| 135
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/MergePolicyRemoveAllTest.java
|
package org.infinispan.conflict.impl;
import static org.infinispan.configuration.cache.CacheMode.DIST_SYNC;
import static org.infinispan.configuration.cache.CacheMode.REPL_SYNC;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.conflict.MergePolicy;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.testng.annotations.Test;
/**
* @author Ryan Emerson
* @since 9.1
*/
@Test(groups = "functional", testName = "conflict.impl.MergePolicyRemoveAllTest")
public class MergePolicyRemoveAllTest extends BaseMergePolicyTest {
@Override
public Object[] factory() {
return new Object[] {
new MergePolicyRemoveAllTest(REPL_SYNC, 2, "5N", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyRemoveAllTest(REPL_SYNC, 2, "4N", new int[]{0,1}, new int[]{2,3}),
new MergePolicyRemoveAllTest(REPL_SYNC, 2, "3N", new int[]{0,1}, new int[]{2}),
new MergePolicyRemoveAllTest(REPL_SYNC, 2, "2N", new int[]{0}, new int[]{1}),
new MergePolicyRemoveAllTest(DIST_SYNC, 1, "5N-1", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyRemoveAllTest(DIST_SYNC, 1, "4N-1", new int[]{0,1}, new int[]{2,3}),
new MergePolicyRemoveAllTest(DIST_SYNC, 1, "3N-1", new int[]{0,1}, new int[]{2}),
new MergePolicyRemoveAllTest(DIST_SYNC, 1, "2N-1", new int[]{0}, new int[]{1}),
new MergePolicyRemoveAllTest(DIST_SYNC, 2, "5N-2", new int[]{0,1,2}, new int[]{3,4}),
new MergePolicyRemoveAllTest(DIST_SYNC, 2, "4N-2", new int[]{0,1}, new int[]{2,3}),
new MergePolicyRemoveAllTest(DIST_SYNC, 2, "3N-2", new int[]{0,1}, new int[]{2}),
new MergePolicyRemoveAllTest(DIST_SYNC, 2, "2N-2", new int[]{0}, new int[]{1})
};
}
public MergePolicyRemoveAllTest(){}
public MergePolicyRemoveAllTest(CacheMode cacheMode, int owners, String description, int[] partition1, int[] partition2) {
super(cacheMode, owners, description, AvailabilityMode.AVAILABLE, partition1, partition2);
this.mergePolicy = MergePolicy.REMOVE_ALL;
this.valueAfterMerge = null;
}
}
| 2,143
| 45.608696
| 125
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/conflict/impl/StateReceiverTest.java
|
package org.infinispan.conflict.impl;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.statetransfer.ConflictResolutionStartCommand;
import org.infinispan.commands.statetransfer.StateTransferCancelCommand;
import org.infinispan.commons.CacheException;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.ImmortalCacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.TestAddress;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHashFactory;
import org.infinispan.distribution.ch.impl.HashFunctionPartitioner;
import org.infinispan.notifications.cachelistener.CacheNotifier;
import org.infinispan.notifications.cachelistener.event.Event;
import org.infinispan.notifications.cachelistener.event.impl.EventImpl;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.InboundTransferTask;
import org.infinispan.statetransfer.StateChunk;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.PersistentUUIDManager;
import org.infinispan.topology.PersistentUUIDManagerImpl;
import org.mockito.stubbing.Answer;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
@Test(groups = "functional", testName = "conflict.resolution.StateReceiverTest")
public class StateReceiverTest extends AbstractInfinispanTest {
private StateReceiverImpl<Object, Object> stateReceiver;
private LocalizedCacheTopology localizedCacheTopology;
private ExecutorService stateTransferExecutor =
Executors.newSingleThreadExecutor(getTestThreadFactory("StateTransfer"));
public void testGetReplicaException() {
CompletableFuture<Void> taskFuture = new CompletableFuture<>();
taskFuture.completeExceptionally(new CacheException("Problem encountered retrieving state"));
initTransferTaskMock(taskFuture);
CompletableFuture<List<Map<Address, CacheEntry<Object, Object>>>> cf = stateReceiver.getAllReplicasForSegment(0, localizedCacheTopology, 10000);
Exceptions.expectExecutionException(CacheException.class, cf);
}
public void testTopologyChangeDuringSegmentRequest() {
initTransferTaskMock(new CompletableFuture<>());
CompletableFuture<List<Map<Address, CacheEntry<Object, Object>>>> cf = stateReceiver.getAllReplicasForSegment(0, localizedCacheTopology, 10000);
assertTrue(!cf.isCancelled());
assertTrue(!cf.isCompletedExceptionally());
// Reduce #nodes to less than numowners to force hash change
stateReceiver.onDataRehash(createEventImpl(4, 1, Event.Type.DATA_REHASHED));
assertTrue(cf.isCompletedExceptionally());
Exceptions.expectExecutionException(CacheException.class, cf);
stateReceiver.onDataRehash(createEventImpl(4, 4, Event.Type.DATA_REHASHED));
cf = stateReceiver.getAllReplicasForSegment(1, localizedCacheTopology, 10000);
assertTrue(!cf.isCompletedExceptionally());
assertTrue(!cf.isCancelled());
}
public void testOldAndInvalidStateIgnored() {
initTransferTaskMock(new CompletableFuture<>());
int segmentId = 0;
stateReceiver.getAllReplicasForSegment(segmentId, localizedCacheTopology, 10000);
List<Address> sourceAddresses = new ArrayList<>(stateReceiver.getTransferTaskMap(segmentId).keySet());
Map<Object, Map<Address, CacheEntry<Object, Object>>> receiverKeyMap = stateReceiver.getKeyReplicaMap(segmentId);
assertEquals(0, receiverKeyMap.size());
stateReceiver.receiveState(sourceAddresses.get(0), 2, createStateChunks("Key1", "Value1"));
assertEquals(1, receiverKeyMap.size());
stateReceiver.receiveState(new TestAddress(5), 2, createStateChunks("Key2", "Value2"));
assertEquals(1, receiverKeyMap.size());
stateReceiver.receiveState(sourceAddresses.get(1), 1, new ArrayList<>());
assertEquals(1, receiverKeyMap.size());
}
@Test(expectedExceptions = CancellationException.class)
public void testRequestCanBeCancelledDuringTransfer() throws Exception {
// Init transfer that blocks and call stop() so the future should complete with CancellationException
InboundTransferTask task = mock(InboundTransferTask.class);
when(task.requestSegments()).thenAnswer(invocationOnMock -> {
TestingUtil.sleepThread(1000);
return CompletableFuture.completedFuture(new HashMap<>());
});
doReturn(task).when(stateReceiver).createTransferTask(any(Integer.class), any(Address.class), any(CacheTopology.class), any(Long.class));
CompletableFuture<List<Map<Address, CacheEntry<Object, Object>>>> future = stateReceiver.getAllReplicasForSegment(0, localizedCacheTopology, 10000);
future.whenComplete((result, throwable) -> {
assertNull(result);
assertNotNull(throwable);
assertTrue(throwable instanceof CancellationException);
});
stateReceiver.stop();
future.get();
}
@BeforeMethod
private void createAndInitStateReceiver() {
CommandsFactory commandsFactory = mock(CommandsFactory.class);
InternalDataContainer<?, ?> dataContainer = mock(InternalDataContainer.class);
RpcManager rpcManager = mock(RpcManager.class);
CacheNotifier<?, ?> cacheNotifier = mock(CacheNotifier.class);
Answer<?> answer = invocation -> {
Collection<Address> recipients = (Collection<Address>) invocation.getArguments()[0];
Address recipient = recipients.iterator().next();
Map<Address, Response> results = new HashMap<>(1);
results.put(recipient, SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
return results;
};
when(rpcManager.invokeCommand(any(Collection.class), any(ConflictResolutionStartCommand.class), any(), any())).thenAnswer(answer);
when(rpcManager.invokeCommand(any(Collection.class), any(StateTransferCancelCommand.class), any(), any())).thenAnswer(answer);
when(rpcManager.getSyncRpcOptions()).thenAnswer(invocation -> new RpcOptions(DeliverOrder.PER_SENDER, 10000, TimeUnit.MILLISECONDS));
StateReceiverImpl<Object, Object> stateReceiver = new StateReceiverImpl<>();
TestingUtil.inject(stateReceiver, cacheNotifier, commandsFactory, dataContainer, rpcManager, stateTransferExecutor);
stateReceiver.start();
stateReceiver.onDataRehash(createEventImpl(2, 4, Event.Type.DATA_REHASHED));
this.localizedCacheTopology = createLocalizedCacheTopology(4);
this.stateReceiver = spy(stateReceiver);
}
@AfterClass(alwaysRun = true)
public void stopExecutor() {
stateTransferExecutor.shutdownNow();
}
private void initTransferTaskMock(CompletableFuture<Void> completableFuture) {
InboundTransferTask task = mock(InboundTransferTask.class);
when(task.requestSegments()).thenReturn(completableFuture);
doReturn(task).when(stateReceiver).createTransferTask(any(Integer.class), any(Address.class), any(CacheTopology.class), any(Long.class));
}
private Collection<StateChunk> createStateChunks(Object key, Object value) {
Collection<InternalCacheEntry<?, ?>> entries = Collections.singleton(new ImmortalCacheEntry(key, value));
return Collections.singleton(new StateChunk(0, entries, true));
}
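   // Builds a DefaultConsistentHash over numberOfNodes test addresses (2 owners, 40 segments) so topologies of
   // arbitrary size can be simulated without starting real cache managers.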
private ConsistentHash createConsistentHash(int numberOfNodes) {
PersistentUUIDManager persistentUUIDManager = new PersistentUUIDManagerImpl();
List<Address> addresses = new ArrayList<>(numberOfNodes);
for (int i = 0; i < numberOfNodes; i++) {
Address address = new TestAddress(i);
addresses.add(address);
persistentUUIDManager.addPersistentAddressMapping(address, PersistentUUID.randomUUID());
}
DefaultConsistentHashFactory chf = new DefaultConsistentHashFactory();
return chf.create(2, 40, addresses, null);
}
private LocalizedCacheTopology createLocalizedCacheTopology(int numberOfNodes) {
ConsistentHash hash = createConsistentHash(numberOfNodes);
CacheTopology topology = new CacheTopology(-1, -1, hash, null, CacheTopology.Phase.NO_REBALANCE, hash.getMembers(), null);
return new LocalizedCacheTopology(CacheMode.DIST_SYNC, topology, new HashFunctionPartitioner(), hash.getMembers().get(0), true);
}
private EventImpl createEventImpl(int topologyId, int numberOfNodes, Event.Type type) {
EventImpl event = EventImpl.createEvent(null, type);
ConsistentHash hash = createConsistentHash(numberOfNodes);
event.setReadConsistentHashAtEnd(hash);
event.setWriteConsistentHashAtEnd(hash);
event.setNewTopologyId(topologyId);
event.setPre(true);
return event;
}
}
| 10,206
| 48.790244
| 154
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/DependencyGraphTest.java
|
package org.infinispan.util;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
/**
* Tests functionality in {@link org.infinispan.util.DependencyGraph}.
*
* @author gustavonalle
* @since 7.0
*/
@Test(testName = "util.DependencyGraphTest", groups = "unit")
public class DependencyGraphTest extends AbstractInfinispanTest {
@Test
public void testEmpty() throws CyclicDependencyException {
assertTrue(new DependencyGraph().topologicalSort().isEmpty());
}
@Test
public void testLinear() throws CyclicDependencyException {
DependencyGraph<Integer> graph = new DependencyGraph<>();
int size = 100;
for (int i = 1; i <= size; i++) {
graph.addDependency(i, i - 1);
}
List<Integer> sort = graph.topologicalSort();
assertEquals(sort.size(), size + 1);
assertEquals(sort.get(0), Integer.valueOf(100));
assertEquals(sort.get(100), Integer.valueOf(0));
}
@Test
public void testNonLinear() throws CyclicDependencyException {
DependencyGraph<String> graph = new DependencyGraph<>();
String A = "a";
String B = "b";
String C = "c";
String D = "d";
graph.addDependency(C, B);
graph.addDependency(C, D);
graph.addDependency(B, A);
graph.addDependency(A, D);
List<String> sort = graph.topologicalSort();
assertEquals(sort, Arrays.asList(C, B, A, D));
}
@Test
public void testIdempotency() throws CyclicDependencyException {
DependencyGraph<String> g = new DependencyGraph<>();
g.addDependency("N1", "N2");
g.addDependency("N2", "N3");
g.addDependency("N1", "N2");
g.addDependency("N2", "N3");
assertEquals(g.topologicalSort().size(), 3);
assertEquals(g.topologicalSort(), Arrays.asList("N1", "N2", "N3"));
}
@Test
public void testDependent() throws CyclicDependencyException {
DependencyGraph<String> graph = new DependencyGraph<>();
graph.addDependency("A", "B");
graph.addDependency("A", "C");
graph.addDependency("A", "D");
graph.addDependency("D", "F");
assertTrue(graph.hasDependent("B"));
assertTrue(graph.hasDependent("C"));
assertTrue(graph.hasDependent("D"));
assertTrue(graph.hasDependent("F"));
assertFalse(graph.hasDependent("A"));
assertTrue(graph.getDependents("A").isEmpty());
assertEquals(graph.getDependents("B").iterator().next(), "A");
assertEquals(graph.getDependents("C").iterator().next(), "A");
assertEquals(graph.getDependents("D").iterator().next(), "A");
assertEquals(graph.getDependents("F").iterator().next(), "D");
}
@Test
public void testConcurrentAccess() throws Exception {
DependencyGraph<String> graph = new DependencyGraph<>();
ExecutorService service = Executors.newCachedThreadPool(getTestThreadFactory("Worker"));
try {
CountDownLatch startLatch = new CountDownLatch(1);
int threads = 20;
ArrayList<Future<?>> futures = new ArrayList<>();
for (int i = 0; i < threads; i++) {
futures.add(submitTask("A", "B", startLatch, service, graph));
futures.add(submitTask("A", "C", startLatch, service, graph));
futures.add(submitTask("A", "D", startLatch, service, graph));
futures.add(submitTask("A", "B", startLatch, service, graph));
futures.add(submitTask("D", "B", startLatch, service, graph));
futures.add(submitTask("D", "C", startLatch, service, graph));
futures.add(submitTask("C", "B", startLatch, service, graph));
}
startLatch.countDown();
awaitAll(futures);
} finally {
service.shutdownNow();
}
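      // Additions are idempotent, so despite the concurrent writers the graph is always A -> {B,C,D},
      // D -> {B,C}, C -> B, which sorts deterministically.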
assertEquals(graph.topologicalSort(), Arrays.asList("A", "D", "C", "B"));
}
@Test
public void testRemoveDependency() throws CyclicDependencyException {
DependencyGraph<String> g = new DependencyGraph<>();
g.addDependency("E", "B");
g.addDependency("E", "C");
g.addDependency("E", "D");
g.addDependency("B", "D");
g.addDependency("B", "C");
g.addDependency("C", "D");
assertEquals(g.topologicalSort(), Arrays.asList("E", "B", "C", "D"));
g.removeDependency("E", "B");
g.addDependency("B", "E");
assertEquals(g.topologicalSort(), Arrays.asList("B", "E", "C", "D"));
g.clearAll();
assertTrue(g.topologicalSort().isEmpty());
}
@Test
public void testRemoveElement() throws CyclicDependencyException {
DependencyGraph<String> g = new DependencyGraph<>();
g.addDependency("E", "B");
g.addDependency("E", "C");
g.addDependency("E", "D");
g.addDependency("B", "D");
g.addDependency("B", "C");
g.addDependency("C", "D");
assertEquals(g.topologicalSort(), Arrays.asList("E", "B", "C", "D"));
g.remove("C");
assertEquals(g.topologicalSort(), Arrays.asList("E", "B", "D"));
g.remove("B");
assertEquals(g.topologicalSort(), Arrays.asList("E", "D"));
g.remove("E");
assertEquals(g.topologicalSort(), Arrays.asList("D"));
g.remove("D");
assertTrue(g.topologicalSort().isEmpty());
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testAddSelf() {
new DependencyGraph<>().addDependency("N", "N");
}
@Test(expectedExceptions = IllegalArgumentException.class)
   public void testAddNull() {
new DependencyGraph<>().addDependency("N", null);
}
@Test(expectedExceptions = CyclicDependencyException.class)
public void testCycle() throws CyclicDependencyException {
DependencyGraph<Object> graph = new DependencyGraph<>();
Object o1 = new Object();
Object o2 = new Object();
Object o3 = new Object();
graph.addDependency(o1, o2);
graph.addDependency(o2, o3);
graph.addDependency(o3, o1);
graph.topologicalSort();
}
private Future<?> submitTask(final String from, final String to, final CountDownLatch waitingFor, ExecutorService onExecutor, final DependencyGraph<String> graph) {
return onExecutor.submit(new Runnable() {
@Override
public void run() {
try {
waitingFor.await();
graph.addDependency(from, to);
} catch (InterruptedException ignored) {
}
}
});
}
private void awaitAll(List<Future<?>> futures) throws Exception {
for (Future f : futures) {
f.get(10, TimeUnit.SECONDS);
}
}
}
| 7,014
| 32.089623
| 167
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/HashFunctionTest.java
|
package org.infinispan.util;
import org.infinispan.commons.hash.Hash;
import org.infinispan.commons.hash.MurmurHash3;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
@Test(testName = "util.HashFunctionTest", groups = "unit")
public class HashFunctionTest extends AbstractInfinispanTest {
public void testMurmurHash3Consistency() {
testHashConsistency(MurmurHash3.getInstance());
}
private void testHashConsistency(Hash hash) {
Object o = new Object();
int i1 = hash.hash(o);
int i2 = hash.hash(o);
int i3 = hash.hash(o);
assert i1 == i2: "i1 and i2 are not the same: " + i1 + ", " + i2;
assert i3 == i2: "i3 and i2 are not the same: " + i2 + ", " + i3;
assert i1 == i3: "i1 and i3 are not the same: " + i1 + ", " + i3;
}
}
| 828
| 29.703704
| 71
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ControlledConsistentHashFactory.java
|
package org.infinispan.util;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.testng.AssertJUnit.assertTrue;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.IntStream;
import org.infinispan.Cache;
import org.infinispan.commons.marshall.AbstractExternalizer;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.commons.util.Util;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.topology.ClusterTopologyManager;
/**
* ConsistentHashFactory implementation that allows the user to control who the owners are.
*
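 * <p>A minimal usage sketch (hypothetical test wiring; {@code builder} and {@code cache(0)} are assumed
 * test helpers, and the {@code consistentHashFactory()} call is assumed to be available on the hash
 * configuration builder):
 * <pre>{@code
 * ControlledConsistentHashFactory.Default chf = new ControlledConsistentHashFactory.Default(0, 1);
 * builder.clustering().hash().numSegments(1).consistentHashFactory(chf);
 * // later, make node 1 the primary owner and force a rebalance from the coordinator
 * chf.setOwnerIndexes(1, 0);
 * chf.triggerRebalance(cache(0));
 * }</pre>
 *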
* @author Dan Berindei
* @since 7.0
*/
public abstract class ControlledConsistentHashFactory<CH extends ConsistentHash> extends BaseControlledConsistentHashFactory<CH> {
private volatile int[][] ownerIndexes;
private volatile List<Address> membersToUse;
/**
* Create a consistent hash factory with a single segment.
*/
public ControlledConsistentHashFactory(Trait<CH> trait, int primaryOwnerIndex, int... backupOwnerIndexes) {
super(trait, 1);
setOwnerIndexes(primaryOwnerIndex, backupOwnerIndexes);
}
/**
* Create a consistent hash factory with multiple segments.
*/
public ControlledConsistentHashFactory(Trait<CH> trait, int[][] segmentOwners) {
super(trait, segmentOwners.length);
if (segmentOwners.length == 0)
throw new IllegalArgumentException("Need at least one set of owners");
setOwnerIndexes(segmentOwners);
}
public void setOwnerIndexes(int primaryOwnerIndex, int... backupOwnerIndexes) {
int[] firstSegmentOwners = concatOwners(primaryOwnerIndex, backupOwnerIndexes);
setOwnerIndexes(new int[][]{firstSegmentOwners});
}
private int[] concatOwners(int primaryOwnerIndex, int[] backupOwnerIndexes) {
int[] firstSegmentOwners;
if (backupOwnerIndexes == null || backupOwnerIndexes.length == 0) {
firstSegmentOwners = new int[]{primaryOwnerIndex};
} else {
firstSegmentOwners = new int[backupOwnerIndexes.length + 1];
firstSegmentOwners[0] = primaryOwnerIndex;
System.arraycopy(backupOwnerIndexes, 0, firstSegmentOwners, 1, backupOwnerIndexes.length);
}
return firstSegmentOwners;
}
public void setOwnerIndexes(int[][] segmentOwners) {
this.ownerIndexes = Arrays.stream(segmentOwners)
.map(owners -> Arrays.copyOf(owners, owners.length))
.toArray(int[][]::new);
}
public void triggerRebalance(Cache<?, ?> cache) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
assertTrue("triggerRebalance must be called on the coordinator node",
extractGlobalComponent(cacheManager, Transport.class).isCoordinator());
ClusterTopologyManager clusterTopologyManager =
extractGlobalComponent(cacheManager, ClusterTopologyManager.class);
clusterTopologyManager.forceRebalance(cache.getName());
}
@Override
protected int[][] assignOwners(int numSegments, List<Address> members) {
return Arrays.stream(ownerIndexes)
.map(indexes -> mapOwnersToCurrentMembers(members, indexes))
.toArray(int[][]::new);
}
private int[] mapOwnersToCurrentMembers(List<Address> members, int[] indexes) {
int[] newIndexes = Arrays.stream(indexes).flatMap(index -> {
if (membersToUse != null) {
Address owner = membersToUse.get(index);
int newIndex = members.indexOf(owner);
if (newIndex >= 0) {
return IntStream.of(newIndex);
}
} else if (index < members.size()) {
return IntStream.of(index);
}
return IntStream.empty();
}).toArray();
// A DefaultConsistentHash segment must always have at least one owner
if (newIndexes.length == 0 && trait.requiresPrimaryOwner()) {
return new int[]{0};
}
return newIndexes;
}
/**
    * @param membersToUse Owner indexes refer to positions in this list instead of the current list of members
*/
public void setMembersToUse(List<Address> membersToUse) {
this.membersToUse = membersToUse;
}
@SerializeWith(Externalizer.class)
public static class Default extends ControlledConsistentHashFactory<DefaultConsistentHash> {
public Default(int primaryOwnerIndex, int... backupOwnerIndexes) {
super(new DefaultTrait(), primaryOwnerIndex, backupOwnerIndexes);
}
public Default(int[][] segmentOwners) {
super(new DefaultTrait(), segmentOwners);
}
}
/**
    * Ignores the backup-owner part of the calls; only the primary owner index is used
*/
@SerializeWith(Externalizer.class)
public static class Replicated extends ControlledConsistentHashFactory<ReplicatedConsistentHash> {
public Replicated(int primaryOwnerIndex) {
super(new ReplicatedTrait(), primaryOwnerIndex);
}
public Replicated(int[] segmentPrimaryOwners) {
super(new ReplicatedTrait(), Arrays.stream(segmentPrimaryOwners).mapToObj(o -> new int[]{o}).toArray(int[][]::new));
}
@Override
public void setOwnerIndexes(int primaryOwnerIndex, int... backupOwnerIndexes) {
super.setOwnerIndexes(primaryOwnerIndex);
}
@Override
public void setOwnerIndexes(int[][] segmentOwners) {
super.setOwnerIndexes(segmentOwners);
}
}
public static class Externalizer extends AbstractExternalizer<ControlledConsistentHashFactory<?>> {
@Override
public Set<Class<? extends ControlledConsistentHashFactory<?>>> getTypeClasses() {
return Util.asSet(Default.class, Replicated.class);
}
@Override
public void writeObject(ObjectOutput output, ControlledConsistentHashFactory<?> object) throws IOException {
output.writeByte(object instanceof Default ? 0 : 1);
int numOwners = object.ownerIndexes.length;
MarshallUtil.marshallSize(output, numOwners);
for (int i = 0; i < numOwners; i++) {
int[] ownerSegments = object.ownerIndexes[i];
MarshallUtil.marshallSize(output, ownerSegments.length);
for (int segment : ownerSegments)
output.writeInt(segment);
}
output.writeObject(object.membersToUse);
}
@Override
public ControlledConsistentHashFactory<?> readObject(ObjectInput input) throws IOException, ClassNotFoundException {
byte type = input.readByte();
int numOwners = MarshallUtil.unmarshallSize(input);
int[][] indexes = new int[numOwners][];
for (int i = 0; i < numOwners; i++) {
int numSegments = MarshallUtil.unmarshallSize(input);
int[] segments = new int[numSegments];
for (int j = 0; j < numSegments; j++) {
segments[j] = input.readInt();
}
indexes[i] = segments;
}
ControlledConsistentHashFactory<?> chf;
switch (type) {
case 0:
chf = new Default(indexes);
break;
case 1:
chf = new Replicated(indexes[0]);
break;
default:
throw new IllegalStateException();
}
List<Address> membersToUse = (List<Address>) input.readObject();
chf.setMembersToUse(membersToUse);
return chf;
}
}
}
| 7,928
| 36.578199
| 130
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/TestOperation.java
|
package org.infinispan.util;
import java.util.Objects;
import org.infinispan.Cache;
/**
* @author Pedro Ruivo
* @since 12.0
*/
public enum TestOperation {
PUT {
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
cache.put(key, newValue);
return newValue;
}
},
PUT_IF_ABSENT {
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
V result = cache.putIfAbsent(key, newValue);
return result == null ? newValue : result;
}
},
REPLACE {
@Override
public boolean requiresPreviousValue() {
return true;
}
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
V result = cache.replace(key, newValue);
return Objects.equals(result, prevValue) ? newValue : result;
}
},
REPLACE_CONDITIONAL {
@Override
public boolean requiresPreviousValue() {
return true;
}
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
boolean result = cache.replace(key, prevValue, newValue);
return result ? newValue : prevValue;
}
},
REMOVE {
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
cache.remove(key);
return null;
}
},
REMOVE_CONDITIONAL {
@Override
public boolean requiresPreviousValue() {
return true;
}
@Override
public <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue) {
boolean result = cache.remove(key, prevValue);
return result ? null : prevValue;
}
};
public boolean requiresPreviousValue() {
return false;
}
public abstract <K, V> V execute(Cache<K, V> cache, K key, V prevValue, V newValue);
}
| 1,935
| 24.473684
| 87
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/FeaturesListenerTest.java
|
package org.infinispan.util;
import org.infinispan.test.AbstractCacheTest;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
@Test(testName = "util.FeaturesListenerTest", groups = "functional")
@AbstractInfinispanTest.FeatureCondition(feature = "A")
public class FeaturesListenerTest extends AbstractCacheTest {
   // this test will always be skipped because the feature A condition is applied to the test class
@Test
public void junitFeatureListenerTest() {
throw new IllegalStateException("Cannot run.");
}
}
| 545
| 31.117647
| 78
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/UtilTest.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import org.infinispan.commons.util.Util;
import org.testng.annotations.Test;
/**
* Test utility methods
*
* @author Galder Zamarreño
* @since 5.1
*/
@Test(groups = "functional", testName = "util.UtilTest")
public class UtilTest {
public void testToHexString() {
byte[] sample1 = {
-36, -4, 27, 111, 3, 49, 118, -49, 55, 74, 25, 12, 57, -96, 17,
-119, -30, -113, 98, -42, -52, 49, 74, 93, 44, -94, 39, -71, -54,
37, 108, -102, -113, 82, 73, -88, 103, 88, -44, -49, -58, 127, -93,
83, 32, 124, 18, 83, 35, -116, -38, 43, -26, -87, 20, -82, -29, 36,
-20, 50, 97, -18, -30, 80};
assertEquals("DCFC1B6F033176CF374A190C39A01189E28F62D6CC314A5D2CA227B9CA" +
"256C9A8F5249A86758D4CFC67FA353207C1253238CDA2BE6A914AEE324EC3261EEE250",
Util.toHexString(sample1));
byte[] sample2 = { -36, -4, 27, 111, 3, 49, 118 };
assertEquals("DCFC1B6F033176", Util.toHexString(sample2, 8));
byte[] sample3 = { -36, -4, 27, 111, 3, 49, 118, -49 };
assertEquals("DCFC1B6F033176CF", Util.toHexString(sample3, 8));
byte[] sample4 = { -36, -4, 27, 111, 3, 49, 118, -49, 55};
assertEquals("DCFC1B6F033176CF", Util.toHexString(sample4, 8));
}
}
| 1,350
| 33.641026
| 91
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/BlockingLocalTopologyManager.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertNotSame;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * Replaces the LocalTopologyManager and allows tests to block the phases of the state transfer:
* <ul>
* <li>Rebalance Start</li>
* <li>Confirm Rebalance</li>
* <li>Consistent Hash Update</li>
* </ul>
*
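 * <p>A minimal usage sketch (hypothetical test code; {@code manager(1)} is an assumed test helper):
 * <pre>{@code
 * BlockingLocalTopologyManager bltm =
 *       BlockingLocalTopologyManager.replaceTopologyManagerDefaultCache(manager(1));
 * // trigger a topology change (e.g. kill another node), then release the rebalance phase by phase
 * BlockingLocalTopologyManager.finishRebalance(CacheTopology.Phase.READ_OLD_WRITE_ALL, bltm);
 * bltm.stopBlocking();
 * }</pre>
 *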
* @author Pedro Ruivo
* @since 6.0
*/
public class BlockingLocalTopologyManager extends AbstractControlledLocalTopologyManager {
private static final Log log = LogFactory.getLog(BlockingLocalTopologyManager.class);
private static final int TIMEOUT_SECONDS = 10;
private final Address address;
private final String expectedCacheName;
private final BlockingQueue<Event> queuedTopologies = new LinkedBlockingQueue<>();
private volatile boolean enabled = true;
private volatile RuntimeException exception;
private BlockingLocalTopologyManager(LocalTopologyManager delegate, Address address, String cacheName) {
super(delegate);
this.address = address;
this.expectedCacheName = cacheName;
}
public static BlockingLocalTopologyManager replaceTopologyManager(EmbeddedCacheManager cacheContainer,
String cacheName) {
LocalTopologyManager manager = TestingUtil.extractGlobalComponent(cacheContainer, LocalTopologyManager.class);
BlockingLocalTopologyManager controlledLocalTopologyManager =
new BlockingLocalTopologyManager(manager, cacheContainer.getAddress(), cacheName);
TestingUtil.replaceComponent(cacheContainer, LocalTopologyManager.class, controlledLocalTopologyManager, true);
return controlledLocalTopologyManager;
}
public static BlockingLocalTopologyManager replaceTopologyManagerDefaultCache(EmbeddedCacheManager cacheContainer) {
return replaceTopologyManager(cacheContainer, TestingUtil.getDefaultCacheName(cacheContainer));
}
public static void confirmTopologyUpdate(CacheTopology.Phase phase, BlockingLocalTopologyManager... topologyManagers)
throws InterruptedException {
for (BlockingLocalTopologyManager topologyManager : topologyManagers) {
topologyManager.expectTopologyUpdate(phase).unblock();
}
if (needConfirmation(phase)) {
for (BlockingLocalTopologyManager topologyManager : topologyManagers) {
topologyManager.expectPhaseConfirmation().unblock();
}
}
}
public static void finishRebalance(CacheTopology.Phase nextPhase, BlockingLocalTopologyManager... topologyManagers)
throws InterruptedException {
switch (nextPhase) {
case READ_OLD_WRITE_ALL:
confirmTopologyUpdate(CacheTopology.Phase.READ_OLD_WRITE_ALL, topologyManagers);
// fallthrough
case READ_ALL_WRITE_ALL:
confirmTopologyUpdate(CacheTopology.Phase.READ_ALL_WRITE_ALL, topologyManagers);
// fallthrough
case READ_NEW_WRITE_ALL:
confirmTopologyUpdate(CacheTopology.Phase.READ_NEW_WRITE_ALL, topologyManagers);
// fallthrough
case NO_REBALANCE:
confirmTopologyUpdate(CacheTopology.Phase.NO_REBALANCE, topologyManagers);
}
}
public BlockedTopology expectTopologyUpdate(CacheTopology.Phase phase) throws InterruptedException {
BlockedTopology blockedTopology = expectTopologyUpdate();
assertNotSame("Expected a CH_UPDATE or REBALANCE_START, but got a CONFIRMATION",
blockedTopology.getType(), Type.CONFIRMATION);
assertEquals(phase, blockedTopology.getCacheTopology().getPhase());
return blockedTopology;
}
public BlockedTopology expectTopologyUpdate(CacheTopology.Phase phase, int topologyId) throws InterruptedException {
BlockedTopology blockedTopology = expectTopologyUpdate();
assertEquals(topologyId, blockedTopology.getCacheTopology().getTopologyId());
assertEquals(phase, blockedTopology.getCacheTopology().getPhase());
return blockedTopology;
}
public BlockedTopology expectTopologyUpdate() throws InterruptedException {
Event update = queuedTopologies.poll(TIMEOUT_SECONDS, TimeUnit.SECONDS);
if (update == null) {
throw new TimeoutException("Timed out waiting for topology update on " + address);
}
return new BlockedTopology(update);
}
public BlockedConfirmation expectPhaseConfirmation() throws InterruptedException {
Event update = queuedTopologies.poll(TIMEOUT_SECONDS, TimeUnit.SECONDS);
if (update == null) {
throw new TimeoutException("Timed out waiting for phase confirmation on " + address);
}
assertEquals(Type.CONFIRMATION, update.type);
return new BlockedConfirmation(update);
}
public BlockedConfirmation expectPhaseConfirmation(int topologyId) throws InterruptedException {
BlockedConfirmation blockedConfirmation = expectPhaseConfirmation();
assertEquals(topologyId, blockedConfirmation.getTopologyId());
return blockedConfirmation;
}
/**
    * Expect a topology update and unblock it.
* <p>
* If the update requires confirmation, unblock the confirmation as well.
*/
public void confirmTopologyUpdate(CacheTopology.Phase phase) throws InterruptedException {
expectTopologyUpdate(phase).unblock();
if (needConfirmation(phase)) {
expectPhaseConfirmation().unblock();
}
}
public void expectNoTopologyUpdate(long timeout, TimeUnit timeUnit) throws InterruptedException {
Event update = queuedTopologies.poll(timeout, timeUnit);
if (update != null) {
throw new TestException(
"Expected no topology update on " + address + ", but got " + update.type + " " + update.topologyId);
}
}
public BlockedTopology expectRebalanceStartAfterLeave() throws InterruptedException {
// After a node leaves, the coordinator sends a NO_REBALANCE topology update with the remaining members
// Then it immediately starts the rebalance with a READ_OLD topology update
// The members can receive the topology updates in either order
BlockedTopology topology0 = expectTopologyUpdate();
if (topology0.getType() == Type.REBALANCE_START) {
expectTopologyUpdate(CacheTopology.Phase.NO_REBALANCE).unblock();
} else {
topology0.unblock();
topology0 = expectTopologyUpdate(CacheTopology.Phase.READ_OLD_WRITE_ALL);
}
return topology0;
}
private static boolean needConfirmation(CacheTopology.Phase phase) {
return phase == CacheTopology.Phase.READ_OLD_WRITE_ALL ||
phase == CacheTopology.Phase.READ_ALL_WRITE_ALL ||
phase == CacheTopology.Phase.READ_NEW_WRITE_ALL;
}
public void stopBlocking() {
enabled = false;
if (exception != null) {
throw exception;
}
if (!queuedTopologies.isEmpty()) {
log.error("Stopped blocking topology updates, but there are " + queuedTopologies.size() +
" blocked updates in the queue: " + queuedTopologies);
}
log.debugf("Stopped blocking topology updates");
}
@Override
protected final CompletionStage<Void> beforeHandleTopologyUpdate(String cacheName, CacheTopology cacheTopology, int viewId) {
if (!enabled || !expectedCacheName.equals(cacheName))
return CompletableFutures.completedNull();
Event event = new Event(cacheTopology, cacheTopology.getTopologyId(), viewId,
Type.CH_UPDATE);
queuedTopologies.add(event);
log.debugf("Blocking topology update for cache %s: %s", cacheName, cacheTopology);
return event.whenUnblocked().thenRun(() -> {
log.debugf("Continue consistent hash update for cache %s: %s", cacheName, cacheTopology);
});
}
@Override
protected final CompletionStage<Void> beforeHandleRebalance(String cacheName, CacheTopology cacheTopology, int viewId) {
if (!enabled || !expectedCacheName.equals(cacheName))
return CompletableFutures.completedNull();
Event event = new Event(cacheTopology, cacheTopology.getTopologyId(), viewId,
Type.REBALANCE_START);
queuedTopologies.add(event);
log.debugf("Blocking rebalance start for cache %s: %s", cacheName, cacheTopology);
return event.whenUnblocked().thenRun(() -> {
log.debugf("Continue rebalance start for cache %s: %s", cacheName, cacheTopology);
});
}
@Override
protected final CompletionStage<Void> beforeConfirmRebalancePhase(String cacheName, int topologyId, Throwable throwable) {
if (!enabled || !expectedCacheName.equals(cacheName))
return CompletableFutures.completedNull();
Event event = new Event(null, topologyId, -1, Type.CONFIRMATION);
queuedTopologies.add(event);
log.debugf("Blocking rebalance confirmation for cache %s: %s", cacheName, topologyId);
return event.whenUnblocked().thenRun(() -> {
log.debugf("Continue rebalance confirmation for cache %s: %s", cacheName, topologyId);
});
}
void failManager(Throwable e) {
if (e instanceof RuntimeException) {
exception = (RuntimeException) e;
} else {
exception = new TestException(e);
}
}
public enum Type {CH_UPDATE, REBALANCE_START, CONFIRMATION}
class Event {
final CacheTopology cacheTopology;
final int topologyId;
final int viewId;
final Type type;
private final CompletableFuture<Void> latch = new CompletableFuture<>();
Event(CacheTopology cacheTopology, int topologyId, int viewId, Type type) {
this.cacheTopology = cacheTopology;
this.topologyId = topologyId;
this.viewId = viewId;
this.type = type;
}
void awaitUnblock() {
try {
latch.get(TIMEOUT_SECONDS * 2, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
fail(e);
} catch (ExecutionException e) {
fail(e.getCause());
} catch (java.util.concurrent.TimeoutException e) {
fail(e);
}
}
CompletionStage<Void> whenUnblocked() {
return latch;
}
void unblock() {
if (latch.isCompletedExceptionally()) {
latch.join();
}
log.tracef("Unblocking %s %d on %s", type, topologyId, address);
latch.complete(null);
}
void fail(Throwable e) {
if (latch.isCompletedExceptionally()) {
latch.join();
}
log.errorf(e, "Failed waiting for test to unblock %s %d on %s", type, topologyId, address);
failManager(e);
latch.completeExceptionally(e);
}
@Override
public String toString() {
return "Event{" +
"type=" + type +
", topologyId=" + topologyId +
", viewId=" + viewId +
'}';
}
}
public class BlockedTopology {
private Event event;
BlockedTopology(Event event) {
this.event = event;
}
public CacheTopology getCacheTopology() {
return event.cacheTopology;
}
public CacheTopology.Phase getPhase() {
return event.cacheTopology.getPhase();
}
public int getViewId() {
return event.viewId;
}
public Type getType() {
return event.type;
}
public void unblock() {
event.unblock();
}
}
public class BlockedConfirmation {
private Event event;
BlockedConfirmation(Event event) {
this.event = event;
}
public int getTopologyId() {
return event.topologyId;
}
public void unblock() {
event.unblock();
}
}
}
| 12,696
| 36.565089
| 128
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ControlledTimeService.java
|
package org.infinispan.util;
import java.time.Instant;
import java.util.concurrent.TimeUnit;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * TimeService that allows the wall clock time to be adjusted manually.
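 * <p>A minimal usage sketch (hypothetical test wiring; replacing the component is typically done before
 * the cache is started):
 * <pre>{@code
 * ControlledTimeService timeService = new ControlledTimeService();
 * TestingUtil.replaceComponent(cacheManager, TimeService.class, timeService, true);
 * cache.put("k", "v", 10, TimeUnit.SECONDS);
 * timeService.advance(11_000); // milliseconds; the entry is now expired
 * }</pre>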
* @deprecated Use {@link org.infinispan.commons.time.ControlledTimeService} instead.
*/
@Deprecated
public class ControlledTimeService extends EmbeddedTimeService {
private static final Log log = LogFactory.getLog(ControlledTimeService.class);
protected long currentMillis;
public ControlledTimeService() {
this.currentMillis = 1_000_000L;
}
@Override
public long wallClockTime() {
return currentMillis;
}
@Override
public long time() {
return TimeUnit.MILLISECONDS.toNanos(currentMillis);
}
@Override
public Instant instant() {
return Instant.ofEpochMilli(currentMillis);
}
public void advance(long time) {
if (time <= 0) {
throw new IllegalArgumentException("Argument must be greater than 0");
}
currentMillis += time;
log.debugf("Current time is now %d", currentMillis);
}
}
| 1,145
| 23.913043
| 85
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/CountingRequestRepository.java
|
package org.infinispan.util;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.transport.impl.Request;
import org.infinispan.remoting.transport.impl.RequestRepository;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.test.TestingUtil;
/**
 * Request repository that tracks ongoing unicast RPCs. Its purpose is to isolate RPCs started before
 * {@link #advanceGenerationAndAwait(long, TimeUnit)} from those started afterwards. It can handle staggered calls as well.
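 * <p>A minimal usage sketch (hypothetical test code; {@code manager(0)} and {@code cache(0)} are assumed
 * test helpers):
 * <pre>{@code
 * CountingRequestRepository counter = CountingRequestRepository.replaceDispatcher(manager(0));
 * cache(0).put("k", "v");                                  // starts remote requests
 * counter.advanceGenerationAndAwait(10, TimeUnit.SECONDS); // waits only for the requests started above
 * }</pre>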
*/
public class CountingRequestRepository extends RequestRepository {
private final AtomicLong generation = new AtomicLong();
private Map<Long, Map<Long, Request<?>>> requests = new ConcurrentHashMap<>();
public static CountingRequestRepository replaceDispatcher(EmbeddedCacheManager cacheManager) {
GlobalComponentRegistry gcr = cacheManager.getGlobalComponentRegistry();
JGroupsTransport transport = (JGroupsTransport) gcr.getComponent(Transport.class);
RequestRepository requestRepository =
(RequestRepository) TestingUtil.extractField(JGroupsTransport.class, transport, "requests");
CountingRequestRepository instance = new CountingRequestRepository(requestRepository);
TestingUtil.replaceField(instance, "requests", transport, JGroupsTransport.class);
return instance;
}
private CountingRequestRepository(RequestRepository requestRepository) {
requestRepository.forEach(this::addRequest);
}
@Override
public void addRequest(Request<?> request) {
requests.compute(generation.get(), (generation, map) -> {
if (map == null) {
map = new ConcurrentHashMap<>();
}
map.put(request.getRequestId(), request);
return map;
});
super.addRequest(request);
}
/**
* Wait until we get responses for all started requests.
*/
public void advanceGenerationAndAwait(long timeout, TimeUnit timeUnit) throws Exception {
long lastGen = generation.getAndIncrement();
Map<Long, Request<?>> lastGenRequests = requests.getOrDefault(lastGen, Collections.emptyMap());
long now = System.nanoTime();
long deadline = now + timeUnit.toNanos(timeout);
synchronized (this) {
for (Map.Entry<Long, Request<?>> entry : lastGenRequests.entrySet()) {
Request<?> request = entry.getValue();
request.toCompletableFuture().get(deadline - now, TimeUnit.NANOSECONDS);
            now = System.nanoTime();
}
}
}
}
| 2,856
| 40.405797
| 114
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ThreadLocalLeakTest.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertSame;
import java.io.File;
import java.lang.ref.Reference;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Pattern;
import org.infinispan.Cache;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
 * Tests whether certain cache setups result in thread-local leaks.
*
* @author Galder Zamarreño
* @since 5.3
*/
@Test(groups = "functional", testName = "util.ThreadLocalLeakTest")
public class ThreadLocalLeakTest extends AbstractInfinispanTest {
private static final Pattern THREAD_LOCAL_FILTER = Pattern.compile("org\\.infinispan\\..*");
   // Used to ignore the thread-local in our ConcurrentHashMap backport
private static final Set<String> ACCEPTED_THREAD_LOCALS = new HashSet<>(Arrays.asList());
private final ThreadLocal<ThreadLocalLeakTest> DUMMY_THREAD_LOCAL = ThreadLocal.withInitial(() -> this);
private String tmpDirectory;
@BeforeClass(alwaysRun = true)
protected void setUpTempDir() {
tmpDirectory = CommonsTestingUtil.tmpDirectory(this.getClass());
}
@AfterClass(alwaysRun = true)
protected void clearTempDir() {
org.infinispan.commons.util.Util.recursiveFileRemove(tmpDirectory);
new File(tmpDirectory).mkdirs();
}
public void testCheckThreadLocalLeaks() throws Exception {
// Perform the test in a new thread so we don't have any thread-locals from previous tests
fork(this::doCheckThreadLocalLeaks).get(30, TimeUnit.SECONDS);
}
private void doCheckThreadLocalLeaks() throws Exception {
TestResourceTracker.testThreadStarted(getTestName());
ConfigurationBuilder builder = new ConfigurationBuilder();
builder.memory().maxCount(4096)
.locking().concurrencyLevel(2048)
.invocationBatching().enable()
.persistence().passivation(false)
.addSingleFileStore().shared(false).preload(true);
amendConfiguration(builder);
GlobalConfigurationBuilder globalBuilder = new GlobalConfigurationBuilder().nonClusteredDefault();
globalBuilder.globalState().enable().persistentLocation(tmpDirectory);
CyclicBarrier barrier = new CyclicBarrier(2);
AtomicReference<Thread> putThread = new AtomicReference<>();
Future<Void> putFuture;
try (EmbeddedCacheManager cm = new DefaultCacheManager(globalBuilder.build())) {
cm.defineConfiguration("leak", builder.build());
final Cache<Object, Object> c = cm.getCache("leak");
c.put("key1", "value1");
putFuture = fork(() -> {
assertSame(this, DUMMY_THREAD_LOCAL.get());
putThread.set(Thread.currentThread());
Cache<Object, Object> c1 = cm.getCache("leak");
c1.put("key2", "value2");
c1 = null;
// Let the main thread know it can check for thread locals
barrier.await(10, TimeUnit.SECONDS);
// Wait for the main thread to finish the check
barrier.await(10, TimeUnit.SECONDS);
});
c.put("key3", "value3");
// Sync with the forked thread after cache.put() returns
barrier.await(10, TimeUnit.SECONDS);
}
// The cache manager is stopped and the forked thread is blocked after the operation
Map<Class<?>, Object> mainThreadLeaks = findThreadLocalLeaks(Thread.currentThread());
assertEquals(Collections.emptySet(), mainThreadLeaks.keySet());
Map<Class<?>, Object> forkThreadLeaks = findThreadLocalLeaks(putThread.get());
assertEquals(Collections.singleton(DUMMY_THREAD_LOCAL.getClass()), forkThreadLeaks.keySet());
// Let the put thread finish
barrier.await(10, TimeUnit.SECONDS);
// Check for any exceptions
putFuture.get(10, TimeUnit.SECONDS);
}
protected void amendConfiguration(ConfigurationBuilder builder) {
// To be overridden by subclasses
}
private Map<Class<?>, Object> findThreadLocalLeaks(Thread thread) throws Exception {
// Get a reference to the thread locals table of the current thread
Field threadLocalsField = Thread.class.getDeclaredField("threadLocals");
threadLocalsField.setAccessible(true);
Object threadLocalTable = threadLocalsField.get(thread);
// Get a reference to the array holding the thread local variables inside the
// ThreadLocalMap of the current thread
Class<?> threadLocalMapClass = Class.forName("java.lang.ThreadLocal$ThreadLocalMap");
Field tableField = threadLocalMapClass.getDeclaredField("table");
tableField.setAccessible(true);
Object table;
try {
table = tableField.get(threadLocalTable);
} catch (NullPointerException e) {
         // The thread has no thread-locals at all
         return Collections.emptyMap();
}
Class<?> entryClass = Class.forName("java.lang.ThreadLocal$ThreadLocalMap$Entry");
Field valueField = entryClass.getDeclaredField("value");
valueField.setAccessible(true);
Map<Class<?>, Object> threadLocals = new HashMap<>();
for (int i=0; i < Array.getLength(table); i++) {
// Each entry in the table array of ThreadLocalMap is an Entry object
// representing the thread local reference and its value
Reference<ThreadLocal<?>> entry = (Reference<ThreadLocal<?>>) Array.get(table, i);
if (entry != null) {
// Get a reference to the thread local object
ThreadLocal<?> threadLocal = entry.get();
Object value = valueField.get(entry);
if (threadLocal != null) {
if (filterThreadLocals(threadLocal, value) && !ACCEPTED_THREAD_LOCALS.contains(threadLocal.getClass().getCanonicalName())) {
log.error("Thread local leak: " + threadLocal);
threadLocals.put(threadLocal.getClass(), value);
}
} else {
log.warn("Thread local is not accessible, but it wasn't removed either: " + value);
}
}
}
return threadLocals;
}
private boolean filterThreadLocals(ThreadLocal<?> tl, Object value) {
String tlClassName = tl.getClass().getName();
String valueClassName = value != null ? value.getClass().getName() : "";
log.tracef("Checking thread-local %s = %s", tlClassName, valueClassName);
if (!THREAD_LOCAL_FILTER.matcher(tlClassName).find()
&& !THREAD_LOCAL_FILTER.matcher(valueClassName).find()) {
return false;
}
return !ACCEPTED_THREAD_LOCALS.contains(tlClassName) && !ACCEPTED_THREAD_LOCALS.contains(valueClassName);
}
}
| 7,468
| 39.155914
| 139
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ByteStringTest.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertSame;
import java.io.ByteArrayInputStream;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import org.infinispan.commons.io.LazyByteArrayOutputStream;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.Test;
/**
* @author Dan Berindei
* @since 9.0
*/
@Test(groups = "unit", testName = "util.ByteStringTest")
public class ByteStringTest extends AbstractInfinispanTest {
public void testEmptyString() throws Exception {
ByteString byteString = ByteString.fromString("");
assertSame(ByteString.emptyString(), byteString);
LazyByteArrayOutputStream outputStream = new LazyByteArrayOutputStream();
try (ObjectOutput output = new ObjectOutputStream(outputStream)) {
ByteString.writeObject(output, byteString);
}
ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.getRawBuffer());
try (ObjectInput input = new ObjectInputStream(inputStream)) {
ByteString byteString2 = ByteString.readObject(input);
assertSame(ByteString.emptyString(), byteString2);
}
}
public void testShortString() throws Exception {
ByteString byteString = ByteString.fromString("abc");
LazyByteArrayOutputStream outputStream = new LazyByteArrayOutputStream();
try (ObjectOutput output = new ObjectOutputStream(outputStream)) {
ByteString.writeObject(output, byteString);
}
ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.getRawBuffer());
try (ObjectInput input = new ObjectInputStream(inputStream)) {
ByteString byteString2 = ByteString.readObject(input);
assertEquals(byteString, byteString2);
}
}
public void testLargeString() throws Exception {
StringBuilder sb = new StringBuilder(128);
for (int i = 0; i < 128; i++) {
sb.append("a");
}
ByteString.fromString(sb.toString());
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testTooLargeString() throws Exception {
StringBuilder sb = new StringBuilder(256);
for (int i = 0; i < 256; i++) {
sb.append("a");
}
ByteString.fromString(sb.toString());
}
}
| 2,419
| 34.072464
| 95
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/InfinispanCollectionsTest.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertTrue;
import java.util.HashSet;
import java.util.Set;
import org.infinispan.commons.util.InfinispanCollections;
import org.testng.annotations.Test;
/**
* Tests for the {@link InfinispanCollections} helpers.
*
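 * <p>A minimal sketch of the helper under test (semantics as exercised by the assertions below):
 * <pre>{@code
 * Set<String> stored   = Set.of("a", "b", "c");
 * Set<String> expected = Set.of("a", "b", "c", "d");
 * InfinispanCollections.difference(expected, stored); // {"d"} - expected but not stored
 * InfinispanCollections.difference(stored, expected); // {}    - stored but not expected
 * }</pre>
 *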
* @author Galder Zamarreño
* @since 5.2
*/
@Test(groups = "unit", testName = "util.InfinispanCollectionsTest")
public class InfinispanCollectionsTest {
public void testDifferenceNotStored() {
Set<String> store = new HashSet<String>();
store.add("a");
store.add("b");
store.add("c");
Set<String> expected = new HashSet<String>();
expected.add("a");
expected.add("b");
expected.add("c");
expected.add("d");
Set<String> notStored = InfinispanCollections.difference(expected, store);
assertEquals(1, notStored.size());
assertTrue(notStored.contains("d"));
Set<String> notRemoved = InfinispanCollections.difference(store, expected);
assertEquals(0, notRemoved.size());
}
public void testDifferenceNotRemoved() {
Set<String> store = new HashSet<String>();
store.add("a");
store.add("b");
store.add("c");
store.add("d");
Set<String> expected = new HashSet<String>();
expected.add("a");
expected.add("b");
expected.add("c");
Set<String> notStored = InfinispanCollections.difference(expected, store);
assertEquals(0, notStored.size());
Set<String> notRemoved = InfinispanCollections.difference(store, expected);
assertEquals(1, notRemoved.size());
assertTrue(notRemoved.contains("d"));
}
public void testDifferenceNotStoreAndNotRemoved() {
Set<String> store = new HashSet<String>();
store.add("a");
store.add("b");
store.add("c");
store.add("d");
Set<String> expected = new HashSet<String>();
expected.add("a");
expected.add("b");
expected.add("c");
expected.add("e");
Set<String> notStored = InfinispanCollections.difference(expected, store);
assertEquals(1, notStored.size());
assertTrue(notStored.contains("e"));
Set<String> notRemoved = InfinispanCollections.difference(store, expected);
assertEquals(1, notRemoved.size());
assertTrue(notRemoved.contains("d"));
}
public void testNoDifference() {
Set<String> store = new HashSet<String>();
store.add("a");
store.add("b");
store.add("c");
Set<String> expected = new HashSet<String>();
expected.add("a");
expected.add("b");
expected.add("c");
Set<String> notStored = InfinispanCollections.difference(expected, store);
assertEquals(0, notStored.size());
Set<String> notRemoved = InfinispanCollections.difference(store, expected);
assertEquals(0, notRemoved.size());
}
}
| 2,929
| 28.009901
| 81
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/TransactionTrackInterceptor.java
|
package org.infinispan.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.BaseCustomAsyncInterceptor;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* @author Pedro Ruivo
* @since 5.3
*/
public class TransactionTrackInterceptor extends BaseCustomAsyncInterceptor {
private static final Log log = LogFactory.getLog(TransactionTrackInterceptor.class);
private static final GlobalTransaction CLEAR_TRANSACTION = new ClearGlobalTransaction();
private final Set<GlobalTransaction> localTransactionsSeen;
private final Set<GlobalTransaction> remoteTransactionsSeen;
   // Ordered local transaction list constructed from the operations.
private final ArrayList<GlobalTransaction> localTransactionsOperation;
private TransactionTrackInterceptor() {
localTransactionsSeen = new HashSet<>();
remoteTransactionsSeen = new HashSet<>();
localTransactionsOperation = new ArrayList<>(8);
}
public static TransactionTrackInterceptor injectInCache(Cache<?, ?> cache) {
AsyncInterceptorChain chain = cache.getAdvancedCache().getAsyncInterceptorChain();
if (chain.containsInterceptorType(TransactionTrackInterceptor.class)) {
return chain.findInterceptorWithClass(TransactionTrackInterceptor.class);
}
TransactionTrackInterceptor interceptor = new TransactionTrackInterceptor();
cache.getAdvancedCache().getComponentRegistry().wireDependencies(interceptor);
TestingUtil.startComponent(interceptor);
chain.addInterceptor(interceptor, 0);
return interceptor;
}
public synchronized final GlobalTransaction getLastExecutedTransaction() {
int size = localTransactionsOperation.size();
if (size == 0) {
return null;
}
return localTransactionsOperation.get(size - 1);
}
public synchronized final List<GlobalTransaction> getExecutedTransactions() {
return Collections.unmodifiableList(localTransactionsOperation);
}
@Override
public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (rCtx.isOriginLocal()) {
addLocalTransaction(CLEAR_TRANSACTION);
            // In total order mode, transactions are self-delivered, so we simulate the self-delivery of the clear command.
seen(CLEAR_TRANSACTION, false);
}
seen(CLEAR_TRANSACTION, rCtx.isOriginLocal());
});
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
seen(command.getGlobalTransaction(), rCtx.isOriginLocal());
});
}
@Override
public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
seen(command.getGlobalTransaction(), rCtx.isOriginLocal());
});
}
@Override
public Object visitCommitCommand(TxInvocationContext ctx, CommitCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
seen(command.getGlobalTransaction(), rCtx.isOriginLocal());
});
}
public boolean awaitForLocalCompletion(GlobalTransaction globalTransaction, long timeout, TimeUnit unit) throws InterruptedException {
long endTimeout = unit.toMillis(timeout) + System.currentTimeMillis();
while (System.currentTimeMillis() < endTimeout && !completedLocalTransactions(globalTransaction)) {
sleep();
}
boolean completed = completedLocalTransactions(globalTransaction);
if (log.isDebugEnabled()) {
log.debugf("[local] is %d completed? %s", (Object)globalTransaction.getId(), completed);
}
return completed;
}
public boolean awaitForRemoteCompletion(GlobalTransaction globalTransaction, long timeout, TimeUnit unit) throws InterruptedException {
long endTimeout = unit.toMillis(timeout) + System.currentTimeMillis();
while (System.currentTimeMillis() < endTimeout && !completedRemoteTransactions(globalTransaction)) {
sleep();
}
boolean completed = completedRemoteTransactions(globalTransaction);
if (log.isDebugEnabled()) {
log.debugf("[remote] is %d completed? %s", (Object)globalTransaction.getId(), completed);
}
return completed;
}
public boolean awaitForLocalCompletion(int expectedTransactions, long timeout, TimeUnit unit) throws InterruptedException {
long endTimeout = unit.toMillis(timeout) + System.currentTimeMillis();
while (System.currentTimeMillis() < endTimeout && completedLocalTransactions() < expectedTransactions) {
sleep();
}
if (log.isDebugEnabled()) {
log.debugf("[local] check for completion. seen=%s, expected=%s", localTransactionsSeen.size(), expectedTransactions);
}
return completedLocalTransactions() >= expectedTransactions;
}
public boolean awaitForRemoteCompletion(int expectedTransactions, long timeout, TimeUnit unit) throws InterruptedException {
long endTimeout = unit.toMillis(timeout) + System.currentTimeMillis();
while (System.currentTimeMillis() < endTimeout && completedRemoteTransactions() < expectedTransactions) {
sleep();
}
if (log.isDebugEnabled()) {
log.debugf("[remote] check for completion. seen=%s, expected=%s", remoteTransactionsSeen.size(), expectedTransactions);
}
return completedRemoteTransactions() >= expectedTransactions;
}
public synchronized void reset() {
localTransactionsSeen.clear();
remoteTransactionsSeen.clear();
localTransactionsOperation.clear();
}
@Override
protected Object handleDefault(InvocationContext ctx, VisitableCommand command) throws Throwable {
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
if (rCtx.isOriginLocal() && rCtx.isInTxScope()) {
GlobalTransaction globalTransaction = ((TxInvocationContext) rCtx).getGlobalTransaction();
addLocalTransaction(globalTransaction);
}
});
}
private synchronized void addLocalTransaction(GlobalTransaction globalTransaction) {
if (!localTransactionsOperation.contains(globalTransaction)) {
localTransactionsOperation.add(globalTransaction);
}
}
private synchronized void seen(GlobalTransaction globalTransaction, boolean local) {
log.tracef("Seen transaction %s. Local? %s", globalTransaction, local);
if (local) {
localTransactionsSeen.add(globalTransaction);
} else {
remoteTransactionsSeen.add(globalTransaction);
}
}
private void sleep() throws InterruptedException {
Thread.sleep(100);
}
private synchronized int completedLocalTransactions() {
int count = 0;
TransactionTable transactionTable = getTransactionTable();
for (GlobalTransaction tx : localTransactionsSeen) {
if (!transactionTable.containsLocalTx(tx)) {
count++;
}
}
return count;
}
private synchronized int completedRemoteTransactions() {
int count = 0;
TransactionTable transactionTable = getTransactionTable();
for (GlobalTransaction tx : remoteTransactionsSeen) {
if (!transactionTable.containRemoteTx(tx)) {
count++;
}
}
return count;
}
private synchronized boolean completedLocalTransactions(GlobalTransaction globalTransaction) {
return localTransactionsSeen.contains(globalTransaction) &&
!getTransactionTable().containsLocalTx(globalTransaction);
}
private synchronized boolean completedRemoteTransactions(GlobalTransaction globalTransaction) {
return remoteTransactionsSeen.contains(globalTransaction) &&
!getTransactionTable().containRemoteTx(globalTransaction);
}
private TransactionTable getTransactionTable() {
return cache.getAdvancedCache().getComponentRegistry().getComponent(TransactionTable.class);
}
private static class ClearGlobalTransaction extends GlobalTransaction {
ClearGlobalTransaction() {
super(null, false);
}
}
}
| 9,150
| 39.49115
| 138
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ControlledTransport.java
|
package org.infinispan.util;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.commons.time.TimeService;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.responses.SuccessfulResponse;
import org.infinispan.remoting.responses.ValidResponse;
import org.infinispan.remoting.rpc.ResponseFilter;
import org.infinispan.remoting.rpc.ResponseMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.AbstractDelegatingTransport;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.BackupResponse;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.SiteAddress;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.XSiteResponse;
import org.infinispan.remoting.transport.impl.SingleResponseCollector;
import org.infinispan.remoting.transport.impl.SingletonMapResponseCollector;
import org.infinispan.remoting.transport.impl.XSiteResponseImpl;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.XSiteBackup;
import org.infinispan.xsite.XSiteReplicateCommand;
import net.jcip.annotations.GuardedBy;
/**
* @author Mircea.Markus@jboss.com
* @author Dan Berindei
* @since 4.2
*/
@Scope(Scopes.GLOBAL)
public class ControlledTransport extends AbstractDelegatingTransport {
private static final Log log = LogFactory.getLog(ControlledTransport.class);
private static final int TIMEOUT_SECONDS = 10;
@Inject EmbeddedCacheManager manager;
@Inject @ComponentName(TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService timeoutExecutor;
@Inject @ComponentName(NON_BLOCKING_EXECUTOR)
ExecutorService nonBlockingExecutor;
@Inject TimeService timeService;
private volatile boolean stopped = false;
private volatile boolean excludeAllCacheCommands;
private final Set<Class<? extends ReplicableCommand>> excludedCommands =
Collections.synchronizedSet(new HashSet<>());
private final BlockingQueue<CompletableFuture<ControlledRequest<?>>> waiters = new LinkedBlockingDeque<>();
private RuntimeException globalError;
protected ControlledTransport(Transport realOne) {
super(realOne);
}
public static ControlledTransport replace(Cache<?, ?> cache) {
return replace(cache.getCacheManager());
}
public static ControlledTransport replace(EmbeddedCacheManager manager) {
Transport transport = extractGlobalComponent(manager, Transport.class);
if (transport instanceof ControlledTransport) {
throw new IllegalStateException("One ControlledTransport per cache should be enough");
}
ControlledTransport controlledTransport = new ControlledTransport(transport);
log.tracef("Installing ControlledTransport on %s", controlledTransport.getAddress());
TestingUtil.replaceComponent(manager, Transport.class, controlledTransport, true);
return controlledTransport;
}
@SafeVarargs
public final void excludeCommands(Class<? extends ReplicableCommand>... excluded) {
if (stopped) {
throw new IllegalStateException("Trying to exclude commands but we already stopped intercepting");
}
excludedCommands.clear();
excludedCommands.addAll(Arrays.asList(excluded));
}
public final void excludeCacheCommands() {
if (stopped) {
throw new IllegalStateException("Trying to exclude cache commands but we already stopped intercepting");
}
excludeAllCacheCommands = true;
}
public void stopBlocking() {
log.debugf("Stopping intercepting RPC calls on %s", actual.getAddress());
stopped = true;
throwGlobalError();
if (!waiters.isEmpty()) {
fail("Stopped intercepting RPCs on " + actual.getAddress() + ", but there are " + waiters.size() + " waiters in the queue");
}
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
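    * <p>A minimal usage sketch (hypothetical test code; {@code manager(0)}, {@code cache(0)} and {@code fork}
    * are assumed test helpers, and the command class is only an example):
    * <pre>{@code
    * ControlledTransport transport = ControlledTransport.replace(manager(0));
    * Future<?> write = fork(() -> cache(0).put("k", "v"));
    * BlockedRequest<SingleRpcCommand> blocked = transport.expectCommand(SingleRpcCommand.class);
    * // inspect the command, then let the request proceed via the BlockedRequest API
    * transport.stopBlocking();
    * }</pre>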
*/
public <T extends ReplicableCommand> BlockedRequest<T> expectCommand(Class<T> expectedCommandClass) {
return uncheckedGet(expectCommandAsync(expectedCommandClass), expectedCommandClass);
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand>
BlockedRequest<T> expectCommand(Class<T> expectedCommandClass, Consumer<T> checker) {
BlockedRequest<T> blockedRequest = uncheckedGet(expectCommandAsync(expectedCommandClass), this);
T command = expectedCommandClass.cast(blockedRequest.request.getCommand());
checker.accept(command);
return blockedRequest;
}
public <T extends ReplicableCommand>
BlockedRequests<T> expectCommands(Class<T> expectedCommandClass, Address... targets) {
return expectCommands(expectedCommandClass, Arrays.asList(targets));
}
public <T extends ReplicableCommand>
BlockedRequests<T> expectCommands(Class<T> expectedCommandClass, Collection<Address> targets) {
Map<Address, BlockedRequest<T>> requests = new HashMap<>();
for (int i = 0; i < targets.size(); i++) {
BlockedRequest<T> request = expectCommand(expectedCommandClass);
requests.put(request.getTarget(), request);
}
assertEquals(new HashSet<>(targets), requests.keySet());
return new BlockedRequests<>(requests);
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand>
CompletableFuture<BlockedRequest<T>> expectCommandAsync(Class<T> expectedCommandClass) {
throwGlobalError();
log.tracef("Waiting for command %s", expectedCommandClass);
CompletableFuture<ControlledRequest<?>> future = new CompletableFuture<>();
waiters.add(future);
return future.thenApply(request -> {
log.tracef("Blocked command %s", request.command);
assertTrue("Expecting a " + expectedCommandClass.getName() + ", got " + request.getCommand(),
expectedCommandClass.isInstance(request.getCommand()));
return new BlockedRequest<>(request);
});
}
public void expectNoCommand() {
throwGlobalError();
assertNull("There should be no queued commands", waiters.poll());
}
public void expectNoCommand(long timeout, TimeUnit timeUnit) throws InterruptedException {
throwGlobalError();
assertNull("There should be no queued commands", waiters.poll(timeout, timeUnit));
}
@Override
public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpcCommand,
ResponseMode mode, long timeout, ResponseFilter responseFilter,
DeliverOrder deliverOrder, boolean anycast) throws Exception {
throw new UnsupportedOperationException();
}
@Override
public Map<Address, Response> invokeRemotely(Map<Address, ReplicableCommand> rpcCommands, ResponseMode mode,
long timeout, boolean usePriorityQueue, ResponseFilter responseFilter,
boolean totalOrder, boolean anycast) throws Exception {
throw new UnsupportedOperationException();
}
@Override
public Map<Address, Response> invokeRemotely(Map<Address, ReplicableCommand> rpcCommands, ResponseMode mode,
long timeout, ResponseFilter responseFilter, DeliverOrder deliverOrder,
boolean anycast) throws Exception {
throw new UnsupportedOperationException();
}
@Override
public CompletableFuture<Map<Address, Response>> invokeRemotelyAsync(Collection<Address> recipients,
ReplicableCommand rpcCommand, ResponseMode mode,
long timeout, ResponseFilter responseFilter,
DeliverOrder deliverOrder, boolean anycast)
throws Exception {
throw new UnsupportedOperationException();
}
@Override
public void sendTo(Address destination, ReplicableCommand rpcCommand, DeliverOrder deliverOrder) throws Exception {
performSend(Collections.singletonList(destination), rpcCommand, c -> {
try {
actual.sendTo(destination, rpcCommand, deliverOrder);
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
return null;
});
}
@Override
public void sendToMany(Collection<Address> destinations, ReplicableCommand rpcCommand, DeliverOrder deliverOrder)
throws Exception {
performSend(destinations, rpcCommand, c -> {
try {
actual.sendToMany(destinations, rpcCommand, deliverOrder);
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
return null;
});
}
@Override
public void sendToAll(ReplicableCommand rpcCommand, DeliverOrder deliverOrder) throws Exception {
performSend(actual.getMembers(), rpcCommand, c -> {
try {
actual.sendToAll(rpcCommand, deliverOrder);
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
return null;
});
}
@Override
public BackupResponse backupRemotely(Collection<XSiteBackup> backups, XSiteReplicateCommand rpcCommand)
throws Exception {
throw new UnsupportedOperationException();
}
@Override
public <O> XSiteResponse<O> backupRemotely(XSiteBackup backup, XSiteReplicateCommand<O> rpcCommand) {
XSiteResponseImpl<O> xSiteResponse = new XSiteResponseImpl<>(timeService, backup);
SiteAddress address = new SiteAddress(backup.getSiteName());
CompletionStage<ValidResponse> request =
performRequest(Collections.singletonList(address), rpcCommand, SingleResponseCollector.validOnly(), c -> {
try {
return actual.backupRemotely(backup, rpcCommand).handle(
(rv, t) -> {
// backupRemotely parses the response, here we turn the value/exception back into a response
ValidResponse cv;
if (t == null) {
cv = c.addResponse(address, SuccessfulResponse.create(rv));
} else if (t instanceof Exception) {
cv = c.addResponse(address, new ExceptionResponse((Exception) t));
} else {
cv = c.addResponse(address, new ExceptionResponse(new TestException(t)));
}
if (cv == null) {
cv = c.finish();
}
return cv;
});
} catch (Exception e) {
return CompletableFuture.failedFuture(e);
}
});
request.whenComplete(xSiteResponse);
return xSiteResponse;
}
@Override
public <T> CompletionStage<T> invokeCommand(Address target, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder, long timeout,
TimeUnit unit) {
return performRequest(Collections.singletonList(target), command, collector,
c -> actual.invokeCommand(target, command, c, deliverOrder, timeout, unit));
}
@Override
public <T> CompletionStage<T> invokeCommand(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder, long timeout,
TimeUnit unit) {
return performRequest(targets, command, collector,
c -> actual.invokeCommand(targets, command, c, deliverOrder, timeout, unit));
}
@Override
public <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
DeliverOrder deliverOrder, long timeout, TimeUnit unit) {
return performRequest(actual.getMembers(), command, collector,
c -> actual.invokeCommandOnAll(command, c, deliverOrder, timeout, unit));
}
@Override
public <T> CompletionStage<T> invokeCommandStaggered(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, DeliverOrder deliverOrder,
long timeout, TimeUnit unit) {
      return performRequest(targets, command, collector,
                            c -> actual.invokeCommandStaggered(targets, command, c, deliverOrder, timeout,
                                                               unit));
}
@Override
public <T> CompletionStage<T> invokeCommands(Collection<Address> targets,
Function<Address, ReplicableCommand> commandGenerator,
ResponseCollector<T> collector, DeliverOrder deliverOrder, long timeout,
TimeUnit timeUnit) {
// Split the invocation into multiple unicast requests
AbstractDelegatingRpcManager.CommandsRequest<T>
action = new AbstractDelegatingRpcManager.CommandsRequest<>(targets, collector);
for (Address target : targets) {
if (target.equals(actual.getAddress()))
continue;
invokeCommand(target, commandGenerator.apply(target), SingletonMapResponseCollector.ignoreLeavers(),
deliverOrder, timeout, timeUnit)
.whenComplete(action);
}
return action.resultFuture;
}
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker) {
if (stopped || isCommandExcluded(command)) {
log.tracef("Not blocking excluded command %s", command);
return invoker.apply(collector);
}
log.debugf("Intercepted command to %s: %s", targets, command);
// Ignore the SingleRpcCommand wrapper
if (command instanceof SingleRpcCommand) {
command = ((SingleRpcCommand) command).getCommand();
}
Address excluded = actual.getAddress();
ControlledRequest<T> controlledRequest =
new ControlledRequest<>(command, targets, collector, invoker, nonBlockingExecutor, excluded);
try {
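         // Hand the blocked request over to a test thread: expectCommandAsync() queues a waiter future,
         // and completing it here is what unblocks the test's expectCommand() call.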
CompletableFuture<ControlledRequest<?>> waiter = waiters.poll(TIMEOUT_SECONDS, SECONDS);
if (waiter == null) {
TimeoutException t = new TimeoutException("Found no waiters for command " + command);
addGlobalError(t);
throw t;
}
waiter.complete(controlledRequest);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new TestException(e);
} catch (Exception e) {
throw new TestException(e);
}
if (collector != null) {
ScheduledFuture<?> cancelTask = timeoutExecutor.schedule(() -> {
TimeoutException e = new TimeoutException("Timed out waiting for test to unblock command " +
controlledRequest.getCommand());
addGlobalError(e);
controlledRequest.fail(e);
}, TIMEOUT_SECONDS * 2, SECONDS);
controlledRequest.resultFuture.whenComplete((ignored, throwable) -> cancelTask.cancel(false));
}
// resultFuture is completed from a test thread, and we don't want to run the interceptor callbacks there
return controlledRequest.resultFuture.whenCompleteAsync((r, t) -> {}, nonBlockingExecutor);
}
private void addGlobalError(RuntimeException t) {
if (globalError == null) {
globalError = t;
} else {
globalError.addSuppressed(t);
}
}
protected <T> void performSend(Collection<Address> targets, ReplicableCommand command,
Function<ResponseCollector<T>, CompletionStage<T>> invoker) {
performRequest(targets, command, null, invoker);
}
@Override
public void start() {
// Do nothing, the wrapped transport is already started
}
   @Override
   public void stop() {
stopBlocking();
super.stop();
}
private boolean isCommandExcluded(ReplicableCommand command) {
if (excludeAllCacheCommands && command instanceof CacheRpcCommand)
return true;
for (Class<? extends ReplicableCommand> excludedCommand : excludedCommands) {
if (excludedCommand.isInstance(command))
return true;
}
return false;
}
private void throwGlobalError() {
if (globalError != null) {
throw globalError;
}
}
static <T> T uncheckedGet(CompletionStage<T> stage, Object request) {
try {
return stage.toCompletableFuture().get(TIMEOUT_SECONDS, SECONDS);
} catch (Exception e) {
throw new TestException(String.valueOf(request), e);
}
}
/**
* A controlled request.
*
* The real RpcManager will not send the command to the targets until the test calls {@link #send()}.
* Responses received from the targets are stored in {@link #responseFutures}, and after the last response
* is received they are also stored in the {@link #finishFuture} map.
*
* The responses are only passed to the real response collector when the test calls
* {@link #collectResponse(Address, Response)}, and {@link #collectFinish()} finishes the collector.
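    *
    * A rough sketch of that lifecycle, as the test drives it indirectly through {@link BlockedRequest} and
    * {@link SentRequest} ({@code a1} and {@code r1} are placeholders):
    * <pre>{@code
    * request.send();                   // forward the command; responses start queueing
    * request.collectResponse(a1, r1);  // pass one queued response to the real collector
    * request.collectFinish();          // tell the real collector that no more responses will arrive
    * }</pre>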
*/
static class ControlledRequest<T> {
private final ReplicableCommand command;
private final Collection<Address> targets;
private final Function<ResponseCollector<T>, CompletionStage<T>> invoker;
private final ExecutorService executor;
private final CompletableFuture<T> resultFuture = new CompletableFuture<>();
private final LinkedHashMap<Address, CompletableFuture<Response>> responseFutures = new LinkedHashMap<>();
private final CompletableFuture<Map<Address, Response>> finishFuture = new CompletableFuture<>();
private final CompletableFuture<Void> sendFuture = new CompletableFuture<>();
private final Lock collectLock = new ReentrantLock();
@GuardedBy("collectLock")
private final ResponseCollector<T> collector;
@GuardedBy("collectLock")
private final Set<Address> collectedResponses = new HashSet<>();
@GuardedBy("collectLock")
private boolean collectedFinish;
ControlledRequest(ReplicableCommand command, Collection<Address> targets, ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
ExecutorService executor, Address excluded) {
this.command = command;
this.targets = targets;
this.collector = collector;
this.invoker = invoker;
this.executor = executor;
for (Address target : targets) {
if (!target.equals(excluded)) {
responseFutures.put(target, new CompletableFuture<>());
}
}
}
void send() {
invoker.apply(new ResponseCollector<T>() {
@Override
public T addResponse(Address sender, Response response) {
queueResponse(sender, response);
return null;
}
@Override
public T finish() {
queueFinish();
return null;
}
});
sendFuture.complete(null);
}
void skipSend() {
sendFuture.complete(null);
for (CompletableFuture<Response> responseFuture : responseFutures.values()) {
responseFuture.complete(null);
}
}
void awaitSend() {
uncheckedGet(sendFuture, this);
}
private void queueResponse(Address sender, Response response) {
log.tracef("Queueing response from %s for command %s", sender, command);
CompletableFuture<Response> responseFuture = responseFutures.get(sender);
boolean completedNow = responseFuture.complete(response);
if (!completedNow) {
fail(new IllegalStateException("Duplicate response received from " + sender + ": " + response));
}
}
private void queueFinish() {
log.tracef("Queueing finish for command %s", command);
Map<Address, Response> responseMap = new LinkedHashMap<>();
for (Map.Entry<Address, CompletableFuture<Response>> entry : responseFutures.entrySet()) {
Address sender = entry.getKey();
CompletableFuture<Response> responseFuture = entry.getValue();
// Don't wait for all responses in case this is a staggered request
if (responseFuture.isDone()) {
responseMap.put(sender, uncheckedGet(responseFuture, this));
} else {
responseFuture.complete(null);
}
}
boolean completedNow = finishFuture.complete(responseMap);
if (!completedNow) {
fail(new IllegalStateException("Finish queued more than once"));
}
}
void collectResponse(Address sender, Response response) {
try {
T result;
collectLock.lock();
try {
throwIfFailed();
assertTrue(collectedResponses.add(sender));
result = collector.addResponse(sender, response);
if (result != null) {
// Don't allow collectFinish on this request
collectedFinish = true;
}
} finally {
collectLock.unlock();
}
if (result != null) {
resultFuture.complete(result);
}
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
}
}
void collectFinish() {
try {
T result;
collectLock.lock();
try {
throwIfFailed();
assertFalse(collectedFinish);
collectedFinish = true;
result = collector.finish();
} finally {
collectLock.unlock();
}
resultFuture.complete(result);
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
}
}
void skipFinish() {
collectLock.lock();
try {
assertFalse(collectedFinish);
} finally {
collectLock.unlock();
}
assertTrue(resultFuture.isDone());
}
void fail(Throwable t) {
log.tracef("Failing execution of %s with %s", command, t);
resultFuture.completeExceptionally(t);
// Unblock the thread waiting for the request to be sent, if it's not already sent
sendFuture.completeExceptionally(t);
}
void throwIfFailed() {
if (resultFuture.isCompletedExceptionally()) {
resultFuture.join();
}
}
boolean isDone() {
return resultFuture.isDone();
}
ReplicableCommand getCommand() {
return command;
}
Collection<Address> getTargets() {
return targets;
}
boolean hasCollector() {
return collector != null;
}
CompletableFuture<Response> responseFuture(Address sender) {
return responseFutures.get(sender);
}
CompletableFuture<Map<Address, Response>> finishFuture() {
return finishFuture;
}
@Override
public String toString() {
return "ControlledRequest{" +
"command=" + command +
", targets=" + targets +
'}';
}
}
/**
* Unblock and wait for the responses of a blocked remote invocation.
* <p>
* For example, {@code request.send().expectResponse(a1, r1).replace(r2).receiveAll()}.
*/
public static class BlockedRequest<C extends ReplicableCommand> {
private final ControlledRequest<?> request;
public BlockedRequest(ControlledRequest<?> request) {
this.request = request;
}
/**
* Unblock the request, sending it to its targets.
* <p>
* It will block again when waiting for responses.
*/
public SentRequest send() {
assert !request.isDone();
log.tracef("Sending command %s", request.getCommand());
request.send();
if (request.hasCollector()) {
return new SentRequest(request);
} else {
return null;
}
}
/**
* Avoid sending the request, and finish it with the given responses instead.
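       * <p>
       * For example, {@code blocked.skipSend().receive(target, SuccessfulResponse.create(null))} fakes a
       * successful empty reply from {@code target} without ever invoking the command remotely
       * (the names are placeholders).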
*/
public FakeResponses skipSend() {
assert !request.isDone();
log.tracef("Not sending request %s", request.getCommand());
request.skipSend();
if (request.hasCollector()) {
return new FakeResponses(request);
} else {
return null;
}
}
public void fail() {
fail(new TestException("Induced failure!"));
}
public void fail(Exception e) {
request.fail(e);
}
public C getCommand() {
return (C) request.getCommand();
}
public Collection<Address> getTargets() {
return request.getTargets();
}
public Address getTarget() {
Collection<Address> targets = request.getTargets();
assertEquals(1, targets.size());
return targets.iterator().next();
}
@Override
public String toString() {
return "BlockedRequest{" +
"command=" + request.command +
", targets=" + request.targets +
'}';
}
}
public static class SentRequest {
private final ControlledRequest<?> request;
SentRequest(ControlledRequest<?> request) {
this.request = request;
}
/**
* Complete the request with a {@link TimeoutException}
*/
public void forceTimeout() {
assertFalse(request.isDone());
request.fail(log.requestTimedOut(-1, "Induced timeout failure", "some time"));
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender, Consumer<Response> checker) {
BlockedResponse br = uncheckedGet(expectResponseAsync(sender), this);
checker.accept(br.response);
return br;
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender) {
return uncheckedGet(expectResponseAsync(sender), this);
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender, Response expectedResponse) {
return expectResponse(sender, r -> assertEquals(expectedResponse, r));
}
/**
* Wait for a {@code CacheNotFoundResponse} from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectLeaver(Address a) {
return expectResponse(a, CacheNotFoundResponse.INSTANCE);
}
/**
* Wait for an {@code ExceptionResponse} from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectException(Address a, Class<? extends Exception> expectedException) {
return expectResponse(a, r -> {
Exception exception = ((ExceptionResponse) r).getException();
Exceptions.assertException(expectedException, exception);
});
}
/**
* Wait for all the responses.
*/
public BlockedResponseMap expectAllResponses() {
return uncheckedGet(expectAllResponsesAsync(), this);
}
/**
* Wait for all the responses.
*/
public BlockedResponseMap expectAllResponses(BiConsumer<Address, Response> checker) {
BlockedResponseMap blockedResponseMap = uncheckedGet(expectAllResponsesAsync(), this);
blockedResponseMap.responseMap.forEach(checker);
return blockedResponseMap;
}
/**
* Wait for all the responses and process them.
*/
public void receiveAll() {
expectAllResponses().receive();
}
public void receiveAllAsync() {
expectAllResponsesAsync().thenAccept(BlockedResponseMap::receive);
}
/**
* Complete a request after expecting and receiving responses individually, e.g. with
* {@link #expectResponse(Address)}.
*
* This method blocks until all the responses have been received internally, but doesn't pass them on
* to the original response collector (it only calls {@link ResponseCollector#finish()}).
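       * <p>
       * A sketch of the intended call pattern ({@code sent}, {@code a1} and {@code a2} are placeholders):
       * <pre>{@code
       * sent.expectResponse(a1).receive();
       * sent.expectResponse(a2).replace(CacheNotFoundResponse.INSTANCE);
       * sent.finish();
       * }</pre>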
*/
public void finish() {
uncheckedGet(request.finishFuture(), this);
request.collectFinish();
}
public void noFinish() {
request.skipFinish();
}
public CompletionStage<BlockedResponse> expectResponseAsync(Address sender) {
request.throwIfFailed();
assertFalse(request.isDone());
return request.responseFuture(sender).thenApply(response -> {
log.debugf("Got response for %s from %s: %s", request.getCommand(), sender, response);
return new BlockedResponse(request, this, sender, response);
});
}
public CompletionStage<BlockedResponseMap> expectAllResponsesAsync() {
request.throwIfFailed();
assertFalse(request.isDone());
return request.finishFuture()
.thenApply(responseMap -> new BlockedResponseMap(request, responseMap));
}
@Override
public String toString() {
return "BlockedRequest{" +
"command=" + request.command +
", targets=" + request.targets +
'}';
}
}
public static class BlockedResponse {
private final ControlledRequest<?> request;
final SentRequest sentRequest;
final Address sender;
final Response response;
private BlockedResponse(ControlledRequest<?> request, SentRequest sentRequest, Address sender,
Response response) {
this.request = request;
this.sentRequest = sentRequest;
this.sender = sender;
this.response = response;
}
/**
* Process the response from this {@code BlockedResponse}'s target.
* <p>
* Note that processing the last response will NOT complete the request, you still need to call
* {@link SentRequest#receiveAll()}.
*/
public SentRequest receive() {
log.tracef("Unblocking response from %s: %s", sender, response);
request.collectResponse(this.sender, response);
return sentRequest;
}
/**
* Replace the response from this {@code BlockedResponse}'s target with a fake response and process it.
*/
public SentRequest replace(Response newResponse) {
log.tracef("Replacing response from %s: %s (was %s)", sender, newResponse, response);
request.collectResponse(this.sender, newResponse);
return sentRequest;
}
public CompletionStage<SentRequest> receiveAsync() {
return CompletableFuture.supplyAsync(this::receive, request.executor);
}
public CompletionStage<SentRequest> replaceAsync(Response newResponse) {
return CompletableFuture.supplyAsync(() -> replace(newResponse), request.executor);
}
public Address getSender() {
return sender;
}
public Response getResponse() {
return response;
}
@Override
public String toString() {
return "BlockedResponse{" +
"command=" + request.command +
", response={" + sender + "=" + response + '}' +
'}';
}
}
public static class BlockedResponseMap {
private final ControlledRequest<?> request;
private final Map<Address, Response> responseMap;
private BlockedResponseMap(ControlledRequest<?> request,
Map<Address, Response> responseMap) {
this.request = request;
this.responseMap = responseMap;
}
public void receive() {
assertFalse(request.resultFuture.isDone());
log.tracef("Unblocking responses for %s: %s", request.getCommand(), responseMap);
responseMap.forEach(request::collectResponse);
if (!request.isDone()) {
uncheckedGet(request.finishFuture(), this);
request.collectFinish();
}
}
public void replace(Map<Address, Response> newResponses) {
assertFalse(request.resultFuture.isDone());
log.tracef("Replacing responses for %s: %s (was %s)", request.getCommand(), newResponses, responseMap);
newResponses.forEach(request::collectResponse);
if (!request.isDone()) {
uncheckedGet(request.finishFuture(), this);
request.collectFinish();
}
}
public CompletionStage<Void> receiveAsync() {
return CompletableFuture.runAsync(this::receive, request.executor);
}
public CompletionStage<Void> replaceAsync(Map<Address, Response> newResponses) {
return CompletableFuture.runAsync(() -> replace(newResponses), request.executor);
}
public Map<Address, Response> getResponses() {
return responseMap;
}
@Override
public String toString() {
return "BlockedResponseMap{" +
"command=" + request.command +
", responses=" + responseMap +
'}';
}
}
public static class FakeResponses {
private final ControlledRequest<?> request;
public FakeResponses(ControlledRequest<?> request) {
this.request = request;
}
public void receive(Map<Address, Response> responses) {
log.tracef("Faking responses for %s: %s", request.getCommand(), responses);
responses.forEach((sender, response) -> {
// For staggered requests we allow the test to specify only the primary owner's response
            assertTrue(request.responseFutures.containsKey(sender));
request.collectResponse(sender, response);
});
if (!request.isDone()) {
assertEquals(responses.keySet(), request.responseFutures.keySet());
request.collectFinish();
}
}
public void receive(Address sender, Response response) {
receive(Collections.singletonMap(sender, response));
}
public void receive(Address sender1, Response response1,
Address sender2, Response response2) {
Map<Address, Response> responses = new LinkedHashMap<>();
responses.put(sender1, response1);
responses.put(sender2, response2);
receive(responses);
}
public void receive(Address sender1, Response response1,
Address sender2, Response response2,
Address sender3, Response response3) {
Map<Address, Response> responses = new LinkedHashMap<>();
responses.put(sender1, response1);
responses.put(sender2, response2);
responses.put(sender3, response3);
receive(responses);
}
public CompletionStage<Void> receiveAsync(Map<Address, Response> responses) {
return CompletableFuture.runAsync(() -> receive(responses), request.executor);
}
public CompletionStage<Void> receiveAsync(Address sender, Response response) {
return CompletableFuture.runAsync(() -> receive(sender, response), request.executor);
}
public CompletionStage<Void> receiveAsync(Address sender1, Response response1,
Address sender2, Response response2) {
return CompletableFuture.runAsync(() -> receive(sender1, response1, sender2, response2), request.executor);
}
/**
* Complete the request with a {@link TimeoutException}
*/
public void forceTimeout() {
fail(log.requestTimedOut(-1, "Induced failure", "some time"));
}
/**
* Complete the request with a custom exception.
*/
private void fail(Throwable e) {
assertFalse(request.resultFuture.isDone());
request.fail(e);
}
public Collection<Address> getTargets() {
return request.getTargets();
}
public Address getTarget() {
Collection<Address> targets = request.getTargets();
assertEquals(1, targets.size());
return targets.iterator().next();
}
@Override
public String toString() {
return "FakeResponses{" +
"command=" + request.command +
", targets=" + request.targets +
'}';
}
}
/**
* Multiple requests sent to individual targets in parallel, e.g. with
* {@link RpcManager#invokeCommands(Collection, Function, ResponseCollector, RpcOptions)}.
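    * <p>
    * A usage sketch ({@code controlled} stands for the intercepting component; the command class,
    * addresses and fake response are placeholders):
    * <pre>{@code
    * BlockedRequests<ClusteredGetCommand> blocked = controlled.expectCommands(ClusteredGetCommand.class, a1, a2);
    * blocked.send(a1).receiveAll();
    * blocked.skipSendAndReceive(a2, CacheNotFoundResponse.INSTANCE);
    * }</pre>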
*/
public static class BlockedRequests<T extends ReplicableCommand> {
private final Map<Address, BlockedRequest<T>> requests;
public BlockedRequests(Map<Address, BlockedRequest<T>> requests) {
this.requests = requests;
}
/**
* Unblock the request, sending it to its targets.
* <p>
* It will block again when waiting for responses.
*/
public SentRequest send(Address target) {
return requests.get(target).send();
}
/**
* Avoid sending the request, and finish it with the given responses instead.
*/
public FakeResponses skipSend(Address target) {
return requests.get(target).skipSend();
}
public void skipSendAndReceive(Address target, Response fakeResponse) {
requests.get(target).skipSend().receive(target, fakeResponse);
}
public void skipSendAndReceiveAsync(Address target, Response fakeResponse) {
requests.get(target).skipSend().receiveAsync(target, fakeResponse);
}
@Override
public String toString() {
Map<Address, ReplicableCommand> commandMap =
requests.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().request.command));
return "BlockedRequests{" +
"requests=" + commandMap +
'}';
}
}
}
| 41,792
| 36.959128
| 133
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ClassFinderTest.java
|
package org.infinispan.util;
import org.infinispan.commons.util.ClassFinder;
import org.testng.Assert;
import org.testng.annotations.Test;
@Test (groups = "functional", testName = "util.ClassFinderTest")
public class ClassFinderTest {
public void testInfinispanClassesNonEmpty() throws Throwable {
Assert.assertFalse(ClassFinder.infinispanClasses().isEmpty());
}
}
| 380
| 26.214286
| 68
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/ExponentialBackOffImplUnitTest.java
|
package org.infinispan.util;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.AssertJUnit;
import org.testng.annotations.Test;
/**
* Unit test for {@link ExponentialBackOffImpl}.
*
* @author Pedro Ruivo
* @since 12.0
*/
@Test(groups = "unit", testName = "util.ExponentialBackOffImplUnitTest")
public class ExponentialBackOffImplUnitTest extends AbstractInfinispanTest {
public void test() {
ExponentialBackOffImpl backOff = new ExponentialBackOffImpl(null); // async back off not used
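      // The assertions below assume each nominal delay (shown in the trailing comments) is randomized by
      // +/- 50%, i.e. the returned value falls in [0.5 * nominal, 1.5 * nominal], capped at a 5 minute
      // (300000 ms) maximum interval.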
assertInterval(backOff.nextBackOffMillis(), 250, 750); //500
assertInterval(backOff.nextBackOffMillis(), 500, 1500); //1000
assertInterval(backOff.nextBackOffMillis(), 1000, 3000); //2000
assertInterval(backOff.nextBackOffMillis(), 2000, 6000); //4000
assertInterval(backOff.nextBackOffMillis(), 4000, 12000); //8000
assertInterval(backOff.nextBackOffMillis(), 8000, 24000); //16000
assertInterval(backOff.nextBackOffMillis(), 16000, 48000); //32000
assertInterval(backOff.nextBackOffMillis(), 32000, 96000); //64000
assertInterval(backOff.nextBackOffMillis(), 64000, 192000); //128000
assertInterval(backOff.nextBackOffMillis(), 128000, 300000); //256000
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
backOff.reset();
assertInterval(backOff.nextBackOffMillis(), 250, 750); //500
assertInterval(backOff.nextBackOffMillis(), 500, 1500); //1000
assertInterval(backOff.nextBackOffMillis(), 1000, 3000); //2000
assertInterval(backOff.nextBackOffMillis(), 2000, 6000); //4000
assertInterval(backOff.nextBackOffMillis(), 4000, 12000); //8000
assertInterval(backOff.nextBackOffMillis(), 8000, 24000); //16000
assertInterval(backOff.nextBackOffMillis(), 16000, 48000); //32000
assertInterval(backOff.nextBackOffMillis(), 32000, 96000); //64000
assertInterval(backOff.nextBackOffMillis(), 64000, 192000); //128000
assertInterval(backOff.nextBackOffMillis(), 128000, 300000); //256000
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
assertInterval(backOff.nextBackOffMillis(), 300000, 300000); //MAX_INTERVAL_MILLIS
}
private void assertInterval(long value, long min, long max) {
String msg = String.format("%d in [%d, %d]?", value, min, max);
log.debug(msg);
AssertJUnit.assertTrue(msg, min <= value && value <= max);
}
}
| 2,749
| 48.107143
| 99
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/PersistenceMockUtil.java
|
package org.infinispan.util;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commons.configuration.ClassAllowList;
import org.infinispan.commons.io.ByteBufferFactoryImpl;
import org.infinispan.commons.test.BlockHoundHelper;
import org.infinispan.commons.test.CommonsTestingUtil;
import org.infinispan.commons.time.TimeService;
import org.infinispan.configuration.ConfigurationManager;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.distribution.ch.impl.SingleSegmentKeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.impl.TestComponentAccessors;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.manager.TestModuleRepository;
import org.infinispan.marshall.persistence.PersistenceMarshaller;
import org.infinispan.marshall.persistence.impl.MarshalledEntryFactoryImpl;
import org.infinispan.persistence.InitializationContextImpl;
import org.infinispan.persistence.spi.InitializationContext;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.BlockingManager;
import org.infinispan.util.concurrent.BlockingManagerImpl;
import org.infinispan.util.concurrent.NonBlockingManager;
import org.infinispan.util.concurrent.NonBlockingManagerImpl;
import org.infinispan.util.concurrent.WithinThreadExecutor;
import org.mockito.Mockito;
/**
 * Utility class that mocks {@link org.infinispan.AdvancedCache} and {@link org.infinispan.persistence.spi.InitializationContext}
 * for testing {@link org.infinispan.persistence.spi.CacheWriter} and {@link org.infinispan.persistence.spi.CacheLoader} implementations.
*
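 * <p>
 * A minimal usage sketch (it assumes the test already built a {@code Configuration} with exactly one store
 * and has a {@code PersistenceMarshaller}; {@code store} is the store instance under test):
 * <pre>{@code
 * InitializationContext ctx = PersistenceMockUtil.createContext(MyStoreTest.class, configuration, marshaller);
 * store.init(ctx);
 * }</pre>
 *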
* @author pedro
* @since 7.0
*/
public class PersistenceMockUtil {
public static class InvocationContextBuilder {
private final Class<?> testClass;
private final Configuration configuration;
private final PersistenceMarshaller persistenceMarshaller;
private ClassAllowList classAllowList;
private TimeService timeService = AbstractInfinispanTest.TIME_SERVICE;
private KeyPartitioner keyPartitioner = SingleSegmentKeyPartitioner.getInstance();
private NonBlockingManager nonBlockingManager;
private BlockingManager blockingManager;
private ScheduledExecutorService timeoutScheduledExecutor;
public InvocationContextBuilder(Class<?> testClass, Configuration configuration, PersistenceMarshaller persistenceMarshaller) {
this.testClass = testClass;
this.configuration = configuration;
this.persistenceMarshaller = persistenceMarshaller;
blockingManager = new BlockingManagerImpl();
TestingUtil.inject(blockingManager,
new TestComponentAccessors.NamedComponent(KnownComponentNames.BLOCKING_EXECUTOR, BlockHoundHelper.allowBlockingExecutor()),
new TestComponentAccessors.NamedComponent(KnownComponentNames.NON_BLOCKING_EXECUTOR, BlockHoundHelper.ensureNonBlockingExecutor()));
TestingUtil.startComponent(blockingManager);
nonBlockingManager = new NonBlockingManagerImpl();
TestingUtil.inject(nonBlockingManager,
new TestComponentAccessors.NamedComponent(KnownComponentNames.NON_BLOCKING_EXECUTOR, BlockHoundHelper.ensureNonBlockingExecutor()));
TestingUtil.startComponent(nonBlockingManager);
timeoutScheduledExecutor = Mockito.mock(ScheduledExecutorService.class);
}
public InvocationContextBuilder setTimeService(TimeService timeService) {
this.timeService = timeService;
return this;
}
public InvocationContextBuilder setClassAllowList(ClassAllowList classAllowList) {
this.classAllowList = classAllowList;
return this;
}
public InvocationContextBuilder setKeyPartitioner(KeyPartitioner keyPartitioner) {
this.keyPartitioner = keyPartitioner;
return this;
}
public InvocationContextBuilder setBlockingManager(BlockingManager blockingManager) {
this.blockingManager = blockingManager;
return this;
}
public InvocationContextBuilder setNonBlockingManager(NonBlockingManager nonBlockingManager) {
this.nonBlockingManager = nonBlockingManager;
return this;
}
public InvocationContextBuilder setScheduledExecutor(ScheduledExecutorService timeoutScheduledExecutor) {
this.timeoutScheduledExecutor = timeoutScheduledExecutor;
return this;
}
public InitializationContext build() {
Cache mockCache = mockCache(testClass.getSimpleName(), configuration, timeService, classAllowList, timeoutScheduledExecutor);
MarshalledEntryFactoryImpl mef = new MarshalledEntryFactoryImpl(persistenceMarshaller);
GlobalConfigurationBuilder global = new GlobalConfigurationBuilder();
global.globalState().persistentLocation(CommonsTestingUtil.tmpDirectory(testClass));
return new InitializationContextImpl(configuration.persistence().stores().get(0), mockCache,
keyPartitioner, persistenceMarshaller, timeService, new ByteBufferFactoryImpl(), mef,
new WithinThreadExecutor(), global.build(), blockingManager, nonBlockingManager);
}
}
public static InitializationContext createContext(Class<?> testClass, Configuration configuration, PersistenceMarshaller marshaller) {
return createContext(testClass, configuration, marshaller, AbstractInfinispanTest.TIME_SERVICE);
}
public static InitializationContext createContext(Class<?> testClass, Configuration configuration, PersistenceMarshaller marshaller, TimeService timeService) {
return createContext(testClass, configuration, marshaller, timeService, null);
}
public static InitializationContext createContext(Class<?> testClass, Configuration configuration, PersistenceMarshaller marshaller,
TimeService timeService, ClassAllowList allowList) {
return new InvocationContextBuilder(testClass, configuration, marshaller)
.setTimeService(timeService)
.setClassAllowList(allowList)
.build();
}
private static Cache mockCache(String nodeName, Configuration configuration, TimeService timeService,
ClassAllowList allowList, ScheduledExecutorService timeoutScheduledExecutor) {
String cacheName = "mock-cache";
AdvancedCache cache = mock(AdvancedCache.class, RETURNS_DEEP_STUBS);
GlobalConfiguration gc = new GlobalConfigurationBuilder()
.transport().nodeName(nodeName)
.build();
Set<String> cachesSet = new HashSet<>();
EmbeddedCacheManager cm = mock(EmbeddedCacheManager.class);
when(cm.getCacheManagerConfiguration()).thenReturn(gc);
when(cm.getClassAllowList()).thenReturn(new ClassAllowList());
GlobalComponentRegistry gcr = new GlobalComponentRegistry(gc, cm, cachesSet, TestModuleRepository.defaultModuleRepository(),
mock(ConfigurationManager.class));
BasicComponentRegistry gbcr = gcr.getComponent(BasicComponentRegistry.class);
gbcr.replaceComponent(TimeService.class.getName(), timeService, true);
gbcr.replaceComponent(KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR, timeoutScheduledExecutor, false);
ComponentRegistry registry = new ComponentRegistry(cacheName, configuration, cache, gcr,
configuration.getClass().getClassLoader());
      when(cache.getCacheManager().getGlobalComponentRegistry()).thenReturn(gcr);
when(cache.getClassLoader()).thenReturn(PersistenceMockUtil.class.getClassLoader());
when(cache.getCacheManager().getCacheManagerConfiguration()).thenReturn(gc);
when(cache.getCacheManager().getClassAllowList()).thenReturn(allowList);
when(cache.getName()).thenReturn(cacheName);
when(cache.getAdvancedCache()).thenReturn(cache);
when(cache.getComponentRegistry()).thenReturn(registry);
when(cache.getStatus()).thenReturn(ComponentStatus.RUNNING);
when(cache.getCacheConfiguration()).thenReturn(configuration);
return cache;
}
}
| 8,988
| 51.261628
| 162
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/CountingRpcManager.java
|
package org.infinispan.util;
import java.util.Collection;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
/**
 * Use the {@link CountingRpcManager#replaceRpcManager(org.infinispan.Cache)} method to install this counting wrapper on a cache.
*
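 * A hypothetical usage sketch (the cache, key and ownership assumptions are placeholders):
 * <pre>{@code
 * CountingRpcManager counting = CountingRpcManager.replaceRpcManager(cache);
 * counting.resetStats();
 * cache.get("k");
 * assert counting.clusterGet > 0; // assumes "k" is not owned locally, so the get went remote
 * }</pre>
 *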
* @author Mircea Markus
* @since 5.2
*/
public class CountingRpcManager extends AbstractDelegatingRpcManager {
public volatile int lockCount;
public volatile int clusterGet;
public volatile int otherCount;
public CountingRpcManager(RpcManager realOne) {
super(realOne);
}
public static CountingRpcManager replaceRpcManager(Cache c) {
AdvancedCache advancedCache = c.getAdvancedCache();
CountingRpcManager crm = new CountingRpcManager(advancedCache.getRpcManager());
BasicComponentRegistry bcr = advancedCache.getComponentRegistry().getComponent(BasicComponentRegistry.class);
bcr.replaceComponent(RpcManager.class.getName(), crm, false);
bcr.rewire();
assert advancedCache.getRpcManager().equals(crm);
return crm;
}
public void resetStats() {
lockCount = 0;
clusterGet = 0;
otherCount = 0;
}
@Override
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
RpcOptions rpcOptions) {
if (command instanceof LockControlCommand) {
lockCount++;
} else if (command instanceof ClusteredGetCommand) {
clusterGet++;
} else {
otherCount++;
}
return super.performRequest(targets, command, collector, invoker, rpcOptions);
}
}
| 2,303
| 34.446154
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/BaseControlledConsistentHashFactory.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.IntFunction;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.impl.DefaultConsistentHash;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.protostream.annotations.ProtoField;
import org.infinispan.remoting.transport.Address;
/**
 * Base consistent hash factory that uses a fixed, test-controlled number of segments and owner assignment.
*
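 * A minimal subclass sketch (purely illustrative): a single-segment factory whose only owner is always the
 * first member of the topology.
 * <pre>{@code
 * public class FirstMemberOwnsAll extends BaseControlledConsistentHashFactory.Default {
 *    public FirstMemberOwnsAll() {
 *       super(1);
 *    }
 *    protected int[][] assignOwners(int numSegments, List<Address> members) {
 *       return new int[][]{{0}};
 *    }
 * }
 * }</pre>
 *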
* @author Pedro Ruivo
* @since 6.0
*/
@SuppressWarnings("unchecked")
public abstract class BaseControlledConsistentHashFactory<CH extends ConsistentHash> implements ConsistentHashFactory<CH> {
protected Trait<CH> trait;
@ProtoField(number = 1, defaultValue = "0")
public int numSegments;
protected BaseControlledConsistentHashFactory() {}
protected BaseControlledConsistentHashFactory(Trait<CH> trait, int numSegments) {
this.trait = trait;
this.numSegments = numSegments;
}
@Override
public CH create(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
assertNumberOfSegments(numSegments);
List<Address>[] segmentOwners = assignSegments(numSegments, numOwners, members);
return create(numOwners, numSegments, members, capacityFactors, segmentOwners, false);
}
private List<Address>[] assignSegments(int numSegments, int numOwners, List<Address> members) {
int[][] ownerIndexes = assignOwners(numSegments, members);
      return Arrays.stream(ownerIndexes)
            .map(indexes -> Arrays.stream(indexes)
                  .mapToObj(members::get)
                  .collect(Collectors.toList()))
            .map(owners -> owners.subList(0, Math.min(owners.size(), numOwners)))
            .toArray((IntFunction<List<Address>[]>) List[]::new);
}
protected CH create(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors,
List<Address>[] segmentOwners, boolean rebalanced) {
return trait.create(numOwners, numSegments, members, capacityFactors, segmentOwners, rebalanced);
}
@Override
public CH updateMembers(CH baseCH, List<Address> newMembers, Map<Address, Float> capacityFactors) {
assertNumberOfSegments(baseCH.getNumSegments());
List<Address>[] segmentOwners = new List[numSegments];
List<Address>[] balancedOwners = null;
int numOwners = trait.getNumOwners(baseCH);
for (int i = 0; i < numSegments; i++) {
List<Address> owners = new ArrayList<>(baseCH.locateOwnersForSegment(i));
owners.retainAll(newMembers);
// updateMembers should only add new owners if there are no owners left and the trait requires a primary owner
if (owners.isEmpty() && trait.requiresPrimaryOwner()) {
if (balancedOwners == null) {
balancedOwners = assignSegments(numSegments, numOwners, newMembers);
}
owners = balancedOwners[i];
}
segmentOwners[i] = owners;
}
CH updated = create(numOwners, numSegments, newMembers, capacityFactors, segmentOwners, false);
return baseCH.equals(updated) ? baseCH : updated;
}
@Override
public CH rebalance(CH baseCH) {
int numOwners = trait.getNumOwners(baseCH);
List<Address>[] owners = assignSegments(baseCH.getNumSegments(), numOwners, baseCH.getMembers());
CH rebalanced = create(numOwners, baseCH.getNumSegments(), baseCH.getMembers(), baseCH.getCapacityFactors(), owners, true);
return baseCH.equals(rebalanced) ? baseCH : rebalanced;
}
@Override
public CH union(CH ch1, CH ch2) {
assertNumberOfSegments(ch1.getNumSegments());
assertNumberOfSegments(ch2.getNumSegments());
return trait.union(ch1, ch2);
}
protected abstract int[][] assignOwners(int numSegments, List<Address> members);
private void assertNumberOfSegments(int numSegments) {
assertEquals("Wrong number of segments.", this.numSegments, numSegments);
}
protected interface Trait<CH extends ConsistentHash> {
CH create(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors,
List<Address>[] segmentOwners, boolean rebalanced);
CH union(CH ch1, CH ch2);
boolean requiresPrimaryOwner();
int getNumOwners(CH ch);
}
public static class DefaultTrait implements Trait<DefaultConsistentHash> {
@Override
public DefaultConsistentHash create(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors, List<Address>[] segmentOwners,
boolean rebalanced) {
return new DefaultConsistentHash(numOwners, numSegments, members, capacityFactors, segmentOwners);
}
@Override
public DefaultConsistentHash union(DefaultConsistentHash ch1, DefaultConsistentHash ch2) {
return ch1.union(ch2);
}
@Override
public boolean requiresPrimaryOwner() {
return true;
}
@Override
public int getNumOwners(DefaultConsistentHash defaultConsistentHash) {
return defaultConsistentHash.getNumOwners();
}
}
public static class ReplicatedTrait implements Trait<ReplicatedConsistentHash> {
@Override
public ReplicatedConsistentHash create(int numOwners, int numSegments, List<Address> members,
Map<Address, Float> capacityFactors, List<Address>[] segmentOwners,
boolean rebalanced) {
         int[] primaryOwnerIndexes = Stream.of(segmentOwners)
               .mapToInt(list -> members.indexOf(list.get(0)))
               .toArray();
         // No support for zero-capacity nodes for now
         return new ReplicatedConsistentHash(members, capacityFactors, Collections.emptyList(), primaryOwnerIndexes);
}
@Override
public ReplicatedConsistentHash union(ReplicatedConsistentHash ch1, ReplicatedConsistentHash ch2) {
return ch1.union(ch2);
}
@Override
public boolean requiresPrimaryOwner() {
return true;
}
@Override
      public int getNumOwners(ReplicatedConsistentHash replicatedConsistentHash) {
return 1;
}
}
public static abstract class Default extends BaseControlledConsistentHashFactory<DefaultConsistentHash> {
protected Default(int numSegments) {
super(new DefaultTrait(), numSegments);
}
}
}
| 6,979
| 38.885714
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/EmbeddedTimeServiceTest.java
|
package org.infinispan.util;
import static org.infinispan.test.AbstractInfinispanTest.TIME_SERVICE;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.util.concurrent.TimeUnit;
import org.infinispan.commons.time.TimeService;
import org.testng.annotations.Test;
/**
* @author Pedro Ruivo
* @since 5.3
*/
@Test(groups = "functional", testName = "util.EmbeddedTimeServiceTest")
public class EmbeddedTimeServiceTest {
public void testMonotonicIncrement() {
TimeService timeService = TIME_SERVICE;
      // Less than or equal in all cases because the clock may not have enough precision and the
      // methods may return the same value.
assertTrue(timeService.time() <= timeService.time());
assertTrue(timeService.wallClockTime() <= timeService.wallClockTime());
}
public void testDuration() {
TimeService timeService = new EmbeddedTimeService() {
@Override
public long time() {
return 10;
}
@Override
public long wallClockTime() {
return 10;
}
};
assertEquals(timeService.timeDuration(0, TimeUnit.NANOSECONDS), 10);
assertEquals(timeService.timeDuration(-1, TimeUnit.NANOSECONDS), 11);
assertEquals(timeService.timeDuration(10, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(11, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(9, TimeUnit.NANOSECONDS), 1);
assertEquals(timeService.timeDuration(9, TimeUnit.MICROSECONDS), 0);
assertEquals(timeService.timeDuration(9, TimeUnit.MILLISECONDS), 0);
assertEquals(timeService.timeDuration(0, 1, TimeUnit.NANOSECONDS), 1);
assertEquals(timeService.timeDuration(0, -1, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(1, 0, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(1, -1, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(-1, -1, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(0, 0, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.timeDuration(0, 1000, TimeUnit.MICROSECONDS), 1);
assertEquals(timeService.timeDuration(0, 1000000, TimeUnit.MILLISECONDS), 1);
}
public void testExpired() {
TimeService timeService = new EmbeddedTimeService() {
@Override
public long time() {
return 10;
}
};
assertTrue(timeService.isTimeExpired(-1));
assertTrue(timeService.isTimeExpired(0));
assertTrue(timeService.isTimeExpired(9));
assertTrue(timeService.isTimeExpired(10));
assertFalse(timeService.isTimeExpired(11));
}
public void testRemainingTime() {
TimeService timeService = new EmbeddedTimeService() {
@Override
public long time() {
return 10;
}
};
assertEquals(timeService.remainingTime(-1, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.remainingTime(0, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.remainingTime(9, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.remainingTime(10, TimeUnit.NANOSECONDS), 0);
assertEquals(timeService.remainingTime(11, TimeUnit.NANOSECONDS), 1);
assertEquals(timeService.remainingTime(11, TimeUnit.MICROSECONDS), 0);
assertEquals(timeService.remainingTime(11, TimeUnit.MILLISECONDS), 0);
}
public void testExpectedTime() {
TimeService timeService = new EmbeddedTimeService() {
@Override
public long time() {
return 10;
}
@Override
public long wallClockTime() {
return 10;
}
};
assertEquals(timeService.expectedEndTime(-1, TimeUnit.NANOSECONDS), 10);
assertEquals(timeService.expectedEndTime(0, TimeUnit.NANOSECONDS), 10);
assertEquals(timeService.expectedEndTime(1, TimeUnit.NANOSECONDS), 11);
assertEquals(timeService.expectedEndTime(9, TimeUnit.NANOSECONDS), 19);
assertEquals(timeService.expectedEndTime(10, TimeUnit.NANOSECONDS), 20);
assertEquals(timeService.expectedEndTime(11, TimeUnit.NANOSECONDS), 21);
assertEquals(timeService.expectedEndTime(11, TimeUnit.MICROSECONDS), 11010);
assertEquals(timeService.expectedEndTime(11, TimeUnit.MILLISECONDS), 11000010);
}
}
| 4,423
| 37.807018
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/NotifierLatch.java
|
package org.infinispan.util;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
 * A latch that can be opened and closed. It allows notification when some thread is blocking on it.
*
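 * A typical interaction, sketched with placeholder names (the worker thread is whatever code calls
 * {@link #blockIfNeeded()}):
 * <pre>{@code
 * NotifierLatch latch = new NotifierLatch("before-topology-update");
 * latch.startBlocking();
 * // ... trigger the operation that will call latch.blockIfNeeded() on another thread ...
 * latch.waitToBlock();   // wait until that thread is parked in the latch
 * // inspect or modify state while the other thread is blocked
 * latch.stopBlocking();  // release it
 * }</pre>
 *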
* @author Pedro Ruivo
* @since 6.0
*/
public class NotifierLatch {
private static final Log log = LogFactory.getLog(NotifierLatch.class);
private final String name;
private boolean enabled = false;
private boolean blocked = false;
private int disableOnUnblock = 0;
public NotifierLatch(String name) {
this.name = name;
}
public final synchronized void startBlocking() {
log.tracef("Start blocking %s", name);
this.enabled = true;
}
public final synchronized void stopBlocking() {
log.tracef("Stop blocking %s", name);
this.enabled = false;
this.disableOnUnblock = 0;
notifyAll();
}
public final synchronized void blockIfNeeded() {
log.tracef("Blocking on %s", name);
blocked = true;
notifyAll();
try {
while (enabled) {
try {
wait();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
}
} finally {
blocked = false;
if (disableOnUnblock > 0 && --disableOnUnblock == 0) {
enabled = true;
}
log.tracef("Resuming on %s", name);
}
}
public final synchronized void waitToBlock() throws InterruptedException {
log.tracef("Waiting for another thread to block on %s", name);
while (!blocked) {
wait();
}
}
public synchronized void unblockOnce() {
log.tracef("Unblocking once %s", name);
enabled = false;
disableOnUnblock++;
notifyAll();
}
public void waitToBlockAndUnblockOnce() throws InterruptedException {
waitToBlock();
unblockOnce();
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("NotifierLatch{");
sb.append("enabled=").append(enabled);
sb.append(", blocked=").append(blocked);
sb.append(", disableOnUnblock=").append(disableOnUnblock);
sb.append('}');
return sb.toString();
}
}
| 2,276
| 25.172414
| 105
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/AbstractDelegatingRpcManager.java
|
package org.infinispan.util;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.function.BiConsumer;
import java.util.function.Function;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.TopologyAffectedCommand;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.XSiteResponse;
import org.infinispan.remoting.transport.impl.SingletonMapResponseCollector;
import org.infinispan.xsite.XSiteBackup;
import org.infinispan.xsite.XSiteReplicateCommand;
/**
 * Base class for delegating {@link RpcManager} implementations: subclasses intercept remote invocations by
 * overriding {@link #performRequest} and {@link #performSend}.
*
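 * A minimal delegating sketch (hypothetical subclass, not part of the test suite) that only counts remote
 * invocations:
 * <pre>{@code
 * public class InvocationCountingRpcManager extends AbstractDelegatingRpcManager {
 *    public final AtomicInteger invocations = new AtomicInteger();
 *    public InvocationCountingRpcManager(RpcManager realOne) {
 *       super(realOne);
 *    }
 *    protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
 *                                                    ResponseCollector<T> collector,
 *                                                    Function<ResponseCollector<T>, CompletionStage<T>> invoker,
 *                                                    RpcOptions rpcOptions) {
 *       invocations.incrementAndGet();
 *       return super.performRequest(targets, command, collector, invoker, rpcOptions);
 *    }
 * }
 * }</pre>
 *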
* @author Pedro Ruivo
* @since 6.0
*/
public abstract class AbstractDelegatingRpcManager implements RpcManager {
protected final RpcManager realOne;
public AbstractDelegatingRpcManager(RpcManager realOne) {
this.realOne = realOne;
}
@Override
public final <T> CompletionStage<T> invokeCommand(Address target, ReplicableCommand command,
ResponseCollector<T> collector, RpcOptions rpcOptions) {
return performRequest(Collections.singleton(target), command, collector,
c -> realOne.invokeCommand(target, command, c, rpcOptions), rpcOptions);
}
@Override
public final <T> CompletionStage<T> invokeCommand(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, RpcOptions rpcOptions) {
return performRequest(targets, command, collector,
c -> realOne.invokeCommand(targets, command, c, rpcOptions), rpcOptions);
}
@Override
public final <T> CompletionStage<T> invokeCommandOnAll(ReplicableCommand command, ResponseCollector<T> collector,
RpcOptions rpcOptions) {
return performRequest(getTransport().getMembers(), command, collector,
c -> realOne.invokeCommandOnAll(command, c, rpcOptions), rpcOptions);
}
@Override
public final <T> CompletionStage<T> invokeCommandStaggered(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector, RpcOptions rpcOptions) {
return performRequest(targets, command, collector,
c -> realOne.invokeCommandStaggered(targets, command, c, rpcOptions), rpcOptions);
}
@Override
public final <T> CompletionStage<T> invokeCommands(Collection<Address> targets,
Function<Address, ReplicableCommand> commandGenerator,
ResponseCollector<T> collector, RpcOptions rpcOptions) {
// Split the invocation into multiple unicast requests
CommandsRequest<T> action = new CommandsRequest<>(targets, collector);
for (Address target : targets) {
if (target.equals(realOne.getAddress()))
continue;
invokeCommand(target, commandGenerator.apply(target), SingletonMapResponseCollector.ignoreLeavers(),
rpcOptions)
.whenComplete(action);
}
return action.resultFuture;
}
@Override
public final <T> T blocking(CompletionStage<T> request) {
return realOne.blocking(request);
}
private void setTopologyId(ReplicableCommand command) {
if (command instanceof TopologyAffectedCommand && ((TopologyAffectedCommand) command).getTopologyId() < 0) {
((TopologyAffectedCommand) command).setTopologyId(getTopologyId());
}
}
@Override
public final void sendTo(Address destination, ReplicableCommand command, DeliverOrder deliverOrder) {
setTopologyId(command);
performSend(Collections.singleton(destination), command,
c -> {
realOne.sendTo(destination, command, deliverOrder);
return null;
});
}
@Override
public final void sendToMany(Collection<Address> destinations, ReplicableCommand command,
DeliverOrder deliverOrder) {
setTopologyId(command);
Collection<Address> targets = destinations != null ? destinations : getTransport().getMembers();
performSend(targets, command,
c -> {
realOne.sendToMany(destinations, command, deliverOrder);
return null;
});
}
@Override
public final void sendToAll(ReplicableCommand command, DeliverOrder deliverOrder) {
setTopologyId(command);
performSend(getTransport().getMembers(), command,
c -> {
realOne.sendToAll(command, deliverOrder);
return null;
});
}
@Override
public <O> XSiteResponse<O> invokeXSite(XSiteBackup backup, XSiteReplicateCommand<O> command) {
return realOne.invokeXSite(backup, command);
}
@Override
public Transport getTransport() {
return realOne.getTransport();
}
@Override
public List<Address> getMembers() {
return realOne.getMembers();
}
@Override
public Address getAddress() {
return realOne.getAddress();
}
@Override
public int getTopologyId() {
return realOne.getTopologyId();
}
@Override
public RpcOptions getSyncRpcOptions() {
return realOne.getSyncRpcOptions();
}
@Override
public RpcOptions getTotalSyncRpcOptions() {
return realOne.getTotalSyncRpcOptions();
}
/**
* Wrap the remote invocation.
*/
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
RpcOptions rpcOptions) {
return invoker.apply(collector);
}
/**
* Wrap the remote invocation.
*/
protected <T> void performSend(Collection<Address> targets, ReplicableCommand command,
Function<ResponseCollector<T>, CompletionStage<T>> invoker) {
invoker.apply(null);
}
public static class CommandsRequest<T> implements BiConsumer<Map<Address, Response>, Throwable> {
private final ResponseCollector<T> collector;
CompletableFuture<T> resultFuture;
int missingResponses;
public CommandsRequest(Collection<Address> targets, ResponseCollector<T> collector) {
this.collector = collector;
resultFuture = new CompletableFuture<>();
missingResponses = targets.size();
}
@Override
public void accept(Map<Address, Response> responseMap, Throwable throwable) {
T result;
boolean finish;
synchronized (this) {
missingResponses--;
if (resultFuture.isDone()) {
return;
}
try {
if (responseMap == null) {
                  // A request to the local node doesn't get any response in non-total-order caches
return;
}
Map.Entry<Address, Response> singleResponse = responseMap.entrySet().iterator().next();
result = collector.addResponse(singleResponse.getKey(), singleResponse.getValue());
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
throw t;
}
finish = missingResponses == 0;
}
if (result != null) {
resultFuture.complete(result);
} else if (finish) {
try {
resultFuture.complete(collector.finish());
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
throw t;
}
}
}
}
}
| 8,446
| 36.211454
| 118
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/EventLogSerializerTest.java
|
package org.infinispan.util;
import static org.testng.AssertJUnit.assertEquals;
import java.io.StringWriter;
import java.time.Instant;
import java.util.Optional;
import java.util.UUID;
import org.infinispan.commons.configuration.io.ConfigurationWriter;
import org.infinispan.commons.dataconversion.MediaType;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.logging.events.EventLog;
import org.infinispan.util.logging.events.EventLogCategory;
import org.infinispan.util.logging.events.EventLogLevel;
import org.infinispan.util.logging.events.EventLogSerializer;
import org.testng.annotations.Test;
@Test(testName = "util.EventLogSerializerTest", groups = "unit")
public class EventLogSerializerTest extends AbstractInfinispanTest {
private static final String JSON_TEMPLATE = "{\"log\":{"
+ "\"category\":\"CLUSTER\",\"content\":{\"level\":\"INFO\","
+ "\"message\":\"%s\",\"detail\":\"%s\"},\"meta\":{"
+ "\"instant\":\"%s\",\"context\":\"%s\",\"scope\":null,"
+ "\"who\":null}}}";
private static final String XML_TEMPLATE = "<?xml version=\"1.0\"?>"
+ "<log category=\"CLUSTER\">"
+ "<content level=\"INFO\" message=\"%s\" detail=\"%s\"/>"
+ "<meta instant=\"%s\" context=\"%s\" scope=\"\" who=\"\"/>"
+ "</log>";
private static final String YAML_TEMPLATE = "log: \n category: \"CLUSTER\"\n"
+ " content: \n level: \"INFO\"\n message: \"%s\"\n detail: \"%s\"\n"
+ " meta: \n instant: \"%s\"\n context: \"%s\"\n scope: ~\n"
+ " who: ~\n";
private final EventLogSerializer serializer = new EventLogSerializer();
public void testJsonSerialization() {
EventLog log = new TestEventLog();
String expected = String.format(JSON_TEMPLATE, log.getMessage(), log.getDetail().get(), log.getWhen(), log.getContext().get());
String actual = serialize(log, MediaType.APPLICATION_JSON);
assertEquals(expected, actual);
}
public void testXmlSerialization() {
EventLog log = new TestEventLog();
String expected = String.format(XML_TEMPLATE, log.getMessage(), log.getDetail().get(), log.getWhen(), log.getContext().get());
String actual = serialize(log, MediaType.APPLICATION_XML);
assertEquals(expected, actual);
}
public void testYamlSerialization() {
EventLog log = new TestEventLog();
String expected = String.format(YAML_TEMPLATE, log.getMessage(), log.getDetail().get(), log.getWhen(), log.getContext().get());
String actual = serialize(log, MediaType.APPLICATION_YAML);
assertEquals(expected, actual);
}
private String serialize(EventLog log, MediaType type) {
StringWriter sw = new StringWriter();
try (ConfigurationWriter cw = ConfigurationWriter.to(sw).withType(type).build()) {
cw.writeStartDocument();
serializer.serialize(cw, log);
cw.writeEndDocument();
}
return sw.toString();
}
private static class TestEventLog implements EventLog {
private final Instant now = Instant.now();
private final String message = UUID.randomUUID().toString();
private final String detail = UUID.randomUUID().toString();
private final String context = UUID.randomUUID().toString();
@Override
public Instant getWhen() {
return now;
}
@Override
public EventLogLevel getLevel() {
return EventLogLevel.INFO;
}
@Override
public String getMessage() {
return message;
}
@Override
public EventLogCategory getCategory() {
return EventLogCategory.CLUSTER;
}
@Override
public Optional<String> getDetail() {
return Optional.of(detail);
}
@Override
public Optional<String> getWho() {
return Optional.empty();
}
@Override
public Optional<String> getContext() {
return Optional.of(context);
}
@Override
public Optional<String> getScope() {
return Optional.empty();
}
@Override
public int compareTo(EventLog eventLog) {
return eventLog.getWhen().compareTo(getWhen());
}
}
}
| 4,238 | 33.745902 | 133 | java |
null | infinispan-main/core/src/test/java/org/infinispan/util/ActionSequencerUnitTest.java |
package org.infinispan.util;
import static java.util.Arrays.asList;
import static java.util.Collections.emptyList;
import static java.util.Collections.singleton;
import static org.infinispan.commons.test.Exceptions.expectException;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.fail;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;
import java.util.function.Supplier;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestException;
import org.infinispan.util.concurrent.ActionSequencer;
import org.infinispan.util.concurrent.CompletionStages;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
* Unit Tests for {@link ActionSequencer}
*
* @author Pedro Ruivo
* @since 10.0
*/
@Test(groups = "unit", testName = "util.ActionSequencerUnitTest")
public class ActionSequencerUnitTest extends AbstractInfinispanTest {
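   // Background sketch (added for illustration, not part of the original tests): ActionSequencer serializes
   // actions that share at least one key. Using the same constructor and methods exercised by the tests
   // below, a minimal usage looks like:
   //
   //   ActionSequencer sequencer = new ActionSequencer(executor, false, timeService);
   //   CompletionStage<Integer> first = sequencer.orderOnKey("k", () -> CompletableFuture.completedFuture(1));
   //   CompletionStage<Integer> second = sequencer.orderOnKey("k", () -> CompletableFuture.completedFuture(2));
   //   // the second action is only invoked after the stage returned by the first one completes,
   //   // because both actions are ordered on the same key "k"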
private static void assertEmpty(ActionSequencer sequencer) {
assertMapSize(sequencer, 0);
assertPendingActions(sequencer, 0);
}
private static int nextInt() {
return ThreadLocalRandom.current().nextInt();
}
private static String nextStringInt() {
return Integer.toString(nextInt());
}
private static int getResult(CompletionStage<Integer> cf) {
return CompletionStages.join(cf);
}
private static void assertMapSize(ActionSequencer sequencer, int size) {
assertEquals("Wrong ActionSequencer.getMapSize()", size, sequencer.getMapSize());
}
private static void assertPendingActions(ActionSequencer sequencer, int size) {
assertEquals("Wrong ActionSequencer.getPendingActions()", size, sequencer.getPendingActions());
}
private static void assertActionResult(CompletionStage<Integer> cf, int result) {
assertEquals("Wrong result", result, getResult(cf));
}
private static void assertActionResult(CompletionStage<Integer> cf, String exceptionMessage) {
expectException(CompletionException.class, TestException.class, exceptionMessage,
() -> CompletionStages.join(cf));
}
private static void assertActionState(NonBlockingAction action, CompletionStage<Integer> cf, boolean started,
boolean completed) {
assertEquals("Is action started?", started, action.isStarted());
assertEquals("Is action completed?", completed, cf.toCompletableFuture().isDone());
}
private static void assertActionState(List<NonBlockingAction> actionList, List<CompletionStage<Integer>> cfList,
Predicate<Integer> started, Predicate<Integer> completed) {
for (int i = 0; i < actionList.size(); ++i) {
assertActionState(actionList.get(i), cfList.get(i), started.test(i), completed.test(i));
}
}
private static void assertAllCompleted(int[] results, List<CompletionStage<Integer>> cfList,
Predicate<Integer> fail) {
for (int i = 0; i < results.length; ++i) {
if (fail.test(i)) {
assertActionResult(cfList.get(i), Integer.toString(results[i]));
} else {
assertActionResult(cfList.get(i), results[i]);
}
}
}
@DataProvider(name = "default-with-keys")
public static Object[][] dataProviderWithKeys() {
return new Object[][]{
{KeysSupplier.NO_KEY},
{KeysSupplier.SINGLE_KEY},
{KeysSupplier.MULTIPLE_KEY}
};
}
@Test(dataProvider = "default-with-keys")
public void testExecution(KeysSupplier keysSupplier) {
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
int retVal = nextInt();
Collection<Object> keys = keysSupplier.get();
NonBlockingAction action = new NonBlockingAction(retVal);
CompletionStage<Integer> cf = keysSupplier.useSingleKeyMethod() ?
sequencer.orderOnKey(keys.iterator().next(), action) :
sequencer.orderOnKeys(keys, action);
assertPendingActions(sequencer, keys.isEmpty() ? 0 : 1);
assertMapSize(sequencer, keys.size());
action.awaitUntilStarted();
assertActionState(action, cf, true, false);
action.continueExecution();
assertActionResult(cf, retVal);
assertEmpty(sequencer);
}
public void testNullParameters() {
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
NonBlockingAction action = new NonBlockingAction(0);
expectException(NullPointerException.class, () -> sequencer.orderOnKeys(asList("k1", "k2"), null));
expectException(NullPointerException.class, () -> sequencer.orderOnKeys(null, action));
expectException(NullPointerException.class, () -> sequencer.orderOnKey("k0", null));
expectException(NullPointerException.class, () -> sequencer.orderOnKey(null, action));
assertEmpty(sequencer);
}
@Test(dataProvider = "default-with-keys")
public void testExceptionExecution(KeysSupplier keysSupplier) {
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
Collection<Object> keys = keysSupplier.get();
String msg = nextStringInt();
NonBlockingAction action = new NonBlockingAction(new TestException(msg));
CompletionStage<Integer> cf = keysSupplier.useSingleKeyMethod() ?
sequencer.orderOnKey(keys.iterator().next(), action) :
sequencer.orderOnKeys(keys, action);
assertPendingActions(sequencer, keys.isEmpty() ? 0 : 1);
assertMapSize(sequencer, keys.size());
action.awaitUntilStarted();
assertActionState(action, cf, true, false);
action.continueExecution();
assertActionResult(cf, msg);
assertEmpty(sequencer);
}
public void testSingleKeyOrder() {
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
Collection<Object> keys = singleton("k");
int[] results = new int[3];
List<NonBlockingAction> actionList = new ArrayList<>(results.length);
List<CompletionStage<Integer>> cfList = new ArrayList<>(results.length);
for (int i = 0; i < results.length; ++i) {
createAndOrderAction(sequencer, results, actionList, cfList, keys, i, i == 1);
}
assertPendingActions(sequencer, results.length);
assertMapSize(sequencer, keys.size());
actionList.get(0).awaitUntilStarted(); //only the first is allowed to start!
assertActionState(actionList, cfList, i -> i == 0, i -> false);
//first is completed, the second should start
actionList.get(0).continueExecution();
actionList.get(1).awaitUntilStarted();
assertActionResult(cfList.get(0), results[0]);
assertActionState(actionList, cfList, i -> i <= 1, i -> i == 0);
      //allowing the last task to continue won't finish the second task
actionList.get(2).continueExecution();
assertActionState(actionList, cfList, i -> i <= 1, i -> i == 0);
//everything should be completed!
actionList.get(1).continueExecution();
actionList.get(2).awaitUntilStarted();
assertAllCompleted(results, cfList, i -> i == 1);
assertEmpty(sequencer);
}
public void testDistinctKeysWithSameKey() {
doDistinctKeysTest(asList("k1", "k1", "k1"), 1);
}
public void testDistinctKeys() {
doDistinctKeysTest(asList("k1", "k2", "k2"), 2);
}
public void testMultiKeyOrder() {
//test:
// * T1
// * (T2 and T3) depends on T1 but they can run in parallel
// * T4 depends on T2 and T3
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
int[] results = new int[4];
List<NonBlockingAction> actionList = new ArrayList<>(results.length);
List<CompletionStage<Integer>> cfList = new ArrayList<>(results.length);
//T1
createAndOrderAction(sequencer, results, actionList, cfList, asList("k1", "k2", "k3"), 0, false);
//T2 (fail action) and T3
createAndOrderAction(sequencer, results, actionList, cfList, singleton("k1"), 1, true);
createAndOrderAction(sequencer, results, actionList, cfList, asList("k3", "k4"), 2, false);
//T4
createAndOrderAction(sequencer, results, actionList, cfList, asList("k1", "k4"), 3, false);
assertPendingActions(sequencer, results.length);
assertMapSize(sequencer, 4);
//initial state. only T1 is started
actionList.get(0).awaitUntilStarted();
assertActionState(actionList, cfList, i -> i == 0, i -> false);
//T1 is completed, T2 and T3 must be started
actionList.get(0).continueExecution();
actionList.get(1).awaitUntilStarted();
actionList.get(2).awaitUntilStarted();
assertActionResult(cfList.get(0), results[0]);
assertActionState(actionList, cfList, i -> i <= 2, i -> i == 0);
//T2 is finished but T3 isn't. T4 should be blocked!
actionList.get(1).continueExecution();
assertActionResult(cfList.get(1), Integer.toString(results[1]));
assertActionState(actionList, cfList, i -> i <= 2, i -> i <= 1);
//T3 is finished. T4 is started
actionList.get(2).continueExecution();
actionList.get(3).awaitUntilStarted();
assertActionResult(cfList.get(2), results[2]);
assertActionState(actionList, cfList, i -> i <= 3, i -> i <= 2);
//everything is finished!
actionList.get(3).continueExecution();
assertActionResult(cfList.get(3), results[3]);
assertEmpty(sequencer);
}
private void createAndOrderAction(ActionSequencer sequencer,
int[] results, List<NonBlockingAction> actionList, List<CompletionStage<Integer>> cfList,
Collection<Object> keys,
int index, boolean fail) {
results[index] = nextInt();
NonBlockingAction action = fail ? new NonBlockingAction(new TestException(Integer.toString(results[index])))
: new NonBlockingAction(results[index]);
actionList.add(action);
cfList.add(sequencer.orderOnKeys(keys, action));
}
private void doDistinctKeysTest(Collection<Object> keys, int distinctKeys) {
ActionSequencer sequencer = new ActionSequencer(testExecutor(), false, TIME_SERVICE);
sequencer.setStatisticEnabled(true);
int retVal = ThreadLocalRandom.current().nextInt();
NonBlockingAction action = new NonBlockingAction(retVal);
CompletionStage<Integer> cf = sequencer.orderOnKeys(keys, action);
assertPendingActions(sequencer, 1);
assertMapSize(sequencer, distinctKeys);
action.continueExecution();
assertActionResult(cf, retVal);
assertEmpty(sequencer);
}
private enum KeysSupplier implements Supplier<Collection<Object>> {
NO_KEY(emptyList()),
SINGLE_KEY(singleton("k1")),
SINGLE_KEY_WITH_SINGLE_METHOD(singleton("k1"), true),
MULTIPLE_KEY(asList("k2", "k3", "k4"));
final Collection<Object> keys;
final boolean useSingleKeyMethod;
KeysSupplier(Collection<Object> keys) {
this(keys, false);
}
KeysSupplier(Collection<Object> keys, boolean useSingleKeyMethod) {
this.keys = keys;
this.useSingleKeyMethod = useSingleKeyMethod;
}
@Override
public Collection<Object> get() {
return keys;
}
boolean useSingleKeyMethod() {
return useSingleKeyMethod;
}
}
private static class NonBlockingAction implements Callable<CompletableFuture<Integer>> {
private final Integer retVal;
private final Exception throwable;
private final CompletableFuture<Integer> cf;
private final CountDownLatch beforeLatch = new CountDownLatch(1);
private NonBlockingAction(int retVal) {
this.retVal = retVal;
this.throwable = null;
this.cf = new CompletableFuture<>();
}
private NonBlockingAction(Exception throwable) {
this.retVal = null;
this.throwable = throwable;
this.cf = new CompletableFuture<>();
}
@Override
public CompletableFuture<Integer> call() throws Exception {
beforeLatch.countDown();
return cf;
}
void continueExecution() {
if (throwable != null) {
cf.completeExceptionally(throwable);
} else {
cf.complete(retVal);
}
}
boolean isStarted() {
return beforeLatch.getCount() == 0;
}
void awaitUntilStarted() {
try {
if (beforeLatch.await(10, TimeUnit.SECONDS)) {
return;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
fail("Action never started! action=" + this);
}
}
}
| 13,452 | 35.756831 | 115 | java |
null | infinispan-main/core/src/test/java/org/infinispan/util/CoreTestBlockHoundIntegration.java |
package org.infinispan.util;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.commons.internal.CommonsBlockHoundIntegration;
import org.infinispan.commons.test.PolarionJUnitXMLWriter;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.commons.test.TestSuiteProgress;
import org.infinispan.conflict.impl.ConflictManagerTest;
import org.infinispan.distribution.BlockingInterceptor;
import org.infinispan.eviction.impl.EvictionWithConcurrentOperationsTest;
import org.infinispan.functional.FunctionalTestUtils;
import org.infinispan.manager.DefaultCacheManagerHelper;
import org.infinispan.notifications.cachelistener.CacheListenerVisibilityTest;
import org.infinispan.persistence.support.WaitNonBlockingStore;
import org.infinispan.test.ReplListener;
import org.infinispan.test.TestBlocking;
import org.infinispan.test.TestingUtil;
import org.infinispan.test.concurrent.InboundRpcSequencerAction;
import org.infinispan.test.concurrent.StateSequencer;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.util.concurrent.ReclosableLatch;
import org.infinispan.util.logging.LogFactory;
import org.kohsuke.MetaInfServices;
import io.reactivex.rxjava3.exceptions.UndeliverableException;
import io.reactivex.rxjava3.plugins.RxJavaPlugins;
import reactor.blockhound.BlockHound;
import reactor.blockhound.integration.BlockHoundIntegration;
@SuppressWarnings("unused")
@MetaInfServices
public class CoreTestBlockHoundIntegration implements BlockHoundIntegration {
@Override
public void applyTo(BlockHound.Builder builder) {
try {
allowTestsToBlock(builder);
} catch (ClassNotFoundException e) {
throw new AssertionError(e);
}
DefaultCacheManagerHelper.enableManagerGetCacheBlockingCheck();
builder.allowBlockingCallsInside(CoreTestBlockHoundIntegration.class.getName(), "writeJUnitReport");
builder.blockingMethodCallback(bm -> {
String testName = TestResourceTracker.getCurrentTestName();
AssertionError assertionError = new AssertionError(String.format("Blocking call! %s on thread %s", bm, Thread.currentThread()));
TestSuiteProgress.fakeTestFailure(testName + ".BlockingChecker", assertionError);
writeJUnitReport(testName, assertionError, "Blocking");
throw assertionError;
});
Thread.setDefaultUncaughtExceptionHandler((thread, t) -> {
LogFactory.getLogger("Infinispan-TEST").fatal("Throwable was not caught in thread " + thread +
" - exception is: " + t);
// RxJava propagates via this and we don't want to worry about it
if (!(t instanceof UndeliverableException)) {
writeJUnitReport(TestResourceTracker.getCurrentTestName(), t, "Uncaught");
}
});
RxJavaPlugins.setErrorHandler(t -> {
// RxJavaPlugins wraps some but not all exceptions in an UndeliverableException
Throwable throwable = t instanceof UndeliverableException ? t.getCause() : t;
// Ignore lifecycle exceptions as this can happen when shutting down executors etc.
if (throwable instanceof IllegalLifecycleStateException) {
return;
}
writeJUnitReport(TestResourceTracker.getCurrentTestName(), throwable, "Undelivered");
});
}
private static void allowTestsToBlock(BlockHound.Builder builder) throws ClassNotFoundException {
builder.allowBlockingCallsInside(EvictionWithConcurrentOperationsTest.class.getName() + "$Latch", "blockIfNeeded");
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, CheckPoint.class);
builder.allowBlockingCallsInside(BlockingInterceptor.class.getName(), "blockIfNeeded");
builder.allowBlockingCallsInside(TestingUtil.class.getName(), "sleepRandom");
builder.allowBlockingCallsInside(TestingUtil.class.getName(), "sleepThread");
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, ReclosableLatch.class);
builder.allowBlockingCallsInside(BlockingLocalTopologyManager.class.getName() + "$Event", "awaitUnblock");
builder.allowBlockingCallsInside(BlockingLocalTopologyManager.class.getName() + "$Event", "unblock");
builder.allowBlockingCallsInside(ControlledRpcManager.class.getName(), "performRequest");
builder.allowBlockingCallsInside(ControlledRpcManager.class.getName(), "expectCommandAsync");
builder.allowBlockingCallsInside(ControlledTransport.class.getName(), "performRequest");
builder.allowBlockingCallsInside(ControlledTransport.class.getName(), "expectCommandAsync");
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, StateSequencer.class);
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, NotifierLatch.class);
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, TestBlocking.class);
builder.allowBlockingCallsInside(FunctionalTestUtils.class.getName(), "await");
builder.allowBlockingCallsInside(TestingUtil.class.getName(), "sleepThread");
CommonsBlockHoundIntegration.allowMethodsToBlock(builder, Class.forName(ReplListener.class.getName() + "$ReplListenerInterceptor"), false);
// This uses a lambda callback to invoke some methods - which aren't public
CommonsBlockHoundIntegration.allowMethodsToBlock(builder, Class.forName(InboundRpcSequencerAction.class.getName() + "$SequencerPerCacheInboundInvocationHandler"), false);
builder.allowBlockingCallsInside(CacheListenerVisibilityTest.EntryModifiedWithAssertListener.class.getName(), "entryCreated");
builder.allowBlockingCallsInside(CacheListenerVisibilityTest.EntryCreatedWithAssertListener.class.getName(), "entryCreated");
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, BlockingLocalTopologyManager.class);
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, AbstractControlledLocalTopologyManager.class);
CommonsBlockHoundIntegration.allowPublicMethodsToBlock(builder, ConflictManagerTest.DelayStateResponseCommandHandler.class);
      // The join is used to allow a sync API for test simplicity, whereas the actual store invocation
      // must be non-blocking
builder.allowBlockingCallsInside(WaitNonBlockingStore.class.getName(), "join");
}
private static void writeJUnitReport(String testName, Throwable throwable, String type) {
try {
File reportsDir = new File("target/surefire-reports");
if (!reportsDir.exists() && !reportsDir.mkdirs()) {
throw new IOException("Cannot create report directory " + reportsDir.getAbsolutePath());
}
PolarionJUnitXMLWriter writer = new PolarionJUnitXMLWriter(
new File(reportsDir, "TEST-" + testName + "-" + type + ".xml"));
String property = System.getProperty("infinispan.modulesuffix");
String moduleName = property != null ? property.substring(1) : "";
writer.start(moduleName, 1, 0, 1, 0, false);
StringWriter exceptionWriter = new StringWriter();
throwable.printStackTrace(new PrintWriter(exceptionWriter));
writer.writeTestCase(type, testName, 0, PolarionJUnitXMLWriter.Status.FAILURE,
exceptionWriter.toString(), throwable.getClass().getName(), throwable.getMessage());
writer.close();
} catch (Exception e) {
throw new RuntimeException("Error reporting " + type, e);
}
}
}
| 7,589 | 51.344828 | 176 | java |
null | infinispan-main/core/src/test/java/org/infinispan/util/AbstractControlledLocalTopologyManager.java |
package org.infinispan.util;
import java.util.concurrent.CompletionStage;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Start;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.partitionhandling.AvailabilityMode;
import org.infinispan.partitionhandling.impl.PartitionHandlingManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.test.TestingUtil;
import org.infinispan.topology.CacheJoinInfo;
import org.infinispan.topology.CacheTopology;
import org.infinispan.topology.CacheTopologyHandler;
import org.infinispan.topology.LocalTopologyManager;
import org.infinispan.topology.LocalTopologyManagerImpl;
import org.infinispan.topology.ManagerStatusResponse;
import org.infinispan.topology.PersistentUUID;
import org.infinispan.topology.RebalancingStatus;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.util.logging.events.EventLogManager;
/**
* Class to be extended to allow some control over the local topology manager when testing Infinispan.
* <p/>
* Note: create before/after methods lazily when needed.
*
* @author Pedro Ruivo
* @since 6.0
*/
@Scope(Scopes.GLOBAL)
public abstract class AbstractControlledLocalTopologyManager implements LocalTopologyManager {
private final LocalTopologyManager delegate;
@Inject
void inject(BasicComponentRegistry bcr) {
bcr.wireDependencies(delegate, false);
bcr.getComponent(EventLogManager.class).running();
}
@Start
void start() {
TestingUtil.startComponent(delegate);
}
@Stop
void stop() {
TestingUtil.stopComponent(delegate);
}
protected AbstractControlledLocalTopologyManager(LocalTopologyManager delegate) {
this.delegate = delegate;
}
@Override
public final CompletionStage<CacheTopology> join(String cacheName, CacheJoinInfo joinInfo, CacheTopologyHandler stm,
PartitionHandlingManager phm) throws Exception {
return delegate.join(cacheName, joinInfo, stm, phm);
}
@Override
public final void leave(String cacheName, long timeout) {
delegate.leave(cacheName, timeout);
}
@Override
public final void confirmRebalancePhase(String cacheName, int topologyId, int rebalanceId, Throwable throwable) {
TestingUtil.sequence(beforeConfirmRebalancePhase(cacheName, topologyId, throwable), () -> {
delegate.confirmRebalancePhase(cacheName, topologyId, rebalanceId, throwable);
return CompletableFutures.completedNull();
});
}
@Override
public final CompletionStage<ManagerStatusResponse> handleStatusRequest(int viewId) {
return delegate.handleStatusRequest(viewId);
}
@Override
public final CompletionStage<Void> handleTopologyUpdate(String cacheName, CacheTopology cacheTopology,
AvailabilityMode availabilityMode, int viewId,
Address sender) {
return TestingUtil.sequence(beforeHandleTopologyUpdate(cacheName, cacheTopology, viewId),
() -> delegate.handleTopologyUpdate(cacheName, cacheTopology, availabilityMode, viewId, sender));
}
@Override
public final CompletionStage<Void> handleRebalance(String cacheName, CacheTopology cacheTopology, int viewId,
Address sender) {
return TestingUtil.sequence(beforeHandleRebalance(cacheName, cacheTopology, viewId),
() -> delegate.handleRebalance(cacheName, cacheTopology, viewId, sender));
}
@Override
public final CacheTopology getCacheTopology(String cacheName) {
return delegate.getCacheTopology(cacheName);
}
@Override
public CompletionStage<Void> handleStableTopologyUpdate(String cacheName, CacheTopology cacheTopology,
final Address sender, int viewId) {
return delegate.handleStableTopologyUpdate(cacheName, cacheTopology, sender, viewId);
}
@Override
public CacheTopology getStableCacheTopology(String cacheName) {
return delegate.getStableCacheTopology(cacheName);
}
@Override
public boolean isRebalancingEnabled() throws Exception {
return delegate.isRebalancingEnabled();
}
@Override
public void setRebalancingEnabled(boolean enabled) throws Exception {
delegate.setRebalancingEnabled(enabled);
}
@Override
public boolean isCacheRebalancingEnabled(String cacheName) throws Exception {
return delegate.isCacheRebalancingEnabled(cacheName);
}
@Override
public void setCacheRebalancingEnabled(String cacheName, boolean enabled) throws Exception {
delegate.setCacheRebalancingEnabled(cacheName, enabled);
}
@Override
public RebalancingStatus getRebalancingStatus(String cacheName) throws Exception {
return delegate.getRebalancingStatus(cacheName);
}
@Override
public AvailabilityMode getCacheAvailability(String cacheName) {
return delegate.getCacheAvailability(cacheName);
}
@Override
public void setCacheAvailability(String cacheName, AvailabilityMode availabilityMode) throws Exception {
delegate.setCacheAvailability(cacheName, availabilityMode);
}
// Arbitrary value, only need to start after JGroupsTransport
@Start(priority = 100)
public final void startDelegate() {
if (delegate instanceof LocalTopologyManagerImpl) {
((LocalTopologyManagerImpl) delegate).start();
}
}
// Need to stop before the JGroupsTransport
@Stop(priority = 9)
public final void stopDelegate() {
if (delegate instanceof LocalTopologyManagerImpl) {
((LocalTopologyManagerImpl) delegate).stop();
}
}
protected CompletionStage<Void> beforeHandleTopologyUpdate(String cacheName, CacheTopology cacheTopology, int viewId) {
return CompletableFutures.completedNull();
}
protected CompletionStage<Void> beforeHandleRebalance(String cacheName, CacheTopology cacheTopology, int viewId) {
return CompletableFutures.completedNull();
}
protected CompletionStage<Void> beforeConfirmRebalancePhase(String cacheName, int topologyId, Throwable throwable) {
//no-op by default
return CompletableFutures.completedNull();
}
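   // Illustrative only (added for this document, not part of the original class): tests typically extend this
   // class and override one of the before* hooks above to delay topology events. A minimal sketch, assuming
   // the test controls an "unblock" future:
   //
   //   class BlockingTopologyManager extends AbstractControlledLocalTopologyManager {
   //      final CompletableFuture<Void> unblock = new CompletableFuture<>();
   //      BlockingTopologyManager(LocalTopologyManager delegate) { super(delegate); }
   //
   //      @Override
   //      protected CompletionStage<Void> beforeHandleTopologyUpdate(String cacheName, CacheTopology cacheTopology, int viewId) {
   //         return unblock; // the real handleTopologyUpdate runs only after the test completes "unblock"
   //      }
   //   }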
@Override
public PersistentUUID getPersistentUUID() {
return delegate.getPersistentUUID();
}
@Override
public void cacheShutdown(String name) {
delegate.cacheShutdown(name);
}
@Override
public CompletionStage<Void> handleCacheShutdown(String cacheName) {
return delegate.handleCacheShutdown(cacheName);
}
@Override
public CompletionStage<Void> stableTopologyCompletion(String cacheName) {
return CompletableFutures.completedNull();
}
}
| 7,046 | 34.590909 | 122 | java |
null | infinispan-main/core/src/test/java/org/infinispan/util/ControlledRpcManager.java |
package org.infinispan.util;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.factories.KnownComponentNames.NON_BLOCKING_EXECUTOR;
import static org.infinispan.factories.KnownComponentNames.TIMEOUT_SCHEDULE_EXECUTOR;
import static org.infinispan.test.TestingUtil.extractComponent;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertSame;
import static org.testng.AssertJUnit.assertTrue;
import static org.testng.AssertJUnit.fail;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.factories.annotations.ComponentName;
import org.infinispan.factories.annotations.Inject;
import org.infinispan.factories.annotations.Stop;
import org.infinispan.factories.scopes.Scope;
import org.infinispan.factories.scopes.Scopes;
import org.infinispan.remoting.responses.CacheNotFoundResponse;
import org.infinispan.remoting.responses.ExceptionResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.test.TestException;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
* @author Mircea.Markus@jboss.com
* @author Dan Berindei
* @since 4.2
*/
@Scope(Scopes.NAMED_CACHE)
public class ControlledRpcManager extends AbstractDelegatingRpcManager {
private static final Log log = LogFactory.getLog(ControlledRpcManager.class);
private static final int TIMEOUT_SECONDS = 10;
@Inject Cache<?, ?> cache;
@Inject @ComponentName(TIMEOUT_SCHEDULE_EXECUTOR)
ScheduledExecutorService timeoutExecutor;
@Inject @ComponentName(NON_BLOCKING_EXECUTOR)
ExecutorService nonBlockingExecutor;
private volatile boolean stopped = false;
private final Set<Class<? extends ReplicableCommand>> excludedCommands =
Collections.synchronizedSet(new HashSet<>());
private final BlockingQueue<CompletableFuture<ControlledRequest<?>>> waiters = new LinkedBlockingDeque<>();
private RuntimeException globalError;
protected ControlledRpcManager(RpcManager realOne, Cache<?, ?> cache) {
super(realOne);
this.cache = cache;
}
public static ControlledRpcManager replaceRpcManager(Cache<?, ?> cache) {
RpcManager rpcManager = extractComponent(cache, RpcManager.class);
if (rpcManager instanceof ControlledRpcManager) {
throw new IllegalStateException("One ControlledRpcManager per cache should be enough");
}
ControlledRpcManager controlledRpcManager = new ControlledRpcManager(rpcManager, cache);
log.tracef("Installing ControlledRpcManager on %s", controlledRpcManager.getAddress());
TestingUtil.replaceComponent(cache, RpcManager.class, controlledRpcManager, true);
return controlledRpcManager;
}
public void revertRpcManager() {
stopBlocking();
log.tracef("Restoring regular RpcManager on %s", getAddress());
RpcManager rpcManager = extractComponent(cache, RpcManager.class);
assertSame(this, rpcManager);
TestingUtil.replaceComponent(cache, RpcManager.class, realOne, true);
}
@SafeVarargs
public final void excludeCommands(Class<? extends ReplicableCommand>... excluded) {
if (stopped) {
throw new IllegalStateException("Trying to exclude commands but we already stopped intercepting");
}
excludedCommands.clear();
excludedCommands.addAll(Arrays.asList(excluded));
}
public void stopBlocking() {
log.debugf("Stopping intercepting RPC calls on %s", realOne.getAddress());
stopped = true;
throwGlobalError();
if (!waiters.isEmpty()) {
fail("Stopped intercepting RPCs on " + realOne.getAddress() + ", but there are " + waiters.size() + " waiters in the queue");
}
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand> BlockedRequest<T> expectCommand(Class<T> expectedCommandClass) {
return uncheckedGet(expectCommandAsync(expectedCommandClass));
}
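   // Illustrative only (added for this document, not part of the original class): a typical test interaction,
   // assuming a hypothetical FooCommand replicated by cache.put() and the test framework's fork() helper:
   //
   //   ControlledRpcManager crm = ControlledRpcManager.replaceRpcManager(cache);
   //   Future<?> op = fork(() -> cache.put("k", "v"));
   //   crm.expectCommand(FooCommand.class).send().receiveAll();
   //   op.get(10, TimeUnit.SECONDS);
   //   crm.revertRpcManager();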
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand>
BlockedRequest<T> expectCommand(Class<T> expectedCommandClass, Consumer<T> checker) {
BlockedRequest<T> blockedRequest = uncheckedGet(expectCommandAsync(expectedCommandClass));
T command = expectedCommandClass.cast(blockedRequest.request.getCommand());
checker.accept(command);
return blockedRequest;
}
public <T extends ReplicableCommand>
BlockedRequests<T> expectCommands(Class<T> expectedCommandClass, Address... targets) {
return expectCommands(expectedCommandClass, Arrays.asList(targets));
}
public <T extends ReplicableCommand>
BlockedRequests<T> expectCommands(Class<T> expectedCommandClass, Collection<Address> targets) {
Map<Address, BlockedRequest<T>> requests = new HashMap<>();
for (int i = 0; i < targets.size(); i++) {
BlockedRequest<T> request = expectCommand(expectedCommandClass);
requests.put(request.getTarget(), request);
}
assertEquals(new HashSet<>(targets), requests.keySet());
return new BlockedRequests<>(requests);
}
/**
* Expect a command to be invoked remotely and send replies using the {@link BlockedRequest} methods.
*/
public <T extends ReplicableCommand>
CompletableFuture<BlockedRequest<T>> expectCommandAsync(Class<T> expectedCommandClass) {
throwGlobalError();
log.tracef("Waiting for command %s", expectedCommandClass);
CompletableFuture<ControlledRequest<?>> future = new CompletableFuture<>();
waiters.add(future);
return future.thenApply(request -> {
log.tracef("Blocked command %s", request.command);
assertTrue("Expecting a " + expectedCommandClass.getName() + ", got " + request.getCommand(),
expectedCommandClass.isInstance(request.getCommand()));
return new BlockedRequest<>(request);
});
}
public void expectNoCommand() {
throwGlobalError();
assertNull("There should be no queued commands", waiters.poll());
}
public void expectNoCommand(long timeout, TimeUnit timeUnit) throws InterruptedException {
throwGlobalError();
assertNull("There should be no queued commands", waiters.poll(timeout, timeUnit));
}
@Override
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
RpcOptions rpcOptions) {
if (stopped || isCommandExcluded(command)) {
log.tracef("Not blocking excluded command %s", command);
return invoker.apply(collector);
}
log.debugf("Intercepted command to %s: %s", targets, command);
// Ignore the SingleRpcCommand wrapper
if (command instanceof SingleRpcCommand) {
command = ((SingleRpcCommand) command).getCommand();
}
Address excluded = realOne.getAddress();
ControlledRequest<T> controlledRequest =
new ControlledRequest<>(command, targets, collector, invoker, nonBlockingExecutor, excluded);
try {
CompletableFuture<ControlledRequest<?>> waiter = waiters.poll(TIMEOUT_SECONDS, SECONDS);
if (waiter == null) {
TimeoutException t = new TimeoutException("Found no waiters for command " + command);
addGlobalError(t);
throw t;
}
waiter.complete(controlledRequest);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new TestException(e);
} catch (Exception e) {
throw new TestException(e);
}
if (collector != null) {
ScheduledFuture<?> cancelTask = timeoutExecutor.schedule(() -> {
TimeoutException e = new TimeoutException("Timed out waiting for test to unblock command " +
controlledRequest.getCommand());
addGlobalError(e);
controlledRequest.fail(e);
}, TIMEOUT_SECONDS * 2, SECONDS);
controlledRequest.resultFuture.whenComplete((ignored, throwable) -> cancelTask.cancel(false));
}
// resultFuture is completed from a test thread, and we don't want to run the interceptor callbacks there
return controlledRequest.resultFuture.whenCompleteAsync((r, t) -> {}, nonBlockingExecutor);
}
private void addGlobalError(RuntimeException t) {
if (globalError == null) {
globalError = t;
} else {
globalError.addSuppressed(t);
}
}
@Override
protected <T> void performSend(Collection<Address> targets, ReplicableCommand command,
Function<ResponseCollector<T>, CompletionStage<T>> invoker) {
performRequest(targets, command, null, invoker, null);
}
@Stop
void stop() {
stopBlocking();
TestingUtil.stopComponent(realOne);
}
private boolean isCommandExcluded(ReplicableCommand command) {
for (Class<? extends ReplicableCommand> excludedCommand : excludedCommands) {
if (excludedCommand.isInstance(command))
return true;
}
return false;
}
private void throwGlobalError() {
if (globalError != null) {
throw globalError;
}
}
static <T> T uncheckedGet(CompletionStage<T> stage) {
try {
return stage.toCompletableFuture().get(TIMEOUT_SECONDS, SECONDS);
} catch (Exception e) {
throw new TestException(e);
}
}
/**
* A controlled request.
*
* The real RpcManager will not send the command to the targets until the test calls {@link #send()}.
* Responses received from the targets are stored in {@link #responseFutures}, and after the last response
* is received they are also stored in the {@link #finishFuture} map.
*
* The responses are only passed to the real response collector when the test calls
* {@link #collectResponse(Address, Response)}, and {@link #collectFinish()} finishes the collector.
*/
static class ControlledRequest<T> {
private final ReplicableCommand command;
private final Collection<Address> targets;
private final Function<ResponseCollector<T>, CompletionStage<T>> invoker;
private final ExecutorService executor;
private final CompletableFuture<T> resultFuture = new CompletableFuture<>();
private final LinkedHashMap<Address, CompletableFuture<Response>> responseFutures = new LinkedHashMap<>();
private final CompletableFuture<Map<Address, Response>> finishFuture = new CompletableFuture<>();
private final CompletableFuture<Void> sendFuture = new CompletableFuture<>();
private final Lock collectLock = new ReentrantLock();
@GuardedBy("collectLock")
private final ResponseCollector<T> collector;
@GuardedBy("collectLock")
private final Set<Address> collectedResponses = new HashSet<>();
@GuardedBy("collectLock")
private boolean collectedFinish;
ControlledRequest(ReplicableCommand command, Collection<Address> targets, ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>> invoker,
ExecutorService executor, Address excluded) {
this.command = command;
this.targets = targets;
this.collector = collector;
this.invoker = invoker;
this.executor = executor;
for (Address target : targets) {
if (!target.equals(excluded)) {
responseFutures.put(target, new CompletableFuture<>());
}
}
}
void send() {
invoker.apply(new ResponseCollector<T>() {
@Override
public T addResponse(Address sender, Response response) {
queueResponse(sender, response);
return null;
}
@Override
public T finish() {
queueFinish();
return null;
}
});
sendFuture.complete(null);
}
void skipSend() {
sendFuture.complete(null);
for (CompletableFuture<Response> responseFuture : responseFutures.values()) {
responseFuture.complete(null);
}
}
void awaitSend() {
uncheckedGet(sendFuture);
}
private void queueResponse(Address sender, Response response) {
log.tracef("Queueing response from %s for command %s", sender, command);
CompletableFuture<Response> responseFuture = responseFutures.get(sender);
boolean completedNow = responseFuture.complete(response);
if (!completedNow) {
fail(new IllegalStateException("Duplicate response received from " + sender + ": " + response));
}
}
private void queueFinish() {
log.tracef("Queueing finish for command %s", command);
Map<Address, Response> responseMap = new LinkedHashMap<>();
for (Map.Entry<Address, CompletableFuture<Response>> entry : responseFutures.entrySet()) {
Address sender = entry.getKey();
CompletableFuture<Response> responseFuture = entry.getValue();
// Don't wait for all responses in case this is a staggered request
if (responseFuture.isDone()) {
responseMap.put(sender, uncheckedGet(responseFuture));
} else {
responseFuture.complete(null);
}
}
boolean completedNow = finishFuture.complete(responseMap);
if (!completedNow) {
fail(new IllegalStateException("Finish queued more than once"));
}
}
void collectResponse(Address sender, Response response) {
try {
T result;
collectLock.lock();
try {
throwIfFailed();
assertTrue(collectedResponses.add(sender));
result = collector.addResponse(sender, response);
if (result != null) {
// Don't allow collectFinish on this request
collectedFinish = true;
}
} finally {
collectLock.unlock();
}
if (result != null) {
resultFuture.complete(result);
}
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
}
}
void collectFinish() {
try {
T result;
collectLock.lock();
try {
throwIfFailed();
assertFalse(collectedFinish);
collectedFinish = true;
result = collector.finish();
} finally {
collectLock.unlock();
}
resultFuture.complete(result);
} catch (Throwable t) {
resultFuture.completeExceptionally(t);
}
}
void skipFinish() {
collectLock.lock();
try {
assertFalse(collectedFinish);
} finally {
collectLock.unlock();
}
assertTrue(resultFuture.isDone());
}
void fail(Throwable t) {
log.tracef("Failing execution of %s with %s", command, t);
resultFuture.completeExceptionally(t);
// Unblock the thread waiting for the request to be sent, if it's not already sent
sendFuture.completeExceptionally(t);
}
void throwIfFailed() {
if (resultFuture.isCompletedExceptionally()) {
resultFuture.join();
}
}
boolean isDone() {
return resultFuture.isDone();
}
ReplicableCommand getCommand() {
return command;
}
Collection<Address> getTargets() {
return targets;
}
boolean hasCollector() {
return collector != null;
}
CompletableFuture<Response> responseFuture(Address sender) {
return responseFutures.get(sender);
}
CompletableFuture<Map<Address, Response>> finishFuture() {
return finishFuture;
}
}
/**
* Unblock and wait for the responses of a blocked remote invocation.
* <p>
* For example, {@code request.send().expectResponse(a1, r1).replace(r2).receiveAll()}.
*/
public static class BlockedRequest<C extends ReplicableCommand> {
private final ControlledRequest<?> request;
public BlockedRequest(ControlledRequest<?> request) {
this.request = request;
}
/**
* Unblock the request, sending it to its targets.
* <p>
* It will block again when waiting for responses.
*/
public SentRequest send() {
log.tracef("Sending command %s", request.getCommand());
request.send();
if (request.hasCollector()) {
return new SentRequest(request);
} else {
return null;
}
}
/**
* Avoid sending the request, and finish it with the given responses instead.
*/
public FakeResponses skipSend() {
log.tracef("Not sending request %s", request.getCommand());
request.skipSend();
if (request.hasCollector()) {
return new FakeResponses(request);
} else {
return null;
}
}
public void fail() {
fail(new TestException("Induced failure!"));
}
public void fail(Exception e) {
request.fail(e);
}
public C getCommand() {
return (C) request.getCommand();
}
public Collection<Address> getTargets() {
return request.getTargets();
}
public Address getTarget() {
Collection<Address> targets = request.getTargets();
assertEquals(1, targets.size());
return targets.iterator().next();
}
}
public static class SentRequest {
private final ControlledRequest<?> request;
SentRequest(ControlledRequest<?> request) {
this.request = request;
}
/**
* Complete the request with a {@link TimeoutException}
*/
public void forceTimeout() {
assertFalse(request.isDone());
request.fail(log.requestTimedOut(-1, "Induced timeout failure", "some time"));
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender, Consumer<Response> checker) {
BlockedResponse br = uncheckedGet(expectResponseAsync(sender));
checker.accept(br.response);
return br;
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender) {
return uncheckedGet(expectResponseAsync(sender));
}
/**
* Wait for a response from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectResponse(Address sender, Response expectedResponse) {
return expectResponse(sender, r -> assertEquals(expectedResponse, r));
}
/**
* Wait for a {@code CacheNotFoundResponse} from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectLeaver(Address a) {
return expectResponse(a, CacheNotFoundResponse.INSTANCE);
}
/**
* Wait for an {@code ExceptionResponse} from {@code sender}, but keep the request blocked.
*/
public BlockedResponse expectException(Address a, Class<? extends Exception> expectedException) {
return expectResponse(a, r -> {
Exception exception = ((ExceptionResponse) r).getException();
Exceptions.assertException(expectedException, exception);
});
}
/**
* Wait for all the responses.
*/
public BlockedResponseMap expectAllResponses() {
return uncheckedGet(expectAllResponsesAsync());
}
/**
* Wait for all the responses.
*/
public BlockedResponseMap expectAllResponses(BiConsumer<Address, Response> checker) {
BlockedResponseMap blockedResponseMap = uncheckedGet(expectAllResponsesAsync());
blockedResponseMap.responseMap.forEach(checker);
return blockedResponseMap;
}
/**
* Wait for all the responses and process them.
*/
public void receiveAll() {
expectAllResponses().receive();
}
public void receiveAllAsync() {
expectAllResponsesAsync().thenAccept(BlockedResponseMap::receive);
}
/**
* Complete a request after expecting and receiving responses individually, e.g. with
* {@link #expectResponse(Address)}.
*
* This method blocks until all the responses have been received internally, but doesn't pass them on
* to the original response collector (it only calls {@link ResponseCollector#finish()}).
*/
public void finish() {
uncheckedGet(request.finishFuture());
request.collectFinish();
}
public void noFinish() {
request.skipFinish();
}
public CompletionStage<BlockedResponse> expectResponseAsync(Address sender) {
request.throwIfFailed();
assertFalse(request.isDone());
return request.responseFuture(sender).thenApply(response -> {
log.debugf("Got response for %s from %s: %s", request.getCommand(), sender, response);
return new BlockedResponse(request, this, sender, response);
});
}
public CompletionStage<BlockedResponseMap> expectAllResponsesAsync() {
request.throwIfFailed();
assertFalse(request.isDone());
return request.finishFuture()
.thenApply(responseMap -> new BlockedResponseMap(request, responseMap));
}
}
public static class BlockedResponse {
private final ControlledRequest<?> request;
final SentRequest sentRequest;
final Address sender;
final Response response;
private BlockedResponse(ControlledRequest<?> request, SentRequest sentRequest, Address sender,
Response response) {
this.request = request;
this.sentRequest = sentRequest;
this.sender = sender;
this.response = response;
}
/**
* Process the response from this {@code BlockedResponse}'s target.
* <p>
* Note that processing the last response will NOT complete the request, you still need to call
* {@link SentRequest#receiveAll()}.
*/
public SentRequest receive() {
log.tracef("Unblocking response from %s: %s", sender, response);
request.collectResponse(this.sender, response);
return sentRequest;
}
/**
* Replace the response from this {@code BlockedResponse}'s target with a fake response and process it.
*/
public SentRequest replace(Response newResponse) {
log.tracef("Replacing response from %s: %s (was %s)", sender, newResponse, response);
request.collectResponse(this.sender, newResponse);
return sentRequest;
}
public CompletionStage<SentRequest> receiveAsync() {
return CompletableFuture.supplyAsync(this::receive, request.executor);
}
public CompletionStage<SentRequest> replaceAsync(Response newResponse) {
return CompletableFuture.supplyAsync(() -> replace(newResponse), request.executor);
}
public Address getSender() {
return sender;
}
public Response getResponse() {
return response;
}
}
public static class BlockedResponseMap {
private final ControlledRequest<?> request;
private final Map<Address, Response> responseMap;
private BlockedResponseMap(ControlledRequest<?> request,
Map<Address, Response> responseMap) {
this.request = request;
this.responseMap = responseMap;
}
public void receive() {
assertFalse(request.resultFuture.isDone());
log.tracef("Unblocking responses for %s: %s", request.getCommand(), responseMap);
responseMap.forEach(request::collectResponse);
if (!request.isDone()) {
uncheckedGet(request.finishFuture());
request.collectFinish();
}
}
public void replace(Map<Address, Response> newResponses) {
assertFalse(request.resultFuture.isDone());
log.tracef("Replacing responses for %s: %s (was %s)", request.getCommand(), newResponses, responseMap);
newResponses.forEach(request::collectResponse);
if (!request.isDone()) {
uncheckedGet(request.finishFuture());
request.collectFinish();
}
}
public CompletionStage<Void> receiveAsync() {
return CompletableFuture.runAsync(this::receive, request.executor);
}
public CompletionStage<Void> replaceAsync(Map<Address, Response> newResponses) {
return CompletableFuture.runAsync(() -> replace(newResponses), request.executor);
}
public Map<Address, Response> getResponses() {
return responseMap;
}
}
public static class FakeResponses {
private final ControlledRequest<?> request;
public FakeResponses(ControlledRequest<?> request) {
this.request = request;
}
public void receive(Map<Address, Response> responses) {
log.tracef("Faking responses for %s: %s", request.getCommand(), responses);
responses.forEach((sender, response) -> {
// For staggered requests we allow the test to specify only the primary owner's response
assertTrue(responses.containsKey(sender));
request.collectResponse(sender, response);
});
if (!request.isDone()) {
assertEquals(responses.keySet(), request.responseFutures.keySet());
request.collectFinish();
}
}
public void receive(Address sender, Response response) {
receive(Collections.singletonMap(sender, response));
}
public void receive(Address sender1, Response response1,
Address sender2, Response response2) {
Map<Address, Response> responses = new LinkedHashMap<>();
responses.put(sender1, response1);
responses.put(sender2, response2);
receive(responses);
}
public void receive(Address sender1, Response response1,
Address sender2, Response response2,
Address sender3, Response response3) {
Map<Address, Response> responses = new LinkedHashMap<>();
responses.put(sender1, response1);
responses.put(sender2, response2);
responses.put(sender3, response3);
receive(responses);
}
public CompletionStage<Void> receiveAsync(Map<Address, Response> responses) {
return CompletableFuture.runAsync(() -> receive(responses), request.executor);
}
public CompletionStage<Void> receiveAsync(Address sender, Response response) {
return CompletableFuture.runAsync(() -> receive(sender, response), request.executor);
}
public CompletionStage<Void> receiveAsync(Address sender1, Response response1,
Address sender2, Response response2) {
return CompletableFuture.runAsync(() -> receive(sender1, response1, sender2, response2), request.executor);
}
/**
* Complete the request with a {@link TimeoutException}
*/
public void forceTimeout() {
fail(log.requestTimedOut(-1, "Induced failure", "some time"));
}
/**
* Complete the request with a custom exception.
*/
private void fail(Throwable e) {
assertFalse(request.resultFuture.isDone());
request.fail(e);
}
public Collection<Address> getTargets() {
return request.getTargets();
}
public Address getTarget() {
Collection<Address> targets = request.getTargets();
assertEquals(1, targets.size());
return targets.iterator().next();
}
}
/**
* Multiple requests sent to individual targets in parallel, e.g. with
* {@link RpcManager#invokeCommands(Collection, Function, ResponseCollector, RpcOptions)}.
*/
public static class BlockedRequests<T extends ReplicableCommand> {
private final Map<Address, BlockedRequest<T>> requests;
public BlockedRequests(Map<Address, BlockedRequest<T>> requests) {
this.requests = requests;
}
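      // Illustrative only (added for this document, not part of the original class): after something like
      //   BlockedRequests<ClusteredGetCommand> blocked = crm.expectCommands(ClusteredGetCommand.class, a1, a2);
      // each target can be handled independently, e.g.
      //   blocked.send(a1).receiveAll();
      //   blocked.skipSendAndReceive(a2, CacheNotFoundResponse.INSTANCE);
      // (crm, ClusteredGetCommand, a1 and a2 are hypothetical names used only for this sketch.)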
/**
* Unblock the request, sending it to its targets.
* <p>
* It will block again when waiting for responses.
*/
public SentRequest send(Address target) {
return requests.get(target).send();
}
/**
* Avoid sending the request, and finish it with the given responses instead.
*/
public FakeResponses skipSend(Address target) {
return requests.get(target).skipSend();
}
public void skipSendAndReceive(Address target, Response fakeResponse) {
requests.get(target).skipSend().receive(target, fakeResponse);
}
public void skipSendAndReceiveAsync(Address target, Response fakeResponse) {
requests.get(target).skipSend().receiveAsync(target, fakeResponse);
}
}
}
| 31,188 | 35.224158 | 134 | java |
null | infinispan-main/core/src/test/java/org/infinispan/util/ReplicatedControlledConsistentHashFactory.java |
package org.infinispan.util;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.infinispan.commons.marshall.MarshallUtil;
import org.infinispan.commons.marshall.SerializeWith;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.impl.ReplicatedConsistentHash;
import org.infinispan.remoting.transport.Address;
/**
* ConsistentHashFactory implementation that allows the user to control who the owners are.
*
* @author Dan Berindei
* @since 7.0
*/
@SerializeWith(ReplicatedControlledConsistentHashFactory.Externalizer.class)
public class ReplicatedControlledConsistentHashFactory
implements ConsistentHashFactory<ReplicatedConsistentHash>, Serializable {
private volatile List<Address> membersToUse;
private int[] primaryOwnerIndices;
private ReplicatedControlledConsistentHashFactory(List<Address> membersToUse, int[] primaryOwnerIndices) {
this.membersToUse = membersToUse;
this.primaryOwnerIndices = primaryOwnerIndices;
}
/**
* Create a consistent hash factory with one segment per primary owner index.
*/
public ReplicatedControlledConsistentHashFactory(int primaryOwner1, int... otherPrimaryOwners) {
setOwnerIndexes(primaryOwner1, otherPrimaryOwners);
}
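   // Illustrative only (added for this document, not part of the original class): each constructor argument is
   // an index into the current member list and selects the primary owner of one segment. For example, in a
   // hypothetical test setup,
   //   new ReplicatedControlledConsistentHashFactory(0, 1)
   // yields two segments, with members.get(0) primary for segment 0 and members.get(1) primary for segment 1
   // (subject to the clamping in create() when fewer members are present).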
public void setOwnerIndexes(int primaryOwner1, int... otherPrimaryOwners) {
primaryOwnerIndices = concatOwners(primaryOwner1, otherPrimaryOwners);
}
@Override
public ReplicatedConsistentHash create(int numOwners, int numSegments, List<Address> members, Map<Address, Float> capacityFactors) {
int[] thePrimaryOwners = new int[primaryOwnerIndices.length];
for (int i = 0; i < primaryOwnerIndices.length; i++) {
if (membersToUse != null) {
int membersToUseIndex = Math.min(primaryOwnerIndices[i], membersToUse.size() - 1);
int membersIndex = members.indexOf(membersToUse.get(membersToUseIndex));
thePrimaryOwners[i] = membersIndex > 0 ? membersIndex : members.size() - 1;
} else {
thePrimaryOwners[i] = Math.min(primaryOwnerIndices[i], members.size() - 1);
}
}
return new ReplicatedConsistentHash(members, thePrimaryOwners);
}
@Override
public ReplicatedConsistentHash updateMembers(ReplicatedConsistentHash baseCH, List<Address> newMembers,
Map<Address, Float> capacityFactors) {
return create(baseCH.getNumOwners(), baseCH.getNumSegments(), newMembers, null);
}
@Override
public ReplicatedConsistentHash rebalance(ReplicatedConsistentHash baseCH) {
return create(baseCH.getNumOwners(), baseCH.getNumSegments(), baseCH.getMembers(), null);
}
@Override
public ReplicatedConsistentHash union(ReplicatedConsistentHash ch1, ReplicatedConsistentHash ch2) {
return ch1.union(ch2);
}
private int[] concatOwners(int head, int[] tail) {
int[] firstSegmentOwners;
if (tail == null || tail.length == 0) {
firstSegmentOwners = new int[]{head};
} else {
firstSegmentOwners = new int[tail.length + 1];
firstSegmentOwners[0] = head;
for (int i = 0; i < tail.length; i++) {
firstSegmentOwners[i + 1] = tail[i];
}
}
return firstSegmentOwners;
}
/**
* @param membersToUse Owner indexes will refer to this list, instead of the current list of members
*/
public void setMembersToUse(List<Address> membersToUse) {
this.membersToUse = membersToUse;
}
public static class Externalizer implements org.infinispan.commons.marshall.Externalizer<ReplicatedControlledConsistentHashFactory> {
@Override
public void writeObject(ObjectOutput output, ReplicatedControlledConsistentHashFactory object) throws IOException {
MarshallUtil.marshallCollection(object.membersToUse, output);
MarshallUtil.marshallSize(output, object.primaryOwnerIndices.length);
for (int i : object.primaryOwnerIndices)
output.writeInt(i);
}
@Override
public ReplicatedControlledConsistentHashFactory readObject(ObjectInput input) throws IOException, ClassNotFoundException {
List<Address> addresses = MarshallUtil.unmarshallCollection(input, ArrayList::new);
int size = MarshallUtil.unmarshallSize(input);
int[] indices = new int[size];
for (int i = 0; i < size; i++)
indices[i] = input.readInt();
return new ReplicatedControlledConsistentHashFactory(addresses, indices);
}
}
}
| 4,649
| 38.74359
| 136
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/concurrent/BlockingManagerTest.java
|
package org.infinispan.util.concurrent;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import org.infinispan.commons.test.BlockHoundHelper;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.Mocks;
import org.mockito.Mockito;
import org.reactivestreams.Publisher;
import org.testng.annotations.Test;
import io.reactivex.rxjava3.core.Flowable;
import io.reactivex.rxjava3.processors.AsyncProcessor;
import io.reactivex.rxjava3.processors.UnicastProcessor;
import io.reactivex.rxjava3.subscribers.TestSubscriber;
@Test(groups = "unit", testName = "util.concurrent.BlockingManagerTest")
public class BlockingManagerTest extends AbstractInfinispanTest {
Executor nonBlockingExecutor;
Executor blockingExecutor;
public void initializeMocks() {
nonBlockingExecutor = Mockito.mock(Executor.class, Mockito.withSettings()
.defaultAnswer(Mocks.runWithExecutorAnswer(BlockHoundHelper.ensureNonBlockingExecutor())));
blockingExecutor = Mockito.mock(Executor.class, Mockito.withSettings()
.defaultAnswer(Mocks.runWithExecutorAnswer(BlockHoundHelper.allowBlockingExecutor())));
}
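   // Builds a BlockingManagerImpl whose isCurrentThreadBlocking() is fixed to the given value and which
   // is wired to the BlockHound-checked mock executors created above, so each test can verify exactly
   // which executor (if any) was used.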
private BlockingManager createBlockingManager(boolean blockingInvocation) {
initializeMocks();
BlockingManagerImpl blockingManager = new BlockingManagerImpl() {
@Override
protected boolean isCurrentThreadBlocking() {
return blockingInvocation;
}
};
blockingManager.nonBlockingExecutor = nonBlockingExecutor;
blockingManager.blockingExecutor = blockingExecutor;
blockingManager.start();
return blockingManager;
}
public void testBlockingPublishToVoidStageInvokedBlockingThread() {
BlockingManager blockingManager = createBlockingManager(true);
CompletionStage<Void> stage = blockingManager.blockingPublisherToVoidStage(Flowable.fromArray(new Object[] { 1, 2, 3 })
.doOnNext(BlockHoundHelper::blockingConsume), null);
assertTrue(CompletionStages.isCompletedSuccessfully(stage));
// We should not have used any executor as we were a blocking thread already
Mockito.verifyNoInteractions(nonBlockingExecutor, blockingExecutor);
}
public void testBlockingPublishToVoidStageInvokedNonBlockingThread() {
BlockingManager blockingManager = createBlockingManager(false);
CompletionStage<Void> stage = blockingManager.blockingPublisherToVoidStage(Flowable.just(1)
.doOnNext(BlockHoundHelper::blockingConsume), null);
assertTrue(CompletionStages.isCompletedSuccessfully(stage));
Mockito.verify(blockingExecutor).execute(Mockito.any());
Mockito.verifyNoInteractions(nonBlockingExecutor);
}
public void testBlockingPublishToVoidStageInvokedNonBlockingThreadCompleteAfterSubscribe() {
BlockingManager blockingManager = createBlockingManager(false);
AsyncProcessor<Object> processor = AsyncProcessor.create();
processor.onNext(1);
CompletionStage<Void> stage = blockingManager.blockingPublisherToVoidStage(processor
.doOnNext(BlockHoundHelper::blockingConsume), null);
assertFalse(CompletionStages.isCompletedSuccessfully(stage));
processor.onComplete();
assertTrue(CompletionStages.isCompletedSuccessfully(stage));
Mockito.verify(blockingExecutor).execute(Mockito.any());
Mockito.verify(nonBlockingExecutor).execute(Mockito.any());
}
public void testBlockingPublisherInvokedBlockingThread() {
BlockingManager blockingManager = createBlockingManager(true);
Publisher<Integer> publisher = blockingManager.blockingPublisher(Flowable.just(1)
.doOnNext(BlockHoundHelper::blockingConsume));
TestSubscriber<Integer> subscriber = TestSubscriber.create();
publisher.subscribe(subscriber);
subscriber.assertComplete();
// We should not have used any executor as we were a blocking thread already
Mockito.verifyNoInteractions(nonBlockingExecutor, blockingExecutor);
}
public void testBlockingPublisherInvokedBlockingThreadCompleteAfterSubscribe() {
BlockingManager blockingManager = createBlockingManager(true);
AsyncProcessor<Integer> processor = AsyncProcessor.create();
processor.onNext(1);
Publisher<Integer> publisher = blockingManager.blockingPublisher(processor
.doOnNext(BlockHoundHelper::blockingConsume));
TestSubscriber<Integer> subscriber = TestSubscriber.create();
publisher.subscribe(subscriber);
subscriber.assertNotComplete();
processor.onComplete();
subscriber.assertComplete();
      // We should not have used any executor: onNext ran on the already blocking invoking thread, and
      // onComplete was also delivered on the invoking thread because it happened after the subscription
Mockito.verifyNoInteractions(nonBlockingExecutor, blockingExecutor);
}
public void testBlockingPublisherInvokedNonBlockingThread() {
BlockingManager blockingManager = createBlockingManager(false);
Publisher<Integer> publisher = blockingManager.blockingPublisher(Flowable.just(1)
.doOnNext(BlockHoundHelper::blockingConsume));
TestSubscriber<Integer> subscriber = TestSubscriber.create();
Flowable.fromPublisher(publisher)
            // We should observe every value of the Publisher returned from `blockingPublisher` on a non-blocking thread
.doOnNext(ignore -> assertTrue(BlockHoundHelper.currentThreadRequiresNonBlocking()))
.subscribe(subscriber);
subscriber.assertComplete();
Mockito.verify(blockingExecutor).execute(Mockito.any());
// This is invoked 3 times because of how AsyncProcessor works - it submits once for request, once for onNext and
// once for onComplete
Mockito.verify(nonBlockingExecutor, Mockito.times(3)).execute(Mockito.any());
}
public void testBlockingPublisherInvokedNonBlockingThreadCompleteAfterSubscribe() {
BlockingManager blockingManager = createBlockingManager(false);
UnicastProcessor<Integer> processor = UnicastProcessor.create();
processor.onNext(1);
Publisher<Integer> publisher = blockingManager.blockingPublisher(processor
.doOnNext(BlockHoundHelper::blockingConsume));
TestSubscriber<Integer> subscriber = TestSubscriber.create();
Flowable.fromPublisher(publisher)
            // We should observe every value of the Publisher returned from `blockingPublisher` on a non-blocking thread
.doOnNext(ignore -> assertTrue(BlockHoundHelper.currentThreadRequiresNonBlocking()))
.subscribe(subscriber);
subscriber.assertNotComplete();
processor.onComplete();
subscriber.assertComplete();
Mockito.verify(blockingExecutor).execute(Mockito.any());
// This is invoked 3 times because of how AsyncProcessor works - it submits once for request, once for onNext and
// once for onComplete
Mockito.verify(nonBlockingExecutor, Mockito.times(3)).execute(Mockito.any());
}
}
| 7,170
| 39.061453
| 125
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/concurrent/ConditionFutureTest.java
|
package org.infinispan.util.concurrent;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.infinispan.commons.test.Exceptions.expectCompletionException;
import static org.infinispan.util.concurrent.CompletionStages.isCompletedSuccessfully;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Predicate;
import org.infinispan.commons.IllegalLifecycleStateException;
import org.infinispan.test.AbstractInfinispanTest;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
@Test(groups = "unit", testName = "util.concurrent.ConditionFutureTest")
public class ConditionFutureTest extends AbstractInfinispanTest {
ScheduledExecutorService timeoutExecutor =
Executors.newSingleThreadScheduledExecutor(getTestThreadFactory("timeout"));
@AfterClass(alwaysRun = true)
public void tearDown() {
timeoutExecutor.shutdownNow();
}
public void testBeforeFirstUpdate() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
CompletionStage<Void> stage = conditionFuture.newConditionStage(i -> i > 0, 10, SECONDS);
assertFalse(stage.toCompletableFuture().isDone());
conditionFuture.update(1);
assertTrue(stage.toCompletableFuture().isDone());
}
public void testAlreadyCompleted() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
conditionFuture.update(1);
CompletionStage<Void> stage = conditionFuture.newConditionStage(i -> i > 0, 10, SECONDS);
assertTrue(stage.toCompletableFuture().isDone());
}
public void testConcurrentModification() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
CompletionStage<Void> stage11 = conditionFuture.newConditionStage(i -> i > 0, 10, SECONDS);
CompletionStage<Void> stage12 = conditionFuture.newConditionStage(i -> i > 0, 10, SECONDS);
      // Block the completion of stage11 and stage12
CompletableFuture<Void> updateReleased = new CompletableFuture<>();
stage11.thenRun(updateReleased::join);
stage12.thenRun(updateReleased::join);
// Update the condition future, triggering the completion of stage1x
conditionFuture.updateAsync(1, testExecutor());
eventually(() -> isCompletedSuccessfully(stage11) || isCompletedSuccessfully(stage12));
// Add 2 new condition stages while the update is blocked, to increment modCount by 2
CompletionStage<Void> stage21 = conditionFuture.newConditionStage(i -> i > 1, 10, SECONDS);
CompletionStage<Void> stage22 = conditionFuture.newConditionStage(i -> i > 1, 10, SECONDS);
// Unblock the condition future update
updateReleased.complete(null);
CompletionStages.join(stage11);
CompletionStages.join(stage12);
// Update again to complete stage21 and stage22
conditionFuture.update(2);
CompletionStages.join(stage21);
CompletionStages.join(stage22);
}
public void testUpdateAsyncException() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
CompletionStage<Void> stage1 = conditionFuture.newConditionStage(i -> i > 0, 10, SECONDS);
ExecutorService executor = Executors.newSingleThreadExecutor(getTestThreadFactory(""));
executor.shutdown();
conditionFuture.updateAsync(1, executor);
expectCompletionException(RejectedExecutionException.class, stage1);
}
public void testStopException() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
CompletionStage<Void> stage = conditionFuture.newConditionStage(i -> i > 1, 10, SECONDS);
assertFalse(stage.toCompletableFuture().isDone());
conditionFuture.stop();
expectCompletionException(IllegalLifecycleStateException.class, stage);
}
public void testDuplicatePredicate() {
ConditionFuture<Integer> conditionFuture = new ConditionFuture<>(timeoutExecutor);
Predicate<Integer> test = i -> i > 0;
CompletionStage<Void> stage1 = conditionFuture.newConditionStage(test, 10, SECONDS);
CompletionStage<Void> stage2 = conditionFuture.newConditionStage(test, 10, SECONDS);
assertFalse(stage1.toCompletableFuture().isDone());
assertFalse(stage2.toCompletableFuture().isDone());
conditionFuture.update(1);
assertTrue(stage1.toCompletableFuture().isDone());
assertTrue(stage2.toCompletableFuture().isDone());
}
}
| 4,839
| 41.45614
| 97
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/concurrent/BlockingManagerTestUtil.java
|
package org.infinispan.util.concurrent;
import org.infinispan.manager.CacheContainer;
import org.infinispan.test.TestingUtil;
public class BlockingManagerTestUtil {
/**
* Replaces the cache container's {@link BlockingManager} and {@link NonBlockingManager} components with ones that
* run all blocking and non-blocking operations in the invoking thread. This is useful for testing when you want
* operations to be done sequentially.
* <p>
* This operation will be undone if the component registry is rewired.
*
* @param cacheContainer Container of which the blocking manager is to be replaced
*/
public static void replaceManagersWithInline(CacheContainer cacheContainer) {
NonBlockingManagerImpl nonBlockingManager = (NonBlockingManagerImpl) TestingUtil.extractGlobalComponent(cacheContainer, NonBlockingManager.class);
nonBlockingManager.executor = new WithinThreadExecutor();
BlockingManagerImpl manager = (BlockingManagerImpl) TestingUtil.extractGlobalComponent(cacheContainer, BlockingManager.class);
manager.blockingExecutor = new WithinThreadExecutor();
manager.nonBlockingExecutor = new WithinThreadExecutor();
manager.nonBlockingManager = nonBlockingManager;
}
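   // Minimal usage sketch (hypothetical test code; cm is an assumed, already created EmbeddedCacheManager):
   //
   //    BlockingManagerTestUtil.replaceManagersWithInline(cm);
   //    // from here on, blocking and non-blocking work for cm's caches runs in the calling thread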
}
| 1,249
| 47.076923
| 152
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/logging/events/BasicEventLoggerTest.java
|
package org.infinispan.util.logging.events;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNotNull;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import org.infinispan.commons.time.DefaultTimeService;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestException;
import org.infinispan.util.logging.events.impl.BasicEventLogger;
import org.mockito.ArgumentCaptor;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
@Test(testName = "events.BasicEventLoggerTest", groups = "unit")
public class BasicEventLoggerTest extends AbstractInfinispanTest {
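   // Cartesian product of every EventLogLevel with every EventLogCategory.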
@DataProvider(name = "levels-categories")
public Object[][] levelCategoriesProvider() {
return Stream.of(EventLogLevel.values())
.flatMap(level -> Stream.of(EventLogCategory.values())
.map(category -> new Object[]{ level, category }))
.toArray(Object[][]::new);
}
@Test(dataProvider = "levels-categories")
public void shouldSendNotification(EventLogLevel level, EventLogCategory category) throws InterruptedException {
final EventLoggerNotifier notifier = mock(EventLoggerNotifier.class);
CountDownLatch latch = new CountDownLatch(1);
ArgumentCaptor<EventLog> logged = ArgumentCaptor.forClass(EventLog.class);
EventLogger logger = new BasicEventLogger(notifier, DefaultTimeService.INSTANCE);
when(notifier.notifyEventLogged(logged.capture())).thenAnswer(invocation -> {
latch.countDown();
return CompletableFutures.completedNull();
});
logger.log(level, category, "Lorem");
if (!latch.await(5, TimeUnit.SECONDS)) {
throw new TestException("Failed notifying about logged data");
}
EventLog actual = logged.getValue();
assertNotNull(actual);
assertEquals(actual.getLevel(), level);
assertEquals(actual.getCategory(), category);
assertEquals(actual.getMessage(), "Lorem");
}
}
| 2,219
| 37.275862
| 115
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/logging/events/TestingEventLogManager.java
|
package org.infinispan.util.logging.events;
import org.infinispan.commons.time.DefaultTimeService;
import org.infinispan.util.logging.events.impl.BasicEventLogger;
import org.infinispan.util.logging.events.impl.EventLoggerNotifierImpl;
public class TestingEventLogManager implements EventLogManager {
private EventLogger logger = new BasicEventLogger(new EventLoggerNotifierImpl(), DefaultTimeService.INSTANCE);
@Override
public EventLogger getEventLogger() {
return logger;
}
@Override
public EventLogger replaceEventLogger(EventLogger newLogger) {
EventLogger oldLogger = logger;
logger = newLogger;
return oldLogger;
}
}
| 673
| 29.636364
| 113
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/tx/lookup/GeronimoTransactionManagerLookup.java
| 0
| 0
| 0
|
java
|
|
null |
infinispan-main/core/src/test/java/org/infinispan/util/mocks/ControlledCommandFactory.java
|
package org.infinispan.util.mocks;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.control.LockControlCommand;
import org.infinispan.commands.functional.Mutation;
import org.infinispan.commands.functional.ReadOnlyKeyCommand;
import org.infinispan.commands.functional.ReadOnlyManyCommand;
import org.infinispan.commands.functional.ReadWriteKeyCommand;
import org.infinispan.commands.functional.ReadWriteKeyValueCommand;
import org.infinispan.commands.functional.ReadWriteManyCommand;
import org.infinispan.commands.functional.ReadWriteManyEntriesCommand;
import org.infinispan.commands.functional.TxReadOnlyKeyCommand;
import org.infinispan.commands.functional.TxReadOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyCommand;
import org.infinispan.commands.functional.WriteOnlyKeyValueCommand;
import org.infinispan.commands.functional.WriteOnlyManyCommand;
import org.infinispan.commands.functional.WriteOnlyManyEntriesCommand;
import org.infinispan.commands.irac.IracCleanupKeysCommand;
import org.infinispan.commands.irac.IracClearKeysCommand;
import org.infinispan.commands.irac.IracMetadataRequestCommand;
import org.infinispan.commands.irac.IracPutManyCommand;
import org.infinispan.commands.irac.IracRequestStateCommand;
import org.infinispan.commands.irac.IracStateResponseCommand;
import org.infinispan.commands.irac.IracTombstoneCleanupCommand;
import org.infinispan.commands.irac.IracTombstonePrimaryCheckCommand;
import org.infinispan.commands.irac.IracTombstoneRemoteSiteCheckCommand;
import org.infinispan.commands.irac.IracTombstoneStateResponseCommand;
import org.infinispan.commands.irac.IracTouchKeyCommand;
import org.infinispan.commands.irac.IracUpdateVersionCommand;
import org.infinispan.commands.read.EntrySetCommand;
import org.infinispan.commands.read.GetAllCommand;
import org.infinispan.commands.read.GetCacheEntryCommand;
import org.infinispan.commands.read.GetKeyValueCommand;
import org.infinispan.commands.read.KeySetCommand;
import org.infinispan.commands.read.SizeCommand;
import org.infinispan.commands.remote.CheckTransactionRpcCommand;
import org.infinispan.commands.remote.ClusteredGetAllCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.remote.SingleRpcCommand;
import org.infinispan.commands.remote.recovery.CompleteTransactionCommand;
import org.infinispan.commands.remote.recovery.GetInDoubtTransactionsCommand;
import org.infinispan.commands.remote.recovery.GetInDoubtTxInfoCommand;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.statetransfer.ConflictResolutionStartCommand;
import org.infinispan.commands.statetransfer.StateResponseCommand;
import org.infinispan.commands.statetransfer.StateTransferCancelCommand;
import org.infinispan.commands.statetransfer.StateTransferGetListenersCommand;
import org.infinispan.commands.statetransfer.StateTransferGetTransactionsCommand;
import org.infinispan.commands.statetransfer.StateTransferStartCommand;
import org.infinispan.commands.triangle.BackupNoopCommand;
import org.infinispan.commands.triangle.MultiEntriesFunctionalBackupWriteCommand;
import org.infinispan.commands.triangle.MultiKeyFunctionalBackupWriteCommand;
import org.infinispan.commands.triangle.PutMapBackupWriteCommand;
import org.infinispan.commands.triangle.SingleKeyBackupWriteCommand;
import org.infinispan.commands.triangle.SingleKeyFunctionalBackupWriteCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.VersionedCommitCommand;
import org.infinispan.commands.tx.VersionedPrepareCommand;
import org.infinispan.commands.write.BackupAckCommand;
import org.infinispan.commands.write.BackupMultiKeyAckCommand;
import org.infinispan.commands.write.ClearCommand;
import org.infinispan.commands.write.ComputeCommand;
import org.infinispan.commands.write.ComputeIfAbsentCommand;
import org.infinispan.commands.write.EvictCommand;
import org.infinispan.commands.write.ExceptionAckCommand;
import org.infinispan.commands.write.InvalidateCommand;
import org.infinispan.commands.write.IracPutKeyValueCommand;
import org.infinispan.commands.write.PutKeyValueCommand;
import org.infinispan.commands.write.PutMapCommand;
import org.infinispan.commands.write.RemoveCommand;
import org.infinispan.commands.write.RemoveExpiredCommand;
import org.infinispan.commands.write.ReplaceCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.commons.tx.XidImpl;
import org.infinispan.commons.util.IntSet;
import org.infinispan.configuration.cache.XSiteStateTransferMode;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.versioning.irac.IracEntryVersion;
import org.infinispan.container.versioning.irac.IracTombstoneInfo;
import org.infinispan.encoding.DataConversion;
import org.infinispan.expiration.impl.TouchCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.functional.EntryView;
import org.infinispan.functional.impl.Params;
import org.infinispan.metadata.Metadata;
import org.infinispan.metadata.impl.PrivateMetadata;
import org.infinispan.notifications.cachelistener.cluster.ClusterEvent;
import org.infinispan.notifications.cachelistener.cluster.MultiClusterEventCommand;
import org.infinispan.reactive.publisher.impl.DeliveryGuarantee;
import org.infinispan.reactive.publisher.impl.commands.batch.CancelPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.InitialPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.batch.NextPublisherCommand;
import org.infinispan.reactive.publisher.impl.commands.reduction.ReductionPublisherRequestCommand;
import org.infinispan.remoting.transport.Address;
import org.infinispan.statetransfer.StateChunk;
import org.infinispan.test.TestingUtil;
import org.infinispan.transaction.xa.GlobalTransaction;
import org.infinispan.util.concurrent.ReclosableLatch;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.infinispan.xsite.SingleXSiteRpcCommand;
import org.infinispan.xsite.commands.XSiteAmendOfflineStatusCommand;
import org.infinispan.xsite.commands.XSiteAutoTransferStatusCommand;
import org.infinispan.xsite.commands.XSiteBringOnlineCommand;
import org.infinispan.xsite.commands.XSiteOfflineStatusCommand;
import org.infinispan.xsite.commands.XSiteSetStateTransferModeCommand;
import org.infinispan.xsite.commands.XSiteStateTransferCancelSendCommand;
import org.infinispan.xsite.commands.XSiteStateTransferClearStatusCommand;
import org.infinispan.xsite.commands.XSiteStateTransferFinishReceiveCommand;
import org.infinispan.xsite.commands.XSiteStateTransferFinishSendCommand;
import org.infinispan.xsite.commands.XSiteStateTransferRestartSendingCommand;
import org.infinispan.xsite.commands.XSiteStateTransferStartReceiveCommand;
import org.infinispan.xsite.commands.XSiteStateTransferStartSendCommand;
import org.infinispan.xsite.commands.XSiteStateTransferStatusRequestCommand;
import org.infinispan.xsite.commands.XSiteStatusCommand;
import org.infinispan.xsite.commands.XSiteTakeOfflineCommand;
import org.infinispan.xsite.irac.IracManagerKeyInfo;
import org.infinispan.xsite.statetransfer.XSiteState;
import org.infinispan.xsite.statetransfer.XSiteStatePushCommand;
import org.reactivestreams.Publisher;
/**
* @author Mircea Markus
* @since 5.2
*/
public class ControlledCommandFactory implements CommandsFactory {
private final static Log log = LogFactory.getLog(ControlledCommandFactory.class);
public final CommandsFactory actual;
public final ReclosableLatch gate = new ReclosableLatch(true);
public final AtomicInteger remoteCommandsReceived = new AtomicInteger(0);
public final AtomicInteger blockTypeCommandsReceived = new AtomicInteger(0);
public final List<ReplicableCommand> receivedCommands = new ArrayList<>();
public final Class<? extends ReplicableCommand> toBlock;
public ControlledCommandFactory(CommandsFactory actual, Class<? extends ReplicableCommand> toBlock) {
this.actual = actual;
this.toBlock = toBlock;
}
public int received(Class<? extends ReplicableCommand> command) {
int result = 0;
for (ReplicableCommand r : receivedCommands) {
if (r.getClass() == command) {
result++;
}
}
return result;
}
@Override
public void initializeReplicableCommand(ReplicableCommand command, boolean isRemote) {
log.tracef("Received command %s", command);
receivedCommands.add(command);
if (isRemote) {
remoteCommandsReceived.incrementAndGet();
if (toBlock != null && command.getClass().isAssignableFrom(toBlock)) {
blockTypeCommandsReceived.incrementAndGet();
try {
gate.await(30, TimeUnit.SECONDS);
log.tracef("gate is opened, processing the lock cleanup: %s", command);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
}
actual.initializeReplicableCommand(command, isRemote);
}
public static ControlledCommandFactory registerControlledCommandFactory(Cache cache, Class<? extends ReplicableCommand> toBlock) {
ComponentRegistry componentRegistry = cache.getAdvancedCache().getComponentRegistry();
final ControlledCommandFactory ccf = new ControlledCommandFactory(componentRegistry.getCommandsFactory(), toBlock);
TestingUtil.replaceComponent(cache, CommandsFactory.class, ccf, true);
      //hack: re-add the component registry to the GlobalComponentRegistry's "namedComponents" (CHM) in order to correctly publish it for
      // when it is read by the InboundInvocationHandler. The InboundInvocationHandler reads the value from GlobalComponentRegistry.namedComponents before using it
componentRegistry.getGlobalComponentRegistry().registerNamedComponentRegistry(componentRegistry, TestingUtil.getDefaultCacheName(cache.getCacheManager()));
return ccf;
}
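   // Hypothetical usage sketch (editorial addition; cache(1) and the chosen command type are assumptions,
   // gate is the ReclosableLatch field declared above):
   //
   //    ControlledCommandFactory ccf =
   //          ControlledCommandFactory.registerControlledCommandFactory(cache(1), CommitCommand.class);
   //    ccf.gate.close();   // remote CommitCommands now park in initializeReplicableCommand
   //    // ... trigger the replication under test from another node ...
   //    ccf.gate.open();    // release the parked commands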
@Override
public PutKeyValueCommand buildPutKeyValueCommand(Object key, Object value, int segment, Metadata metadata,
long flagsBitSet, boolean returnEntry) {
return actual.buildPutKeyValueCommand(key, value, segment, metadata, flagsBitSet, returnEntry);
}
@Override
public RemoveCommand buildRemoveCommand(Object key, Object value, int segment, long flagsBitSet, boolean returnEntry) {
return actual.buildRemoveCommand(key, value, segment, flagsBitSet, returnEntry);
}
@Override
public InvalidateCommand buildInvalidateCommand(long flagsBitSet, Object... keys) {
return actual.buildInvalidateCommand(flagsBitSet, keys);
}
@Override
public InvalidateCommand buildInvalidateFromL1Command(long flagsBitSet, Collection<Object> keys) {
return actual.buildInvalidateFromL1Command(flagsBitSet, keys);
}
@Override
public InvalidateCommand buildInvalidateFromL1Command(Address origin, long flagsBitSet, Collection<Object> keys) {
return actual.buildInvalidateFromL1Command(origin, flagsBitSet, keys);
}
@Override
public RemoveExpiredCommand buildRemoveExpiredCommand(Object key, Object value, int segment, Long lifespan,
long flagsBitSet) {
return actual.buildRemoveExpiredCommand(key, value, segment, lifespan, flagsBitSet);
}
@Override
public RemoveExpiredCommand buildRemoveExpiredCommand(Object key, Object value, int segment, long flagsBitSet) {
return actual.buildRemoveExpiredCommand(key, value, segment, flagsBitSet);
}
@Override
public ReplaceCommand buildReplaceCommand(Object key, Object oldValue, Object newValue, int segment,
Metadata metadata, long flagsBitSet, boolean loadEntry) {
return actual.buildReplaceCommand(key, oldValue, newValue, segment, metadata, flagsBitSet, loadEntry);
}
@Override
public ComputeCommand buildComputeCommand(Object key, BiFunction mappingFunction, boolean computeIfPresent,
int segment, Metadata metadata, long flagsBitSet) {
return actual.buildComputeCommand(key, mappingFunction, computeIfPresent, segment, metadata, flagsBitSet);
}
@Override
public ComputeIfAbsentCommand buildComputeIfAbsentCommand(Object key, Function mappingFunction, int segment,
Metadata metadata, long flagsBitSet) {
return actual.buildComputeIfAbsentCommand(key, mappingFunction, segment, metadata, flagsBitSet);
}
@Override
public SizeCommand buildSizeCommand(IntSet segments, long flagsBitSet) {
return actual.buildSizeCommand(segments, flagsBitSet);
}
@Override
public GetKeyValueCommand buildGetKeyValueCommand(Object key, int segment, long flagsBitSet) {
return actual.buildGetKeyValueCommand(key, segment, flagsBitSet);
}
@Override
public GetAllCommand buildGetAllCommand(Collection<?> keys, long flagsBitSet, boolean returnEntries) {
return actual.buildGetAllCommand(keys, flagsBitSet, returnEntries);
}
@Override
public KeySetCommand buildKeySetCommand(long flagsBitSet) {
return actual.buildKeySetCommand(flagsBitSet);
}
@Override
public EntrySetCommand buildEntrySetCommand(long flagsBitSet) {
return actual.buildEntrySetCommand(flagsBitSet);
}
@Override
public PutMapCommand buildPutMapCommand(Map<?, ?> map, Metadata metadata, long flagsBitSet) {
return actual.buildPutMapCommand(map, metadata, flagsBitSet);
}
@Override
public ClearCommand buildClearCommand(long flagsBitSet) {
return actual.buildClearCommand(flagsBitSet);
}
@Override
public EvictCommand buildEvictCommand(Object key, int segment, long flagsBitSet) {
return actual.buildEvictCommand(key, segment, flagsBitSet);
}
@Override
public PrepareCommand buildPrepareCommand(GlobalTransaction gtx, List<WriteCommand> modifications, boolean onePhaseCommit) {
return actual.buildPrepareCommand(gtx, modifications, onePhaseCommit);
}
@Override
public VersionedPrepareCommand buildVersionedPrepareCommand(GlobalTransaction gtx, List<WriteCommand> modifications, boolean onePhase) {
return actual.buildVersionedPrepareCommand(gtx, modifications, onePhase);
}
@Override
public CommitCommand buildCommitCommand(GlobalTransaction gtx) {
return actual.buildCommitCommand(gtx);
}
@Override
public VersionedCommitCommand buildVersionedCommitCommand(GlobalTransaction gtx) {
return actual.buildVersionedCommitCommand(gtx);
}
@Override
public RollbackCommand buildRollbackCommand(GlobalTransaction gtx) {
return actual.buildRollbackCommand(gtx);
}
@Override
public SingleRpcCommand buildSingleRpcCommand(VisitableCommand call) {
return actual.buildSingleRpcCommand(call);
}
@Override
public ClusteredGetCommand buildClusteredGetCommand(Object key, Integer segment, long flagsBitSet) {
return actual.buildClusteredGetCommand(key, segment, flagsBitSet);
}
@Override
public ClusteredGetAllCommand buildClusteredGetAllCommand(List<?> keys, long flagsBitSet, GlobalTransaction gtx) {
return actual.buildClusteredGetAllCommand(keys, flagsBitSet, gtx);
}
@Override
public LockControlCommand buildLockControlCommand(Collection<?> keys, long flagsBitSet, GlobalTransaction gtx) {
return actual.buildLockControlCommand(keys, flagsBitSet, gtx);
}
@Override
public LockControlCommand buildLockControlCommand(Object key, long flagsBitSet, GlobalTransaction gtx) {
return actual.buildLockControlCommand(key, flagsBitSet, gtx);
}
@Override
public LockControlCommand buildLockControlCommand(Collection<?> keys, long flagsBitSet) {
return actual.buildLockControlCommand(keys, flagsBitSet);
}
@Override
public ConflictResolutionStartCommand buildConflictResolutionStartCommand(int topologyId, IntSet segments) {
return actual.buildConflictResolutionStartCommand(topologyId, segments);
}
@Override
public StateTransferCancelCommand buildStateTransferCancelCommand(int topologyId, IntSet segments) {
return actual.buildStateTransferCancelCommand(topologyId, segments);
}
@Override
public StateTransferGetListenersCommand buildStateTransferGetListenersCommand(int topologyId) {
return actual.buildStateTransferGetListenersCommand(topologyId);
}
@Override
public StateTransferGetTransactionsCommand buildStateTransferGetTransactionsCommand(int topologyId, IntSet segments) {
return actual.buildStateTransferGetTransactionsCommand(topologyId, segments);
}
@Override
public StateTransferStartCommand buildStateTransferStartCommand(int topologyId, IntSet segments) {
return actual.buildStateTransferStartCommand(topologyId, segments);
}
@Override
public StateResponseCommand buildStateResponseCommand(int viewId, Collection<StateChunk> stateChunks, boolean applyState) {
return actual.buildStateResponseCommand(viewId, stateChunks, applyState);
}
@Override
public String getCacheName() {
return actual.getCacheName();
}
@Override
public GetInDoubtTransactionsCommand buildGetInDoubtTransactionsCommand() {
return actual.buildGetInDoubtTransactionsCommand();
}
@Override
public TxCompletionNotificationCommand buildTxCompletionNotificationCommand(XidImpl xid, GlobalTransaction globalTransaction) {
return actual.buildTxCompletionNotificationCommand(xid, globalTransaction);
}
@Override
public GetInDoubtTxInfoCommand buildGetInDoubtTxInfoCommand() {
return actual.buildGetInDoubtTxInfoCommand();
}
@Override
public CompleteTransactionCommand buildCompleteTransactionCommand(XidImpl xid, boolean commit) {
return actual.buildCompleteTransactionCommand(xid, commit);
}
@Override
public TxCompletionNotificationCommand buildTxCompletionNotificationCommand(long internalId) {
return actual.buildTxCompletionNotificationCommand(internalId);
}
@Override
public XSiteStateTransferCancelSendCommand buildXSiteStateTransferCancelSendCommand(String siteName) {
return actual.buildXSiteStateTransferCancelSendCommand(siteName);
}
@Override
public XSiteStateTransferClearStatusCommand buildXSiteStateTransferClearStatusCommand() {
return actual.buildXSiteStateTransferClearStatusCommand();
}
@Override
public XSiteStateTransferFinishReceiveCommand buildXSiteStateTransferFinishReceiveCommand(String siteName) {
return actual.buildXSiteStateTransferFinishReceiveCommand(siteName);
}
@Override
public XSiteStateTransferFinishSendCommand buildXSiteStateTransferFinishSendCommand(String siteName, boolean statusOk) {
return actual.buildXSiteStateTransferFinishSendCommand(siteName, statusOk);
}
@Override
public XSiteStateTransferRestartSendingCommand buildXSiteStateTransferRestartSendingCommand(String siteName, int topologyId) {
return actual.buildXSiteStateTransferRestartSendingCommand(siteName, topologyId);
}
@Override
public XSiteStateTransferStartReceiveCommand buildXSiteStateTransferStartReceiveCommand() {
return actual.buildXSiteStateTransferStartReceiveCommand();
}
@Override
public XSiteStateTransferStartSendCommand buildXSiteStateTransferStartSendCommand(String siteName, int topologyId) {
return actual.buildXSiteStateTransferStartSendCommand(siteName, topologyId);
}
@Override
public XSiteStateTransferStatusRequestCommand buildXSiteStateTransferStatusRequestCommand() {
return actual.buildXSiteStateTransferStatusRequestCommand();
}
@Override
public XSiteAmendOfflineStatusCommand buildXSiteAmendOfflineStatusCommand(String siteName, Integer afterFailures, Long minTimeToWait) {
return actual.buildXSiteAmendOfflineStatusCommand(siteName, afterFailures, minTimeToWait);
}
@Override
public XSiteBringOnlineCommand buildXSiteBringOnlineCommand(String siteName) {
return actual.buildXSiteBringOnlineCommand(siteName);
}
@Override
public XSiteOfflineStatusCommand buildXSiteOfflineStatusCommand(String siteName) {
return actual.buildXSiteOfflineStatusCommand(siteName);
}
@Override
public XSiteStatusCommand buildXSiteStatusCommand() {
return actual.buildXSiteStatusCommand();
}
@Override
public XSiteTakeOfflineCommand buildXSiteTakeOfflineCommand(String siteName) {
return actual.buildXSiteTakeOfflineCommand(siteName);
}
@Override
public XSiteStatePushCommand buildXSiteStatePushCommand(XSiteState[] chunk, long timeoutMillis) {
return actual.buildXSiteStatePushCommand(chunk, timeoutMillis);
}
@Override
public SingleXSiteRpcCommand buildSingleXSiteRpcCommand(VisitableCommand command) {
return actual.buildSingleXSiteRpcCommand(command);
}
@Override
public GetCacheEntryCommand buildGetCacheEntryCommand(Object key, int segment, long flagsBitSet) {
return actual.buildGetCacheEntryCommand(key, segment, flagsBitSet);
}
@Override
public <K, V, R> ReadOnlyKeyCommand<K, V, R> buildReadOnlyKeyCommand(Object key, Function<EntryView.ReadEntryView<K, V>, R> f,
int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadOnlyKeyCommand(key, f, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, R> ReadOnlyManyCommand<K, V, R> buildReadOnlyManyCommand(Collection<?> keys, Function<EntryView.ReadEntryView<K, V>, R> f, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadOnlyManyCommand(keys, f, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, T, R> ReadWriteKeyValueCommand<K, V, T, R> buildReadWriteKeyValueCommand(Object key, Object argument, BiFunction<T, EntryView.ReadWriteEntryView<K, V>, R> f,
int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadWriteKeyValueCommand(key, argument, f, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, R> ReadWriteKeyCommand<K, V, R> buildReadWriteKeyCommand(
Object key, Function<EntryView.ReadWriteEntryView<K, V>, R> f, int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadWriteKeyCommand(key, f, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, R> ReadWriteManyCommand<K, V, R> buildReadWriteManyCommand(Collection<?> keys, Function<EntryView.ReadWriteEntryView<K, V>, R> f, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadWriteManyCommand(keys, f, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, T, R> ReadWriteManyEntriesCommand<K, V, T, R> buildReadWriteManyEntriesCommand(Map<?, ?> entries, BiFunction<T, EntryView.ReadWriteEntryView<K, V>, R> f, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildReadWriteManyEntriesCommand(entries, f, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V> WriteOnlyKeyCommand<K, V> buildWriteOnlyKeyCommand(
Object key, Consumer<EntryView.WriteEntryView<K, V>> f, int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildWriteOnlyKeyCommand(key, f, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, T> WriteOnlyKeyValueCommand<K, V, T> buildWriteOnlyKeyValueCommand(Object key, Object argument, BiConsumer<T, EntryView.WriteEntryView<K, V>> f,
int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildWriteOnlyKeyValueCommand(key, argument, f, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V> WriteOnlyManyCommand<K, V> buildWriteOnlyManyCommand(Collection<?> keys, Consumer<EntryView.WriteEntryView<K, V>> f, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildWriteOnlyManyCommand(keys, f, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, T> WriteOnlyManyEntriesCommand<K, V, T> buildWriteOnlyManyEntriesCommand(
Map<?, ?> arguments, BiConsumer<T, EntryView.WriteEntryView<K, V>> f, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildWriteOnlyManyEntriesCommand(arguments, f, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, R> TxReadOnlyKeyCommand<K, V, R> buildTxReadOnlyKeyCommand(Object key, Function<EntryView.ReadEntryView<K, V>, R> f, List<Mutation<K, V, ?>> mutations, int segment, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildTxReadOnlyKeyCommand(key, f, mutations, segment, params, keyDataConversion, valueDataConversion);
}
@Override
public <K, V, R> TxReadOnlyManyCommand<K, V, R> buildTxReadOnlyManyCommand(Collection<?> keys, List<List<Mutation<K, V, ?>>> mutations, Params params, DataConversion keyDataConversion, DataConversion valueDataConversion) {
return actual.buildTxReadOnlyManyCommand(keys, mutations, params, keyDataConversion, valueDataConversion);
}
@Override
public BackupAckCommand buildBackupAckCommand(long id, int topologyId) {
return actual.buildBackupAckCommand(id, topologyId);
}
@Override
public BackupMultiKeyAckCommand buildBackupMultiKeyAckCommand(long id, int segment, int topologyId) {
return actual.buildBackupMultiKeyAckCommand(id, segment, topologyId);
}
@Override
public ExceptionAckCommand buildExceptionAckCommand(long id, Throwable throwable, int topologyId) {
return actual.buildExceptionAckCommand(id, throwable, topologyId);
}
@Override
public SingleKeyBackupWriteCommand buildSingleKeyBackupWriteCommand() {
return actual.buildSingleKeyBackupWriteCommand();
}
@Override
public SingleKeyFunctionalBackupWriteCommand buildSingleKeyFunctionalBackupWriteCommand() {
return actual.buildSingleKeyFunctionalBackupWriteCommand();
}
@Override
public PutMapBackupWriteCommand buildPutMapBackupWriteCommand() {
return actual.buildPutMapBackupWriteCommand();
}
@Override
public MultiEntriesFunctionalBackupWriteCommand buildMultiEntriesFunctionalBackupWriteCommand() {
return actual.buildMultiEntriesFunctionalBackupWriteCommand();
}
@Override
public MultiKeyFunctionalBackupWriteCommand buildMultiKeyFunctionalBackupWriteCommand() {
return actual.buildMultiKeyFunctionalBackupWriteCommand();
}
@Override
public BackupNoopCommand buildBackupNoopCommand() {
return actual.buildBackupNoopCommand();
}
@Override
public <K, R> ReductionPublisherRequestCommand<K> buildKeyReductionPublisherCommand(boolean parallelStream,
DeliveryGuarantee deliveryGuarantee, IntSet segments, Set<K> keys, Set<K> excludedKeys, long explicitFlags,
Function<? super Publisher<K>, ? extends CompletionStage<R>> transformer,
Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return actual.buildKeyReductionPublisherCommand(parallelStream, deliveryGuarantee, segments, keys, excludedKeys,
explicitFlags, transformer, finalizer);
}
@Override
public <K, V, R> ReductionPublisherRequestCommand<K> buildEntryReductionPublisherCommand(boolean parallelStream, DeliveryGuarantee deliveryGuarantee, IntSet segments, Set<K> keys, Set<K> excludedKeys, long explicitFlags, Function<? super Publisher<CacheEntry<K, V>>, ? extends CompletionStage<R>> transformer, Function<? super Publisher<R>, ? extends CompletionStage<R>> finalizer) {
return actual.buildEntryReductionPublisherCommand(parallelStream, deliveryGuarantee, segments, keys, excludedKeys,
explicitFlags, transformer, finalizer);
}
@Override
public <K, I, R> InitialPublisherCommand<K, I, R> buildInitialPublisherCommand(String requestId, DeliveryGuarantee deliveryGuarantee, int batchSize, IntSet segments, Set<K> keys, Set<K> excludedKeys, long explicitFlags, boolean entryStream, boolean trackKeys, Function<? super Publisher<I>, ? extends Publisher<R>> transformer) {
return actual.buildInitialPublisherCommand(requestId, deliveryGuarantee, batchSize, segments, keys, excludedKeys, explicitFlags, entryStream, trackKeys, transformer);
}
@Override
public NextPublisherCommand buildNextPublisherCommand(String requestId) {
return actual.buildNextPublisherCommand(requestId);
}
@Override
public CancelPublisherCommand buildCancelPublisherCommand(String requestId) {
return actual.buildCancelPublisherCommand(requestId);
}
@Override
public <K, V> MultiClusterEventCommand<K, V> buildMultiClusterEventCommand(Map<UUID, Collection<ClusterEvent<K, V>>> events) {
return actual.buildMultiClusterEventCommand(events);
}
@Override
public CheckTransactionRpcCommand buildCheckTransactionRpcCommand(Collection<GlobalTransaction> globalTransactions) {
return actual.buildCheckTransactionRpcCommand(globalTransactions);
}
@Override
public TouchCommand buildTouchCommand(Object key, int segment, boolean touchEvenIfExpired, long flagBitSet) {
return actual.buildTouchCommand(key, segment, touchEvenIfExpired, flagBitSet);
}
@Override
public IracClearKeysCommand buildIracClearKeysCommand() {
return actual.buildIracClearKeysCommand();
}
@Override
public IracCleanupKeysCommand buildIracCleanupKeyCommand(Collection<? extends IracManagerKeyInfo> state) {
return actual.buildIracCleanupKeyCommand(state);
}
@Override
public IracTombstoneCleanupCommand buildIracTombstoneCleanupCommand(int maxCapacity) {
return actual.buildIracTombstoneCleanupCommand(maxCapacity);
}
@Override
public IracMetadataRequestCommand buildIracMetadataRequestCommand(int segment, IracEntryVersion versionSeen) {
return actual.buildIracMetadataRequestCommand(segment, versionSeen);
}
@Override
public IracRequestStateCommand buildIracRequestStateCommand(IntSet segments) {
return actual.buildIracRequestStateCommand(segments);
}
@Override
public IracStateResponseCommand buildIracStateResponseCommand(int capacity) {
return actual.buildIracStateResponseCommand(capacity);
}
@Override
public IracPutKeyValueCommand buildIracPutKeyValueCommand(Object key, int segment, Object value, Metadata metadata,
PrivateMetadata privateMetadata) {
return actual.buildIracPutKeyValueCommand(key, segment, value, metadata, privateMetadata);
}
@Override
public IracTouchKeyCommand buildIracTouchCommand(Object key) {
return actual.buildIracTouchCommand(key);
}
@Override
public IracUpdateVersionCommand buildIracUpdateVersionCommand(Map<Integer, IracEntryVersion> segmentsVersion) {
return actual.buildIracUpdateVersionCommand(segmentsVersion);
}
@Override
public XSiteAutoTransferStatusCommand buildXSiteAutoTransferStatusCommand(String site) {
return actual.buildXSiteAutoTransferStatusCommand(site);
}
@Override
public XSiteSetStateTransferModeCommand buildXSiteSetStateTransferModeCommand(String site, XSiteStateTransferMode mode) {
return actual.buildXSiteSetStateTransferModeCommand(site, mode);
}
@Override
public IracTombstoneRemoteSiteCheckCommand buildIracTombstoneRemoteSiteCheckCommand(List<Object> keys) {
return actual.buildIracTombstoneRemoteSiteCheckCommand(keys);
}
@Override
public IracTombstoneStateResponseCommand buildIracTombstoneStateResponseCommand(Collection<IracTombstoneInfo> state) {
return actual.buildIracTombstoneStateResponseCommand(state);
}
@Override
public IracTombstonePrimaryCheckCommand buildIracTombstonePrimaryCheckCommand(Collection<IracTombstoneInfo> tombstones) {
return actual.buildIracTombstonePrimaryCheckCommand(tombstones);
}
@Override
public IracPutManyCommand buildIracPutManyCommand(int capacity) {
return actual.buildIracPutManyCommand(capacity);
}
}
| 33,336
| 45.109267
| 386
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/executors/LimitedExecutorTest.java
|
package org.infinispan.executors;
import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.concurrent.WithinThreadExecutor;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
/**
* Basic tests for {@link LimitedExecutor}
*
* @author Dan Berindei
* @since 9.0
*/
@Test(groups = "functional", testName = "executors.LimitedExecutorTest")
public class LimitedExecutorTest extends AbstractInfinispanTest {
public static final String NAME = "Test";
private final ThreadPoolExecutor executor = new ThreadPoolExecutor(2, 2,
0L, MILLISECONDS,
new SynchronousQueue<>(),
getTestThreadFactory(NAME));
@AfterClass(alwaysRun = true)
public void stopExecutors() {
executor.shutdownNow();
}
public void testBasicWithinThread() throws Exception {
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, new WithinThreadExecutor(), 1);
CompletableFuture<String> cf = new CompletableFuture<>();
limitedExecutor.execute(() -> cf.complete("value"));
assertEquals("value", cf.getNow("task did not run synchronously"));
}
/**
* Test that no more than 1 task runs at a time.
*/
public void testConcurrencyLimit() throws Exception {
eventuallyEquals(0, executor::getActiveCount);
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, executor, 1);
CompletableFuture<String> blocker1 = new CompletableFuture<>();
CompletableFuture<String> cf1 = new CompletableFuture<>();
limitedExecutor.execute(() -> {
try {
cf1.complete(blocker1.get(10, SECONDS));
} catch (Exception e) {
cf1.completeExceptionally(e);
}
});
verifyTaskIsBlocked(limitedExecutor, blocker1, cf1);
}
/**
* Test that an async task ({@code executeAsync()}) will block another task from running
* until its {@code CompletableFuture} is completed.
*/
public void testConcurrencyLimitExecuteAsync() throws Exception {
eventuallyEquals(0, executor::getActiveCount);
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, executor, 1);
CompletableFuture<String> blocker1 = new CompletableFuture<>();
CompletableFuture<String> cf1 = new CompletableFuture<>();
limitedExecutor.executeAsync(() -> blocker1.thenAccept(cf1::complete));
verifyTaskIsBlocked(limitedExecutor, blocker1, cf1);
}
/**
* Test that no more than 1 task runs at a time when using a {@link WithinThreadExecutor}.
*/
public void testConcurrencyLimitWithinThread() throws Exception {
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, new WithinThreadExecutor(), 1);
CompletableFuture<String> blocker1 = new CompletableFuture<>();
CompletableFuture<String> blocker2 = new CompletableFuture<>();
CompletableFuture<String> cf1 = new CompletableFuture<>();
// execute() will block
Future<?> fork1 = fork(() -> {
limitedExecutor.execute(() -> {
blocker2.complete("blocking");
try {
cf1.complete(blocker1.get(10, SECONDS));
} catch (Exception e) {
cf1.completeExceptionally(e);
}
});
});
assertEquals("blocking", blocker2.get(10, SECONDS));
verifyTaskIsBlocked(limitedExecutor, blocker1, cf1);
fork1.get(10, SECONDS);
}
/**
* Test that an async task ({@code executeAsync()}) will block another task from running
* until its {@code CompletableFuture} is completed, when using a {@link WithinThreadExecutor}.
*/
public void testConcurrencyLimitExecuteAsyncWithinThread() throws Exception {
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, new WithinThreadExecutor(), 1);
CompletableFuture<String> blocker1 = new CompletableFuture<>();
CompletableFuture<String> cf1 = new CompletableFuture<>();
// executeAsync() will not block
limitedExecutor.executeAsync(() -> blocker1.thenAccept(cf1::complete));
verifyTaskIsBlocked(limitedExecutor, blocker1, cf1);
}
public void testExecuteAsyncSupplierReturnsNull() throws Exception {
eventuallyEquals(0, executor::getActiveCount);
LimitedExecutor limitedExecutor = new LimitedExecutor(NAME, executor, 1);
limitedExecutor.executeAsync(() -> null);
CompletableFuture<String> cf1 = new CompletableFuture<>();
limitedExecutor.execute(() -> cf1.complete("a"));
cf1.get(10, TimeUnit.SECONDS);
}
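   // Submits a second task through the limited executor, checks it cannot start while the first task is
   // still gated by blocker1, then unblocks both tasks and verifies their results complete in order.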
private void verifyTaskIsBlocked(LimitedExecutor limitedExecutor, CompletableFuture<String> blocker1,
CompletableFuture<String> cf1) throws Exception {
CompletableFuture<String> blocker2 = new CompletableFuture<>();
CompletableFuture<String> cf2 = new CompletableFuture<>();
// execute() may block
Future<?> fork2 = fork(() -> {
limitedExecutor.execute(() -> {
try {
cf2.complete(cf1.getNow("task 2 ran too early") + " " + blocker2.get(10, SECONDS));
} catch (Exception e) {
cf2.completeExceptionally(e);
}
});
});
assertFalse(cf1.isDone());
assertFalse(cf2.isDone());
blocker1.complete("value1");
assertEquals("value1", cf1.get(10, SECONDS));
assertFalse(cf2.isDone());
blocker2.complete("value2");
assertEquals("value1 value2", cf2.get(10, SECONDS));
fork2.get(10, SECONDS);
eventuallyEquals(0, executor::getActiveCount);
}
}
| 6,020
| 35.271084
| 104
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/executors/BlockingTaskAwareExecutorServiceTest.java
|
package org.infinispan.executors;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.concurrent.BlockingRunnable;
import org.infinispan.util.concurrent.BlockingTaskAwareExecutorService;
import org.infinispan.util.concurrent.BlockingTaskAwareExecutorServiceImpl;
import org.testng.annotations.Test;
/**
* Simple executor test
*
* @author Pedro Ruivo
* @since 5.3
*/
@Test(groups = "functional", testName = "executors.BlockingTaskAwareExecutorServiceTest")
public class BlockingTaskAwareExecutorServiceTest extends AbstractInfinispanTest {
private static final AtomicInteger THREAD_ID = new AtomicInteger(0);
public void testSimpleExecution() throws Exception {
BlockingTaskAwareExecutorService executorService = createExecutorService();
try {
final DoSomething doSomething = new DoSomething();
executorService.execute(doSomething);
Thread.sleep(100);
assert !doSomething.isReady();
assert !doSomething.isExecuted();
doSomething.markReady();
executorService.checkForReadyTasks();
assert doSomething.isReady();
eventually(doSomething::isExecuted);
} finally {
executorService.shutdownNow();
}
}
public void testMultipleExecutions() throws Exception {
BlockingTaskAwareExecutorServiceImpl executorService = createExecutorService();
try {
List<DoSomething> tasks = new LinkedList<>();
for (int i = 0; i < 30; ++i) {
tasks.add(new DoSomething());
}
tasks.forEach(executorService::execute);
for (DoSomething doSomething : tasks) {
assert !doSomething.isReady();
assert !doSomething.isExecuted();
}
tasks.forEach(BlockingTaskAwareExecutorServiceTest.DoSomething::markReady);
executorService.checkForReadyTasks();
for (final DoSomething doSomething : tasks) {
eventually(doSomething::isExecuted);
}
} finally {
executorService.shutdownNow();
}
}
private BlockingTaskAwareExecutorServiceImpl createExecutorService() {
final ExecutorService realOne = new ThreadPoolExecutor(1, 2, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
new DummyThreadFactory());
return new BlockingTaskAwareExecutorServiceImpl(realOne, TIME_SERVICE);
}
public static class DummyThreadFactory implements ThreadFactory {
@Override
public Thread newThread(Runnable runnable) {
return new Thread(runnable, "Remote-" + getClass().getSimpleName() + "-" + THREAD_ID.incrementAndGet());
}
}
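   // BlockingRunnable stub: isReady() only returns true once markReady() has been called, and run() merely
   // records that it executed, letting tests observe when the executor actually dispatched the task.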
public static class DoSomething implements BlockingRunnable {
private volatile boolean ready = false;
private volatile boolean executed = false;
@Override
public synchronized final boolean isReady() {
return ready;
}
@Override
public synchronized final void run() {
executed = true;
}
public synchronized final void markReady() {
ready = true;
}
public synchronized final boolean isExecuted() {
return executed;
}
}
}
| 3,578
| 29.589744
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/executors/SemaphoreCompletionServiceTest.java
|
package org.infinispan.executors;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertNotNull;
import static org.testng.AssertJUnit.assertNull;
import static org.testng.AssertJUnit.assertSame;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.util.concurrent.WithinThreadExecutor;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
/**
* Basic tests for {@link SemaphoreCompletionService}
*
* @author Dan Berindei
*/
@Test(groups = "functional", testName = "executors.SemaphoreCompletionServiceTest")
public class SemaphoreCompletionServiceTest extends AbstractInfinispanTest {
private final ExecutorService executor2Threads = Executors.newFixedThreadPool(2, getTestThreadFactory("Test"));
@AfterClass(alwaysRun = true)
public void stopExecutors() {
executor2Threads.shutdownNow();
}
public void testConcurrency1WithinThread() throws Exception {
SemaphoreCompletionService<String> completionService = new SemaphoreCompletionService<>(new WithinThreadExecutor(), 1);
Future<String> future1 = completionService.submit(new DummyTask());
Future<String> future2 = completionService.poll();
assertSame(future1, future2);
assertNotNull(future2);
assertEquals("bla", future2.get());
}
public void testConcurrencyLimit() throws Exception {
SemaphoreCompletionService<String> completionService = new SemaphoreCompletionService<>(executor2Threads, 1);
CountDownLatch latch = new CountDownLatch(1);
Future<String> blockingFuture = completionService.submit(new BlockingTask(latch));
Future<String> dummyFuture = completionService.submit(new DummyTask());
assertNull(completionService.poll(1, SECONDS));
assertFalse(dummyFuture.isDone());
latch.countDown();
assertEquals("bla", blockingFuture.get(10, SECONDS));
assertEquals("bla", dummyFuture.get(10, SECONDS));
}
public void testBackgroundTasks() throws Exception {
SemaphoreCompletionService<String> completionService = new SemaphoreCompletionService<>(executor2Threads, 1);
CountDownLatch latch = new CountDownLatch(1);
Future<String> backgroundInitFuture = completionService.submit(new BackgroundInitTask(completionService));
assertEquals("bla", backgroundInitFuture.get(1, SECONDS));
Future<String> dummyFuture = completionService.submit(new DummyTask());
assertSame(backgroundInitFuture, completionService.poll(1, SECONDS));
assertFalse(dummyFuture.isDone());
Future<String> backgroundEndFuture = completionService.backgroundTaskFinished(new BlockingTask(latch));
assertNull(completionService.poll(1, SECONDS));
assertFalse(dummyFuture.isDone());
latch.countDown();
assertEquals("bla", backgroundEndFuture.get(10, SECONDS));
assertEquals("bla", dummyFuture.get(10, SECONDS));
}
private static class DummyTask implements Callable<String> {
@Override
public String call() throws Exception {
return "bla";
}
}
private static class BlockingTask implements Callable<String> {
private final CountDownLatch latch;
private BlockingTask(CountDownLatch latch) {
this.latch = latch;
}
@Override
public String call() throws Exception {
latch.await(30, SECONDS);
return "bla";
}
}
private static class BackgroundInitTask implements Callable<String> {
private final SemaphoreCompletionService<String> completionService;
private BackgroundInitTask(SemaphoreCompletionService<String> completionService) {
this.completionService = completionService;
}
@Override
public String call() throws Exception {
completionService.continueTaskInBackground();
return "bla";
}
}
}
| 4,195
| 34.863248
| 125
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/executors/ExecutorAllCompletionServiceTest.java
|
package org.infinispan.executors;
import static org.testng.AssertJUnit.assertEquals;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.infinispan.test.AbstractInfinispanTest;
import org.infinispan.test.TestingUtil;
import org.testng.annotations.AfterClass;
import org.testng.annotations.Test;
/**
* @author Radim Vansa <rvansa@redhat.com>
*/
@Test(groups = "functional", testName = "executors.ExecutorAllCompletionServiceTest")
public class ExecutorAllCompletionServiceTest extends AbstractInfinispanTest {
private ExecutorService lastExecutorService;
@AfterClass(alwaysRun = true)
public void stopExecutors() {
if (lastExecutorService != null) {
lastExecutorService.shutdownNow();
}
}
public void testWaitForAll() {
ExecutorAllCompletionService service = createService(1);
long before = System.currentTimeMillis();
service.submit(new WaitRunnable(500), null);
service.submit(new WaitRunnable(500), null);
service.waitUntilAllCompleted();
long after = System.currentTimeMillis();
assertTrue(after - before >= 1000);
assertTrue(service.isAllCompleted());
assertFalse(service.isExceptionThrown());
}
public void testExceptions() {
ExecutorAllCompletionService service = createService(1);
service.submit(new WaitRunnable(1), null);
service.submit(new ExceptionRunnable("second"), null);
service.submit(new WaitRunnable(1), null);
service.submit(new ExceptionRunnable("third"), null);
service.waitUntilAllCompleted();
assertTrue(service.isAllCompleted());
assertTrue(service.isExceptionThrown());
assertEquals("second", findCause(service.getFirstException()).getMessage());
}
public void testParallelWait() throws InterruptedException {
final ExecutorAllCompletionService service = createService(2);
for (int i = 0; i < 300; ++i) {
service.submit(new WaitRunnable(10), null);
}
List<Thread> threads = new ArrayList<>(10);
for (int i = 0; i < 10; ++i) {
Thread t = new Thread(() -> {
service.waitUntilAllCompleted();
assertTrue(service.isAllCompleted());
assertFalse(service.isExceptionThrown());
});
threads.add(t);
t.start();
}
for (Thread t : threads) {
t.join();
}
assertTrue(service.isAllCompleted());
assertFalse(service.isExceptionThrown());
}
public void testParallelException() throws InterruptedException {
final ExecutorAllCompletionService service = createService(2);
for (int i = 0; i < 150; ++i) {
service.submit(new WaitRunnable(10), null);
}
service.submit(new ExceptionRunnable("foobar"), null);
for (int i = 0; i < 150; ++i) {
service.submit(new WaitRunnable(10), null);
}
List<Thread> threads = new ArrayList<>(10);
for (int i = 0; i < 10; ++i) {
Thread t = new Thread(() -> {
service.waitUntilAllCompleted();
assertTrue(service.isAllCompleted());
assertTrue(service.isExceptionThrown());
});
threads.add(t);
t.start();
}
for (Thread t : threads) {
t.join();
}
assertTrue(service.isAllCompleted());
assertTrue(service.isExceptionThrown());
}
private Throwable findCause(ExecutionException e) {
Throwable t = e;
while (t.getCause() != null) t = t.getCause();
return t;
}
private ExecutorAllCompletionService createService(int maxThreads) {
if (lastExecutorService != null) {
lastExecutorService.shutdownNow();
}
lastExecutorService = Executors.newFixedThreadPool(maxThreads, getTestThreadFactory("Worker"));
return new ExecutorAllCompletionService(lastExecutorService);
}
private class WaitRunnable implements Runnable {
private long period;
private WaitRunnable(long period) {
this.period = period;
}
@Override
public void run() {
TestingUtil.sleepThread(period);
}
}
private class ExceptionRunnable implements Runnable {
private final String message;
public ExceptionRunnable(String message) {
this.message = message;
}
@Override
public void run() {
throw new RuntimeException(message);
}
}
}
| 4,653
| 30.876712
| 101
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/TestDataSCI.java
|
package org.infinispan.test;
import org.infinispan.distribution.MagicKey;
import org.infinispan.expiration.impl.ExpirationFunctionalTest;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.protostream.annotations.AutoProtoSchemaBuilder;
import org.infinispan.test.data.Address;
import org.infinispan.test.data.BrokenMarshallingPojo;
import org.infinispan.test.data.CountMarshallingPojo;
import org.infinispan.test.data.DelayedMarshallingPojo;
import org.infinispan.test.data.Key;
import org.infinispan.test.data.Person;
import org.infinispan.test.data.Sex;
import org.infinispan.test.data.Value;
import org.infinispan.xsite.irac.IracCustomConflictTest;
@AutoProtoSchemaBuilder(
// TODO re-evaluate use of Person where Value is more appropriate
includeClasses = {
Address.class,
BrokenMarshallingPojo.class,
DelayedMarshallingPojo.class,
Key.class,
MagicKey.class,
CountMarshallingPojo.class,
Person.class,
Sex.class,
Value.class,
IracCustomConflictTest.MySortedSet.class,
ExpirationFunctionalTest.NoEquals.class,
},
schemaFileName = "test.core.proto",
schemaFilePath = "proto/generated",
schemaPackageName = "org.infinispan.test.core",
service = false
)
public interface TestDataSCI extends SerializationContextInitializer {
TestDataSCI INSTANCE = new TestDataSCIImpl();
}
| 1,490
| 36.275
| 71
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/CacheManagerCallable.java
|
package org.infinispan.test;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* A task that executes operations against a given cache manager.
*
* @author Galder Zamarreño
* @since 5.1
*/
public class CacheManagerCallable {
protected final EmbeddedCacheManager cm;
private final boolean clear;
public CacheManagerCallable(EmbeddedCacheManager cm) {
this(cm, false);
}
public CacheManagerCallable(EmbeddedCacheManager cm, boolean clear) {
this.cm = cm;
this.clear = clear;
}
public void call() throws Exception {
// No-op
}
public final boolean clearBeforeKill() {
return clear;
}
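   // Illustrative usage sketch (not part of the original class): subclasses typically override call() and are
   // handed to a runner such as TestingUtil.withCacheManager(...), which is assumed here to invoke call() and
   // then clean up the manager. The factory call and cache values below are hypothetical.
   //
   //   withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.createCacheManager()) {
   //      @Override
   //      public void call() {
   //         cm.getCache().put("k", "v");
   //      }
   //   });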
}
| 662
| 18.5
| 72
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/MultipleCacheManagersTest.java
|
package org.infinispan.test;
import static java.util.Arrays.asList;
import static org.infinispan.commons.test.TestResourceTracker.getCurrentTestShortName;
import static org.infinispan.test.fwk.TestCacheManagerFactory.createClusteredCacheManager;
import static org.testng.AssertJUnit.assertTrue;
import java.lang.annotation.Annotation;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.BiPredicate;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Stream;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.configuration.cache.StorageType;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.configuration.internal.PrivateGlobalConfigurationBuilder;
import org.infinispan.configuration.parsing.ConfigurationBuilderHolder;
import org.infinispan.container.DataContainer;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.MagicKey;
import org.infinispan.distribution.rehash.XAResourceAdapter;
import org.infinispan.manager.CacheContainer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.remoting.transport.Address;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.test.fwk.InCacheMode;
import org.infinispan.test.fwk.InTransactionMode;
import org.infinispan.test.fwk.TestFrameworkFailure;
import org.infinispan.test.fwk.TestSelector;
import org.infinispan.test.fwk.TransportFlags;
import org.infinispan.transaction.LockingMode;
import org.infinispan.transaction.TransactionMode;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.concurrent.IsolationLevel;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.IMethodInstance;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Factory;
import jakarta.transaction.RollbackException;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
/**
 * Base class for tests that operate on clusters of caches. The way tests extending this class operate is:
 * <pre>
 * 1) creates cache managers before the tests start. The cache managers are only created once
 * 2) after each test method runs, the cache instances are cleared
 * 3) the next test method will run on the same cacheManager instance. This way the test is much faster, as CacheManagers
 * are expensive to create.
* </pre>
 * If, however, you would like your cache managers destroyed after every <i>test method</i> instead of the <i>test
* class</i>, you could set the <tt>cleanup</tt> field to {@link MultipleCacheManagersTest.CleanupPhase#AFTER_METHOD} in
* your test's constructor. E.g.:
* <pre>
* <p/>
 * public class MyTest extends MultipleCacheManagersTest {
* public MyTest() {
* cleanup = CleanupPhase.AFTER_METHOD;
* }
* }
* <p/>
* </pre>
* <p/>
* Note that this will cause {@link #createCacheManagers()} to be called before each method.
*
* @author Mircea.Markus@jboss.com
*/
@TestSelector(filters = {
MultipleCacheManagersTest.CacheModeFilter.class,
MultipleCacheManagersTest.TransactionalModeFilter.class,
MultipleCacheManagersTest.LockingModeFilter.class,
MultipleCacheManagersTest.IsolationLevelFilter.class,
})
public abstract class MultipleCacheManagersTest extends AbstractCacheTest {
protected List<EmbeddedCacheManager> cacheManagers = Collections.synchronizedList(new ArrayList<>());
protected IdentityHashMap<Cache<?, ?>, ReplListener> listeners = new IdentityHashMap<>();
   // the cache mode set in configuration is shared in many tests, therefore we place the field, the
   // fluent setter cacheMode(...) and parameters() in this class.
protected CacheMode cacheMode;
protected Boolean transactional;
protected LockingMode lockingMode;
protected IsolationLevel isolationLevel;
// Disables the triangle algorithm if set to Boolean.FALSE
protected Boolean useTriangle;
protected StorageType storageType;
@BeforeClass(alwaysRun = true)
public void createBeforeClass() throws Throwable {
checkFactoryAnnotation();
if (cleanupAfterTest()) callCreateCacheManagers();
}
private void callCreateCacheManagers() throws Throwable {
try {
log.debug("Creating cache managers");
createCacheManagers();
log.debug("Cache managers created, ready to start the test");
} catch (Throwable th) {
log.error("Error in test setup: ", th);
throw th;
}
}
@BeforeMethod(alwaysRun = true)
public void createBeforeMethod() throws Throwable {
if (cleanupAfterMethod()) callCreateCacheManagers();
}
@AfterClass(alwaysRun = true)
protected void destroy() {
if (cleanupAfterTest()) {
TestingUtil.clearContent(cacheManagers);
TestingUtil.killCacheManagers(cacheManagers);
}
if (cacheManagers != null) {
for (EmbeddedCacheManager cm : cacheManagers) {
String nodeName = SecurityActions.getCacheManagerConfiguration(cm).transport().nodeName();
assertTrue("Invalid node name for test " + getCurrentTestShortName() + ": " + nodeName,
nodeName != null && nodeName.contains(getCurrentTestShortName()));
}
cacheManagers.clear();
}
listeners.clear();
}
@AfterMethod(alwaysRun = true)
protected void clearContent() throws Throwable {
if (cleanupAfterTest()) {
log.debug("*** Test method complete; clearing contents on all caches.");
TestingUtil.clearContent(cacheManagers);
} else {
TestingUtil.clearContent(cacheManagers);
TestingUtil.killCacheManagers(cacheManagers);
TestResourceTracker.cleanUpResources(getTestName());
cacheManagers.clear();
}
}
final protected void registerCacheManager(CacheContainer... cacheContainers) {
for (CacheContainer ecm : cacheContainers) {
this.cacheManagers.add((EmbeddedCacheManager) ecm);
}
}
final protected void registerCacheManager(List<? extends EmbeddedCacheManager> cacheContainers) {
for (CacheContainer ecm : cacheContainers) {
this.cacheManagers.add((EmbeddedCacheManager) ecm);
}
}
/**
* Creates a new cache manager, starts it, and adds it to the list of known cache managers on the current thread.
* Uses a default clustered cache manager global config.
*
* @return the new CacheManager
*/
protected EmbeddedCacheManager addClusterEnabledCacheManager() {
return addClusterEnabledCacheManager(new TransportFlags());
}
/**
* Creates a new cache manager, starts it, and adds it to the list of known
* cache managers on the current thread. Uses a default clustered cache
* manager global config.
*
* @param flags properties that allow transport stack to be tweaked
* @return the new CacheManager
*/
protected EmbeddedCacheManager addClusterEnabledCacheManager(TransportFlags flags) {
EmbeddedCacheManager cm = createClusteredCacheManager(false, defaultGlobalConfigurationBuilder(),
null, flags);
amendCacheManagerBeforeStart(cm);
cacheManagers.add(cm);
cm.start();
return cm;
}
/**
* Allows a test to manipulate a cache manager before it is started. Does nothing by default
*/
protected void amendCacheManagerBeforeStart(EmbeddedCacheManager cm) {
// Do nothing
}
/**
* Creates a new non-transactional cache manager, starts it, and adds it to the list of known cache managers on the
* current thread. Uses a default clustered cache manager global config.
*
* @param defaultConfig default cfg to use
* @return the new CacheManager
*/
protected EmbeddedCacheManager addClusterEnabledCacheManager(ConfigurationBuilder defaultConfig) {
return addClusterEnabledCacheManager(defaultConfig, new TransportFlags());
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(SerializationContextInitializer sci) {
return addClusterEnabledCacheManager(sci, null, new TransportFlags());
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(SerializationContextInitializer sci, ConfigurationBuilder defaultConfig) {
return addClusterEnabledCacheManager(sci, defaultConfig, new TransportFlags());
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(SerializationContextInitializer sci,
ConfigurationBuilder defaultConfig, TransportFlags flags) {
GlobalConfigurationBuilder globalBuilder = defaultGlobalConfigurationBuilder();
if (sci != null) globalBuilder.serialization().addContextInitializer(sci);
return addClusterEnabledCacheManager(globalBuilder, defaultConfig, flags);
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(GlobalConfigurationBuilder globalBuilder, ConfigurationBuilder defaultConfig) {
return addClusterEnabledCacheManager(globalBuilder, defaultConfig, new TransportFlags());
}
/**
* Creates a new optionally transactional cache manager, starts it, and adds it to the list of known cache managers on
* the current thread. Uses a default clustered cache manager global config.
*
* @param builder default cfg to use
* @return the new CacheManager
*/
protected EmbeddedCacheManager addClusterEnabledCacheManager(ConfigurationBuilder builder, TransportFlags flags) {
EmbeddedCacheManager cm = createClusteredCacheManager(false, defaultGlobalConfigurationBuilder(),
builder, flags);
amendCacheManagerBeforeStart(cm);
cacheManagers.add(cm);
cm.start();
return cm;
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(ConfigurationBuilderHolder builderHolder) {
EmbeddedCacheManager cm = createClusteredCacheManager(false, builderHolder);
amendCacheManagerBeforeStart(cm);
cacheManagers.add(cm);
cm.start();
return cm;
}
protected EmbeddedCacheManager addClusterEnabledCacheManager(GlobalConfigurationBuilder globalBuilder,
ConfigurationBuilder builder, TransportFlags flags) {
EmbeddedCacheManager cm = createClusteredCacheManager(false, globalBuilder, builder, flags);
amendCacheManagerBeforeStart(cm);
cacheManagers.add(cm);
cm.start();
return cm;
}
protected GlobalConfigurationBuilder defaultGlobalConfigurationBuilder() {
return GlobalConfigurationBuilder.defaultClusteredBuilder();
}
protected void createCluster(int count) {
for (int i = 0; i < count; i++) addClusterEnabledCacheManager();
}
protected void createCluster(ConfigurationBuilder builder, int count) {
for (int i = 0; i < count; i++) addClusterEnabledCacheManager(builder);
}
protected void createCluster(SerializationContextInitializer sci, ConfigurationBuilder builder, int count) {
for (int i = 0; i < count; i++) addClusterEnabledCacheManager(sci, builder);
}
/**
* Allows multiple configurations to be defined for a cache manager before it is started, using the supplied
 * {@link ConfigurationBuilderHolder}. Holders cannot be shared between nodes, so this method creates a fresh
 * instance per node instead of letting the caller accidentally reuse a single one.
 * <p>
 * This method waits until all nodes are up before returning.
* @param consumer consumer to configure the caches
* @param count how many nodes to bring up
*/
protected void createCluster(Consumer<ConfigurationBuilderHolder> consumer, int count) {
for (int i = 0; i < count; ++i) {
ConfigurationBuilderHolder holder = new ConfigurationBuilderHolder();
holder.getGlobalConfigurationBuilder().clusteredDefault();
consumer.accept(holder);
addClusterEnabledCacheManager(holder);
}
waitForClusterToForm();
}
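   // Hedged usage sketch (illustrative, not from the original source): each node receives its own holder, so
   // per-cache builders can be registered inside the consumer. newConfigurationBuilder(...) is assumed to be the
   // ConfigurationBuilderHolder method for defining a named cache; the cache name and node count are hypothetical.
   //
   //   createCluster(holder -> holder.newConfigurationBuilder("users")
   //         .clustering().cacheMode(CacheMode.DIST_SYNC), 3);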
protected void createCluster(GlobalConfigurationBuilder globalBuilder, ConfigurationBuilder builder, int count) {
for (int i = 0; i < count; i++)
addClusterEnabledCacheManager(new GlobalConfigurationBuilder().read(globalBuilder.build()), builder);
}
protected void defineConfigurationOnAllManagers(String cacheName, ConfigurationBuilder b) {
for (EmbeddedCacheManager cm : cacheManagers) {
cm.defineConfiguration(cacheName, b.build());
}
}
protected <K, V> List<Cache<K, V>> getCaches(String cacheName) {
List<Cache<K, V>> caches = new ArrayList<>();
List<EmbeddedCacheManager> managers = new ArrayList<>(cacheManagers);
for (EmbeddedCacheManager cm : managers) {
Cache<K, V> c;
if (cacheName == null)
c = cm.getCache();
else
c = cm.getCache(cacheName);
caches.add(c);
}
return caches;
}
protected void waitForClusterToForm(String cacheName) {
List<Cache<Object, Object>> caches = getCaches(cacheName);
Cache<Object, Object> cache = caches.get(0);
TestingUtil.blockUntilViewsReceived(30000, caches);
if (cache.getCacheConfiguration().clustering().cacheMode().isClustered()) {
TestingUtil.waitForNoRebalance(caches);
}
}
protected void waitForClusterToForm() {
waitForClusterToForm((String) null);
}
protected void waitForClusterToForm(String... names) {
if (names != null && names.length != 0) {
for (String name : names) {
waitForClusterToForm(name);
}
} else {
waitForClusterToForm();
}
}
protected TransactionManager tm(Cache<?, ?> c) {
return c.getAdvancedCache().getTransactionManager();
}
protected TransactionManager tm(int i, String cacheName) {
return cache(i, cacheName).getAdvancedCache().getTransactionManager();
}
protected TransactionManager tm(int i) {
return cache(i).getAdvancedCache().getTransactionManager();
}
protected Transaction tx(int i) {
try {
return cache(i).getAdvancedCache().getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
}
protected void createClusteredCaches(int numMembersInCluster, String cacheName, ConfigurationBuilder builder) {
createClusteredCaches(numMembersInCluster, cacheName, null, builder);
}
protected void createClusteredCaches(int numMembersInCluster, String cacheName, SerializationContextInitializer sci,
ConfigurationBuilder builder) {
createClusteredCaches(numMembersInCluster, cacheName, sci, builder, new TransportFlags());
}
protected void createClusteredCaches(int numMembersInCluster, String cacheName, ConfigurationBuilder builder, TransportFlags flags) {
createClusteredCaches(numMembersInCluster, null, builder, flags, cacheName);
}
protected void createClusteredCaches(
int numMembersInCluster, String cacheName, SerializationContextInitializer sci, ConfigurationBuilder builder, TransportFlags flags) {
createClusteredCaches(numMembersInCluster, sci, builder, flags, cacheName);
}
protected void createClusteredCaches(int numMembersInCluster,
SerializationContextInitializer sci,
ConfigurationBuilder defaultConfigBuilder) {
createClusteredCaches(numMembersInCluster, sci, defaultConfigBuilder, new TransportFlags());
}
protected void createClusteredCaches(int numMembersInCluster,
SerializationContextInitializer sci,
ConfigurationBuilder configBuilder,
TransportFlags flags, String... cacheNames) {
GlobalConfigurationBuilder globalBuilder = defaultGlobalConfigurationBuilder();
if (sci != null) globalBuilder.serialization().addContextInitializer(sci);
createClusteredCaches(numMembersInCluster, globalBuilder, configBuilder, false, flags, cacheNames);
}
protected void createClusteredCaches(int numMembersInCluster,
GlobalConfigurationBuilder globalConfigurationBuilder,
ConfigurationBuilder defaultConfigBuilder,
boolean serverMode, String... cacheNames) {
createClusteredCaches(numMembersInCluster, globalConfigurationBuilder, defaultConfigBuilder, serverMode,
new TransportFlags(), cacheNames);
}
protected void createClusteredCaches(int numMembersInCluster,
GlobalConfigurationBuilder globalConfigurationBuilder,
ConfigurationBuilder configBuilder,
boolean serverMode, TransportFlags flags,
String... cacheNames) {
for (int i = 0; i < numMembersInCluster; i++) {
EmbeddedCacheManager cm;
GlobalConfigurationBuilder global = new GlobalConfigurationBuilder();
global.read(globalConfigurationBuilder.build());
if (serverMode) {
global.addModule(PrivateGlobalConfigurationBuilder.class).serverMode(true);
global.transport().defaultTransport();
}
ConfigurationBuilder defaultBuilder = null;
if (cacheNames.length == 0 || global.defaultCacheName().isPresent()) {
defaultBuilder = configBuilder;
}
cm = addClusterEnabledCacheManager(global, defaultBuilder, flags);
if (cacheNames.length == 0) {
cm.getCache();
} else {
for (String cacheName : cacheNames) {
// The default cache was already defined
if (!global.defaultCacheName().orElse("").equals(cacheName)) {
cm.defineConfiguration(cacheName, configBuilder.build());
}
}
}
}
waitForClusterToForm(cacheNames);
}
protected void createClusteredCaches(int numMembersInCluster,
ConfigurationBuilder defaultConfigBuilder,
boolean serverMode, String... cacheNames) {
createClusteredCaches(numMembersInCluster, defaultGlobalConfigurationBuilder(),
defaultConfigBuilder, serverMode, cacheNames);
}
protected void createClusteredCaches(int numMembersInCluster,
ConfigurationBuilder defaultConfigBuilder) {
createClusteredCaches(numMembersInCluster, defaultConfigBuilder, false);
}
protected void createClusteredCaches(int numMembersInCluster,
ConfigurationBuilder defaultConfig,
TransportFlags flags) {
for (int i = 0; i < numMembersInCluster; i++) {
EmbeddedCacheManager cm = addClusterEnabledCacheManager(defaultConfig, flags);
cm.getCache();
}
waitForClusterToForm();
}
/**
 * Creates cacheNames.length caches in each CacheManager (numMembersInCluster cache managers in total).
*/
protected void createClusteredCaches(int numMembersInCluster,
ConfigurationBuilder defaultConfigBuilder, String... cacheNames) {
createClusteredCaches(numMembersInCluster, defaultConfigBuilder, new TransportFlags(), cacheNames);
}
protected void createClusteredCaches(int numMembersInCluster, ConfigurationBuilder configBuilder,
TransportFlags transportFlags, String... cacheNames) {
for (int i = 0; i < numMembersInCluster; i++) {
EmbeddedCacheManager cm = addClusterEnabledCacheManager(null, transportFlags);
for (String cacheName : cacheNames) {
cm.defineConfiguration(cacheName, configBuilder.build());
cm.getCache(cacheName);
}
}
waitForClusterToForm(cacheNames);
}
protected ReplListener replListener(Cache<?, ?> cache) {
return listeners.computeIfAbsent(cache, k -> new ReplListener(cache));
}
protected EmbeddedCacheManager[] managers() {
return cacheManagers.toArray(new EmbeddedCacheManager[0]);
}
protected EmbeddedCacheManager manager(int i) {
return cacheManagers.get(i);
}
public EmbeddedCacheManager manager(Address a) {
for (EmbeddedCacheManager cm : cacheManagers) {
if (cm.getAddress().equals(a)) {
return cm;
}
}
throw new IllegalArgumentException(a + " is not a valid cache manager address!");
}
public int managerIndex(Address a) {
for (int i = 0; i < cacheManagers.size(); i++) {
EmbeddedCacheManager cm = cacheManagers.get(i);
if (cm.getAddress().equals(a)) {
return i;
}
}
throw new IllegalArgumentException(a + " is not a valid cache manager address!");
}
protected <K, V> Cache<K, V> cache(int managerIndex, String cacheName) {
return manager(managerIndex).getCache(cacheName);
}
protected void assertClusterSize(String message, int size) {
for (EmbeddedCacheManager cm : cacheManagers) {
assert cm.getMembers() != null && cm.getMembers().size() == size : message;
}
}
protected void removeCacheFromCluster(String cacheName) {
for (EmbeddedCacheManager cm : cacheManagers) {
TestingUtil.killCaches(cm.getCache(cacheName));
}
}
/**
* Returns the default cache from that manager.
*/
protected <A, B> Cache<A, B> cache(int index) {
return manager(index).getCache();
}
protected <K, V> DataContainer<K, V> dataContainer(int index) {
return this.<K, V>advancedCache(index).getDataContainer();
}
/**
 * This is the method you should override when providing a factory method.
*/
public Object[] factory() {
throw new IllegalStateException("Only overridden methods should be called!");
}
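   // Hedged sketch (illustrative, not part of the original class): a subclass can override factory() to run the
   // same test methods against several configurations. The subclass name is hypothetical; each returned instance
   // must be of the subclass itself, as enforced by defaultFactory() below.
   //
   //   @Override
   //   public Object[] factory() {
   //      return new Object[]{
   //            new MyReplTest().cacheMode(CacheMode.DIST_SYNC).transactional(false),
   //            new MyReplTest().cacheMode(CacheMode.REPL_SYNC).transactional(true)
   //      };
   //   }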
@Factory
public Object[] defaultFactory() {
try {
// Ignore any inherited factory() method and only run methods defined in the current class
Method factory = getClass().getMethod("factory");
if (factory.getDeclaringClass() == getClass()) {
if (getClass().getAnnotation(InCacheMode.class) != null ||
getClass().getAnnotation(InTransactionMode.class) != null) {
return new Object[]{new TestFrameworkFailure<>(getClass(), new IllegalStateException(
"Tests with factory() methods ignore @InCacheMode and @InTransactionMode annotations, " +
"please remove them."))};
}
Object[] instances = factory();
for (int i = 0; i < instances.length; i++) {
if (instances[i].getClass() != getClass()) {
instances[i] = new TestFrameworkFailure<>(getClass(), "%s.factory() creates instances of %s",
getClass().getName(), instances[i].getClass().getName());
}
}
return instances;
}
} catch (NoSuchMethodException e) {
throw new IllegalStateException("Every class should have factory method, at least inherited", e);
}
List<Consumer<MultipleCacheManagersTest>[]> allModifiers;
try {
Consumer<MultipleCacheManagersTest>[] cacheModeModifiers =
getModifiers(InCacheMode.class, InCacheMode::value, MultipleCacheManagersTest::cacheMode);
Consumer<MultipleCacheManagersTest>[] transactionModifiers =
getModifiers(InTransactionMode.class, InTransactionMode::value,
(t, m) -> t.transactional(m.isTransactional()));
allModifiers = asList(cacheModeModifiers, transactionModifiers);
} catch (Exception e) {
return new Object[]{new TestFrameworkFailure<>(getClass(), e)};
}
int numTests = allModifiers.stream().mapToInt(m -> m.length).reduce(1, (m1, m2) -> m1 * m2);
Object[] tests = new Object[numTests];
tests[0] = this;
Constructor<? extends MultipleCacheManagersTest> ctor;
try {
ctor = getClass().getConstructor();
} catch (NoSuchMethodException e) {
return new Object[]{new TestFrameworkFailure<>(getClass(), "Missing no-arg constructor in %s", getClass().getName())};
}
for (int i = 1; i < tests.length; ++i) {
try {
tests[i] = ctor.newInstance();
} catch (Exception e) {
return new Object[]{new TestFrameworkFailure<>(getClass(), e)};
}
}
int stride = 1;
for (Consumer<MultipleCacheManagersTest>[] modifiers : allModifiers) {
applyModifiers(tests, modifiers, stride);
stride *= modifiers.length;
}
return tests;
}
private void checkFactoryAnnotation() {
for (Method m : getClass().getMethods()) {
if (m.getAnnotation(Factory.class) != null && m.getDeclaringClass() != MultipleCacheManagersTest.class) {
throw new IllegalStateException("Test " + getClass().getName() +
" extends MultipleCacheManagersTest and declares its own @Factory method: " +
m.getName());
}
}
}
private void applyModifiers(Object[] tests, Consumer<MultipleCacheManagersTest>[] modifiers, int stride) {
for (int i = 0, mi = 0; i < tests.length; i += stride, mi = (mi + 1) % modifiers.length) {
for (int j = 0; j < stride; ++j) {
modifiers[mi].accept((MultipleCacheManagersTest) tests[i + j]);
}
}
}
private <Mode, A extends Annotation> Consumer<MultipleCacheManagersTest>[] getModifiers(Class<A> annotationClass, Function<A, Mode[]> methodRetriever, BiConsumer<MultipleCacheManagersTest, Mode> applier) {
Mode[] classModes = classModes(annotationClass, methodRetriever);
Set<Mode> methodModes = methodModes(annotationClass, methodRetriever);
if (classModes == null && methodModes == null) {
return new Consumer[]{t -> {
}}; // no modifications
}
Set<Mode> allModes = new HashSet<>();
if (classModes != null) {
allModes.addAll(asList(classModes));
}
if (methodModes != null && !allModes.containsAll(methodModes)) {
throw new IllegalStateException(
"Test methods cannot declare cache mode/transaction filters that the test class hasn't declared");
}
// if there are only method-level annotations, add a version without setting mode at all
return allModes.stream()
.map(mode -> (Consumer<MultipleCacheManagersTest>) t -> applier.accept(t, mode))
.toArray(Consumer[]::new);
}
protected <Mode, A extends Annotation> Set<Mode> methodModes(Class<A> annotationClass, Function<A, Mode[]> modeRetriever) {
// the annotation is not inherited
Set<Mode> modes = null;
for (Method m : getClass().getMethods()) {
A annotation = m.getAnnotation(annotationClass);
if (annotation == null) continue;
if (modes == null) {
modes = new HashSet<>();
}
Collections.addAll(modes, modeRetriever.apply(annotation));
}
return modes;
}
protected <Mode, A extends Annotation> Mode[] classModes(Class<A> annotationClass, Function<A, Mode[]> modeRetriever) {
A annotation = getClass().getDeclaredAnnotation(annotationClass);
if (annotation == null) return null;
return modeRetriever.apply(annotation);
}
public MultipleCacheManagersTest cacheMode(CacheMode cacheMode) {
this.cacheMode = cacheMode;
return this;
}
public MultipleCacheManagersTest transactional(boolean transactional) {
this.transactional = transactional;
return this;
}
public MultipleCacheManagersTest lockingMode(LockingMode lockingMode) {
this.lockingMode = lockingMode;
return this;
}
public MultipleCacheManagersTest isolationLevel(IsolationLevel isolationLevel) {
this.isolationLevel = isolationLevel;
return this;
}
public TransactionMode transactionMode() {
return transactional ? TransactionMode.TRANSACTIONAL : TransactionMode.NON_TRANSACTIONAL;
}
public MultipleCacheManagersTest storageType(StorageType storageType) {
this.storageType = storageType;
return this;
}
@Override
protected String parameters() {
return defaultParametersString(parameterNames(), parameterValues());
}
protected String[] parameterNames() {
return new String[]{null, "tx", "locking", "isolation", "triangle", null};
}
protected Object[] parameterValues() {
return new Object[]{cacheMode, transactional, lockingMode, isolationLevel, useTriangle, storageType};
}
@SafeVarargs
protected static <T> T[] concat(T[] a1, T... a2) {
T[] na = Arrays.copyOf(a1, a1.length + a2.length);
System.arraycopy(a2, 0, na, a1.length, a2.length);
return na;
}
/**
* Create the cache managers you need for your test. Note that the cache managers you create *must* be created using
* {@link #addClusterEnabledCacheManager()}
*/
protected abstract void createCacheManagers() throws Throwable;
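   // Hedged sketch (illustrative only): a minimal implementation typically builds a clustered configuration and
   // brings the cluster up through the helpers above. getDefaultClusteredCacheConfig(...) is assumed to be
   // available from the test base classes; the cache mode and node count are arbitrary.
   //
   //   @Override
   //   protected void createCacheManagers() {
   //      ConfigurationBuilder builder = getDefaultClusteredCacheConfig(CacheMode.DIST_SYNC, false);
   //      createClusteredCaches(2, builder);
   //   }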
protected Address address(int cacheIndex) {
return manager(cacheIndex).getAddress();
}
protected <A, B> AdvancedCache<A, B> advancedCache(int i) {
return this.<A, B>cache(i).getAdvancedCache();
}
protected <A, B> AdvancedCache<A, B> advancedCache(int i, String cacheName) {
return this.<A, B>cache(i, cacheName).getAdvancedCache();
}
protected <K, V> List<Cache<K, V>> caches(String name) {
return getCaches(name);
}
protected <K, V> List<Cache<K, V>> caches() {
return caches(null);
}
protected Address address(Cache<?, ?> c) {
return c.getAdvancedCache().getRpcManager().getAddress();
}
protected LockManager lockManager(int i) {
return TestingUtil.extractLockManager(cache(i));
}
protected LockManager lockManager(int i, String cacheName) {
return TestingUtil.extractLockManager(getCache(i, cacheName));
}
protected LocalizedCacheTopology cacheTopology(int i) {
return TestingUtil.extractCacheTopology(cache(i));
}
protected LocalizedCacheTopology cacheTopology(int i, String cacheName) {
return TestingUtil.extractCacheTopology(cache(i, cacheName));
}
public List<EmbeddedCacheManager> getCacheManagers() {
return cacheManagers;
}
/**
* Kills the cache manager with the given index and waits for the new cluster to form.
*/
protected void killMember(int cacheIndex) {
killMember(cacheIndex, null);
}
/**
 * Kills the cache manager with the given index and waits for the new cluster to form, using the caches with the provided name.
*/
protected void killMember(int cacheIndex, String cacheName) {
killMember(cacheIndex, cacheName, true);
}
protected void killMember(int cacheIndex, String cacheName, boolean awaitRehash) {
List<Cache<Object, Object>> caches = caches(cacheName);
caches.remove(cacheIndex);
manager(cacheIndex).stop();
cacheManagers.remove(cacheIndex);
if (awaitRehash && caches.size() > 0) {
TestingUtil.blockUntilViewsReceived(60000, false, caches);
TestingUtil.waitForNoRebalance(caches);
}
}
/**
 * Generates a key whose primary owner is the node with the given index (see {@link MagicKey}).
 * @param nodeIndex the index of the cache that should be the primary data owner of the returned key
*/
protected Object getKeyForCache(int nodeIndex) {
final Cache<Object, Object> cache = cache(nodeIndex);
return getKeyForCache(cache);
}
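   // Hedged usage sketch (illustrative; values are hypothetical): the returned key is expected to map to the
   // given node, which makes ownership-sensitive assertions possible.
   //
   //   Object k = getKeyForCache(0);   // primary owner should be manager(0)
   //   cache(1).put(k, "value");       // write from a non-owner
   //   assertTrue(cacheTopology(0).getDistribution(k).isPrimary());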
protected Object getKeyForCache(int nodeIndex, String cacheName) {
final Cache<Object, Object> cache = cache(nodeIndex, cacheName);
return getKeyForCache(cache);
}
@SuppressWarnings("unchecked")
protected <K> Supplier<K> supplyKeyForCache(int nodeIndex, String cacheName) {
return () -> (K) getKeyForCache(nodeIndex, cacheName);
}
protected MagicKey getKeyForCache(Cache<?, ?> cache) {
return new MagicKey(cache);
}
protected MagicKey getKeyForCache(Cache<?, ?> primary, Cache<?, ?>... backup) {
return new MagicKey(primary, backup);
}
protected void assertNotLocked(final String cacheName, final Object key) {
eventually(() -> {
boolean aNodeIsLocked = false;
for (int i = 0; i < caches(cacheName).size(); i++) {
final boolean isLocked = lockManager(i, cacheName).isLocked(key);
if (isLocked)
log.trace(key + " is locked on cache index " + i + " by " + lockManager(i, cacheName).getOwner(key));
aNodeIsLocked = aNodeIsLocked || isLocked;
}
return !aNodeIsLocked;
});
}
protected void assertNotLocked(final Object key) {
assertNotLocked((String)null, key);
}
protected boolean checkTxCount(int cacheIndex, int localTx, int remoteTx) {
final int localTxCount = getLocalTxCount(cacheIndex);
final int remoteTxCount = getRemoteTxCount(cacheIndex);
log.tracef("Cache index %s, local tx %4s, remote tx %4s \n", cacheIndex, localTxCount, remoteTxCount);
return localTxCount == localTx && remoteTxCount == remoteTx;
}
protected int getRemoteTxCount(int cacheIndex) {
return TestingUtil.getTransactionTable(cache(cacheIndex)).getRemoteTxCount();
}
protected int getLocalTxCount(int cacheIndex) {
return TestingUtil.getTransactionTable(cache(cacheIndex)).getLocalTxCount();
}
protected void assertNotLocked(int cacheIndex, Object key) {
assertEventuallyNotLocked(cache(cacheIndex), key);
}
protected void assertLocked(int cacheIndex, Object key) {
assertLocked(cache(cacheIndex), key);
}
protected boolean checkLocked(int index, Object key) {
return checkLocked(cache(index), key);
}
protected <K, V> Cache<K, V> getLockOwner(Object key) {
return getLockOwner(key, null);
}
protected <K, V> Cache<K, V> getLockOwner(Object key, String cacheName) {
Configuration c = getCache(0, cacheName).getCacheConfiguration();
if (c.clustering().cacheMode().isInvalidation()) {
return getCache(0, cacheName); //for replicated caches only the coordinator acquires lock
} else if (!c.clustering().cacheMode().isClustered()) {
throw new IllegalStateException("This is not a clustered cache!");
} else {
Address address = getCache(0, cacheName).getAdvancedCache().getDistributionManager().getCacheTopology()
.getDistribution(key).primary();
for (Cache<K, V> cache : this.<K, V>caches(cacheName)) {
if (cache.getAdvancedCache().getRpcManager().getTransport().getAddress().equals(address)) {
return cache;
}
}
throw new IllegalStateException();
}
}
protected void assertKeyLockedCorrectly(Object key) {
assertKeyLockedCorrectly(key, null);
}
protected void assertKeyLockedCorrectly(Object key, String cacheName) {
final Cache<?, ?> lockOwner = getLockOwner(key, cacheName);
for (Cache<?, ?> c : caches(cacheName)) {
if (c != lockOwner) {
assertNotLocked(c, key);
} else {
assertLocked(c, key);
}
}
}
private <K, V> Cache<K, V> getCache(int index, String name) {
return name == null ? this.cache(index) : this.cache(index, name);
}
protected void forceTwoPhase(int cacheIndex) throws SystemException, RollbackException {
TransactionManager tm = tm(cacheIndex);
Transaction tx = tm.getTransaction();
tx.enlistResource(new XAResourceAdapter());
}
protected void assertNoTransactions() {
assertNoTransactions(null);
}
protected void assertNoTransactions(final String cacheName) {
eventually("There are pending transactions!", () -> {
for (Cache<?, ?> cache : caches(cacheName)) {
final TransactionTable transactionTable = TestingUtil.extractComponent(cache, TransactionTable.class);
int localTxCount = transactionTable.getLocalTxCount();
int remoteTxCount = transactionTable.getRemoteTxCount();
if (localTxCount != 0 || remoteTxCount != 0) {
log.tracef("Local tx=%s, remote tx=%s, for cache %s ", transactionTable.getLocalGlobalTransaction(),
transactionTable.getRemoteGlobalTransaction(), address(cache));
return false;
}
}
return true;
});
}
protected TransactionTable transactionTable(int cacheIndex) {
return TestingUtil.extractComponent(cache(cacheIndex), TransactionTable.class);
}
protected void assertEventuallyEquals(
final int cacheIndex, final Object key, final Object value) {
eventually(() -> value == null
? null == cache(cacheIndex).get(key)
: value.equals(cache(cacheIndex).get(key)));
}
public MultipleCacheManagersTest useTriangle(boolean useTriangle) {
this.useTriangle = useTriangle;
return this;
}
protected abstract static class AnnotationFilter<A extends Annotation, AM, CM> {
private final Class<A> annotationClazz;
private final Function<A, AM[]> modesRetriever;
private final BiPredicate<AM, CM> modeChecker;
protected AnnotationFilter(Class<A> annotationClazz, Function<A, AM[]> modesRetriever, BiPredicate<AM, CM> modeChecker) {
this.annotationClazz = annotationClazz;
this.modesRetriever = modesRetriever;
this.modeChecker = modeChecker;
}
public boolean test(CM mode, IMethodInstance method) {
// If both method and class have the annotation, method annotation has priority.
A methodAnnotation = method.getMethod().getConstructorOrMethod().getMethod().getAnnotation(annotationClazz);
if (methodAnnotation != null) {
// If a method-level annotation contains current cache mode, run it, otherwise ignore that
return Stream.of(modesRetriever.apply(methodAnnotation)).anyMatch(m -> modeChecker.test(m, mode));
} else {
// If there is no method-level annotation, always run it
return true;
}
}
}
public static class CacheModeFilter extends AnnotationFilter<InCacheMode, CacheMode, CacheMode> implements Predicate<IMethodInstance> {
private final String cacheModeString = System.getProperty("test.infinispan.cacheMode");
public CacheModeFilter() {
super(InCacheMode.class, InCacheMode::value, (m1, m2) -> m1 == m2);
}
@Override
public boolean test(IMethodInstance method) {
CacheMode cacheMode = ((MultipleCacheManagersTest) method.getInstance()).cacheMode;
if (cacheModeString != null && cacheMode != null && !cacheMode.friendlyCacheModeString().equalsIgnoreCase(cacheModeString)) {
return false;
}
return test(cacheMode, method);
}
}
public static class TransactionalModeFilter extends AnnotationFilter<InTransactionMode, TransactionMode, Boolean> implements Predicate<IMethodInstance> {
private final String txModeString = System.getProperty("test.infinispan.transactional");
public TransactionalModeFilter() {
super(InTransactionMode.class, InTransactionMode::value, (m, b) -> b == Boolean.valueOf(m.isTransactional()));
}
@Override
public boolean test(IMethodInstance method) {
Boolean transactional = ((MultipleCacheManagersTest) method.getInstance()).transactional;
if (txModeString != null && transactional != null && !transactional.toString().equalsIgnoreCase(txModeString)) {
return false;
}
return test(transactional, method);
}
}
protected static abstract class FilterByProperty<T> implements Predicate<IMethodInstance> {
private final String property;
// this could be done through abstract method but this way is more concise
private final Function<MultipleCacheManagersTest, T> getMode;
public FilterByProperty(String property, Function<MultipleCacheManagersTest, T> getMode) {
this.property = System.getProperty(property);
this.getMode = getMode;
}
@Override
public boolean test(IMethodInstance method) {
if (property == null) return true;
T mode = getMode.apply((MultipleCacheManagersTest) method.getInstance());
return mode == null || mode.toString().equalsIgnoreCase(property);
}
}
public static class LockingModeFilter extends FilterByProperty<LockingMode> {
public LockingModeFilter() {
super("test.infinispan.lockingMode", test -> test.lockingMode);
}
}
public static class IsolationLevelFilter extends FilterByProperty<IsolationLevel> {
public IsolationLevelFilter() {
super("test.infinispan.isolationLevel", test -> test.isolationLevel);
}
}
}
| 42,853
| 39.891221
| 208
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/ReplListener.java
|
package org.infinispan.test;
import static org.infinispan.test.TestingUtil.extractInterceptorChain;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import net.jcip.annotations.GuardedBy;
import org.infinispan.Cache;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.write.WriteCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.context.impl.TxInvocationContext;
import org.infinispan.interceptors.DDAsyncInterceptor;
import org.infinispan.remoting.transport.Address;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* A listener that listens for replication events on a cache it is watching. Typical usage: <code> ReplListener r =
 * attachReplicationListener(cache); r.expect(RemoveCommand.class); // ... r.waitForRpc(); </code>
*/
public class ReplListener {
private static final Log log = LogFactory.getLog(ReplListener.class);
private final Cache<?, ?> cache;
private final Lock lock = new ReentrantLock();
private final Condition newCommandCondition = lock.newCondition();
@GuardedBy("lock")
private final List<Predicate<VisitableCommand>> expectedCommands = new ArrayList<>();
@GuardedBy("lock")
private final Queue<VisitableCommand> loggedCommands = new ArrayDeque<>();
@GuardedBy("lock")
private boolean watchLocal;
/**
* This listener attaches itself to a cache and when {@link #expect(Class[])} is invoked, will start checking for
* invocations of the command on the cache, waiting for all expected commands to be received in {@link
* #waitForRpc()}.
*
* @param cache cache on which to attach listener
*/
public ReplListener(Cache<?, ?> cache) {
this(cache, false);
}
/**
* As {@link #ReplListener(org.infinispan.Cache)} except that you can optionally configure whether command recording
* is eager (false by default).
* <p/>
* If <tt>recordCommandsEagerly</tt> is true, then commands are recorded from the moment the listener is attached to
* the cache, even before {@link #expect(Class[])} is invoked. As such, when {@link #expect(Class[])} is called, the
* list of commands to wait for will take into account commands already seen thanks to eager recording.
*
* @param cache cache on which to attach listener
* @param recordCommandsEagerly whether to record commands eagerly
*/
public ReplListener(Cache<?, ?> cache, boolean recordCommandsEagerly) {
this(cache, recordCommandsEagerly, false);
}
/**
* Same as {@link #ReplListener(org.infinispan.Cache, boolean)} except that this constructor allows you to set the
* watchLocal parameter. If true, even local events are recorded (not just ones that originate remotely).
*
* @param cache cache on which to attach listener
* @param recordCommandsEagerly whether to record commands eagerly
* @param watchLocal if true, local events are watched for as well
*/
public ReplListener(Cache<?, ?> cache, boolean recordCommandsEagerly, boolean watchLocal) {
this.cache = cache;
this.watchLocal = watchLocal;
extractInterceptorChain(cache).addInterceptor(new ReplListenerInterceptor(), 1);
}
/**
* Expects any commands. The moment a single command is detected, the {@link #waitForRpc()} command will be
* unblocked.
*/
public void expectAny() {
expect(c -> true);
}
/**
* Expects a specific set of commands, within transactional scope (i.e., as a payload to a PrepareCommand). If the
* cache mode is synchronous, a CommitCommand is expected as well.
*
* @param commands commands to expect (not counting transaction boundary commands like PrepareCommand and
* CommitCommand)
*/
@SuppressWarnings("unchecked")
public void expectWithTx(Class<? extends VisitableCommand>... commands) {
List<Class<? extends VisitableCommand>> cmdsToExpect = new ArrayList<>();
cmdsToExpect.add(PrepareCommand.class);
if (commands != null) cmdsToExpect.addAll(Arrays.asList(commands));
      //this is because for async replication we have a 1PC transaction
if (cache.getCacheConfiguration().clustering().cacheMode().isSynchronous()) cmdsToExpect.add(CommitCommand.class);
expect(cmdsToExpect.toArray(new Class[cmdsToExpect.size()]));
}
/**
* Expects any commands, within transactional scope (i.e., as a payload to a PrepareCommand). If the cache mode is
* synchronous, a CommitCommand is expected as well.
*/
@SuppressWarnings("unchecked")
public void expectAnyWithTx() {
List<Class<? extends VisitableCommand>> cmdsToExpect = new ArrayList<Class<? extends VisitableCommand>>(2);
cmdsToExpect.add(PrepareCommand.class);
      //this is because for async replication we have a 1PC transaction
if (cache.getCacheConfiguration().clustering().cacheMode().isSynchronous()) cmdsToExpect.add(CommitCommand.class);
expect(cmdsToExpect.toArray(new Class[cmdsToExpect.size()]));
}
/**
* Expects a specific set of commands. {@link #waitForRpc()} will block until all of these commands are detected.
*
* @param expectedCommands commands to expect
*/
public void expect(Class<? extends VisitableCommand>... expectedCommands) {
Function<Class<? extends VisitableCommand>, Predicate<VisitableCommand>> predicateGenerator = clazz -> clazz::isInstance;
expect(Stream.of(expectedCommands).map(predicateGenerator).collect(Collectors.toList()));
}
public void expect(Class<? extends VisitableCommand> expectedCommand) {
expect(Collections.singleton(expectedCommand::isInstance));
}
public void expect(Predicate<VisitableCommand> predicate) {
expect(Collections.singleton(predicate));
}
public void expect(Predicate<VisitableCommand>... predicates) {
expect(Arrays.asList(predicates));
}
public void expect(Collection<Predicate<VisitableCommand>> predicates) {
lock.lock();
try {
this.expectedCommands.addAll(predicates);
} finally {
lock.unlock();
}
}
private void debugf(String format, Object... params) {
log.debugf("[" + cache.getCacheManager().getAddress() + "] " + format, params);
}
/**
 * Blocks for a predefined amount of time (30 seconds) until commands defined in any of the expect*() methods have
* been detected. If the commands have not been detected by this time, an exception is thrown.
*/
public void waitForRpc() {
waitForRpc(30, TimeUnit.SECONDS);
}
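   // Hedged usage sketch (illustrative; the command class and cache references are placeholders): expectations
   // are registered first, the replicated operation is performed on another node, and waitForRpc() then blocks
   // until the command is observed here or the timeout expires.
   //
   //   ReplListener listener = new ReplListener(backupCache);
   //   listener.expect(PutKeyValueCommand.class);
   //   primaryCache.put("k", "v");
   //   listener.waitForRpc();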
/**
* The same as {@link #waitForRpc()} except that you are allowed to specify the max wait time.
*/
public void waitForRpc(long time, TimeUnit unit) {
assertFalse("there are no replication expectations; please use ReplListener.expect() before calling this method",
expectedCommands.isEmpty());
lock.lock();
try {
long remainingNanos = unit.toNanos(time);
while (true) {
debugf("Waiting for %d command(s)", expectedCommands.size());
for (Iterator<VisitableCommand> itCommand = loggedCommands.iterator(); itCommand.hasNext(); ) {
VisitableCommand command = itCommand.next();
for (Iterator<Predicate<VisitableCommand>> itExpectation = expectedCommands.iterator();
itExpectation.hasNext(); ) {
Predicate<VisitableCommand> expectation = itExpectation.next();
if (expectation.test(command)) {
debugf("Matched command %s", command);
itCommand.remove();
itExpectation.remove();
break;
}
}
}
if (expectedCommands.isEmpty()) {
newCommandCondition.signalAll();
}
if (expectedCommands.isEmpty())
break;
remainingNanos = newCommandCondition.awaitNanos(remainingNanos);
Address address = cache.getCacheManager().getAddress();
assertTrue("Waiting for more than " + time + " " + unit +
" and some commands did not replicate on cache [" + address + "]",
remainingNanos > 0);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new TestException("Interrupted", e);
} finally {
lock.unlock();
}
}
public void assertNoRpc() {
debugf("Expecting no commands");
for (VisitableCommand command : loggedCommands) {
for (Predicate<VisitableCommand> expectation : expectedCommands) {
assertFalse("Shouldn't have matched command " + command, expectation.test(command));
}
}
}
public Cache<?, ?> getCache() {
return cache;
}
public void resetEager() {
lock.lock();
try {
loggedCommands.clear();
} finally {
lock.unlock();
}
}
public void reconfigureListener(boolean watchLocal) {
lock.lock();
try {
this.watchLocal = watchLocal;
} finally {
lock.unlock();
}
}
private boolean isPrimaryOwner(VisitableCommand cmd) {
if (cmd instanceof DataCommand) {
return cache.getAdvancedCache().getDistributionManager().getCacheTopology()
.getDistribution(((DataCommand) cmd).getKey()).isPrimary();
} else {
return true;
}
}
protected class ReplListenerInterceptor extends DDAsyncInterceptor {
@Override
protected Object handleDefault(InvocationContext ctx, VisitableCommand cmd) throws Throwable {
if (!ctx.isOriginLocal() || (watchLocal && isPrimaryOwner(cmd))) {
debugf("Delaying command %s", cmd);
TestingUtil.sleepRandom(10);
}
// pass up chain
return invokeNextAndFinally(ctx, cmd, (rCtx, rCommand, rv, throwable) -> {
            //make sure we mark this command as received even in the case of exceptions (e.g. timeouts)
if (!ctx.isOriginLocal() || (watchLocal && isPrimaryOwner(cmd))) {
logCommand(cmd);
} else {
debugf("Not logging command (watchLocal=%b) %s", watchLocal, cmd);
}
});
}
@Override
public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand cmd) throws Throwable {
// first pass up chain
return invokeNextAndFinally(ctx, cmd, (rCtx, rCommand, rv, throwable) -> {
if (!ctx.isOriginLocal() || watchLocal) {
logCommand(cmd);
for (WriteCommand mod : cmd.getModifications()) logCommand(mod);
}
});
}
private void logCommand(VisitableCommand cmd) {
lock.lock();
try {
debugf("ReplListener saw command %s", cmd);
loggedCommands.add(cmd);
newCommandCondition.signalAll();
} finally {
lock.unlock();
}
}
}
}
| 11,909
| 38.04918
| 127
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/FeaturesListener.java
|
package org.infinispan.test;
import java.lang.reflect.Field;
import java.util.List;
import org.infinispan.commons.util.Features;
import org.infinispan.test.fwk.NamedTestMethod;
import org.testng.IMethodInstance;
import org.testng.IMethodInterceptor;
import org.testng.ITestClass;
import org.testng.ITestContext;
import org.testng.ITestNGMethod;
import org.testng.internal.BaseTestMethod;
public class FeaturesListener implements IMethodInterceptor {
@Override
public List<IMethodInstance> intercept(List<IMethodInstance> methods, ITestContext context) {
Object instance = methods.get(0).getMethod().getInstance();
Features features = new Features(instance.getClass().getClassLoader());
AbstractInfinispanTest.FeatureCondition featureCondition = instance.getClass().getAnnotation(AbstractInfinispanTest.FeatureCondition.class);
if (featureCondition != null && !features.isAvailable(featureCondition.feature())) {
for (IMethodInstance methodInstance : methods) {
methodInstance.getMethod().setMissingGroup(featureCondition.feature() + " is disabled.");
}
// the annotation is based on the class
BaseTestMethod baseTestMethod = getBaseMethod(methods.get(0));
clearBeforeAfterClassMethods(baseTestMethod.getTestClass());
}
return methods;
}
private void clearBeforeAfterClassMethods(ITestClass testClass) {
Class<?> superclass = testClass.getClass().getSuperclass();
try {
         // normally the cache managers are started in @BeforeClass methods
Field field = getField(superclass, "m_beforeClassMethods");
field.set(testClass, new ITestNGMethod[0]);
         // thread tracking for the tests that ran happens in @AfterClass methods, so clear those as well
field = getField(superclass, "m_afterClassMethods");
field.set(testClass, new ITestNGMethod[0]);
} catch (IllegalAccessException e) {
throw new IllegalStateException(e);
}
}
private BaseTestMethod getBaseMethod(IMethodInstance methodInstance) {
ITestNGMethod testNGMethod = methodInstance.getMethod();
if (testNGMethod instanceof NamedTestMethod) {
return getObject(testNGMethod, "method");
} else if (testNGMethod instanceof BaseTestMethod) {
return (BaseTestMethod) testNGMethod;
} else {
throw new IllegalStateException("Method is not a BaseTestMethod");
}
}
private Field getField(Class<?> clazz, String fieldName) {
try {
Field field = clazz.getDeclaredField(fieldName);
field.setAccessible(true);
return field;
} catch (NoSuchFieldException e) {
throw new IllegalStateException(e);
}
}
private <O> O getObject(Object instance, String fieldName) {
try {
Field field = getField(instance.getClass(), fieldName);
return (O) field.get(instance);
} catch (IllegalAccessException e) {
throw new IllegalStateException(e);
}
}
}
| 2,980
| 37.217949
| 146
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/TopologyChangeListener.java
|
package org.infinispan.test;
import static org.testng.Assert.assertTrue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachelistener.annotation.TopologyChanged;
import org.infinispan.notifications.cachelistener.event.TopologyChangedEvent;
@Listener(observation = Listener.Observation.POST)
public class TopologyChangeListener {
private final CountDownLatch latch = new CountDownLatch(1);
public static TopologyChangeListener install(Cache cache) {
TopologyChangeListener listener = new TopologyChangeListener();
cache.addListener(listener);
return listener;
}
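   // Hedged usage sketch (hypothetical cache variable): install the listener before triggering the
   // membership change, then block until the POST topology-changed event fires.
   //
   //    TopologyChangeListener listener = TopologyChangeListener.install(cache);
   //    // ... start or stop another cache manager ...
   //    listener.await();   // fails the test if no topology change is seen within 10 seconds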
@TopologyChanged
public void onTopologyChange(TopologyChangedEvent event) {
latch.countDown();
}
public void await() throws InterruptedException {
await(10, TimeUnit.SECONDS);
}
public void await(long time, TimeUnit unit) throws InterruptedException {
      assertTrue(latch.await(time, unit), "Topology change not seen after " + time + " " + unit);
}
}
| 1,125
| 30.277778
| 93
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/SecureSingleCacheManagerTest.java
|
package org.infinispan.test;
import javax.security.auth.Subject;
import org.infinispan.manager.EmbeddedCacheManager;
public abstract class SecureSingleCacheManagerTest extends SingleCacheManagerTest {
static final Subject ADMIN = TestingUtil.makeSubject("admin");
@Override
protected EmbeddedCacheManager createCacheManager() throws Exception {
return null;
}
}
| 385
| 23.125
| 83
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/RunningComponentRef.java
|
package org.infinispan.test;
import org.infinispan.factories.impl.ComponentRef;
/**
* Always-started {@link ComponentRef} implementation.
*
* @since 9.4
*/
public class RunningComponentRef<T> implements ComponentRef<T> {
private final String componentName;
private final Class<?> componentType;
private final Object componentInstance;
public RunningComponentRef(String componentName, Class<?> componentType, T componentInstance) {
this.componentName = componentName;
this.componentType = componentType;
this.componentInstance = componentInstance;
}
@Override
public T running() {
return (T) componentInstance;
}
@Override
public T wired() {
return (T) componentInstance;
}
@Override
public boolean isRunning() {
return true;
}
@Override
public boolean isWired() {
return true;
}
@Override
public boolean isAlias() {
return false;
}
@Override
public String getName() {
return componentName != null ? componentName : componentType.getName();
}
}
| 1,080
| 20.196078
| 98
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/SingleCacheManagerTest.java
|
package org.infinispan.test;
import jakarta.transaction.SystemException;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.test.fwk.TestCacheManagerFactory;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.concurrent.locks.LockManager;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.BeforeMethod;
/**
 * Base class for tests that operate on a single (most likely local) cache instance. This operates similarly to {@link
 * org.infinispan.test.MultipleCacheManagersTest}, but with only one CacheManager.
*
* @author Mircea.Markus@jboss.com
* @see org.infinispan.test.MultipleCacheManagersTest
*/
public abstract class SingleCacheManagerTest extends AbstractCacheTest {
protected EmbeddedCacheManager cacheManager;
protected Cache<Object, Object> cache;
protected void setup() throws Exception {
cacheManager = createCacheManager();
if (cache == null && SecurityActions.getCacheManagerConfiguration(cacheManager).defaultCacheName().isPresent()) {
cache = cacheManager.getCache();
}
}
protected void teardown() {
TestingUtil.clearContent(cacheManager);
TestingUtil.killCacheManagers(cacheManager);
cache = null;
cacheManager = null;
}
protected void clearCacheManager() {
TestingUtil.clearContent(cacheManager);
}
@BeforeClass(alwaysRun = true)
protected void createBeforeClass() throws Exception {
try {
if (cleanupAfterTest()) setup();
         else assert cleanupAfterMethod() : "you must either clean up after the test or after each method";
} catch (Exception e) {
log.error("Unexpected!", e);
throw e;
}
}
@BeforeMethod(alwaysRun = true)
protected void createBeforeMethod() throws Exception {
try {
if (cleanupAfterMethod()) setup();
         else assert cleanupAfterTest() : "you must either clean up after the test or after each method";
} catch (Exception e) {
log.error("Unexpected!", e);
throw e;
}
}
@AfterClass(alwaysRun=true)
protected void destroyAfterClass() {
try {
if (cleanupAfterTest()) teardown();
} catch (Exception e) {
log.error("Unexpected!", e);
}
}
@AfterMethod(alwaysRun=true)
protected void destroyAfterMethod() {
if (cleanupAfterMethod()) teardown();
}
@AfterMethod(alwaysRun=true)
protected void clearContent() {
if (cleanupAfterTest()) clearCacheManager();
}
protected ConfigurationBuilder getDefaultStandaloneCacheConfig(boolean transactional) {
return TestCacheManagerFactory.getDefaultCacheConfiguration(transactional);
}
protected TransactionManager tm() {
return cache.getAdvancedCache().getTransactionManager();
}
protected Transaction tx() {
try {
return cache.getAdvancedCache().getTransactionManager().getTransaction();
} catch (SystemException e) {
throw new RuntimeException(e);
}
}
protected LockManager lockManager(String cacheName) {
return TestingUtil.extractLockManager(cacheManager.getCache(cacheName));
}
protected LockManager lockManager() {
return TestingUtil.extractLockManager(cache);
}
protected abstract EmbeddedCacheManager createCacheManager() throws Exception;
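   // Hedged subclass sketch (hypothetical test class; the factory call is an assumption about the
   // test framework) showing the minimal contract a concrete test fulfils:
   //
   //    public class MyLocalCacheTest extends SingleCacheManagerTest {
   //       @Override
   //       protected EmbeddedCacheManager createCacheManager() throws Exception {
   //          return TestCacheManagerFactory.createCacheManager(getDefaultStandaloneCacheConfig(false));
   //       }
   //    }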
@SuppressWarnings("unchecked")
protected <K,V> Cache<K, V> cache() {
return (Cache<K, V>)cache;
}
protected <K,V> Cache<K, V> cache(String name) {
return cacheManager.getCache(name);
}
protected void assertNoTransactions() {
assertNoTransactions(cache);
}
protected void assertNoTransactions(Cache<?, ?> cache) {
eventually(() -> {
int localTxCount = TestingUtil.extractComponent(cache, TransactionTable.class).getLocalTxCount();
if (localTxCount != 0) {
log.tracef("Local tx=%s", localTxCount);
return false;
}
return true;
});
}
}
| 4,334
| 29.744681
| 119
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/AbstractInfinispanTest.java
|
package org.infinispan.test;
import static org.testng.AssertJUnit.fail;
import java.lang.invoke.MethodHandles;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.LockSupport;
import java.util.function.Supplier;
import java.util.stream.Stream;
import jakarta.transaction.TransactionManager;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.commons.api.BasicCacheContainer;
import org.infinispan.commons.test.ExceptionRunnable;
import org.infinispan.commons.test.TestNGLongTestsHook;
import org.infinispan.commons.test.TestResourceTracker;
import org.infinispan.commons.time.TimeService;
import org.infinispan.functional.FunctionalMap;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.partitionhandling.BasePartitionHandlingTest;
import org.infinispan.remoting.transport.impl.RequestRepository;
import org.infinispan.test.fwk.ChainMethodInterceptor;
import org.infinispan.test.fwk.FakeTestClass;
import org.infinispan.test.fwk.NamedTestMethod;
import org.infinispan.test.fwk.TestSelector;
import org.infinispan.util.EmbeddedTimeService;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.jgroups.stack.Protocol;
import org.testng.IMethodInstance;
import org.testng.IMethodInterceptor;
import org.testng.ITestContext;
import org.testng.TestNGException;
import org.testng.annotations.AfterClass;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Listeners;
import org.testng.internal.MethodInstance;
/**
* AbstractInfinispanTest is a superclass of all Infinispan tests.
*
* @author Vladimir Blagojevic
* @author Mircea.Markus@jboss.com
* @since 4.0
*/
@Listeners({ChainMethodInterceptor.class, TestNGLongTestsHook.class, FeaturesListener.class})
@TestSelector(interceptors = AbstractInfinispanTest.OrderByInstance.class)
public abstract class AbstractInfinispanTest {
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.TYPE})
public @interface FeatureCondition {
String feature();
}
protected interface Condition {
boolean isSatisfied() throws Exception;
}
protected static final Log log = LogFactory.getLog(MethodHandles.lookup().lookupClass());
private final ThreadFactory defaultThreadFactory = getTestThreadFactory("ForkThread");
private final ThreadPoolExecutor testExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
60L, TimeUnit.SECONDS,
new SynchronousQueue<>(),
defaultThreadFactory);
public static final TimeService TIME_SERVICE = new EmbeddedTimeService();
public static class OrderByInstance implements IMethodInterceptor {
@Override
public List<IMethodInstance> intercept(List<IMethodInstance> methods, ITestContext context) {
Map<Object, List<IMethodInstance>> methodsByInstance = new IdentityHashMap<>();
Map<String, Object> instancesByName = new HashMap<>();
for (IMethodInstance method : methods) {
methodsByInstance.computeIfAbsent(method.getInstance(), k -> new ArrayList<>()).add(method);
}
List<IMethodInstance> newOrder = new ArrayList<>(methods.size());
for (Map.Entry<Object, List<IMethodInstance>> instanceAndMethods : methodsByInstance.entrySet()) {
Object instance = instanceAndMethods.getKey();
if (instance instanceof AbstractInfinispanTest) {
String instanceName = ((AbstractInfinispanTest) instance).getTestName();
Object otherInstance = instancesByName.putIfAbsent(instanceName, instance);
if (otherInstance != null) {
String message = String.format("Duplicate test name: %s, classes %s and %s", instanceName,
instance.getClass().getName(), otherInstance.getClass().getName());
MethodInstance methodInstance =
FakeTestClass.newFailureMethodInstance(new TestNGException(message), context.getCurrentXmlTest(),
context, instance);
newOrder.add(methodInstance);
}
String parameters = ((AbstractInfinispanTest) instance).parameters();
if (parameters != null) {
for (IMethodInstance method : instanceAndMethods.getValue()) {
                  // TestNG calls intercept twice (bug?) so this prevents adding the parameters a second time
if (method.getMethod() instanceof NamedTestMethod) {
newOrder.add(method);
} else {
newOrder.add(new MethodInstance(new NamedTestMethod(method.getMethod(), method.getMethod().getMethodName() + parameters)));
}
}
continue;
}
}
newOrder.addAll(instanceAndMethods.getValue());
}
return newOrder;
}
}
public static String defaultParametersString(String[] names, Object[] params) {
if (names == null || params == null) {
return null;
}
assert names.length == params.length;
boolean[] last = new boolean[params.length];
boolean none = true;
for (int i = params.length - 1; i >= 0; --i) {
last[i] = none;
none &= params[i] == null;
}
if (none) {
return null;
}
StringBuilder sb = new StringBuilder().append('[');
for (int i = 0; i < params.length; ++i) {
if (params[i] != null) {
if (names[i] != null) {
sb.append(names[i]).append('=');
}
sb.append(params[i]);
if (!last[i]) sb.append(", ");
}
}
return sb.append(']').toString();
}
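   // Worked example (values are illustrative): with names {"tx", "mode"} and params {true, "DIST"}
   // this renders "[tx=true, mode=DIST]"; if every param is null it returns null so the test name
   // is left unchanged.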
protected String parameters() {
return null;
}
@BeforeClass(alwaysRun = true)
protected void testClassStarted(ITestContext context) {
TestResourceTracker.testStarted(getTestName());
}
@AfterClass(alwaysRun = true)
protected void testClassFinished(ITestContext context) {
killSpawnedThreads();
nullOutFields();
TestResourceTracker.testFinished(getTestName());
}
public String getTestName() {
String className = getClass().getName();
String parameters = parameters();
return parameters == null ? className : className + parameters;
}
protected void killSpawnedThreads() {
List<Runnable> runnables = testExecutor.shutdownNow();
if (!runnables.isEmpty()) {
log.errorf("There were runnables %s left uncompleted in test %s", runnables, getClass().getSimpleName());
}
}
@AfterMethod
protected final void checkThreads() {
int activeTasks = testExecutor.getActiveCount();
if (activeTasks != 0) {
log.errorf("There were %d active tasks found in the test executor service for class %s", activeTasks,
getClass().getSimpleName());
}
}
protected <T> void eventuallyEquals(T expected, Supplier<T> supplier) {
eventually(() -> "expected:<" + expected + ">, got:<" + supplier.get() + ">",
() -> Objects.equals(expected, supplier.get()));
}
protected static <T> void eventuallyEquals(String message, T expected, Supplier<T> supplier) {
eventually(() -> message + " expected:<" + expected + ">, got:<" + supplier.get() + ">",
() -> Objects.equals(expected, supplier.get()));
}
protected static void eventually(Supplier<String> messageSupplier, Condition condition) {
eventually(messageSupplier, condition, 30, TimeUnit.SECONDS);
}
protected static void eventually(Supplier<String> messageSupplier, Condition condition, long timeout,
TimeUnit timeUnit) {
try {
long timeoutNanos = timeUnit.toNanos(timeout);
// We want the sleep time to increase in arithmetic progression
// 30 loops with the default timeout of 30 seconds means the initial wait is ~ 65 millis
int loops = 30;
int progressionSum = loops * (loops + 1) / 2;
long initialSleepNanos = timeoutNanos / progressionSum;
long sleepNanos = initialSleepNanos;
long expectedEndTime = System.nanoTime() + timeoutNanos;
while (expectedEndTime - System.nanoTime() > 0) {
if (condition.isSatisfied())
return;
LockSupport.parkNanos(sleepNanos);
sleepNanos += initialSleepNanos;
}
if (!condition.isSatisfied()) {
fail(messageSupplier.get());
}
} catch (Exception e) {
throw new RuntimeException("Unexpected!", e);
}
}
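   // Hedged usage sketch (hypothetical condition): poll with an increasing back-off until the
   // condition holds or the timeout elapses, failing with the supplied message otherwise.
   //
   //    eventually(() -> "cluster did not form in time",
   //               () -> manager.getMembers().size() == 2,
   //               30, TimeUnit.SECONDS);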
protected static void eventually(Condition ec, long timeoutMillis) {
eventually(ec, timeoutMillis, TimeUnit.MILLISECONDS);
}
protected static void eventually(Condition ec, long timeout, TimeUnit unit) {
eventually(() -> "Condition is still false after " + timeout + " " + unit, ec, timeout, unit);
}
protected void eventually(String message, Condition ec, long timeout, TimeUnit unit) {
eventually(() -> message, ec, unit.toMillis(timeout), TimeUnit.MILLISECONDS);
}
/**
    * This method will actually spawn a fresh thread and will not use the underlying pool. The
    * {@link org.infinispan.test.AbstractInfinispanTest#fork(ExceptionRunnable)} method should be preferred
    * unless you require explicit access to the thread.
*
* @param r The runnable to run
* @return The created thread
*/
protected Thread inNewThread(Runnable r) {
final Thread t = defaultThreadFactory.newThread(new RunnableWrapper(r));
log.tracef("About to start thread '%s' as child of thread '%s'", t.getName(), Thread.currentThread().getName());
t.start();
return t;
}
protected Future<Void> fork(ExceptionRunnable r) {
return testExecutor.submit(new CallableWrapper<>(() -> {
r.run();
return null;
}));
}
protected <T> Future<T> fork(Callable<T> c) {
return testExecutor.submit(new CallableWrapper<>(c));
}
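   // Hedged usage sketch: fork work onto the shared test executor and join it with a bounded get()
   // so a hung task fails the test instead of blocking it forever.
   //
   //    Future<Void> f = fork(() -> { cache.put("k", "v"); });
   //    f.get(10, TimeUnit.SECONDS);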
/**
    * This should normally not be used; use the {@code fork(Runnable|Callable|ExceptionRunnable)}
    * methods when an executor is required.
    *
    * However, if you want a limited set of threads this can still be useful for something like
    * {@link java.util.concurrent.Executors#newFixedThreadPool(int, java.util.concurrent.ThreadFactory)} or
    * {@link java.util.concurrent.Executors#newSingleThreadExecutor(java.util.concurrent.ThreadFactory)}.
    *
    * @param prefix The name prefix for threads created by this factory
    * @return A thread factory that uses the same naming scheme as the other methods
*/
protected ThreadFactory getTestThreadFactory(final String prefix) {
final String className = getClass().getSimpleName();
return new ThreadFactory() {
private final AtomicInteger counter = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
String threadName = prefix + "-" + counter.incrementAndGet() + "," + className;
Thread thread = new Thread(r, threadName);
TestResourceTracker.addResource(AbstractInfinispanTest.this.getTestName(), new ThreadCleaner(thread));
return thread;
}
};
}
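   // Hedged usage sketch: only for the rare case where a dedicated, bounded pool is really needed;
   // threads created by the factory are tracked and interrupted after the test finishes.
   //
   //    ExecutorService pool = Executors.newFixedThreadPool(2, getTestThreadFactory("Writer"));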
/**
* This will run two or more tasks concurrently.
*
    * The tasks are synchronized on a shared barrier, so they all begin at approximately the same time
    * before any of them is allowed to proceed.
*
* @param tasks The tasks to run
* @throws InterruptedException Thrown if this thread is interrupted
* @throws ExecutionException Thrown if one of the callables throws any kind of Throwable. The
* thrown Throwable will be wrapped by this exception
* @throws TimeoutException If one of the tasks doesn't complete within the timeout
*/
protected void runConcurrently(long timeout, TimeUnit timeUnit, ExceptionRunnable... tasks) throws Exception {
if (tasks == null || tasks.length < 2) {
throw new IllegalArgumentException("Need at least 2 tasks to run concurrently");
}
long deadlineNanos = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, timeUnit);
List<Future<Void>> futures = new ArrayList<>(tasks.length);
CyclicBarrier barrier = new CyclicBarrier(tasks.length);
for (ExceptionRunnable task : tasks) {
futures.add(testExecutor.submit(new ConcurrentCallable(task, barrier)));
}
List<Exception> exceptions = new ArrayList<>();
for (Future<Void> future : futures) {
try {
future.get(deadlineNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
} catch (Exception e) {
futures.forEach(f -> f.cancel(true));
exceptions.add(e);
}
}
if (!exceptions.isEmpty()) {
Exception exception = exceptions.remove(0);
for (Exception e : exceptions) {
exception.addSuppressed(e);
}
throw exception;
}
}
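   // Hedged usage sketch (hypothetical caches): both tasks are released by the same barrier, so the
   // two writes race each other instead of running sequentially.
   //
   //    runConcurrently(
   //          () -> { cache1.put("k", "v1"); },
   //          () -> { cache2.put("k", "v2"); });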
/**
* This will run two or more tasks concurrently.
*
    * The tasks are synchronized on a shared barrier, so they all begin at approximately the same time
    * before any of them is allowed to proceed.
*
* @param tasks The tasks to run
* @throws InterruptedException Thrown if this thread is interrupted
* @throws ExecutionException Thrown if one of the callables throws any kind of Throwable. The
* thrown Throwable will be wrapped by this exception
* @throws TimeoutException If one of the tasks doesn't complete within the timeout
*/
protected void runConcurrently(long timeout, TimeUnit timeUnit, Callable<?>... tasks) throws Exception {
runConcurrently(timeout, timeUnit,
Arrays.stream(tasks).<ExceptionRunnable>map(task -> task::call)
.toArray(ExceptionRunnable[]::new));
}
/**
* Equivalent to {@code runConcurrently(30, SECONDS, tasks)}
*/
protected void runConcurrently(ExceptionRunnable... tasks) throws Exception {
runConcurrently(30, TimeUnit.SECONDS, tasks);
}
/**
* Equivalent to {@code runConcurrently(30, SECONDS, tasks)}
*/
protected void runConcurrently(Callable<?>... tasks) throws Exception {
runConcurrently(
Arrays.stream(tasks).<ExceptionRunnable>map(task -> task::call).toArray(ExceptionRunnable[]::new));
}
protected static void eventually(Condition ec) {
eventually(ec, 10000, TimeUnit.MILLISECONDS);
}
protected void eventually(String message, Condition ec) {
eventually(message, ec, 10000, TimeUnit.MILLISECONDS);
}
public void safeRollback(TransactionManager transactionManager) {
try {
transactionManager.rollback();
} catch (Exception e) {
//ignored
}
}
protected void nullOutFields() {
// TestNG keeps test instances in memory forever, make them leaner by clearing direct references to caches
for (Field field : getAllFields()) {
if (!Modifier.isFinal(field.getModifiers()) && fieldIsMemoryHog(field)) {
field.setAccessible(true);
try {
field.set(this, null);
} catch (IllegalArgumentException | IllegalAccessException e) {
log.error(e);
}
}
}
}
private boolean fieldIsMemoryHog(Field field) {
Class<?>[] memoryHogs = {BasicCacheContainer.class, BasicCache.class, FunctionalMap.class, Protocol.class,
AsyncInterceptor.class, RequestRepository.class,
BasePartitionHandlingTest.Partition.class};
return Stream.of(memoryHogs).anyMatch(clazz -> fieldIsMemoryHog(field, clazz));
}
private boolean fieldIsMemoryHog(Field field, Class<?> clazz) {
if (clazz.isAssignableFrom(field.getType())) {
return true;
} else if (field.getType().isArray()) {
return (clazz.isAssignableFrom(field.getType().getComponentType()));
} else if (Collection.class.isAssignableFrom(field.getType())) {
Type fieldType = field.getGenericType();
if (fieldType instanceof Class<?>) {
return clazz.isAssignableFrom((Class<?>) fieldType);
}
if (fieldType instanceof ParameterizedType) {
ParameterizedType collectionType = (ParameterizedType) fieldType;
Type elementType = collectionType.getActualTypeArguments()[0];
if (elementType instanceof ParameterizedType) {
return clazz.isAssignableFrom(((Class<?>) ((ParameterizedType) elementType).getRawType()));
} else if (elementType instanceof Class<?>) {
return clazz.isAssignableFrom(((Class<?>) elementType));
}
}
return false;
} else {
return false;
}
}
private Collection<Field> getAllFields() {
Collection<Field> fields = new ArrayList<>();
Class<?> clazz = this.getClass();
while (clazz != null) {
fields.addAll(Arrays.asList(clazz.getDeclaredFields()));
clazz = clazz.getSuperclass();
}
return fields;
}
protected ExecutorService testExecutor() {
return testExecutor;
}
private class ThreadCleaner extends TestResourceTracker.Cleaner<Thread> {
public ThreadCleaner(Thread thread) {
super(thread);
}
@Override
public void close() {
if (ref.isAlive() && !ref.isInterrupted()) {
log.warnf("There was a thread %s still alive after test completion - interrupted it",
ref);
ref.interrupt();
}
}
}
/**
    * A callable that first awaits on the provided barrier before running the provided task.
    * This improves the chances of multiple threads running at the same time, but it is still no
    * guarantee since this is controlled by the thread scheduler.
*/
public final class ConcurrentCallable implements Callable<Void> {
private final ExceptionRunnable task;
private final CyclicBarrier barrier;
ConcurrentCallable(ExceptionRunnable task, CyclicBarrier barrier) {
this.task = task;
this.barrier = barrier;
}
@Override
public Void call() throws Exception {
try {
log.trace("Started concurrent callable");
barrier.await(10, TimeUnit.SECONDS);
log.trace("Synchronized with the other concurrent runnables");
task.run();
log.debug("Exiting fork runnable.");
return null;
} catch (Throwable e) {
log.warn("Exiting fork runnable due to exception", e);
throw e;
}
}
}
public final class RunnableWrapper implements Runnable {
final Runnable realOne;
RunnableWrapper(Runnable realOne) {
this.realOne = realOne;
}
@Override
public void run() {
try {
log.trace("Started fork runnable..");
realOne.run();
log.debug("Exiting fork runnable.");
} catch (Throwable e) {
log.warn("Exiting fork runnable due to exception", e);
throw e;
}
}
}
private class CallableWrapper<T> implements Callable<T> {
private final Callable<? extends T> c;
CallableWrapper(Callable<? extends T> c) {
this.c = c;
}
@Override
public T call() throws Exception {
try {
log.trace("Started fork callable..");
T result = c.call();
log.debug("Exiting fork callable.");
return result;
} catch (Exception e) {
log.warn("Exiting fork callable due to exception", e);
throw e;
}
}
}
}
| 21,156
| 37.607664
| 147
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/TestBlocking.java
|
package org.infinispan.test;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Exchanger;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Utility methods that encapsulate blocking invocations. This is useful for tests, as all
 * methods in this class are automatically excluded from any non-blocking verification done by BlockHound.
*/
public class TestBlocking {
private TestBlocking() { }
public static <I> I exchange(Exchanger<I> exchanger, I value, long time, TimeUnit timeUnit)
throws InterruptedException, TimeoutException {
return exchanger.exchange(value, time, timeUnit);
}
public static boolean await(CountDownLatch latch, long time, TimeUnit timeUnit) throws InterruptedException {
return latch.await(time, timeUnit);
}
public static void await(CyclicBarrier barrier, long time, TimeUnit timeUnit) throws InterruptedException,
TimeoutException, BrokenBarrierException {
barrier.await(time, timeUnit);
}
public static <V> V get(Future<V> future, long time, TimeUnit timeUnit) throws ExecutionException,
InterruptedException, TimeoutException {
return future.get(time, timeUnit);
}
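   // Hedged usage sketch: test code running on non-blocking threads calls these helpers instead of
   // the JDK methods directly, so BlockHound does not flag the intentional wait.
   //
   //    TestBlocking.await(latch, 10, TimeUnit.SECONDS);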
}
| 1,443
| 37
| 112
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/CherryPickClassLoader.java
|
package org.infinispan.test;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
import org.jboss.logging.Logger;
/**
 * A classloader where the classes that are included, excluded or not found can be
 * individually configured to suit a particular test in a cherry-pick fashion.
*
* @author Galder Zamarreño
* @since 5.0
*/
public class CherryPickClassLoader extends ClassLoader {
private static final Logger log = Logger.getLogger(CherryPickClassLoader.class);
private String[] includedClasses;
private String[] excludedClasses;
private String[] notFoundClasses;
   private final Map<String, Class<?>> classes = new HashMap<>();
public CherryPickClassLoader(String[] includedClasses,
String[] excludedClasses, ClassLoader parent) {
this(includedClasses, excludedClasses, null, parent);
}
public CherryPickClassLoader(String[] includedClasses,
String[] excludedClasses,
String[] notFoundClasses, ClassLoader parent) {
super(parent);
this.includedClasses = includedClasses;
this.excludedClasses = excludedClasses;
this.notFoundClasses = notFoundClasses;
log.debugf("Created %s", this);
}
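   // Hedged usage sketch (package names are illustrative): classes under "org.example.included." are
   // defined by this loader, "org.example.missing." is reported as not found, and everything else is
   // delegated to the parent.
   //
   //    ClassLoader cl = new CherryPickClassLoader(
   //          new String[]{"org.example.included."},
   //          null,
   //          new String[]{"org.example.missing."},
   //          Thread.currentThread().getContextClassLoader());
   //    Class<?> c = cl.loadClass("org.example.included.SomeClass");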
@Override
protected synchronized Class<?> loadClass(String name, boolean resolve)
throws ClassNotFoundException {
log.tracef("loadClass(%s,%b)", name, resolve);
      if (isIncluded(name) && !isExcluded(name)) {
Class c = findClass(name);
if (resolve)
resolveClass(c);
return c;
} else if (isNotFound(name)) {
throw new ClassNotFoundException(name + " is discarded");
} else {
return super.loadClass(name, resolve);
}
}
@Override
protected Class<?> findClass(String name) throws ClassNotFoundException {
log.tracef("findClass(%s)", name);
Class result = classes.get(name);
if (result != null)
return result;
      if (isIncluded(name) && !isExcluded(name)) {
result = createClass(name);
} else if (isNotFound(name)) {
throw new ClassNotFoundException(name + " is discarded");
} else {
result = super.findClass(name);
}
classes.put(name, result);
return result;
}
protected Class createClass(String name) throws ClassFormatError, ClassNotFoundException {
log.infof("createClass(%s)", name);
      try (InputStream is = getResourceAsStream(name.replace('.', '/').concat(".class"))) {
         // the stream is closed automatically; a missing resource is reported as ClassNotFoundException
         if (is == null) {
            throw new ClassNotFoundException("cannot find " + name);
         }
         byte[] bytes = new byte[1024];
         ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
         int read;
         while ((read = is.read(bytes)) > -1) {
            baos.write(bytes, 0, read);
         }
         bytes = baos.toByteArray();
         return this.defineClass(name, bytes, 0, bytes.length);
} catch (FileNotFoundException e) {
throw new ClassNotFoundException("cannot find " + name, e);
} catch (IOException e) {
throw new ClassNotFoundException("cannot read " + name, e);
}
}
protected boolean isIncluded(String className) {
if (includedClasses != null) {
for (int i = 0; i < includedClasses.length; i++) {
if (className.startsWith(includedClasses[i])) {
return true;
}
}
}
return false;
}
protected boolean isExcluded(String className) {
if (excludedClasses != null) {
for (int i = 0; i < excludedClasses.length; i++) {
if (className.startsWith(excludedClasses[i])) {
return true;
}
}
}
return false;
}
protected boolean isNotFound(String className) {
if (notFoundClasses != null) {
for (int i = 0; i < notFoundClasses.length; i++) {
if (className.startsWith(notFoundClasses[i])) {
return true;
}
}
}
return false;
}
@Override
public String toString() {
String s = getClass().getName();
s += "[includedClasses=";
s += listClasses(includedClasses);
s += ";excludedClasses=";
s += listClasses(excludedClasses);
s += ";notFoundClasses=";
s += listClasses(notFoundClasses);
s += ";parent=";
s += getParent();
s += "]";
return s;
}
private static String listClasses(String[] classes) {
if (classes == null) return null;
String s = "";
for (int i = 0; i < classes.length; i++) {
if (i > 0) s += ",";
s += classes[i];
}
return s;
}
}
| 4,785
| 29.484076
| 93
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/ValueFuture.java
|
package org.infinispan.test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
* A simple <code>Future</code> implementation whose <code>get()</code> method blocks until another thread calls <code>set()</code>.
*
* @author Dan Berindei <dberinde@redhat.com>
* @since 5.0
*/
public class ValueFuture<V> implements Future<V> {
private CountDownLatch setLatch = new CountDownLatch(1);
private V value;
private Throwable exception;
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
return false;
}
@Override
public boolean isCancelled() {
return false;
}
@Override
public boolean isDone() {
return false;
}
@Override
public V get() throws InterruptedException, ExecutionException {
setLatch.await();
if (exception != null)
throw new ExecutionException(exception);
return value;
}
@Override
public V get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException {
      if (!setLatch.await(timeout, unit))
         throw new TimeoutException();
if (exception != null)
throw new ExecutionException(exception);
return value;
}
public void set(V value) {
this.value = value;
setLatch.countDown();
}
public void setException(Throwable exception) {
this.exception = exception;
setLatch.countDown();
}
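   // Hedged usage sketch: one thread parks on get() while another hands over the result (or an
   // exception) once it is ready.
   //
   //    ValueFuture<String> future = new ValueFuture<>();
   //    new Thread(() -> future.set("done")).start();
   //    String value = future.get();   // blocks until set() or setException() is called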
}
| 1,543
| 24.311475
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/TestingUtil.java
|
package org.infinispan.test;
import static org.infinispan.persistence.manager.PersistenceManager.AccessMode.BOTH;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.fail;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.security.Principal;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.management.JMException;
import javax.management.MBeanInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanParameterInfo;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.security.auth.Subject;
import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.cache.impl.AbstractDelegatingCache;
import org.infinispan.commands.CommandsFactory;
import org.infinispan.commons.CacheException;
import org.infinispan.commons.api.Lifecycle;
import org.infinispan.commons.jdkspecific.CallerId;
import org.infinispan.commons.marshall.ProtoStreamMarshaller;
import org.infinispan.commons.marshall.StreamAwareMarshaller;
import org.infinispan.commons.marshall.StreamingMarshaller;
import org.infinispan.commons.test.Exceptions;
import org.infinispan.commons.util.IntSet;
import org.infinispan.commons.util.IntSets;
import org.infinispan.commons.util.Version;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.Configuration;
import org.infinispan.configuration.global.GlobalConfiguration;
import org.infinispan.container.DataContainer;
import org.infinispan.container.entries.CacheEntry;
import org.infinispan.container.entries.InternalCacheEntry;
import org.infinispan.container.impl.InternalDataContainer;
import org.infinispan.context.Flag;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.LocalizedCacheTopology;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.distribution.ch.ConsistentHashFactory;
import org.infinispan.distribution.ch.KeyPartitioner;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.factories.GlobalComponentRegistry;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.factories.impl.BasicComponentRegistry;
import org.infinispan.factories.impl.ComponentRef;
import org.infinispan.factories.impl.TestComponentAccessors;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.jmx.CacheJmxRegistration;
import org.infinispan.jmx.CacheManagerJmxRegistration;
import org.infinispan.lifecycle.ComponentStatus;
import org.infinispan.lifecycle.ModuleLifecycle;
import org.infinispan.manager.CacheContainer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.marshall.core.GlobalMarshaller;
import org.infinispan.marshall.persistence.impl.MarshalledEntryUtil;
import org.infinispan.marshall.persistence.impl.PersistenceMarshallerImpl;
import org.infinispan.metadata.EmbeddedMetadata;
import org.infinispan.metadata.Metadata;
import org.infinispan.persistence.dummy.DummyInMemoryStore;
import org.infinispan.persistence.manager.PersistenceManager;
import org.infinispan.persistence.manager.PersistenceManagerImpl;
import org.infinispan.persistence.spi.AdvancedLoadWriteStore;
import org.infinispan.persistence.spi.CacheLoader;
import org.infinispan.persistence.spi.CacheWriter;
import org.infinispan.persistence.spi.MarshallableEntry;
import org.infinispan.persistence.spi.NonBlockingStore;
import org.infinispan.persistence.support.DelegatingNonBlockingStore;
import org.infinispan.persistence.support.DelegatingPersistenceManager;
import org.infinispan.persistence.support.NonBlockingStoreAdapter;
import org.infinispan.persistence.support.SegmentPublisherWrapper;
import org.infinispan.persistence.support.SingleSegmentPublisher;
import org.infinispan.persistence.support.WaitDelegatingNonBlockingStore;
import org.infinispan.protostream.ProtobufUtil;
import org.infinispan.protostream.SerializationContext;
import org.infinispan.protostream.SerializationContextInitializer;
import org.infinispan.registry.InternalCacheRegistry;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.transport.AbstractDelegatingTransport;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.JGroupsAddress;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.security.AuthorizationPermission;
import org.infinispan.security.GroupPrincipal;
import org.infinispan.security.actions.SecurityActions;
import org.infinispan.security.impl.SecureCacheImpl;
import org.infinispan.statetransfer.StateTransferManagerImpl;
import org.infinispan.topology.CacheTopology;
import org.infinispan.transaction.impl.TransactionTable;
import org.infinispan.util.DependencyGraph;
import org.infinispan.util.concurrent.CompletionStages;
import org.infinispan.util.concurrent.TimeoutException;
import org.infinispan.util.concurrent.locks.LockManager;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.jgroups.JChannel;
import org.jgroups.MergeView;
import org.jgroups.View;
import org.jgroups.ViewId;
import org.jgroups.protocols.DELAY;
import org.jgroups.protocols.DISCARD;
import org.jgroups.protocols.TP;
import org.jgroups.protocols.pbcast.GMS;
import org.jgroups.stack.ProtocolStack;
import org.jgroups.util.MutableDigest;
import org.reactivestreams.Publisher;
import org.testng.AssertJUnit;
import io.reactivex.rxjava3.core.Flowable;
import jakarta.transaction.Status;
import jakarta.transaction.TransactionManager;
public class TestingUtil {
private static final Log log = LogFactory.getLog(TestingUtil.class);
private static final Random random = new Random();
private static final int SHORT_TIMEOUT_MILLIS = Integer.getInteger("infinispan.test.shortTimeoutMillis", 500);
private static final ScheduledExecutorService timeoutExecutor;
static {
ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, r -> {
Thread t = new Thread(r);
t.setDaemon(true);
t.setName("test-timeout-thread");
return t;
});
executor.setRemoveOnCancelPolicy(true);
timeoutExecutor = executor;
}
public static void assertNotDone(CompletionStage<?> completionStage) {
sleepThread(50);
assertFalse(completionStage.toCompletableFuture().isDone());
}
public static void assertNotDone(Future<?> completionStage) {
sleepThread(50);
assertFalse(completionStage.isDone());
}
public static <T> CompletableFuture<T> orTimeout(CompletableFuture<T> f, long timeout, TimeUnit timeUnit, Executor executor) {
ScheduledFuture<?> scheduled = timeoutExecutor.schedule(() -> {
// Don't run anything on the timeout thread
executor.execute(() -> f.completeExceptionally(new TimeoutException("Timed out!")));
}, timeout, timeUnit);
f.whenComplete((v, t) -> scheduled.cancel(false));
return f;
}
public static <T> CompletionStage<T> startAsync(Callable<CompletionStage<T>> action, Executor executor) {
return CompletableFutures.completedNull().thenComposeAsync(ignored -> Exceptions.unchecked(action), executor);
}
public static <T> CompletionStage<T> sequence(CompletionStage<?> first, Callable<CompletionStage<T>> second) {
return first.thenCompose(ignored -> Exceptions.unchecked(second));
}
public static <T> CompletionStage<T> sequenceAsync(CompletionStage<?> first, Callable<CompletionStage<T>> second, Executor executor) {
return first.thenComposeAsync(ignored -> Exceptions.unchecked(second), executor);
}
/**
* Should be used by tests for a timeout when they need to wait for that timeout to expire.
*
    * <p>Can be changed with the {@code infinispan.test.shortTimeoutMillis} system property.</p>
*/
public static long shortTimeoutMillis() {
return SHORT_TIMEOUT_MILLIS;
}
/**
* Simulates a node crash, discarding all the messages from/to this node and then stopping the caches.
*/
public static void crashCacheManagers(EmbeddedCacheManager... cacheManagers) {
for (EmbeddedCacheManager cm : cacheManagers) {
JChannel channel = extractJChannel(cm);
try {
DISCARD discard = new DISCARD();
discard.discardAll(true);
channel.getProtocolStack().insertProtocol(discard, ProtocolStack.Position.ABOVE, TP.class);
} catch (Exception e) {
log.warn("Problems inserting discard", e);
throw new RuntimeException(e);
}
View view = View.create(channel.getAddress(), 100, channel.getAddress());
((GMS) channel.getProtocolStack().findProtocol(GMS.class)).installView(view);
}
killCacheManagers(cacheManagers);
}
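   // Hedged usage sketch (hypothetical managers): make the second node drop all traffic and then
   // wait until the surviving node installs a view without it.
   //
   //    TestingUtil.crashCacheManagers(manager2);
   //    TestingUtil.blockUntilViewsReceived(30000, false, manager1);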
public static void installNewView(EmbeddedCacheManager... members) {
installNewView(Stream.of(members).map(EmbeddedCacheManager::getAddress), members);
}
public static void installNewView(Function<EmbeddedCacheManager, JChannel> channelRetriever, EmbeddedCacheManager... members) {
installNewView(Stream.of(members).map(EmbeddedCacheManager::getAddress), channelRetriever, members);
}
public static void installNewView(Stream<Address> members, EmbeddedCacheManager... where) {
installNewView(members, TestingUtil::extractJChannel, where);
}
public static JChannel extractJChannel(EmbeddedCacheManager ecm) {
Transport transport = extractGlobalComponent(ecm, Transport.class);
while (!(transport instanceof JGroupsTransport)) {
if (Proxy.isProxyClass(transport.getClass())) {
// Unwrap proxies created by the StateSequencer
transport = extractField(extractField(transport, "h"), "wrappedInstance");
} else if (transport instanceof AbstractDelegatingTransport) {
transport = ((AbstractDelegatingTransport) transport).getDelegate();
} else {
throw new IllegalStateException("Unable to obtain a JGroupsTransport instance from " + transport + " on " + ecm.getAddress());
}
}
return ((JGroupsTransport) transport).getChannel();
}
public static void installNewView(Stream<Address> members, Function<EmbeddedCacheManager, JChannel> channelRetriever, EmbeddedCacheManager... where) {
List<org.jgroups.Address> viewMembers = members.map(a -> ((JGroupsAddress) a).getJGroupsAddress()).collect(Collectors.toList());
List<View> previousViews = new ArrayList<>(where.length);
      // Compute the merge digest; without it nodes would request the retransmission of all messages,
      // including those that were removed by STABLE earlier
MutableDigest digest = new MutableDigest(viewMembers.toArray(new org.jgroups.Address[0]));
for (EmbeddedCacheManager ecm : where) {
GMS gms = channelRetriever.apply(ecm).getProtocolStack().findProtocol(GMS.class);
previousViews.add(gms.view());
digest.merge(gms.getDigest());
}
long viewId = previousViews.stream().mapToLong(view -> view.getViewId().getId()).max().orElse(0) + 1;
View newView;
if (previousViews.stream().allMatch(view -> view.getMembers().containsAll(viewMembers))) {
newView = View.create(viewMembers.get(0), viewId, viewMembers.toArray(new org.jgroups.Address[0]));
} else {
newView = new MergeView(new ViewId(viewMembers.get(0), viewId), viewMembers, previousViews);
}
log.trace("Before installing new view:" + viewMembers);
for (EmbeddedCacheManager ecm : where) {
((GMS) channelRetriever.apply(ecm).getProtocolStack().findProtocol(GMS.class)).installView(newView, digest);
}
}
public static String wrapXMLWithSchema(String schema, String xml) {
StringBuilder sb = new StringBuilder();
sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
sb.append("<infinispan xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" ");
sb.append("xsi:schemaLocation=\"urn:infinispan:config:");
sb.append(schema);
sb.append(" https://infinispan.org/schemas/infinispan-config-");
sb.append(schema);
sb.append(".xsd\" xmlns=\"urn:infinispan:config:");
sb.append(schema);
sb.append("\">\n");
sb.append(xml);
sb.append("</infinispan>");
return sb.toString();
}
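   // Worked example (fragment content is illustrative): wrapXMLWithSchema("<cache-container/>") wraps
   // the fragment in an <infinispan> root element whose xsi:schemaLocation points at the schema for
   // the current Version.getSchemaVersion().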
public static String wrapXMLWithSchema(String xml) {
return wrapXMLWithSchema(Version.getSchemaVersion(), xml);
}
public static String wrapXMLWithoutSchema(String xml) {
StringBuilder sb = new StringBuilder();
sb.append("<infinispan>\n");
sb.append(xml);
sb.append("</infinispan>");
return sb.toString();
}
/**
* Extracts the value of a field in a given target instance using reflection, able to extract private fields as
* well.
*
* @param target object to extract field from
* @param fieldName name of field to extract
* @return field value
*/
public static <T> T extractField(Object target, String fieldName) {
return extractField(target.getClass(), target, fieldName);
}
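   // Hedged usage sketch (the field name is an assumption about the target object): pull a private
   // collaborator out of a component for white-box assertions.
   //
   //    Cache<?, ?> delegate = TestingUtil.extractField(secureCache, "delegate");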
public static void replaceField(Object newValue, String fieldName, Object owner, Class<?> baseType) {
Field field;
try {
field = baseType.getDeclaredField(fieldName);
field.setAccessible(true);
field.set(owner, newValue);
} catch (Exception e) {
throw new RuntimeException(e);//just to simplify exception handling
}
}
public static <T> void replaceField(Object owner, String fieldName, Function<T, T> func) {
replaceField(owner.getClass(), owner, fieldName, func);
}
public static <T> void replaceField(Class<?> baseType, Object owner, String fieldName, Function<T, T> func) {
Field field;
try {
field = baseType.getDeclaredField(fieldName);
field.setAccessible(true);
Object prevValue = field.get(owner);
Object newValue = func.apply((T) prevValue);
field.set(owner, newValue);
} catch (Exception e) {
throw new RuntimeException(e);//just to simplify exception handling
}
}
public static <T> T extractField(Class<?> type, Object target, String fieldName) {
while (true) {
Field field;
try {
field = type.getDeclaredField(fieldName);
field.setAccessible(true);
return (T) field.get(target);
} catch (Exception e) {
if (type.equals(Object.class)) {
throw new RuntimeException(e);
} else {
// try with superclass!!
type = type.getSuperclass();
}
}
}
}
public static <T extends AsyncInterceptor> T findInterceptor(Cache<?, ?> cache,
Class<T> interceptorToFind) {
return extractInterceptorChain(cache).findInterceptorExtending(interceptorToFind);
}
public static int getSegmentForKey(Object key, Cache<?, ?> cache) {
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
return keyPartitioner.getSegment(key);
}
/**
* Waits until pendingCH() is null on all caches, currentCH.getMembers() contains all caches provided as the param
* and all segments have numOwners owners.
*/
public static void waitForNoRebalance(Cache... caches) {
final int REBALANCE_TIMEOUT_SECONDS = 60; //Needs to be rather large to prevent sporadic failures on CI
final long giveup = System.nanoTime() + TimeUnit.SECONDS.toNanos(REBALANCE_TIMEOUT_SECONDS);
int zeroCapacityCaches = 0;
for (Cache<?, ?> c : caches) {
if (c.getCacheConfiguration().clustering().hash().capacityFactor() == 0f ||
c.getCacheManager().getCacheManagerConfiguration().isZeroCapacityNode()) {
zeroCapacityCaches++;
}
}
for (Cache<?, ?> c : caches) {
c = unwrapSecureCache(c);
int numOwners = c.getCacheConfiguration().clustering().hash().numOwners();
DistributionManager distributionManager = c.getAdvancedCache().getDistributionManager();
Address cacheAddress = c.getAdvancedCache().getRpcManager().getAddress();
CacheTopology cacheTopology;
while (true) {
cacheTopology = distributionManager.getCacheTopology();
boolean rebalanceInProgress;
boolean chContainsAllMembers;
boolean currentChIsBalanced;
if (cacheTopology != null) {
rebalanceInProgress = cacheTopology.getPhase() != CacheTopology.Phase.NO_REBALANCE;
ConsistentHash currentCH = cacheTopology.getCurrentCH();
ConsistentHashFactory chf = StateTransferManagerImpl.pickConsistentHashFactory(
extractGlobalConfiguration(c.getCacheManager()), c.getCacheConfiguration());
chContainsAllMembers = currentCH.getMembers().size() == caches.length;
currentChIsBalanced = true;
int actualNumOwners = Math.min(numOwners, currentCH.getMembers().size() - zeroCapacityCaches);
for (int i = 0; i < currentCH.getNumSegments(); i++) {
if (currentCH.locateOwnersForSegment(i).size() < actualNumOwners) {
currentChIsBalanced = false;
break;
}
}
// We need to check that the topologyId > 1 to account for nodes restarting
if (chContainsAllMembers && !rebalanceInProgress && cacheTopology.getTopologyId() > 1) {
rebalanceInProgress = !chf.rebalance(currentCH).equals(currentCH);
}
if (chContainsAllMembers && !rebalanceInProgress && currentChIsBalanced)
break;
} else {
rebalanceInProgress = false;
chContainsAllMembers = false;
currentChIsBalanced = true;
}
//System.out.printf("Cache %s Address %s cacheTopology %s rebalanceInProgress %s chContainsAllMembers %s, currentChIsBalanced %s\n", c.getName(), cacheAddress, cacheTopology, rebalanceInProgress, chContainsAllMembers, currentChIsBalanced);
if (System.nanoTime() - giveup > 0) {
String message;
if (!chContainsAllMembers) {
Address[] addresses = new Address[caches.length];
for (int i = 0; i < caches.length; i++) {
addresses[i] = caches[i].getCacheManager().getAddress();
}
message = String.format("Cache %s timed out waiting for rebalancing to complete on node %s, " +
"expected member list is %s, current member list is %s!", c.getName(), cacheAddress,
Arrays.toString(addresses), cacheTopology == null ? "N/A" : cacheTopology.getCurrentCH().getMembers());
} else {
message = String.format("Cache %s timed out waiting for rebalancing to complete on node %s, " +
"current topology is %s. rebalanceInProgress=%s, currentChIsBalanced=%s", c.getName(),
c.getCacheManager().getAddress(), cacheTopology, rebalanceInProgress, currentChIsBalanced);
}
log.error(message);
throw new RuntimeException(message);
}
LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
}
log.trace("Node " + cacheAddress + " finished state transfer, has topology " + cacheTopology);
}
}
public static void waitForNoRebalanceAcrossManagers(EmbeddedCacheManager... managers) {
int numberOfManagers = managers.length;
assert numberOfManagers > 0;
Set<String> testCaches = getInternalAndUserCacheNames(managers[0]);
log.debugf("waitForNoRebalance with managers %s, for caches %s", Arrays.toString(managers), testCaches);
for (String cacheName : testCaches) {
ArrayList<Cache<?, ?>> caches = new ArrayList<>(numberOfManagers);
for (EmbeddedCacheManager manager : managers) {
Cache<?, ?> cache = manager.getCache(cacheName, false);
if (cache != null) {
caches.add(cache);
}
}
TestingUtil.waitForNoRebalance(caches.toArray(new Cache[0]));
}
}
public static Set<String> getInternalAndUserCacheNames(EmbeddedCacheManager cacheManager) {
Set<String> testCaches = new HashSet<>(cacheManager.getCacheNames());
testCaches.addAll(getInternalCacheNames(cacheManager));
return testCaches;
}
public static Set<String> getInternalCacheNames(CacheContainer container) {
return extractGlobalComponentRegistry(container).getComponent(InternalCacheRegistry.class).getInternalCacheNames();
}
public static void waitForTopologyPhase(List<Address> expectedMembers, CacheTopology.Phase phase,
Cache<?, ?>... caches) {
final int TOPOLOGY_TIMEOUT_SECONDS = 60; //Needs to be rather large to prevent sporadic failures on CI
final long giveup = System.nanoTime() + TimeUnit.SECONDS.toNanos(TOPOLOGY_TIMEOUT_SECONDS);
for (Cache<?, ?> c : caches) {
c = unwrapSecureCache(c);
DistributionManager distributionManager = c.getAdvancedCache().getDistributionManager();
while (true) {
CacheTopology cacheTopology = distributionManager.getCacheTopology();
boolean allMembersExist = cacheTopology != null && cacheTopology.getMembers().containsAll(expectedMembers);
boolean isCorrectPhase = cacheTopology != null && cacheTopology.getPhase() == phase;
if (allMembersExist && isCorrectPhase) break;
if (System.nanoTime() - giveup > 0) {
String message = String.format("Timed out waiting for a CacheTopology to be installed with members %s and phase %s. Current topology=%s",
expectedMembers, phase, cacheTopology);
log.error(message);
throw new RuntimeException(message);
}
LockSupport.parkNanos(TimeUnit.MILLISECONDS.toNanos(100));
}
}
}
private static Cache<?, ?> unwrapSecureCache(Cache<?, ?> c) {
if (c instanceof SecureCacheImpl) {
c = extractField(SecureCacheImpl.class, c, "delegate");
}
return c;
}
public static void waitForNoRebalance(Collection<? extends Cache> caches) {
waitForNoRebalance(caches.toArray(new Cache[0]));
}
/**
* Loops, continually calling {@link #areCacheViewsComplete(Cache[])} until it either returns true or
* <code>timeout</code> ms have elapsed.
*
* @param caches caches which must all have consistent views
* @param timeout max number of ms to loop
    * @throws RuntimeException if <code>timeout</code> ms have elapsed without all caches having the same number of
* members.
*/
public static void blockUntilViewsReceived(Cache<?, ?>[] caches, long timeout) {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
sleepThread(100);
if (areCacheViewsComplete(caches)) {
return;
}
}
viewsTimedOut(caches);
}
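   // Hedged usage sketch (hypothetical caches): wait up to 10 seconds for every node to see the full
   // cluster before making assertions that depend on the membership.
   //
   //    TestingUtil.blockUntilViewsReceived(new Cache[]{cache1, cache2}, 10000);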
private static void viewsTimedOut(Cache<?, ?>[] caches) {
CacheContainer[] cacheContainers = new CacheContainer[caches.length];
for (int i = 0; i < caches.length; i++) {
cacheContainers[i] = caches[i].getCacheManager();
}
viewsTimedOut(cacheContainers);
}
private static void viewsTimedOut(CacheContainer[] cacheContainers) {
int length = cacheContainers.length;
List<View> incompleteViews = new ArrayList<>(length);
for (CacheContainer cacheContainer : cacheContainers) {
EmbeddedCacheManager cm = (EmbeddedCacheManager) cacheContainer;
if (cm.getMembers().size() != cacheContainers.length) {
incompleteViews.add(extractJChannel(cm).getView());
log.warnf("Manager %s has an incomplete view: %s", cm.getAddress(), cm.getMembers());
}
}
throw new TimeoutException(String.format(
"Timed out before caches had complete views. Expected %d members in each view. Views are as follows: %s",
cacheContainers.length, incompleteViews));
}
public static void blockUntilViewsReceivedInt(Cache<?, ?>[] caches, long timeout) throws InterruptedException {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
sleepThreadInt(100, null);
if (areCacheViewsComplete(caches)) {
return;
}
}
viewsTimedOut(caches);
}
/**
* Version of blockUntilViewsReceived that uses varargs
*/
public static void blockUntilViewsReceived(long timeout, Cache<?, ?>... caches) {
blockUntilViewsReceived(caches, timeout);
}
/**
* Version of blockUntilViewsReceived that throws back any interruption
*/
public static void blockUntilViewsReceivedInt(long timeout, Cache<?, ?>... caches) throws InterruptedException {
blockUntilViewsReceivedInt(caches, timeout);
}
/**
* Version of blockUntilViewsReceived that uses varargs and cache managers
*/
public static void blockUntilViewsReceived(long timeout, CacheContainer... cacheContainers) {
blockUntilViewsReceived(timeout, true, cacheContainers);
}
/**
* Waits for the given members to be removed from the cluster. The difference between this and {@link
* #blockUntilViewsReceived(long, org.infinispan.manager.CacheContainer...)} method(s) is that it does not barf if
* more members than expected are in the cluster - this is because we expect to start with a greater number of members
* than we eventually expect. It will barf, though, if the number of members is not the one expected, but only after
* the timeout expires.
*/
public static void blockForMemberToFail(long timeout, CacheContainer... cacheContainers) {
blockUntilViewsReceived(timeout, false, cacheContainers);
areCacheViewsComplete(true, cacheContainers);
}
public static void blockUntilViewsReceived(long timeout, boolean barfIfTooManyMembers, CacheContainer... cacheContainers) {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
if (areCacheViewsComplete(barfIfTooManyMembers, cacheContainers)) {
return;
}
sleepThread(100);
}
viewsTimedOut(cacheContainers);
}
/**
* An overloaded version of {@link #blockUntilViewsReceived(long, Cache[])} that allows for 'shrinking' clusters.
* I.e., the usual method barfs if there are more members than expected. This one takes a param
* (barfIfTooManyMembers) which, if false, will NOT barf but will wait until the cluster 'shrinks' to the desired
* size. Useful if in tests, you kill a member and want to wait until this fact is known across the cluster.
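*
* For example, after killing one of three cache managers you can wait for the two remaining caches to agree
* on the smaller view (a sketch; {@code manager2}, {@code cache0} and {@code cache1} are hypothetical):
* <pre> {@code
* TestingUtil.killCacheManagers(manager2);
* TestingUtil.blockUntilViewsReceived(30000, false, cache0, cache1);
* }</pre>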
*/
public static void blockUntilViewsReceived(long timeout, boolean barfIfTooManyMembers, Cache<?, ?>... caches) {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
sleepThread(100);
if (areCacheViewsComplete(caches, barfIfTooManyMembers)) {
return;
}
}
viewsTimedOut(caches);
}
/**
* Loops, continually calling {@link #areCacheViewsComplete(Cache[])} until it either returns true or
* <code>timeout</code> ms have elapsed.
*
* @param groupSize number of caches expected in the group
* @param timeout max number of ms to loop
* @throws RuntimeException if <code>timeout</code> ms have elapsed without all caches having the same number of
* members.
*/
public static void blockUntilViewReceived(Cache<?, ?> cache, int groupSize, long timeout) {
blockUntilViewReceived(cache, groupSize, timeout, true);
}
/**
* Loops, continually calling {@link #areCacheViewsComplete(Cache[])} until
* it either returns true or a default timeout has elapsed.
*
* @param groupSize number of caches expected in the group
*/
public static void blockUntilViewReceived(Cache<?, ?> cache, int groupSize) {
// Default 10 seconds
blockUntilViewReceived(cache, groupSize, 10000, true);
}
public static void blockUntilViewReceived(Cache<?, ?> cache, int groupSize, long timeout, boolean barfIfTooManyMembersInView) {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
sleepThread(100);
EmbeddedCacheManager cacheManager = cache.getCacheManager();
if (isCacheViewComplete(cacheManager.getMembers(), cacheManager.getAddress(), groupSize, barfIfTooManyMembersInView)) {
return;
}
}
throw new RuntimeException(String.format(
"Timed out before cache had %d members. View is %s",
groupSize, cache.getCacheManager().getMembers()));
}
/**
* Checks each cache to see if the number of members in the list returned by {@link
* EmbeddedCacheManager#getMembers()} matches the size of the <code>caches</code> parameter.
*
* @param caches caches that should form a View
* @return <code>true</code> if all caches have <code>caches.length</code> members; false otherwise
* @throws IllegalStateException if any of the caches have MORE view members than caches.length
*/
public static boolean areCacheViewsComplete(Cache<?, ?>[] caches) {
return areCacheViewsComplete(caches, true);
}
public static boolean areCacheViewsComplete(Cache<?, ?>[] caches, boolean barfIfTooManyMembers) {
int memberCount = caches.length;
for (Cache<?, ?> cache : caches) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
if (!isCacheViewComplete(cacheManager.getMembers(), cacheManager.getAddress(), memberCount, barfIfTooManyMembers)) {
return false;
}
}
return true;
}
public static boolean areCacheViewsComplete(boolean barfIfTooManyMembers, CacheContainer... cacheContainers) {
if (cacheContainers == null) throw new NullPointerException("Cache Manager array is null");
int memberCount = cacheContainers.length;
for (CacheContainer cacheContainer : cacheContainers) {
EmbeddedCacheManager cacheManager = (EmbeddedCacheManager) cacheContainer;
if (!isCacheViewComplete(cacheManager.getMembers(), cacheManager.getAddress(), memberCount, barfIfTooManyMembers)) {
return false;
}
}
return true;
}
public static boolean isCacheViewComplete(Cache<?, ?> c, int memberCount) {
EmbeddedCacheManager cacheManager = c.getCacheManager();
return isCacheViewComplete(cacheManager.getMembers(), cacheManager.getAddress(), memberCount, true);
}
public static boolean isCacheViewComplete(List<Address> members, Address address, int memberCount, boolean barfIfTooManyMembers) {
if (members == null || memberCount > members.size()) {
return false;
} else if (memberCount < members.size()) {
if (barfIfTooManyMembers) {
// This is an exceptional condition
StringBuilder sb = new StringBuilder("Cache at address ");
sb.append(address);
sb.append(" had ");
sb.append(members.size());
sb.append(" members; expecting ");
sb.append(memberCount);
sb.append(". Members were (");
for (int j = 0; j < members.size(); j++) {
if (j > 0) {
sb.append(", ");
}
sb.append(members.get(j));
}
sb.append(')');
throw new IllegalStateException(sb.toString());
} else return false;
}
return true;
}
/**
* This method blocks until the given caches have a view whose size
* matches the desired value. This method is particularly useful for
* discovering that members have been split, or that they have joined back
* again.
*
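* For instance, to wait for a split (or merge) where only two members remain visible
* (a sketch; the cache references are hypothetical):
* <pre> {@code
* TestingUtil.blockUntilViewsChanged(30000, 2, cache0, cache1);
* }</pre>
*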
* @param timeout max number of milliseconds to block for
* @param finalViewSize desired final view size
* @param caches caches representing current, or expected members in the cluster.
*/
public static void blockUntilViewsChanged(long timeout, int finalViewSize, Cache<?, ?>... caches) {
blockUntilViewsChanged(caches, timeout, finalViewSize);
}
private static void blockUntilViewsChanged(Cache<?, ?>[] caches, long timeout, int finalViewSize) {
long failTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < failTime) {
sleepThread(100);
if (areCacheViewsChanged(caches, finalViewSize)) {
return;
}
}
List<List<Address>> allViews = new ArrayList<>(caches.length);
for (Cache<?, ?> cache : caches) {
allViews.add(cache.getCacheManager().getMembers());
}
throw new RuntimeException(String.format(
"Timed out before caches had changed views (%s) to contain %d members",
allViews, finalViewSize));
}
private static boolean areCacheViewsChanged(Cache<?, ?>[] caches, int finalViewSize) {
for (Cache<?, ?> cache : caches) {
EmbeddedCacheManager cacheManager = cache.getCacheManager();
if (!isCacheViewChanged(cacheManager.getMembers(), finalViewSize)) {
return false;
}
}
return true;
}
private static boolean isCacheViewChanged(List<Address> members, int finalViewSize) {
return !(members == null || finalViewSize != members.size());
}
/**
* Puts the current thread to sleep for the desired number of ms, suppressing any exceptions.
*
* @param sleeptime number of ms to sleep
*/
public static void sleepThread(long sleeptime) {
sleepThread(sleeptime, null);
}
public static void sleepThread(long sleeptime, String messageOnInterrupt) {
try {
Thread.sleep(sleeptime);
} catch (InterruptedException ie) {
if (messageOnInterrupt != null)
log.error(messageOnInterrupt);
}
}
private static void sleepThreadInt(long sleeptime, String messageOnInterrupt) throws InterruptedException {
try {
Thread.sleep(sleeptime);
} catch (InterruptedException ie) {
if (messageOnInterrupt != null)
log.error(messageOnInterrupt);
throw ie;
}
}
public static void sleepRandom(int maxTime) {
sleepThread(random.nextInt(maxTime));
}
public static void killCacheManagers(CacheContainer... cacheContainers) {
EmbeddedCacheManager[] cms = new EmbeddedCacheManager[cacheContainers.length];
for (int i = 0; i < cacheContainers.length; i++) cms[i] = (EmbeddedCacheManager) cacheContainers[i];
killCacheManagers(cms);
}
public static void killCacheManagers(EmbeddedCacheManager... cacheManagers) {
killCacheManagers(Arrays.asList(cacheManagers));
}
public static void killCacheManagers(List<? extends EmbeddedCacheManager> cacheManagers) {
// Stop the managers in reverse order to prevent each of them from becoming coordinator in turn
if (cacheManagers != null) {
for (int i = cacheManagers.size() - 1; i >= 0; i--) {
EmbeddedCacheManager cm = cacheManagers.get(i);
try {
if (cm != null) {
SecurityActions.stopManager(cm);
}
} catch (Throwable e) {
log.warn("Problems killing cache manager " + cm, e);
}
}
}
}
public static void clearContent(EmbeddedCacheManager... cacheManagers) {
clearContent(Arrays.asList(cacheManagers));
}
public static void clearContent(List<? extends EmbeddedCacheManager> cacheManagers) {
if (cacheManagers != null) {
for (EmbeddedCacheManager cm : cacheManagers) {
try {
clearContent(cm);
} catch (Throwable e) {
log.warn("Problems clearing cache manager " + cm, e);
}
}
}
}
public static void clearContent(EmbeddedCacheManager cacheContainer) {
if (cacheContainer != null && cacheContainer.getStatus().allowInvocations()) {
Set<Cache<?, ?>> runningCaches = getRunningCaches(cacheContainer);
for (Cache<?, ?> cache : runningCaches) {
clearRunningTx(cache);
}
if (!cacheContainer.getStatus().allowInvocations()) return;
for (Cache<?, ?> cache : runningCaches) {
try {
clearCacheLoader(cache);
removeInMemoryData(cache);
} catch (Exception e) {
log.errorf(e, "Failed to clear cache %s after test", cache);
}
}
}
}
private static Set<String> getOrderedCacheNames(EmbeddedCacheManager cacheContainer) {
Set<String> caches = new LinkedHashSet<>();
try {
DependencyGraph<String> graph =
TestingUtil.extractGlobalComponent(cacheContainer, DependencyGraph.class,
KnownComponentNames.CACHE_DEPENDENCY_GRAPH);
caches.addAll(graph.topologicalSort());
} catch (Exception ignored) {
}
return caches;
}
private static Set<Cache<?, ?>> getRunningCaches(EmbeddedCacheManager cacheContainer) {
if (cacheContainer == null || !cacheContainer.getStatus().allowInvocations())
return Collections.emptySet();
Set<String> running = new LinkedHashSet<>(getOrderedCacheNames(cacheContainer));
extractGlobalComponent(cacheContainer, InternalCacheRegistry.class).filterPrivateCaches(running);
running.addAll(cacheContainer.getCacheNames());
extractGlobalConfiguration(cacheContainer).defaultCacheName().ifPresent(running::add);
HashSet<Cache<?, ?>> runningCaches = new LinkedHashSet<>();
for (String cacheName : running) {
Cache<?, ?> cache;
try {
cache = cacheContainer.getCache(cacheName, false);
} catch (CacheException ignoreCache) {
// Ignore caches that have not started correctly
continue;
}
if (cache != null && cache.getStatus().allowInvocations())
runningCaches.add(cache);
}
return runningCaches;
}
public static GlobalConfiguration extractGlobalConfiguration(EmbeddedCacheManager cacheContainer) {
return SecurityActions.getCacheManagerConfiguration(cacheContainer);
}
private static void clearRunningTx(Cache<?, ?> cache) {
if (cache != null) {
TransactionManager txm = TestingUtil.getTransactionManager(cache);
killTransaction(txm);
}
}
public static void clearCacheLoader(Cache<?, ?> cache) {
PersistenceManager persistenceManager = TestingUtil.extractComponent(cache, PersistenceManager.class);
CompletionStages.join(persistenceManager.clearAllStores(BOTH));
}
public static <K, V> List<DummyInMemoryStore> cachestores(List<Cache<K, V>> caches) {
List<DummyInMemoryStore> l = new LinkedList<>();
for (Cache<K, V> c : caches)
l.add(TestingUtil.getFirstStore(c));
return l;
}
private static void removeInMemoryData(Cache<?, ?> cache) {
log.debugf("Cleaning data for cache %s", cache);
InternalDataContainer<?, ?> dataContainer = TestingUtil.extractComponent(cache, InternalDataContainer.class);
if (log.isDebugEnabled())
log.debugf("Data container size before clear: %d", dataContainer.sizeIncludingExpired());
dataContainer.clear();
}
/**
* Kills a cache - stops it, clears any data in any stores, and rolls back any associated txs
*/
public static void killCaches(Cache<?, ?>... caches) {
killCaches(Arrays.asList(caches));
}
/**
* Kills a cache - stops it and rolls back any associated txs
*/
public static void killCaches(Collection<? extends Cache<?, ?>> caches) {
for (Cache<?, ?> c : caches) {
try {
if (c != null && c.getStatus() == ComponentStatus.RUNNING) {
TransactionManager tm = c.getAdvancedCache().getTransactionManager();
if (tm != null) {
try {
tm.rollback();
} catch (Exception e) {
// don't care
}
}
// retrieve the size before calling log, as evaluating the set may cause recursive log calls
long size = log.isTraceEnabled() ? c.getAdvancedCache().withFlags(Flag.CACHE_MODE_LOCAL).size() : 0;
if (c.getAdvancedCache().getRpcManager() != null) {
log.tracef("Local size on %s before stopping: %d", c.getAdvancedCache().getRpcManager().getAddress(), size);
} else {
log.tracef("Local size before stopping: %d", size);
}
c.stop();
}
} catch (Throwable t) {
log.errorf(t, "Error killing cache %s", c.getName());
}
}
}
/**
* Clears transaction with the current thread in the given transaction manager.
*
* @param txManager a TransactionManager to be cleared
*/
public static void killTransaction(TransactionManager txManager) {
if (txManager != null) {
try {
if (txManager.getTransaction() != null) {
txManager.rollback();
}
} catch (Exception e) {
// don't care
}
}
}
/**
* Clears any associated transactions with the current thread in the caches' transaction managers.
*/
public static void killTransactions(Cache<?, ?>... caches) {
for (Cache<?, ?> c : caches) {
if (c != null && c.getStatus() == ComponentStatus.RUNNING) {
TransactionManager tm = getTransactionManager(c);
if (tm != null) {
try {
tm.rollback();
} catch (Exception e) {
// don't care
}
}
}
}
}
/**
* For testing only - introspects a cache and extracts the ComponentRegistry
*
* @param cache cache to introspect
* @return component registry
*/
public static ComponentRegistry extractComponentRegistry(Cache<?, ?> cache) {
return SecurityActions.getCacheComponentRegistry(cache.getAdvancedCache());
}
public static GlobalComponentRegistry extractGlobalComponentRegistry(CacheContainer cacheContainer) {
return SecurityActions.getGlobalComponentRegistry((EmbeddedCacheManager) cacheContainer);
}
public static LockManager extractLockManager(Cache<?, ?> cache) {
return extractComponentRegistry(cache).getComponent(LockManager.class);
}
public static GlobalMarshaller extractGlobalMarshaller(EmbeddedCacheManager cm) {
GlobalComponentRegistry gcr = extractGlobalComponentRegistry(cm);
return (GlobalMarshaller) gcr.getComponent(StreamingMarshaller.class, KnownComponentNames.INTERNAL_MARSHALLER);
}
public static PersistenceMarshallerImpl extractPersistenceMarshaller(EmbeddedCacheManager cm) {
return extractGlobalComponentRegistry(cm).getComponent(PersistenceMarshallerImpl.class, KnownComponentNames.PERSISTENCE_MARSHALLER);
}
public static AsyncInterceptorChain extractInterceptorChain(Cache<?, ?> cache) {
return extractComponent(cache, AsyncInterceptorChain.class);
}
public static <K> LocalizedCacheTopology extractCacheTopology(Cache<K, ?> cache) {
return cache.getAdvancedCache().getDistributionManager().getCacheTopology();
}
/**
* Add a hook to the cache startup sequence that allows replacing an existing component with a mock.
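*
* A minimal sketch that replaces the persistence manager of every starting cache with a Mockito spy
* (the manager reference and the spy are just placeholders for whatever replacement a test needs):
* <pre> {@code
* TestingUtil.addCacheStartingHook(cacheManager, (name, cr) -> {
*    BasicComponentRegistry bcr = cr.getComponent(BasicComponentRegistry.class);
*    PersistenceManager original = bcr.getComponent(PersistenceManager.class).wired();
*    bcr.replaceComponent(PersistenceManager.class.getName(), Mockito.spy(original), true);
* });
* }</pre>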
*/
public static void addCacheStartingHook(EmbeddedCacheManager cacheContainer, BiConsumer<String, ComponentRegistry> consumer) {
GlobalComponentRegistry gcr = extractGlobalComponentRegistry(cacheContainer);
extractField(gcr, "moduleLifecycles");
TestingUtil.<Collection<ModuleLifecycle>>replaceField(gcr, "moduleLifecycles", moduleLifecycles -> {
Collection<ModuleLifecycle> copy = new ArrayList<>(moduleLifecycles);
copy.add(new ModuleLifecycle() {
@Override
public void cacheStarting(ComponentRegistry cr, Configuration configuration, String cacheName) {
consumer.accept(cacheName, cr);
}
});
return copy;
});
}
/**
* Replaces an existing interceptor of the given type in the interceptor chain with a new interceptor
* instance passed
* as parameter.
*
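* For example, to swap the default call interceptor for a custom test interceptor
* (the interceptor types are illustrative):
* <pre> {@code
* boolean replaced = TestingUtil.replaceInterceptor(cache, new CountingInterceptor(), CallInterceptor.class);
* }</pre>
*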
* @param replacingInterceptor the interceptor to add to the interceptor chain
* @param toBeReplacedInterceptorType the type of interceptor that should be swapped with the new one
* @return true if the interceptor was replaced
*/
public static boolean replaceInterceptor(Cache<?, ?> cache, AsyncInterceptor replacingInterceptor, Class<? extends AsyncInterceptor> toBeReplacedInterceptorType) {
ComponentRegistry cr = extractComponentRegistry(cache);
// make sure all interceptors here are wired.
cr.wireDependencies(replacingInterceptor);
AsyncInterceptorChain inch = cr.getComponent(AsyncInterceptorChain.class);
return inch.replaceInterceptor(replacingInterceptor, toBeReplacedInterceptorType);
}
/**
* Blocks until the cache has reached a specified state.
*
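* For example, to wait for a cache to finish stopping (a sketch):
* <pre> {@code
* TestingUtil.blockUntilCacheStatusAchieved(cache, ComponentStatus.TERMINATED, 10000);
* }</pre>
*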
* @param cache cache to watch
* @param cacheStatus status to wait for
* @param timeout timeout to wait for
*/
public static void blockUntilCacheStatusAchieved(Cache<?, ?> cache, ComponentStatus cacheStatus, long timeout) {
AdvancedCache<?, ?> spi = cache.getAdvancedCache();
long killTime = System.currentTimeMillis() + timeout;
while (System.currentTimeMillis() < killTime) {
if (spi.getStatus() == cacheStatus) return;
sleepThread(50);
}
throw new RuntimeException("Timed out waiting for condition");
}
public static void blockUntilViewsReceived(int timeout, Collection<?> caches) {
Object first = caches.iterator().next();
if (first instanceof Cache) {
blockUntilViewsReceived(timeout, caches.toArray(new Cache[0]));
} else {
blockUntilViewsReceived(timeout, caches.toArray(new CacheContainer[0]));
}
}
public static void blockUntilViewsReceived(int timeout, boolean barfIfTooManyMembers, Collection<?> caches) {
Object first = caches.iterator().next();
if (first instanceof Cache) {
blockUntilViewsReceived(timeout, barfIfTooManyMembers, caches.toArray(new Cache[]{}));
} else {
blockUntilViewsReceived(timeout, barfIfTooManyMembers, caches.toArray(new CacheContainer[]{}));
}
}
public static CommandsFactory extractCommandsFactory(Cache<?, ?> cache) {
if (cache instanceof AbstractDelegatingCache) {
// Need to unwrap to the base cache
return extractCommandsFactory(extractField(cache, "cache"));
}
return (CommandsFactory) extractField(cache, "commandsFactory");
}
public static void dumpCacheContents(List<Cache<?, ?>> caches) {
System.out.println("**** START: Cache Contents ****");
int count = 1;
for (Cache<?, ?> c : caches) {
if (c == null) {
System.out.println(" ** Cache " + count + " is null!");
} else {
EmbeddedCacheManager cacheManager = c.getCacheManager();
System.out.println(" ** Cache " + count + " is " + cacheManager.getAddress());
}
count++;
}
System.out.println("**** END: Cache Contents ****");
}
public static void dumpCacheContents(Cache<?, ?>... caches) {
dumpCacheContents(Arrays.asList(caches));
}
/**
* Extracts a component of a given type from the cache's internal component registry
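*
* For example (a sketch):
* <pre> {@code
* LockManager lockManager = TestingUtil.extractComponent(cache, LockManager.class);
* }</pre>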
*/
public static <T> T extractComponent(Cache<?, ?> cache, Class<T> componentType) {
if (componentType.equals(DataContainer.class)) {
throw new UnsupportedOperationException("Should extract InternalDataContainer");
}
ComponentRegistry cr = extractComponentRegistry(cache.getAdvancedCache());
return cr.getComponent(componentType);
}
/**
* Extracts a component of a given type from the cache's internal component registry
*/
public static <T> T extractGlobalComponent(CacheContainer cacheContainer, Class<T> componentType) {
GlobalComponentRegistry gcr = extractGlobalComponentRegistry(cacheContainer);
return gcr.getComponent(componentType);
}
public static <T> T extractGlobalComponent(CacheContainer cacheContainer, Class<T> componentType, String componentName) {
GlobalComponentRegistry gcr = extractGlobalComponentRegistry(cacheContainer);
return gcr.getComponent(componentType, componentName);
}
public static TransactionManager getTransactionManager(Cache<?, ?> cache) {
return cache == null ? null : cache.getAdvancedCache().getTransactionManager();
}
/**
* Replaces a component in a running cache
*
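* A common use is swapping in a Mockito spy for the real component (a sketch; the spy is illustrative):
* <pre> {@code
* RpcManager original = TestingUtil.extractComponent(cache, RpcManager.class);
* TestingUtil.replaceComponent(cache, RpcManager.class, Mockito.spy(original), true);
* }</pre>
*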
* @param cache cache in which to replace component
* @param componentType component type of which to replace
* @param replacementComponent new instance
* @param rewire if true, ComponentRegistry.rewire() is called after replacing.
* @return the original component that was replaced
*/
public static <T> T replaceComponent(Cache<?, ?> cache, Class<? extends T> componentType, T replacementComponent, boolean rewire) {
if (componentType.equals(DataContainer.class)) {
throw new UnsupportedOperationException();
}
ComponentRegistry cr = extractComponentRegistry(cache);
BasicComponentRegistry bcr = cr.getComponent(BasicComponentRegistry.class);
ComponentRef<? extends T> old = bcr.getComponent(componentType);
bcr.replaceComponent(componentType.getName(), replacementComponent, true);
cr.cacheComponents();
if (rewire) cr.rewire();
return old != null ? old.wired() : null;
}
/**
* Replaces a component in a running cache. This can also optionally stop the component before rewiring, which can be
* important because rewiring starts the component (you wouldn't want the component to be started twice).
*
* @param cache cache in which to replace component
* @param componentType component type of which to replace
* @param replacementComponent new instance
* @param rewire if true, ComponentRegistry.rewire() is called after replacing.
* @param stopBeforeWire stops the new component before wiring (the registry will start it again)
* @return the original component that was replaced
*/
public static <T extends Lifecycle> T replaceComponent(Cache<?, ?> cache, Class<T> componentType, T replacementComponent, boolean rewire,
boolean stopBeforeWire) {
if (stopBeforeWire) {
replacementComponent.stop();
}
return replaceComponent(cache, componentType, replacementComponent, rewire);
}
/**
* Replaces a component in a running cache manager (global component registry)
*
* @param cacheContainer cache in which to replace component
* @param componentType component type of which to replace
* @param replacementComponent new instance
* @param rewire if true, ComponentRegistry.rewire() is called after replacing.
* @return the original component that was replaced
*/
public static <T> T replaceComponent(CacheContainer cacheContainer, Class<T> componentType, T replacementComponent,
boolean rewire) {
return replaceComponent(cacheContainer, componentType, componentType.getName(), replacementComponent, rewire);
}
/**
* Same as {@link TestingUtil#replaceComponent(CacheContainer, Class, Object, boolean)} except that you can provide
* an optional name, to replace specifically named components.
*
* @param cacheContainer cache in which to replace component
* @param componentType component type of which to replace
* @param name name of the component
* @param replacementComponent new instance
* @param rewire if true, ComponentRegistry.rewire() is called after replacing.
* @return the original component that was replaced
*/
public static <T> T replaceComponent(CacheContainer cacheContainer, Class<T> componentType, String name, T replacementComponent, boolean rewire) {
GlobalComponentRegistry cr = extractGlobalComponentRegistry(cacheContainer);
BasicComponentRegistry bcr = cr.getComponent(BasicComponentRegistry.class);
ComponentRef<T> old = bcr.getComponent(componentType);
bcr.replaceComponent(name, replacementComponent, true);
if (rewire) {
cr.rewire();
cr.rewireNamedRegistries();
}
return old != null ? old.wired() : null;
}
public static <K, V> CacheLoader<K, V> getCacheLoader(Cache<K, V> cache) {
if (cache.getCacheConfiguration().persistence().usingStores()) {
return TestingUtil.getFirstLoader(cache);
} else {
return null;
}
}
public static String printCache(Cache<?, ?> cache) {
DataContainer<?, ?> dataContainer = TestingUtil.extractComponent(cache, InternalDataContainer.class);
Iterator<? extends InternalCacheEntry<?, ?>> it = dataContainer.iterator();
StringBuilder builder = new StringBuilder(cache.getName() + "[");
while (it.hasNext()) {
InternalCacheEntry<?, ?> ce = it.next();
builder.append(ce.getKey()).append("=").append(ce.getValue()).append(",l=").append(ce.getLifespan())
.append("; ");
}
builder.append("]");
return builder.toString();
}
public static <K> Set<K> getInternalKeys(Cache<K, ?> cache) {
DataContainer<K, ?> dataContainer = cache.getAdvancedCache().getDataContainer();
Set<K> keys = new HashSet<>();
for (CacheEntry<K, ?> entry : dataContainer) {
keys.add(entry.getKey());
}
return keys;
}
public static <V> Collection<V> getInternalValues(Cache<?, V> cache) {
DataContainer<?, V> dataContainer = cache.getAdvancedCache().getDataContainer();
Collection<V> values = new ArrayList<>();
for (CacheEntry<?, V> entry : dataContainer) {
values.add(entry.getValue());
}
return values;
}
public static DISCARD getDiscardForCache(EmbeddedCacheManager cacheManager) throws Exception {
JGroupsTransport jgt = (JGroupsTransport) TestingUtil.extractGlobalComponent(cacheManager, Transport.class);
JChannel ch = jgt.getChannel();
ProtocolStack ps = ch.getProtocolStack();
DISCARD discard = new DISCARD();
discard.excludeItself(false);
ps.insertProtocol(discard, ProtocolStack.Position.ABOVE, TP.class);
return discard;
}
/**
* Inserts a DELAY protocol in the JGroups stack used by the cache, and returns it.
* The DELAY protocol can then be used to inject delays, in milliseconds, on both the receiving
* and the sending side.
*
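* For instance, to add 100ms of latency to every message this node sends (a sketch):
* <pre> {@code
* DELAY delay = TestingUtil.setDelayForCache(cache, 0, 100);
* // ... exercise the cluster under latency ...
* delay.setOutDelay(0);
* }</pre>
*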
* @param cache cache to inject
* @param in_delay_millis inbound delay in millis
* @param out_delay_millis outbound delay in millis
* @return a reference to the DELAY instance being used by the JGroups stack
*/
public static DELAY setDelayForCache(Cache<?, ?> cache, int in_delay_millis, int out_delay_millis) throws Exception {
JGroupsTransport jgt = (JGroupsTransport) TestingUtil.extractComponent(cache, Transport.class);
JChannel ch = jgt.getChannel();
ProtocolStack ps = ch.getProtocolStack();
DELAY delay = ps.findProtocol(DELAY.class);
if (delay == null) {
delay = new DELAY();
ps.insertProtocol(delay, ProtocolStack.Position.ABOVE, TP.class);
}
delay.setInDelay(in_delay_millis);
delay.setOutDelay(out_delay_millis);
return delay;
}
public static String k() {
return k(0);
}
public static String k(int index) {
return k(CallerId.getCallerMethodName(2), index);
}
public static String k(int index, String prefix) {
return String.format("%s-k%d-%s", prefix, index, CallerId.getCallerMethodName(2));
}
public static String v() {
return v(0);
}
public static String v(int index) {
return v(CallerId.getCallerMethodName(2), index);
}
public static String v(int index, String prefix) {
return String.format("%s-v%d-%s", prefix, index, CallerId.getCallerMethodName(2));
}
public static String k(Method method, int index) {
return "k" + index + '-' + method.getName();
}
public static String k(String method, int index) {
return "k" + index + '-' + method;
}
public static String v(Method method, int index) {
return "v" + index + '-' + method.getName();
}
public static String v(String method, int index) {
return "v" + index + '-' + method;
}
public static String k(Method method) {
return k(method, 0);
}
public static String v(Method method) {
return v(method, 0);
}
public static String k(Method m, String prefix) {
return prefix + m.getName();
}
public static String v(Method m, String prefix) {
return prefix + m.getName();
}
public static String v(Method m, String prefix, int index) {
return String.format("%s-v%d-%s", prefix, index, m.getName());
}
public static TransactionTable getTransactionTable(Cache<?, ?> cache) {
return extractComponent(cache, TransactionTable.class);
}
public static ObjectName getCacheManagerObjectName(String jmxDomain) {
return getCacheManagerObjectName(jmxDomain, "DefaultCacheManager");
}
public static ObjectName getCacheManagerObjectName(String jmxDomain, String cacheManagerName) {
return getCacheManagerObjectName(jmxDomain, cacheManagerName, "CacheManager");
}
public static ObjectName getCacheManagerObjectName(String jmxDomain, String cacheManagerName, String component) {
try {
return new ObjectName(jmxDomain + ":type=CacheManager,name=" + ObjectName.quote(cacheManagerName) + ",component=" + component);
} catch (MalformedObjectNameException e) {
throw new RuntimeException(e);
}
}
public static ObjectName getCacheObjectName(String jmxDomain, String cacheName) {
return getCacheObjectName(jmxDomain, cacheName, "Cache");
}
public static ObjectName getCacheObjectName(String jmxDomain, String cacheName, String component) {
return getCacheObjectName(jmxDomain, cacheName, component, "DefaultCacheManager");
}
public static ObjectName getCacheObjectName(String jmxDomain, String cacheName, String component, String cacheManagerName) {
if (!cacheName.contains("(") || !cacheName.endsWith(")")) {
throw new IllegalArgumentException("Cache name does not appear to include a cache mode suffix: " + cacheName);
}
try {
return new ObjectName(jmxDomain + ":type=Cache,manager=" + ObjectName.quote(cacheManagerName)
+ ",name=" + ObjectName.quote(cacheName) + ",component=" + component);
} catch (MalformedObjectNameException e) {
throw new RuntimeException(e);
}
}
public static ObjectName getJGroupsChannelObjectName(EmbeddedCacheManager cacheManager) {
GlobalConfiguration cfg = cacheManager.getCacheManagerConfiguration();
try {
return new ObjectName(String.format("%s:type=channel,cluster=%s,manager=%s",
cfg.jmx().domain(),
ObjectName.quote(cacheManager.getClusterName()),
ObjectName.quote(cfg.cacheManagerName())));
} catch (MalformedObjectNameException e) {
throw new RuntimeException(e);
}
}
public static boolean existsDomain(MBeanServer mBeanServer, String domain) {
for (String d : mBeanServer.getDomains()) {
if (domain.equals(d)) return true;
}
return false;
}
public static void checkMBeanOperationParameterNaming(MBeanServer mBeanServer, ObjectName objectName) throws JMException {
MBeanInfo mBeanInfo = mBeanServer.getMBeanInfo(objectName);
for (MBeanOperationInfo op : mBeanInfo.getOperations()) {
for (MBeanParameterInfo param : op.getSignature()) {
// assert that all operation parameters have a proper name (not an autogenerated p0, p1, ...)
assertFalse(param.getName().matches("p[0-9]+"));
}
}
}
public static String generateRandomString(int numberOfChars) {
return generateRandomString(numberOfChars, new Random(System.currentTimeMillis()));
}
public static String generateRandomString(int numberOfChars, Random r) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < numberOfChars; i++) sb.append((char) (64 + r.nextInt(26)));
return sb.toString();
}
/**
* Verifies the cache doesn't contain any lock
*/
public static void assertNoLocks(Cache<?, ?> cache) {
LockManager lm = TestingUtil.extractLockManager(cache);
if (lm != null) {
for (Object key : cache.keySet()) assert !lm.isLocked(key);
}
}
/**
* Call an operation within a transaction. This method guarantees that the
* right pattern is used to make sure that the transaction is always either
* committed or rolled back.
*
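* A minimal sketch of a transactional put:
* <pre> {@code
* TransactionManager tm = TestingUtil.getTransactionManager(cache);
* Object previous = TestingUtil.withTx(tm, () -> cache.put("k", "v"));
* }</pre>
*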
* @param tm transaction manager
* @param c callable instance to run within a transaction
* @param <T> type returned from the callable
* @return returns whatever the callable returns
*/
public static <T> T withTx(TransactionManager tm, Callable<T> c) throws Exception {
return ((Callable<T>) () -> {
tm.begin();
try {
return ((Callable<? extends T>) c).call();
} catch (Exception e) {
tm.setRollbackOnly();
throw e;
} finally {
if (tm.getStatus() == Status.STATUS_ACTIVE) tm.commit();
else tm.rollback();
}
}).call();
}
/**
* Invoke a task using a cache manager. This method guarantees that the
* cache manager used in the task will be cleaned up after the task has
* completed, regardless of the task outcome.
*
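* Typical usage wraps the whole test body so the manager is always cleaned up (a sketch; the factory call
* creating the manager is assumed to come from the test framework):
* <pre> {@code
* TestingUtil.withCacheManager(new CacheManagerCallable(TestCacheManagerFactory.createCacheManager()) {
*    public void call() {
*       cm.getCache().put("k", "v");
*    }
* });
* }</pre>
*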
* @param c task to execute
*/
public static void withCacheManager(CacheManagerCallable c) {
try {
c.call();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
if (c.clearBeforeKill()) {
TestingUtil.clearContent(c.cm);
}
TestingUtil.killCacheManagers(c.cm);
}
}
/**
* Invoke a task using a cache manager created by given supplier function.
* This method guarantees that the cache manager created in the task will
* be cleaned up after the task has completed, regardless of the task outcome.
*
* @param s cache manager supplier function
* @param c consumer function to execute with cache manager
*/
public static void withCacheManager(Supplier<EmbeddedCacheManager> s,
Consumer<EmbeddedCacheManager> c) {
EmbeddedCacheManager cm = null;
try {
cm = s.get();
c.accept(cm);
} finally {
if (cm != null) TestingUtil.killCacheManagers(cm);
}
}
/**
* Invoke a task using a cache manager.
* This method guarantees that the cache manager will
* be cleaned up after the task has completed, regardless of the task outcome.
*
* @param cm cache manager
* @param c consumer function to execute with cache manager
*/
public static void withCacheManager(EmbeddedCacheManager cm,
Consumer<EmbeddedCacheManager> c) {
try {
c.accept(cm);
} finally {
TestingUtil.killCacheManagers(cm);
}
}
/**
* Invoke a task using a several cache managers. This method guarantees
* that the cache managers used in the task will be cleaned up after the
* task has completed, regardless of the task outcome.
*
* @param c task to execute
*/
public static void withCacheManagers(MultiCacheManagerCallable c) {
try {
c.call();
} catch (RuntimeException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException(e);
} finally {
TestingUtil.killCacheManagers(c.cms);
}
}
public static String getDefaultCacheName(EmbeddedCacheManager cm) {
return extractGlobalConfiguration(cm).defaultCacheName().get();
}
/**
* Returns true if at least "duration" millis elapsed since the specified "start" time (millis).
*/
public static boolean moreThanDurationElapsed(long start, long duration) {
return now() - duration >= start;
}
/**
* Returns the current time in millis, based on {@link System#nanoTime()}.
*/
public static long now() {
return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
}
public static Metadata metadata(Long lifespan, Long maxIdle) {
return new EmbeddedMetadata.Builder().lifespan(lifespan != null ? lifespan : -1)
.maxIdle(maxIdle != null ? maxIdle : -1).build();
}
public static Metadata metadata(Integer lifespan, Integer maxIdle) {
return new EmbeddedMetadata.Builder().lifespan(lifespan != null ? lifespan : -1)
.maxIdle(maxIdle != null ? maxIdle : -1).build();
}
public static <T extends NonBlockingStore<K, V>, K, V> T getFirstStore(Cache<K, V> cache) {
return getStore(cache, 0, true);
}
@SuppressWarnings({"unchecked", "unchecked cast"})
public static <T extends NonBlockingStore<K, V>, K, V> T getStore(Cache<K, V> cache, int position, boolean unwrapped) {
PersistenceManagerImpl persistenceManager = getActualPersistenceManager(cache);
NonBlockingStore<K, V> nonBlockingStore = persistenceManager.<K, V>getAllStores(characteristics ->
!characteristics.contains(NonBlockingStore.Characteristic.WRITE_ONLY)).get(position);
if (unwrapped && nonBlockingStore instanceof DelegatingNonBlockingStore) {
nonBlockingStore = ((DelegatingNonBlockingStore<K, V>) nonBlockingStore).delegate();
}
return (T) nonBlockingStore;
}
public static <K, V> WaitDelegatingNonBlockingStore<K, V> getFirstStoreWait(Cache<K, V> cache) {
return getStoreWait(cache, 0, true);
}
@SuppressWarnings({"cast"})
public static <K, V> WaitDelegatingNonBlockingStore<K, V> getStoreWait(Cache<K, V> cache, int position, boolean unwrapped) {
NonBlockingStore<K, V> nonBlockingStore = getStore(cache, position, unwrapped);
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
return new WaitDelegatingNonBlockingStore<>(nonBlockingStore, keyPartitioner);
}
public static <T extends CacheLoader<K, V>, K, V> T getFirstLoader(Cache<K, V> cache) {
PersistenceManagerImpl persistenceManager = getActualPersistenceManager(cache);
NonBlockingStore<K, V> nonBlockingStore = persistenceManager.<K, V>getAllStores(characteristics ->
!characteristics.contains(NonBlockingStore.Characteristic.WRITE_ONLY)).get(0);
// TODO: Once stores convert to non blocking implementations this will change
//noinspection unchecked
return (T) ((NonBlockingStoreAdapter<K, V>) nonBlockingStore).loader();
}
@SuppressWarnings("unchecked")
public static <T extends CacheWriter<K, V>, K, V> T getFirstWriter(Cache<K, V> cache) {
return getWriter(cache, 0);
}
public static <T extends CacheWriter<K, V>, K, V> T getWriter(Cache<K, V> cache, int position) {
PersistenceManagerImpl persistenceManager = getActualPersistenceManager(cache);
NonBlockingStore<K, V> nonBlockingStore = persistenceManager.<K, V>getAllStores(characteristics ->
!characteristics.contains(NonBlockingStore.Characteristic.READ_ONLY)).get(position);
// TODO: Once stores convert to non blocking implementations this will change
return (T) ((NonBlockingStoreAdapter<K, V>) nonBlockingStore).writer();
}
@SuppressWarnings("unchecked")
public static <T extends CacheWriter<K, V>, K, V> T getFirstTxWriter(Cache<K, V> cache) {
PersistenceManagerImpl persistenceManager = getActualPersistenceManager(cache);
NonBlockingStore<K, V> nonBlockingStore = persistenceManager.<K, V>getAllStores(characteristics ->
characteristics.contains(NonBlockingStore.Characteristic.TRANSACTIONAL)).get(0);
// TODO: Once stores convert to non blocking implementations this will change
return (T) ((NonBlockingStoreAdapter<K, V>) nonBlockingStore).transactionalStore();
}
private static PersistenceManagerImpl getActualPersistenceManager(Cache<?, ?> cache) {
PersistenceManager persistenceManager = extractComponent(cache, PersistenceManager.class);
if (persistenceManager instanceof DelegatingPersistenceManager) {
return (PersistenceManagerImpl) ((DelegatingPersistenceManager) persistenceManager).getActual();
}
return (PersistenceManagerImpl) persistenceManager;
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(AdvancedLoadWriteStore<K, V> cl, Predicate<K> filter) {
return Flowable.fromPublisher(cl.entryPublisher(filter, true, true))
.collectInto(new HashSet<MarshallableEntry<K, V>>(), Set::add)
.blockingGet();
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(AdvancedLoadWriteStore<K, V> cl) {
return allEntries(cl, null);
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(NonBlockingStore<K, V> store) {
return allEntries(store, IntSets.immutableSet(0), null);
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(NonBlockingStore<K, V> store, Predicate<? super K> filter) {
return allEntries(store, IntSets.immutableSet(0), filter);
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(NonBlockingStore<K, V> store, IntSet segments) {
return allEntries(store, segments, null);
}
public static <K, V> Set<MarshallableEntry<K, V>> allEntries(NonBlockingStore<K, V> store, IntSet segments,
Predicate<? super K> filter) {
return Flowable.fromPublisher(store.publishEntries(segments, filter, true))
.collectInto(new HashSet<MarshallableEntry<K, V>>(), Set::add)
.blockingGet();
}
public static void outputPropertiesToXML(String outputFile, Properties properties) throws IOException {
Properties sorted = new Properties() {
@Override
public Set<Object> keySet() {
return Collections.unmodifiableSet(new TreeSet<>(super.keySet()));
}
@Override
public synchronized Enumeration<Object> keys() {
return Collections.enumeration(new TreeSet<>(super.keySet()));
}
@Override
public Set<String> stringPropertyNames() {
return Collections.unmodifiableSet(new TreeSet<>(super.stringPropertyNames()));
}
};
sorted.putAll(properties);
try (OutputStream stream = new FileOutputStream(outputFile)) {
sorted.storeToXML(stream, null);
}
}
public static <K, V> void writeToAllStores(K key, V value, Cache<K, V> cache) {
PersistenceManager pm = extractComponent(cache, PersistenceManager.class);
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
CompletionStages.join(pm.writeToAllNonTxStores(MarshalledEntryUtil.create(key, value, cache), keyPartitioner.getSegment(key), BOTH));
}
public static <K, V> boolean deleteFromAllStores(K key, Cache<K, V> cache) {
PersistenceManager pm = extractComponent(cache, PersistenceManager.class);
KeyPartitioner keyPartitioner = extractComponent(cache, KeyPartitioner.class);
return CompletionStages.join(pm.deleteFromAllStores(key, keyPartitioner.getSegment(key), BOTH));
}
public static Subject makeSubject(String... principals) {
Set<Principal> set = new LinkedHashSet<>();
if (principals.length > 0) {
set.add(new TestingUtil.TestPrincipal(principals[0]));
for (int i = 1; i < principals.length; i++) {
set.add(new GroupPrincipal(principals[i]));
}
}
return new Subject(true, set, Collections.emptySet(), Collections.emptySet());
}
public static Map<AuthorizationPermission, Subject> makeAllSubjects() {
HashMap<AuthorizationPermission, Subject> subjects = new HashMap<>(AuthorizationPermission.values().length);
for (AuthorizationPermission perm : AuthorizationPermission.values()) {
subjects.put(perm, makeSubject(perm.toString() + "_user", perm.toString()));
}
return subjects;
}
static public void assertAnyEquals(Object expected, Object actual) {
if (expected instanceof byte[] && actual instanceof byte[])
AssertJUnit.assertArrayEquals((byte[]) expected, (byte[]) actual);
else
AssertJUnit.assertEquals(expected, actual);
}
public static void assertBetween(double lowerBound, double upperBound, double actual) {
if (actual < lowerBound || upperBound < actual) {
fail("Expected between:<" + lowerBound + "> and:<" + upperBound + "> but was:<" + actual + ">");
}
}
public static void assertBetween(long lowerBound, long upperBound, long actual) {
if (actual < lowerBound || upperBound < actual) {
fail("Expected between:<" + lowerBound + "> and:<" + upperBound + "> but was:<" + actual + ">");
}
}
public static MBeanServer getMBeanServer(Cache<?, ?> cache) {
return extractComponent(cache, CacheJmxRegistration.class).getMBeanServer();
}
public static MBeanServer getMBeanServer(EmbeddedCacheManager cacheManager) {
return extractGlobalComponent(cacheManager, CacheManagerJmxRegistration.class).getMBeanServer();
}
public static class TestPrincipal implements Principal, Serializable {
String name;
public TestPrincipal(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
@Override
public String toString() {
return "TestPrincipal [name=" + name + "]";
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((name == null) ? 0 : name.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
TestPrincipal other = (TestPrincipal) obj;
if (name == null) {
return other.name == null;
} else return name.equals(other.name);
}
}
public static <T, W extends T> W wrapGlobalComponent(CacheContainer cacheContainer, Class<T> tClass,
WrapFactory<T, W, CacheContainer> factory, boolean rewire) {
T current = extractGlobalComponent(cacheContainer, tClass);
W wrap = factory.wrap(cacheContainer, current);
replaceComponent(cacheContainer, tClass, wrap, rewire);
return wrap;
}
public static <T, W extends T> W wrapGlobalComponent(CacheContainer cacheContainer, Class<T> tClass,
Function<T, W> ctor, boolean rewire) {
T current = extractGlobalComponent(cacheContainer, tClass);
W wrap = ctor.apply(current);
replaceComponent(cacheContainer, tClass, wrap, rewire);
return wrap;
}
public static <T, W extends T> W wrapComponent(Cache<?, ?> cache, Class<T> tClass,
WrapFactory<T, W, Cache<?, ?>> factory, boolean rewire) {
T current = extractComponent(cache, tClass);
W wrap = factory.wrap(cache, current);
replaceComponent(cache, tClass, wrap, rewire);
return wrap;
}
public static <T, W extends T> W wrapComponent(Cache<?, ?> cache, Class<T> tClass, Function<T, W> ctor) {
T current = extractComponent(cache, tClass);
W wrap = ctor.apply(current);
replaceComponent(cache, tClass, wrap, true);
return wrap;
}
public static <T extends PerCacheInboundInvocationHandler>
T wrapInboundInvocationHandler(Cache<?, ?> cache, Function<PerCacheInboundInvocationHandler, T> ctor) {
PerCacheInboundInvocationHandler current = extractComponent(cache, PerCacheInboundInvocationHandler.class);
T wrap = ctor.apply(current);
replaceComponent(cache, PerCacheInboundInvocationHandler.class, wrap, true);
return wrap;
}
public interface WrapFactory<T, W, C> {
W wrap(C wrapOn, T current);
}
public static void expectCause(Throwable t, Class<? extends Throwable> c, String messageRegex) {
for (; ; ) {
if (c.isAssignableFrom(t.getClass())) {
if (messageRegex != null && !Pattern.matches(messageRegex, t.getMessage())) {
throw new RuntimeException(String.format("Exception message '%s' does not match regex '%s'", t.getMessage(), messageRegex), t);
}
return;
}
Throwable cause = t.getCause();
if (cause == null || cause == t) {
throw new RuntimeException("Cannot find a cause of type " + c.getName(), cause);
} else {
t = cause;
}
}
}
public static boolean isTriangleAlgorithm(CacheMode cacheMode, boolean transactional) {
return cacheMode.isDistributed() && !transactional;
}
public static <K, V> Map.Entry<K, V> createMapEntry(K key, V value) {
return new AbstractMap.SimpleEntry<>(key, value);
}
public static <T, U> Map<T, U> mapOf(Object... keyValueKeyValueKeyValue) {
Map<T, U> map = new HashMap<>();
for (int i = 0; i < keyValueKeyValueKeyValue.length; ) {
map.put((T) keyValueKeyValueKeyValue[i++], (U) keyValueKeyValueKeyValue[i++]);
}
return map;
}
@SafeVarargs
public static <T> Set<T> setOf(T... elements) {
if (elements == null)
return Collections.emptySet();
return new HashSet<>(Arrays.asList(elements));
}
/**
* This method sets only fields annotated with <code>@Inject</code>; it does not invoke any injecting methods.
* Named setters are not handled either.
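*
* A sketch of wiring a hand-created component with test doubles (the component and mocks are hypothetical):
* <pre> {@code
* MyComponent component = new MyComponent();
* TestingUtil.inject(component, mockRpcManager, mockTimeService);
* TestingUtil.startComponent(component);
* }</pre>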
*/
public static void inject(Object instance, Object... components) {
TestComponentAccessors.wire(instance, components);
}
public static void startComponent(Object component) {
try {
TestComponentAccessors.start(component);
} catch (Exception e) {
throw new TestException(e);
}
}
public static void stopComponent(Object component) {
try {
TestComponentAccessors.stop(component);
} catch (Exception e) {
throw new TestException(e);
}
}
public static Object named(String name, Object instance) {
return new TestComponentAccessors.NamedComponent(name, instance);
}
public static void cleanUpDataContainerForCache(Cache<?, ?> cache) {
InternalDataContainer<?, ?> dataContainer = extractComponent(cache, InternalDataContainer.class);
dataContainer.cleanUp();
}
// The first call to JBossMarshaller::isMarshallable results in an object actually being serialized; the additional
// call to PersistenceMarshaller::isMarshallable in the GlobalMarshaller may break stats on test Externalizer implementations.
// This is simply a convenience method to initialise the MarshallableTypeHints.
public static void initJbossMarshallerTypeHints(EmbeddedCacheManager cm, Object... objects) {
StreamAwareMarshaller marshaller = extractPersistenceMarshaller(cm);
for (Object o : objects)
marshaller.isMarshallable(o);
}
public static void copy(InputStream is, OutputStream os) throws IOException {
byte[] buffer = new byte[1024];
int length;
while ((length = is.read(buffer)) > 0) {
os.write(buffer, 0, length);
}
}
public static ProtoStreamMarshaller createProtoStreamMarshaller(SerializationContextInitializer sci) {
SerializationContext ctx = ProtobufUtil.newSerializationContext();
sci.registerSchema(ctx);
sci.registerMarshallers(ctx);
return new ProtoStreamMarshaller(ctx);
}
public static <E> Publisher<NonBlockingStore.SegmentedPublisher<E>> singleSegmentPublisher(Publisher<E> flowable) {
return Flowable.just(SingleSegmentPublisher.singleSegment(flowable));
}
public static <E> Publisher<NonBlockingStore.SegmentedPublisher<E>> multipleSegmentPublisher(Publisher<E> flowable,
Function<E, Object> toKeyFunction, KeyPartitioner keyPartitioner) {
return Flowable.fromPublisher(flowable)
.groupBy(e -> keyPartitioner.getSegment(toKeyFunction.apply(e)))
.map(SegmentPublisherWrapper::wrap);
}
public static void defineConfiguration(EmbeddedCacheManager cacheManager, String cacheName, Configuration configuration) {
SecurityActions.defineConfiguration(cacheManager, cacheName, configuration);
}
}
| 84,106
| 40.699058
| 251
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/Mocks.java
|
package org.infinispan.test;
import static org.infinispan.test.TestingUtil.extractGlobalComponent;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.withSettings;
import java.lang.reflect.InvocationTargetException;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import org.infinispan.Cache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.commons.util.ByRef;
import org.infinispan.factories.KnownComponentNames;
import org.infinispan.reactive.publisher.impl.Notifications;
import org.infinispan.reactive.publisher.impl.SegmentAwarePublisherSupplier;
import org.infinispan.reactive.publisher.impl.SegmentPublisherSupplier;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.test.fwk.CheckPoint;
import org.infinispan.commons.util.concurrent.CompletableFutures;
import org.mockito.AdditionalAnswers;
import org.mockito.MockSettings;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.mockito.stubbing.Stubber;
import org.reactivestreams.Publisher;
import io.reactivex.rxjava3.core.Flowable;
/**
* Utility methods for dealing with Mockito mocks.
*
* @author Dan Berindei
* @since 9.0
*/
public class Mocks {
private Mocks() { }
/**
* Checkpoint name that is triggered to tell the test that the code has reached a spot just before invocation. This
* thread will not proceed with the invocation until {@link Mocks#BEFORE_RELEASE} is released.
*/
public static final String BEFORE_INVOCATION = "before_invocation";
/**
* Checkpoint name that this code waits on before the invocation until the test triggers it. This will require
* triggering per invocation if there are more than one.
*/
public static final String BEFORE_RELEASE = "before_release";
/**
* Checkpoint name that is triggered to tell the test that the code has reached a spot just after invocation. This
* thread will not proceed after the invocation until {@link Mocks#AFTER_RELEASE} is released.
*/
public static final String AFTER_INVOCATION = "after_invocation";
/**
* Checkpoint name that this code waits on after the invocation until the test triggers it. This will require
* triggering per invocation if there are more than one.
*/
public static final String AFTER_RELEASE = "after_release";
public static final Answer<Void> EXECUTOR_RUN_ANSWER = invocation -> {
Runnable runnable = invocation.getArgument(0);
runnable.run();
return null;
};
public static Answer<Void> justRunExecutorAnswer() {
return EXECUTOR_RUN_ANSWER;
}
public static Answer<Void> runWithExecutorAnswer(Executor executor) {
return invocation -> {
Runnable runnable = invocation.getArgument(0);
executor.execute(runnable);
return null;
};
}
/**
* Delegates a Mockito invocation to a target object, and returns the mock instead of the target object.
*
* Useful when {@code Mockito.spy(object)} doesn't work and the mocked class has a fluent interface.
*/
public static <T, R> R invokeAndReturnMock(InvocationOnMock i, T target)
throws IllegalAccessException, InvocationTargetException {
Object returnValue = i.getMethod().invoke(target, i.getArguments());
// If necessary, replace the return value with the mock
return (returnValue == target) ? (R) i.getMock() : (R) returnValue;
}
public static <T> T callRealMethod(InvocationOnMock invocation) {
try {
return (T) invocation.callRealMethod();
} catch (Throwable throwable) {
throw CompletableFutures.asCompletionException(throwable);
}
}
public static <T> T callAnotherAnswer(Answer<?> answer, InvocationOnMock invocation) {
try {
return (T) answer.answer(invocation);
} catch (Throwable throwable) {
throw CompletableFutures.asCompletionException(throwable);
}
}
/**
* Helper that replaces a component in the cache with a mock that delegates to the original object, returning
* the same results. The provided stub consumer selects which method(s) should block, for example
* <pre>
* {@code (stubber, mock) -> stubber.when(mock).methodToBlock(); }
* </pre>
* The caller can then control how the blocking occurs, as the mock will do the following:
*
* <pre> {@code
* checkpoint.trigger(BEFORE_INVOCATION);
* checkpoint.await(BEFORE_RELEASE);
* ... do actual invocation ...
* checkpoint.trigger(AFTER_INVOCATION);
* checkpoint.await(AFTER_RELEASE);
* return result;
* }
* </pre>
*
* The user must release the BEFORE_RELEASE and AFTER_RELEASE checkpoints or else these will timeout and cause
* test instabilities.
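*
* A complete (hypothetical) usage might look like:
* <pre> {@code
* CheckPoint checkPoint = new CheckPoint();
* checkPoint.triggerForever(Mocks.AFTER_RELEASE);
* Mocks.blockingMock(checkPoint, RpcManager.class, cache,
*       (stub, mock) -> stub.when(mock).getMembers());
* // start the operation that calls getMembers() on another thread, then:
* checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);
* checkPoint.trigger(Mocks.BEFORE_RELEASE);
* }</pre>
*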
* @param checkPoint the check point to use to control blocking
* @param componentClass the actual class from the component registry to mock
* @param cache the cache to replace the object on
* @param mockStubConsumer the consumer to invoke the method on the stubber and the actual mock
* @param <Mock> the class of object to replace
* @return the original object to put back into the cache
*/
public static <Mock> Mock blockingMock(final CheckPoint checkPoint, Class<? extends Mock> componentClass,
Cache<?, ?> cache, BiConsumer<? super Stubber, ? super Mock> mockStubConsumer,
Class<?>... extraInterfaces) {
return interceptComponent(componentClass, cache, (realObject, mock) -> {
mockStubConsumer.accept(doAnswer(blockingAnswer(AdditionalAnswers.delegatesTo(realObject), checkPoint)), mock);
}, extraInterfaces);
}
public static <Mock> Mock interceptComponent(Class<? extends Mock> componentClass, Cache<?, ?> cache,
BiConsumer<? super Mock, ? super Mock> methodInterceptor,
Class<?>... extraInterfaces) {
Mock realObject = TestingUtil.extractComponent(cache, componentClass);
Answer<?> forwardingAnswer = AdditionalAnswers.delegatesTo(realObject);
MockSettings mockSettings = withSettings().defaultAnswer(forwardingAnswer);
if (extraInterfaces != null && extraInterfaces.length > 0) {
mockSettings.extraInterfaces(extraInterfaces);
}
Mock mock = mock(componentClass, mockSettings);
methodInterceptor.accept(realObject, mock);
TestingUtil.replaceComponent(cache, componentClass, mock, true);
return realObject;
}
/**
* Allows for decorating an existing answer to apply before and after invocation and release checkpoints as
* described in {@link Mocks#blockingMock(CheckPoint, Class, Cache, BiConsumer, Class[])}.
* @param answer the answer to decorate with a blocking one
* @param checkPoint the checkpoint to register with
* @param <T> the result type of the answer
* @return the new blocking answer
*/
public static <T> Answer<T> blockingAnswer(Answer<T> answer, CheckPoint checkPoint) {
return invocation -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
try {
return answer.answer(invocation);
} finally {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
}
};
}
/**
* Blocks before creation of the completable future and then subsequently blocks after the completable future
* is completed. Uses the same checkpoint names as {@link Mocks#blockingMock(CheckPoint, Class, Cache, BiConsumer, Class[])}.
* Note this method returns another Callable as we may not want to always block the invoking thread.
* @param completableFutureCallable callable to invoke between blocking
* @param checkPoint the checkpoint to use
    * @param executor the executor used to run the continuation after the stage has completed - this allows the caller
    *                 to control which thread it runs on, since it is nondeterministic whether the stage is already
    *                 complete when chained methods are applied to it
* @param <V> the answer from the future
* @return a callable that will block
*/
public static <V> Callable<CompletableFuture<V>> blockingCompletableFuture(Callable<CompletableFuture<V>> completableFutureCallable,
CheckPoint checkPoint, Executor executor) {
return () -> {
checkPoint.trigger(BEFORE_INVOCATION);
try {
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new AssertionError(e);
}
CompletableFuture<V> completableFuture = completableFutureCallable.call();
return completableFuture.thenCompose(v -> {
checkPoint.trigger(AFTER_INVOCATION);
return checkPoint.future(AFTER_RELEASE, 20, TimeUnit.SECONDS, executor)
.thenApply(ignore -> v);
});
};
}
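   /*
    * Illustrative usage sketch (not part of the original class): wrapping an async cache write so a test can observe
    * the point just before it starts and just after its future completes. The key, value and executor are
    * placeholders.
    *
    *   CheckPoint checkPoint = new CheckPoint();
    *   Callable<CompletableFuture<Object>> blocked =
    *         Mocks.blockingCompletableFuture(() -> cache.putAsync("k", "v"), checkPoint, executor);
    *   Future<CompletableFuture<Object>> invocation = fork(blocked);
    *   checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);
    *   checkPoint.trigger(Mocks.BEFORE_RELEASE);                                 // allow the put to start
    *   checkPoint.awaitStrict(Mocks.AFTER_INVOCATION, 10, TimeUnit.SECONDS);
    *   checkPoint.trigger(Mocks.AFTER_RELEASE);                                  // allow the chained stage to finish
    */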
public static <E> Publisher<E> blockingPublisher(Publisher<E> publisher, CheckPoint checkPoint) {
return Flowable.fromPublisher(publisher)
.doOnSubscribe(s -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
public static <E> SegmentPublisherSupplier<E> blockingPublisher(SegmentPublisherSupplier<E> publisher, CheckPoint checkPoint) {
return new SegmentPublisherSupplier<E>() {
@Override
public Publisher<Notification<E>> publisherWithSegments() {
return Flowable.fromPublisher(publisher.publisherWithSegments())
.doOnSubscribe(subscription -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
@Override
public Publisher<E> publisherWithoutSegments() {
return Flowable.fromPublisher(publisher.publisherWithoutSegments())
.doOnSubscribe(subscription -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
};
}
public static <E> SegmentAwarePublisherSupplier<E> blockingPublisherAware(SegmentAwarePublisherSupplier<E> publisher, CheckPoint checkPoint) {
return new SegmentAwarePublisherSupplier<E>() {
@Override
public Publisher<Notification<E>> publisherWithSegments() {
return Flowable.fromPublisher(publisher.publisherWithSegments())
.doOnSubscribe(s -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
@Override
public Publisher<E> publisherWithoutSegments() {
return Flowable.fromPublisher(publisher.publisherWithoutSegments())
.doOnSubscribe(subscription -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
@Override
public Publisher<NotificationWithLost<E>> publisherWithLostSegments(boolean reuseNotifications) {
return Flowable.fromPublisher(publisher.publisherWithLostSegments(reuseNotifications))
.doOnSubscribe(subscription -> {
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
})
.doOnComplete(() -> {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
});
}
};
}
/**
* Returns a publisher that will block just before sending an element that matches the given predicate and then
* subsequently unblocks after it finds the next element or is completed. Uses the same checkpoint names as
* {@link Mocks#blockingMock(CheckPoint, Class, Cache, BiConsumer, Class[])}. This method can work with multiple
* entries that pass the predicate but there is no way to distinguish which is which.
*
* @param publisher the publisher to use as the upstream source
* @param checkPoint the checkpoint to block on with the mock names
* @param predicate the predicate to test
* @param <E> the type of the values
* @return Publisher that will block the checkpoint while processing the elements that pass the predicate
*/
public static <E> Publisher<E> blockingPublisherOnElement(Publisher<E> publisher, CheckPoint checkPoint,
Predicate<? super E> predicate) {
return Flowable.defer(() -> {
ByRef.Boolean byRef = new ByRef.Boolean(false);
return Flowable.fromPublisher(publisher)
.doOnNext(e -> {
if (byRef.get()) {
byRef.set(false);
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
}
if (predicate.test(e)) {
byRef.set(true);
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
}
}).doFinally(() -> {
if (byRef.get()) {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
}
});
});
}
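   /*
    * Illustrative usage sketch (not part of the original class): hold back a single element while it is "in flight".
    * The values are placeholders.
    *
    *   CheckPoint checkPoint = new CheckPoint();
    *   Publisher<String> blocked =
    *         Mocks.blockingPublisherOnElement(Flowable.just("a", "b", "c"), checkPoint, "b"::equals);
    *   Future<List<String>> values = fork(() -> Flowable.fromPublisher(blocked).toList().blockingGet());
    *   checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);    // "a" delivered, "b" held back
    *   checkPoint.trigger(Mocks.BEFORE_RELEASE);
    *   checkPoint.awaitStrict(Mocks.AFTER_INVOCATION, 10, TimeUnit.SECONDS);
    *   checkPoint.trigger(Mocks.AFTER_RELEASE);
    */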
/**
* Creates a {@link SegmentPublisherSupplier} that will block on a given entry that matches the given predicate.
* Note that if the {@link SegmentPublisherSupplier#publisherWithoutSegments()} method is invoked the provided
* segment will be -1 for predicate checks.
*/
public static <E> SegmentPublisherSupplier<E> blockingSegmentPublisherOnElement(SegmentPublisherSupplier<E> publisher,
CheckPoint checkPoint, Predicate<? super SegmentPublisherSupplier.Notification<E>> predicate) {
return new SegmentPublisherSupplier<E>() {
@Override
public Publisher<Notification<E>> publisherWithSegments() {
ByRef.Boolean byRef = new ByRef.Boolean(false);
return Flowable.fromPublisher(publisher.publisherWithSegments())
.doOnNext(e -> {
if (byRef.get()) {
byRef.set(false);
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
}
if (predicate.test(e)) {
byRef.set(true);
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.awaitStrict(BEFORE_RELEASE, 20, TimeUnit.SECONDS);
}
}).doFinally(() -> {
if (byRef.get()) {
checkPoint.trigger(AFTER_INVOCATION);
checkPoint.awaitStrict(AFTER_RELEASE, 20, TimeUnit.SECONDS);
}
});
}
@Override
public Publisher<E> publisherWithoutSegments() {
return blockingPublisherOnElement((Publisher<E>) publisher, checkPoint,
value -> predicate.test(Notifications.value(value, -1)));
}
};
}
public static AbstractDelegatingHandler blockInboundCacheRpcCommand(Cache<?, ?> cache, CheckPoint checkPoint,
Predicate<? super CacheRpcCommand> predicate) {
Executor executor = extractGlobalComponent(cache.getCacheManager(), ExecutorService.class,
KnownComponentNames.NON_BLOCKING_EXECUTOR);
return TestingUtil.wrapInboundInvocationHandler(cache, handler -> new AbstractDelegatingHandler(handler) {
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
if (!predicate.test(command)) {
delegate.handle(command, reply, order);
return;
}
checkPoint.trigger(BEFORE_INVOCATION);
checkPoint.future(BEFORE_RELEASE, 20, TimeUnit.SECONDS, executor)
.thenRun(() -> delegate.handle(command, reply, order))
.thenCompose(ignored -> {
checkPoint.trigger(AFTER_INVOCATION);
return checkPoint.future(AFTER_RELEASE, 20, TimeUnit.SECONDS, executor);
});
}
});
}
/**
    * Replaces the given component with a spy and returns the spy for further mocking as needed. Note the original
    * component is not returned, so callers that still need it must retrieve it before invoking this method.
*
* @param cache the cache to get the component from
* @param componentClass the class of the component to retrieve
* @param <C> the component class
* @return the spied component which has already been replaced and wired in the cache
*/
public static <C> C replaceComponentWithSpy(Cache<?, ?> cache, Class<C> componentClass) {
C component = TestingUtil.extractComponent(cache, componentClass);
C spiedComponent = spy(component);
TestingUtil.replaceComponent(cache, componentClass, spiedComponent, true);
reset(spiedComponent);
return spiedComponent;
}
}
| 19,342
| 44.945368
| 145
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/AbstractCacheTest.java
|
package org.infinispan.test;
import static java.lang.String.format;
import static org.testng.AssertJUnit.assertFalse;
import static org.testng.AssertJUnit.assertTrue;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.configuration.cache.CacheMode;
import org.infinispan.configuration.cache.ConfigurationBuilder;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.fwk.CleanupAfterMethod;
import org.infinispan.test.fwk.CleanupAfterTest;
import org.infinispan.test.fwk.TestCacheManagerFactory;
/**
* Base class for {@link org.infinispan.test.SingleCacheManagerTest} and {@link org.infinispan.test.MultipleCacheManagersTest}.
*
* @author Mircea.Markus@jboss.com
*/
public abstract class AbstractCacheTest extends AbstractInfinispanTest {
public enum CleanupPhase {
AFTER_METHOD, AFTER_TEST
}
protected CleanupPhase cleanup = CleanupPhase.AFTER_TEST;
protected boolean cleanupAfterTest() {
return getClass().getAnnotation(CleanupAfterTest.class) != null || (
getClass().getAnnotation(CleanupAfterMethod.class) == null &&
cleanup == CleanupPhase.AFTER_TEST
);
}
protected boolean cleanupAfterMethod() {
return getClass().getAnnotation(CleanupAfterMethod.class) != null || (
getClass().getAnnotation(CleanupAfterTest.class) == null &&
cleanup == CleanupPhase.AFTER_METHOD
);
}
public static ConfigurationBuilder getDefaultClusteredCacheConfig(CacheMode mode) {
return getDefaultClusteredCacheConfig(mode, false, false);
}
public static ConfigurationBuilder getDefaultClusteredCacheConfig(CacheMode mode, boolean transactional) {
return getDefaultClusteredCacheConfig(mode, transactional, false);
}
public static ConfigurationBuilder getDefaultClusteredCacheConfig(CacheMode mode, boolean transactional, boolean useCustomTxLookup) {
ConfigurationBuilder builder = TestCacheManagerFactory.getDefaultCacheConfiguration(transactional, useCustomTxLookup);
builder.
clustering()
.cacheMode(mode)
.transaction().cacheStopTimeout(0L);
return builder;
}
protected boolean xor(boolean b1, boolean b2) {
return (b1 || b2) && !(b1 && b2);
}
protected void assertEventuallyNotLocked(final Cache<?, ?> cache, final Object key) {
//lock release happens async, hence the eventually...
eventually(() -> format("Expected key '%s' to be unlocked on cache '%s'", key, cache),
() -> !checkLocked(cache, key), 20000, TimeUnit.MILLISECONDS);
}
protected void assertEventuallyLocked(final Cache<?, ?> cache, final Object key) {
eventually(() -> format("Expected key '%s' to be locked on cache '%s'", key, cache),
() -> checkLocked(cache, key), 20000, TimeUnit.MILLISECONDS);
}
protected void assertLocked(Cache<?, ?> cache, Object key) {
assertTrue(format("Expected key '%s' to be locked on cache '%s'", key, cache), checkLocked(cache, key));
}
protected void assertNotLocked(Cache<?, ?> cache, Object key) {
assertFalse(format("Expected key '%s' to not be locked on cache '%s'", key, cache), checkLocked(cache, key));
}
protected boolean checkLocked(Cache<?, ?> cache, Object key) {
return TestingUtil.extractLockManager(cache).isLocked(key);
}
public EmbeddedCacheManager manager(Cache<?, ?> c) {
return c.getCacheManager();
}
public String getDefaultCacheName() {
return TestCacheManagerFactory.DEFAULT_CACHE_NAME;
}
}
| 3,610
| 36.226804
| 136
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/TestException.java
|
package org.infinispan.test;
import java.io.Serializable;
/**
* Well-known exception used for testing exception propagation.
*
* @author Dan Berindei
* @since 8.2
*/
public class TestException extends RuntimeException implements Serializable {
public TestException() {
}
public TestException(String message) {
super(message);
}
public TestException(String message, Throwable cause) {
super(message, cause);
}
public TestException(Throwable cause) {
super(cause);
}
}
| 519
| 18.259259
| 77
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/ViewChangeListener.java
|
package org.infinispan.test;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.infinispan.Cache;
import org.infinispan.manager.CacheContainer;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.notifications.Listener;
import org.infinispan.notifications.cachemanagerlistener.annotation.ViewChanged;
import org.infinispan.notifications.cachemanagerlistener.event.ViewChangedEvent;
/**
* Listens for view changes. Note that you do NOT have to register this listener; it does so automatically when
* constructed.
*/
@Listener(observation = Listener.Observation.POST)
public class ViewChangeListener {
CacheContainer cm;
final CountDownLatch latch = new CountDownLatch(1);
public ViewChangeListener(Cache c) {
this(c.getCacheManager());
}
public ViewChangeListener(EmbeddedCacheManager cm) {
this.cm = cm;
cm.addListener(this);
}
@ViewChanged
public void onViewChange(ViewChangedEvent e) {
latch.countDown();
}
/**
* Blocks for a certain amount of time until a view change is received. Note that this class will start listening
* for the view change the moment it is constructed.
*
* @param time time to wait
* @param unit time unit
*/
public void waitForViewChange(long time, TimeUnit unit) throws InterruptedException {
if (!latch.await(time, unit)) assert false : "View change not seen after " + time + " " + unit;
}
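   /*
    * Illustrative usage sketch (not part of the original class), assuming the helpers of MultipleCacheManagersTest:
    *
    *   ViewChangeListener listener = new ViewChangeListener(manager(0));   // starts listening immediately
    *   killMember(1);                                                      // provoke a view change
    *   listener.waitForViewChange(10, TimeUnit.SECONDS);
    */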
}
| 1,486
| 30.638298
| 117
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/PerCacheExecutorThread.java
|
package org.infinispan.test;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import jakarta.transaction.Transaction;
import jakarta.transaction.TransactionManager;
import org.infinispan.Cache;
import org.infinispan.distribution.rehash.XAResourceAdapter;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
/**
* Utility class that can be used for writing tests that need to access a cache instance from multiple threads.
*
* @author Mircea.Markus@jboss.com
* @see Operations
* @see OperationsResult
*/
public final class PerCacheExecutorThread extends Thread {
private static final Log log = LogFactory.getLog(PerCacheExecutorThread.class);
private Cache<Object, Object> cache;
private BlockingQueue<Object> toExecute = new ArrayBlockingQueue<Object>(1);
private volatile Object response;
private CountDownLatch responseLatch = new CountDownLatch(1);
private volatile Transaction ongoingTransaction;
private volatile Object key, value;
public void setKeyValue(Object key, Object value) {
this.key = key;
this.value = value;
}
public PerCacheExecutorThread(Cache<Object, Object> cache, int index) {
super("PerCacheExecutorThread-" + index + "," + cache.getCacheManager().getAddress());
this.cache = cache;
start();
}
public Object execute(Operations op) {
try {
responseLatch = new CountDownLatch(1);
toExecute.put(op);
responseLatch.await();
return response;
} catch (InterruptedException e) {
throw new RuntimeException("Unexpected", e);
}
}
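   /*
    * Illustrative usage sketch (not part of the original class): run an operation on a dedicated thread while the
    * test thread holds a conflicting lock or transaction.
    *
    *   PerCacheExecutorThread executorThread = new PerCacheExecutorThread(cache, 1);
    *   executorThread.setKeyValue("k", "v");
    *   Object result = executorThread.execute(Operations.PUT_KEY_VALUE);   // blocks until the operation returns
    *   assert result == Operations.PUT_KEY_VALUE.getCorrespondingOkResult();
    *   executorThread.stopThread();
    */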
public void executeNoResponse(Operations op) {
try {
responseLatch = null;
response = null;
toExecute.put(op);
} catch (InterruptedException e) {
throw new RuntimeException("Unexpected", e);
}
}
@Override
public void run() {
Operations operation;
boolean run = true;
while (run) {
try {
operation = (Operations) toExecute.take();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
log.tracef("about to process operation %s", operation);
switch (operation) {
case BEGIN_TX: {
TransactionManager txManager = TestingUtil.getTransactionManager(cache);
try {
txManager.begin();
ongoingTransaction = txManager.getTransaction();
setResponse(OperationsResult.BEGIN_TX_OK);
} catch (Exception e) {
log.trace("Failure on beginning tx", e);
setResponse(e);
}
break;
}
case COMMIT_TX: {
TransactionManager txManager = TestingUtil.getTransactionManager(cache);
try {
txManager.commit();
ongoingTransaction = null;
setResponse(OperationsResult.COMMIT_TX_OK);
} catch (Exception e) {
log.trace("Exception while committing tx", e);
setResponse(e);
}
break;
}
case PUT_KEY_VALUE: {
try {
cache.put(key, value);
log.trace("Successfully executed putKeyValue(" + key + ", " + value + ")");
setResponse(OperationsResult.PUT_KEY_VALUE_OK);
} catch (Exception e) {
log.trace("Exception while executing putKeyValue(" + key + ", " + value + ")", e);
setResponse(e);
}
break;
}
case REMOVE_KEY: {
try {
cache.remove(key);
log.trace("Successfully executed remove(" + key + ")");
setResponse(OperationsResult.REMOVE_KEY_OK);
} catch (Exception e) {
log.trace("Exception while executing remove(" + key + ")", e);
setResponse(e);
}
break;
}
case REPLACE_KEY_VALUE: {
try {
cache.replace(key, value);
log.trace("Successfully executed replace(" + key + "," + value + ")");
setResponse(OperationsResult.REPLACE_KEY_VALUE_OK);
} catch (Exception e) {
log.trace("Exception while executing replace(" + key + "," + value + ")", e);
setResponse(e);
}
break;
}
case FORCE2PC: {
try {
TransactionManager txManager = TestingUtil.getTransactionManager(cache);
txManager.getTransaction().enlistResource(new XAResourceAdapter());
setResponse(OperationsResult.FORCE2PC_OK);
} catch (Exception e) {
log.trace("Exception while executing replace(" + key + "," + value + ")", e);
setResponse(e);
}
break;
}
case STOP_THREAD: {
log.trace("Exiting...");
toExecute = null;
run = false;
break;
}
default : {
setResponse(new IllegalStateException("Unknown operation!" + operation));
}
}
if (responseLatch != null) responseLatch.countDown();
}
}
private void setResponse(Object e) {
log.tracef("setResponse to %s", e);
response = e;
}
public void stopThread() {
execute(Operations.STOP_THREAD);
while (!this.getState().equals(State.TERMINATED)) {
try {
Thread.sleep(50);
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
}
}
public Object lastResponse() {
return response;
}
public void clearResponse() {
response = null;
}
public Object waitForResponse() {
while (response == null) {
try {
Thread.sleep(50);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
return response;
}
/**
* Defines allowed operations for {@link PerCacheExecutorThread}.
*
* @author Mircea.Markus@jboss.com
*/
public static enum Operations {
BEGIN_TX, COMMIT_TX, PUT_KEY_VALUE, REMOVE_KEY, REPLACE_KEY_VALUE, STOP_THREAD, FORCE2PC;
public OperationsResult getCorrespondingOkResult() {
switch (this) {
case BEGIN_TX:
return OperationsResult.BEGIN_TX_OK;
case COMMIT_TX:
return OperationsResult.COMMIT_TX_OK;
case PUT_KEY_VALUE:
return OperationsResult.PUT_KEY_VALUE_OK;
case REMOVE_KEY:
return OperationsResult.REMOVE_KEY_OK;
case REPLACE_KEY_VALUE:
return OperationsResult.REPLACE_KEY_VALUE_OK;
case STOP_THREAD:
return OperationsResult.STOP_THREAD_OK;
case FORCE2PC:
return OperationsResult.FORCE2PC_OK;
default:
throw new IllegalStateException("Unrecognized operation: " + this);
}
}
}
/**
* Defines operation results returned by {@link PerCacheExecutorThread}.
*
* @author Mircea.Markus@jboss.com
*/
public static enum OperationsResult {
BEGIN_TX_OK, COMMIT_TX_OK, PUT_KEY_VALUE_OK, REMOVE_KEY_OK, REPLACE_KEY_VALUE_OK, STOP_THREAD_OK , FORCE2PC_OK
}
public Transaction getOngoingTransaction() {
return ongoingTransaction;
}
}
| 7,824
| 31.878151
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/MultiCacheManagerCallable.java
|
package org.infinispan.test;
import org.infinispan.manager.EmbeddedCacheManager;
/**
* A task that executes operations against a group of cache managers.
*
* @author Galder Zamarreño
* @since 5.1
*/
public class MultiCacheManagerCallable {
protected final EmbeddedCacheManager[] cms;
public MultiCacheManagerCallable(EmbeddedCacheManager... cms) {
this.cms = cms;
}
public void call() throws Exception {
// No-op
}
}
| 455
| 18
| 69
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/OutboundRpcSequencerAction.java
|
package org.infinispan.test.concurrent;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletionStage;
import java.util.function.Function;
import org.infinispan.Cache;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.factories.ComponentRegistry;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.rpc.RpcOptions;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.ResponseCollector;
import org.infinispan.test.TestingUtil;
import org.infinispan.util.AbstractDelegatingRpcManager;
/**
* Replaces the {@link RpcManager} with a wrapper that can interact with a {@link StateSequencer} when a
* command that matches a {@link CommandMatcher} is invoked remotely.
*
* @author Dan Berindei
* @since 7.0
*/
public class OutboundRpcSequencerAction {
private final StateSequencer stateSequencer;
private final Cache<?, ?> cache;
private final CommandMatcher matcher;
private SequencerRpcManager ourRpcManager;
public OutboundRpcSequencerAction(StateSequencer stateSequencer, Cache<?, ?> cache, CommandMatcher matcher) {
this.stateSequencer = stateSequencer;
this.cache = cache;
this.matcher = matcher;
}
/**
    * Set up a list of sequencer states to advance through before a matching command is sent remotely.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public OutboundRpcSequencerAction before(String state1, String... additionalStates) {
replaceRpcManager();
ourRpcManager.beforeStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
private void replaceRpcManager() {
if (ourRpcManager == null) {
ComponentRegistry componentRegistry = cache.getAdvancedCache().getComponentRegistry();
RpcManager rpcManager = componentRegistry.getComponent(RpcManager.class);
ourRpcManager = new SequencerRpcManager(rpcManager, stateSequencer, matcher);
TestingUtil.replaceComponent(cache, RpcManager.class, ourRpcManager, true);
}
}
/**
    * Set up a list of sequencer states to advance through after a matching remote invocation has completed.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public OutboundRpcSequencerAction after(String state1, String... additionalStates) {
replaceRpcManager();
ourRpcManager.afterStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
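   /*
    * Illustrative usage sketch (not part of the original class): make the first outgoing PutKeyValueCommand wait for
    * a test-controlled state. The state names and the command class are placeholders.
    *
    *   StateSequencer ss = new StateSequencer();
    *   ss.logicalThread("rpc", "rpc:before_send", "rpc:after_send");
    *   CommandMatcher matcher = new CommandMatcherBuilder<>(PutKeyValueCommand.class).matchCount(0).build();
    *   new OutboundRpcSequencerAction(ss, cache, matcher).before("rpc:before_send").after("rpc:after_send");
    */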
public static class SequencerRpcManager extends AbstractDelegatingRpcManager {
private final StateSequencer stateSequencer;
private final CommandMatcher matcher;
private volatile List<String> statesBefore;
private volatile List<String> statesAfter;
public SequencerRpcManager(RpcManager rpcManager, StateSequencer stateSequencer, CommandMatcher matcher) {
super(rpcManager);
this.stateSequencer = stateSequencer;
this.matcher = matcher;
}
@Override
protected <T> CompletionStage<T> performRequest(Collection<Address> targets, ReplicableCommand command,
ResponseCollector<T> collector,
Function<ResponseCollector<T>, CompletionStage<T>>
invoker, RpcOptions rpcOptions) {
boolean accept;
try {
accept = matcher.accept(command);
StateSequencerUtil.advanceMultiple(stateSequencer, accept, statesBefore);
} catch (Exception e) {
throw new RuntimeException(e);
}
CompletionStage<T> stage = super.performRequest(targets, command, collector, invoker, rpcOptions);
if (stage != null) {
return stage.whenComplete((result, throwable) -> advanceNoThrow(accept));
} else {
advanceNoThrow(accept);
return null;
}
}
private void advanceNoThrow(boolean accept) {
try {
StateSequencerUtil.advanceMultiple(stateSequencer, accept, statesAfter);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
public void beforeStates(List<String> states) {
this.statesBefore = StateSequencerUtil.listCopy(states);
}
public void afterStates(List<String> states) {
this.statesAfter = StateSequencerUtil.listCopy(states);
}
}
}
| 4,689
| 38.745763
| 141
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/InterceptorSequencerAction.java
|
package org.infinispan.test.concurrent;
import java.util.List;
import org.infinispan.Cache;
import org.infinispan.commands.VisitableCommand;
import org.infinispan.context.InvocationContext;
import org.infinispan.interceptors.AsyncInterceptor;
import org.infinispan.interceptors.AsyncInterceptorChain;
import org.infinispan.interceptors.BaseAsyncInterceptor;
/**
* Replaces an {@link AsyncInterceptor} with a wrapper that can interact with a {@link StateSequencer} when a
* command that matches a {@link CommandMatcher} is visited.
*
* @author Dan Berindei
* @since 7.0
*/
public class InterceptorSequencerAction {
private final StateSequencer stateSequencer;
private final Cache<?, ?> cache;
private final Class<? extends AsyncInterceptor> interceptorClass;
private CommandMatcher matcher;
private SequencerInterceptor ourInterceptor;
public InterceptorSequencerAction(StateSequencer stateSequencer, Cache<?, ?> cache, Class<? extends AsyncInterceptor> interceptorClass, CommandMatcher matcher) {
this.stateSequencer = stateSequencer;
this.cache = cache;
this.interceptorClass = interceptorClass;
this.matcher = matcher;
}
/**
* Set up a list of sequencer states before interceptor {@code interceptorClass} is called.
*/
public InterceptorSequencerAction before(String state1, String... additionalStates) {
initOurInterceptor();
ourInterceptor.beforeStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
// TODO Should we add beforeInvokeNext() and afterInvokeNext()?
private void initOurInterceptor() {
if (ourInterceptor == null) {
ourInterceptor = SequencerInterceptor.createUniqueInterceptor(cache.getAdvancedCache().getAsyncInterceptorChain());
ourInterceptor.init(stateSequencer, matcher);
cache.getAdvancedCache().getAsyncInterceptorChain().addInterceptorBefore(ourInterceptor, interceptorClass);
}
}
/**
* Set up a list of sequencer states after interceptor {@code interceptorClass} has returned.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public InterceptorSequencerAction after(String state1, String... additionalStates) {
initOurInterceptor();
ourInterceptor.afterStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
public static class SequencerInterceptor extends BaseAsyncInterceptor {
private static final Class[] uniqueInterceptorClasses = {
U1.class, U2.class, U3.class, U4.class, U5.class, U6.class, U7.class, U8.class, U9.class
};
private StateSequencer stateSequencer;
private CommandMatcher matcher;
private volatile List<String> statesBefore;
private volatile List<String> statesAfter;
public static SequencerInterceptor createUniqueInterceptor(AsyncInterceptorChain chain) {
Class uniqueClass = findUniqueClass(chain);
try {
return (SequencerInterceptor) uniqueClass.newInstance();
} catch (Exception e) {
throw new RuntimeException("Cannot instantiate unique interceptor", e);
}
}
public static Class<?> findUniqueClass(AsyncInterceptorChain chain) {
for (Class<? extends AsyncInterceptor> clazz : uniqueInterceptorClasses) {
if (!chain.containsInterceptorType(clazz)) {
return clazz;
}
}
throw new IllegalStateException("Too many sequencer interceptors added to the same chain");
}
public void init(StateSequencer stateSequencer, CommandMatcher matcher) {
this.stateSequencer = stateSequencer;
this.matcher = matcher;
}
@Override
public Object visitCommand(InvocationContext ctx, VisitableCommand command) throws Throwable {
boolean commandAccepted = matcher.accept(command);
StateSequencerUtil.advanceMultiple(stateSequencer, commandAccepted, statesBefore);
return invokeNextAndFinally(ctx, command, (rCtx, rCommand, rv, t) -> {
StateSequencerUtil.advanceMultiple(stateSequencer, commandAccepted, statesAfter);
});
}
public void beforeStates(List<String> states) {
this.statesBefore = StateSequencerUtil.listCopy(states);
}
public void afterStates(List<String> states) {
this.statesAfter = StateSequencerUtil.listCopy(states);
}
public static class U1 extends SequencerInterceptor {
}
public static class U2 extends SequencerInterceptor {
}
public static class U3 extends SequencerInterceptor {
}
public static class U4 extends SequencerInterceptor {
}
public static class U5 extends SequencerInterceptor {
}
public static class U6 extends SequencerInterceptor {
}
public static class U7 extends SequencerInterceptor {
}
public static class U8 extends SequencerInterceptor {
}
public static class U9 extends SequencerInterceptor {
}
}
}
| 5,167
| 35.394366
| 164
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/CommandMatcherBuilder.java
|
package org.infinispan.test.concurrent;
import org.infinispan.Cache;
import org.infinispan.commands.DataCommand;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.remoting.transport.Address;
/**
* Builds {@link CommandMatcher}s.
*
* @author Dan Berindei
* @since 7.0
*/
public class CommandMatcherBuilder<T extends ReplicableCommand> {
private final Class<T> commandClass;
private String cacheName;
private Address origin;
private Object key;
private int matchCount = -1;
public CommandMatcherBuilder(Class<T> commandClass) {
this.commandClass = commandClass;
}
public CommandMatcher build() {
if (matchCount < 0) {
return buildInternal();
} else {
return new MatchCountMatcher(buildInternal(), matchCount);
}
}
private CommandMatcher buildInternal() {
if (CacheRpcCommand.class.isAssignableFrom(commandClass)) {
return new DefaultCommandMatcher(((Class<? extends CacheRpcCommand>) commandClass), cacheName, origin);
} else if (DataCommand.class.isAssignableFrom(commandClass)) {
return new DefaultCommandMatcher(((Class<? extends DataCommand>) commandClass), key);
} else {
return new DefaultCommandMatcher(commandClass);
}
}
public CommandMatcherBuilder withCache(Cache cache) {
return withCache(cache.getName());
}
public CommandMatcherBuilder withCache(String cacheName) {
this.cacheName = cacheName;
return this;
}
/**
* Note that a {@code null} origin means any origin, including local. If you need to match only local
* commands, use {@link #localOnly()}.
*/
public CommandMatcherBuilder withOrigin(Address origin) {
this.origin = origin;
return this;
}
public CommandMatcherBuilder localOnly() {
this.origin = DefaultCommandMatcher.LOCAL_ORIGIN_PLACEHOLDER;
return this;
}
public CommandMatcherBuilder remoteOnly() {
this.origin = DefaultCommandMatcher.ANY_REMOTE_PLACEHOLDER;
return this;
}
public CommandMatcherBuilder withKey(Object key) {
this.key = key;
return this;
}
/**
* Accept only the {@code nth} invocation that matches <b>all</b> the other conditions.
*
* <p>The default, {@code matchCount = -1}, matches all invocations.
* Use {@code matchCount >= 0} to match only one invocation, e.g. {@code matchCount = 0} matches the first invocation.
*/
public CommandMatcherBuilder matchCount(int matchCount) {
this.matchCount = matchCount;
return this;
}
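   /*
    * Illustrative usage sketch (not part of the original class): match only the first ClusteredGetCommand targeting a
    * given cache. The command class and cache name are placeholders.
    *
    *   CommandMatcher matcher = new CommandMatcherBuilder<>(ClusteredGetCommand.class)
    *         .withCache("testCache")
    *         .matchCount(0)
    *         .build();
    */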
}
| 2,647
| 29.090909
| 121
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/InboundRpcSequencerAction.java
|
package org.infinispan.test.concurrent;
import static org.infinispan.test.TestingUtil.wrapInboundInvocationHandler;
import java.util.List;
import java.util.concurrent.TimeoutException;
import org.infinispan.Cache;
import org.infinispan.commands.remote.CacheRpcCommand;
import org.infinispan.remoting.inboundhandler.AbstractDelegatingHandler;
import org.infinispan.remoting.inboundhandler.DeliverOrder;
import org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler;
import org.infinispan.remoting.inboundhandler.Reply;
import org.infinispan.remoting.responses.ExceptionResponse;
/**
* Replaces the {@link org.infinispan.remoting.inboundhandler.PerCacheInboundInvocationHandler} with a wrapper that can interact with a {@link StateSequencer} when a
* command that matches a {@link CommandMatcher} is invoked.
*
* @author Dan Berindei
* @since 7.0
*/
public class InboundRpcSequencerAction {
private final StateSequencer stateSequencer;
private final Cache<?,?> cache;
private final CommandMatcher matcher;
private SequencerPerCacheInboundInvocationHandler ourHandler;
public InboundRpcSequencerAction(StateSequencer stateSequencer, Cache cache, CommandMatcher matcher) {
this.stateSequencer = stateSequencer;
this.cache = cache;
this.matcher = matcher;
}
/**
    * Set up a list of sequencer states to advance through before a matching command is handled.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public InboundRpcSequencerAction before(String state1, String... additionalStates) {
replaceInboundInvocationHandler();
ourHandler.beforeStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
private void replaceInboundInvocationHandler() {
if (ourHandler == null) {
ourHandler = wrapInboundInvocationHandler(cache, handler ->
new SequencerPerCacheInboundInvocationHandler(handler, stateSequencer, matcher));
}
}
/**
    * Set up a list of sequencer states to advance through after a matching command has been handled.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public InboundRpcSequencerAction after(String state1, String... additionalStates) {
replaceInboundInvocationHandler();
ourHandler.afterStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
public static class SequencerPerCacheInboundInvocationHandler extends AbstractDelegatingHandler {
private final StateSequencer stateSequencer;
private final CommandMatcher matcher;
private volatile List<String> statesBefore;
private volatile List<String> statesAfter;
public SequencerPerCacheInboundInvocationHandler(PerCacheInboundInvocationHandler delegate, StateSequencer stateSequencer, CommandMatcher matcher) {
super(delegate);
this.stateSequencer = stateSequencer;
this.matcher = matcher;
}
@Override
public void handle(CacheRpcCommand command, Reply reply, DeliverOrder order) {
boolean accepted = matcher.accept(command);
advance(accepted, statesBefore, reply);
try {
delegate.handle(command, response -> {
if (advance(accepted, statesAfter, reply)) {
reply.reply(response);
}
}, order);
} catch (Throwable t) {
advance(accepted, statesAfter, Reply.NO_OP);
}
}
public void beforeStates(List<String> states) {
this.statesBefore = StateSequencerUtil.listCopy(states);
}
public void afterStates(List<String> states) {
this.statesAfter = StateSequencerUtil.listCopy(states);
}
private boolean advance(boolean accepted, List<String> states, Reply reply) {
try {
StateSequencerUtil.advanceMultiple(stateSequencer, accepted, states);
return true;
} catch (TimeoutException e) {
reply.reply(new ExceptionResponse(e));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
reply.reply(new ExceptionResponse(e));
}
return false;
}
}
}
| 4,413
| 38.061947
| 165
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/CommandMatcher.java
|
package org.infinispan.test.concurrent;
import org.infinispan.commands.ReplicableCommand;
/**
* Matches {@link ReplicableCommand}s.
*
* @author Dan Berindei
* @since 7.0
*/
public interface CommandMatcher {
boolean accept(ReplicableCommand command);
}
| 262
| 17.785714
| 49
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/InvocationMatcherBuilder.java
|
package org.infinispan.test.concurrent;
import java.util.ArrayList;
import java.util.List;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Matcher;
/**
* Creates {@link InvocationMatcher}s.
*
* @author Dan Berindei
* @since 7.0
*/
public class InvocationMatcherBuilder {
private final String methodName;
private Matcher instanceMatcher;
private List<Matcher> argumentMatchers;
private int matchCount = -1;
private String inOrAfterState, afterState;
private StateSequencer stateSequencer;
public InvocationMatcherBuilder(String methodName) {
this.methodName = methodName;
}
public InvocationMatcher build() {
Matcher[] matchersArray = argumentMatchers != null ?
argumentMatchers.toArray(new Matcher[argumentMatchers.size()]) : null;
InvocationMatcher matcher = new DefaultInvocationMatcher(methodName, instanceMatcher, matchCount, matchersArray);
if (inOrAfterState != null) {
matcher = new StateInvocationMatcher(matcher, stateSequencer, StateInvocationMatcher.Relation.IN_OR_AFTER, inOrAfterState);
}
if (afterState != null) {
matcher = new StateInvocationMatcher(matcher, stateSequencer, StateInvocationMatcher.Relation.AFTER, afterState);
}
return matcher;
}
public InvocationMatcherBuilder withParam(int index, Object expected) {
Matcher<Object> matcher = CoreMatchers.equalTo(expected);
return withMatcher(index, matcher);
}
public InvocationMatcherBuilder withMatcher(int index, Matcher<?> matcher) {
if (argumentMatchers == null) {
argumentMatchers = new ArrayList<Matcher>(index + 1);
}
while (argumentMatchers.size() <= index) {
argumentMatchers.add(null);
}
argumentMatchers.set(index, matcher);
return this;
}
public InvocationMatcherBuilder matchCount(int matchCount) {
this.matchCount = matchCount;
return this;
}
public InvocationMatcherBuilder withThis(Matcher<Object> matcher) {
instanceMatcher = matcher;
return this;
}
public InvocationMatcherBuilder inOrAfterState(StateSequencer stateSequencer, String stateName) {
assert stateSequencer != null && (this.stateSequencer == null || this.stateSequencer == stateSequencer);
this.stateSequencer = stateSequencer;
this.inOrAfterState = stateName;
return this;
}
public InvocationMatcherBuilder afterState(StateSequencer stateSequencer, String stateName) {
assert stateSequencer != null && (this.stateSequencer == null || this.stateSequencer == stateSequencer);
this.stateSequencer = stateSequencer;
this.afterState = stateName;
return this;
}
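   /*
    * Illustrative usage sketch (not part of the original class): match the second invocation of a method named
    * "handleTopologyUpdate", regardless of its arguments. The method name is a placeholder.
    *
    *   InvocationMatcher matcher = new InvocationMatcherBuilder("handleTopologyUpdate")
    *         .matchCount(1)
    *         .build();
    */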
}
| 2,713
| 32.925
| 132
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/StateInvocationMatcher.java
|
package org.infinispan.test.concurrent;
public class StateInvocationMatcher implements InvocationMatcher {
private final InvocationMatcher matcher;
private final StateSequencer stateSequencer;
private final String stateName;
private final Relation relation;
public StateInvocationMatcher(InvocationMatcher matcher, StateSequencer stateSequencer, Relation relation, String stateName) {
this.matcher = matcher;
this.stateSequencer = stateSequencer;
this.relation = relation;
this.stateName = stateName;
}
@Override
public boolean accept(Object instance, String methodName, Object[] arguments) {
boolean accept = false;
switch (relation) {
case IN:
accept = stateSequencer.isInState(stateName);
break;
case IN_OR_AFTER:
accept = stateSequencer.isInOrAfterState(stateName);
break;
case AFTER:
accept = stateSequencer.isAfterState(stateName);
break;
default:
throw new IllegalStateException(String.valueOf(relation));
}
if (accept) {
return matcher.accept(instance, methodName, arguments);
}
return false;
}
public enum Relation {
IN,
IN_OR_AFTER,
AFTER
}
}
| 1,297
| 28.5
| 129
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/InvocationMatcher.java
|
package org.infinispan.test.concurrent;
/**
* Matches method invocations by name, parameters and/or target.
*
* @author Dan Berindei
* @since 7.0
*/
public interface InvocationMatcher {
boolean accept(Object instance, String methodName, Object[] arguments);
}
| 269
| 21.5
| 74
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/GlobalComponentSequencerAction.java
|
package org.infinispan.test.concurrent;
import static org.infinispan.commons.test.Exceptions.unchecked;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.List;
import java.util.concurrent.CompletionStage;
import org.infinispan.manager.EmbeddedCacheManager;
import org.infinispan.test.TestingUtil;
/**
* Replaces a global component with a dynamic proxy that can interact with a {@link StateSequencer} when a method that
* matches a {@link InvocationMatcher} is called.
*
* @author Dan Berindei
* @since 7.0
*/
public class GlobalComponentSequencerAction<T> {
protected final StateSequencer stateSequencer;
protected final EmbeddedCacheManager cacheManager;
protected final Class<T> componentClass;
protected final InvocationMatcher matcher;
protected ProxyInvocationHandler ourHandler;
protected T originalComponent;
GlobalComponentSequencerAction(StateSequencer stateSequencer, EmbeddedCacheManager cacheManager, Class<T> componentClass, InvocationMatcher matcher) {
this.matcher = matcher;
this.componentClass = componentClass;
this.stateSequencer = stateSequencer;
this.cacheManager = cacheManager;
}
/**
    * Set up a list of sequencer states to advance through before a matching component method is invoked.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public GlobalComponentSequencerAction<T> before(String state1, String... additionalStates) {
replaceComponent();
ourHandler.beforeStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
protected void replaceComponent() {
if (ourHandler == null) {
originalComponent = TestingUtil.extractGlobalComponent(cacheManager, componentClass);
if (originalComponent == null) {
throw new IllegalStateException("Attempting to wrap a non-existing global component: " + componentClass);
}
ourHandler = new ProxyInvocationHandler(originalComponent, stateSequencer, matcher);
T componentProxy = createComponentProxy(componentClass, ourHandler);
TestingUtil.replaceComponent(cacheManager, componentClass, componentProxy, true);
}
}
protected <T> T createComponentProxy(Class<T> componentClass, InvocationHandler handler) {
return (T) Proxy.newProxyInstance(this.getClass().getClassLoader(), new Class[]{componentClass},
handler);
}
/**
    * Set up a list of sequencer states to advance through after a matching component method has returned.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public GlobalComponentSequencerAction<T> after(String state1, String... additionalStates) {
replaceComponent();
ourHandler.afterStates(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
/**
    * Set up a list of sequencer states to advance through after the {@code CompletionStage} returned by a matching component method has completed.
* <p/>
* Each invocation accepted by {@code matcher} will enter/exit the next state from the list, and does nothing after the list is exhausted.
*/
public GlobalComponentSequencerAction<T> afterAsync(String state1, String... additionalStates) {
replaceComponent();
ourHandler.afterStatesAsync(StateSequencerUtil.concat(state1, additionalStates));
return this;
}
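   /*
    * Illustrative usage sketch (not part of the original class): pause a node's topology updates around
    * test-controlled states. The component class, method name and state names are placeholders; the constructor is
    * package-private, so real tests obtain the action through a helper in this package.
    *
    *   StateSequencer ss = new StateSequencer();
    *   ss.logicalThread("topology", "topology:before_update", "topology:after_update");
    *   InvocationMatcher matcher = new InvocationMatcherBuilder("handleTopologyUpdate").build();
    *   new GlobalComponentSequencerAction<>(ss, cacheManager, LocalTopologyManager.class, matcher)
    *         .before("topology:before_update")
    *         .after("topology:after_update");
    */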
public T getOriginalComponent() {
return originalComponent;
}
public static class ProxyInvocationHandler implements InvocationHandler {
private final Object wrappedInstance;
private final StateSequencer stateSequencer;
private final InvocationMatcher matcher;
private boolean async;
private volatile List<String> statesBefore;
private volatile List<String> statesAfter;
public ProxyInvocationHandler(Object wrappedInstance, StateSequencer stateSequencer, InvocationMatcher matcher) {
this.wrappedInstance = wrappedInstance;
this.stateSequencer = stateSequencer;
this.matcher = matcher;
}
public void beforeStates(List<String> states) {
this.statesBefore = StateSequencerUtil.listCopy(states);
}
public void afterStates(List<String> states) {
this.statesAfter = StateSequencerUtil.listCopy(states);
}
public void afterStatesAsync(List<String> states) {
this.async = true;
this.statesAfter = StateSequencerUtil.listCopy(states);
}
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
boolean matches = matcher.accept(wrappedInstance, method.getName(), args);
StateSequencerUtil.advanceMultiple(stateSequencer, matches, statesBefore);
if (async) {
CompletionStage<?> stage = (CompletionStage<?>) method.invoke(wrappedInstance, args);
return stage.whenComplete((o, throwable) -> unchecked(() -> {
StateSequencerUtil.advanceMultiple(stateSequencer, matches, statesAfter);
}));
}
else {
try {
return method.invoke(wrappedInstance, args);
} finally {
StateSequencerUtil.advanceMultiple(stateSequencer, matches, statesAfter);
}
}
}
public Object getWrappedInstance() {
return wrappedInstance;
}
}
}
| 5,581
| 38.588652
| 153
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/MatchCountMatcher.java
|
package org.infinispan.test.concurrent;
import java.util.concurrent.atomic.AtomicInteger;
import org.infinispan.commands.ReplicableCommand;
/**
* {@link org.infinispan.test.concurrent.CommandMatcher} implementation that can match a single invocation (e.g. the
* 2nd invocation that matches the other conditions).
*
* @author Dan Berindei
* @since 7.0
*/
public class MatchCountMatcher implements CommandMatcher {
private final CommandMatcher matcher;
private final int matchCount;
private final AtomicInteger parentMatchCount = new AtomicInteger(0);
/**
* @param matcher Parent matcher
* @param matchCount Index of invocation to match, e.g. {@code matchCount = 0} matches the first invocation.
*/
MatchCountMatcher(CommandMatcher matcher, int matchCount) {
if (matchCount < 0)
         throw new IllegalArgumentException("matchCount must be non-negative");
this.matcher = matcher;
this.matchCount = matchCount;
}
@Override
public boolean accept(ReplicableCommand command) {
if (!matcher.accept(command))
return false;
// Only increment the counter if all the other conditions are met.
return parentMatchCount.getAndIncrement() == matchCount;
}
}
| 1,240
| 29.268293
| 116
|
java
|
null |
infinispan-main/core/src/test/java/org/infinispan/test/concurrent/StateSequencer.java
|
package org.infinispan.test.concurrent;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import net.jcip.annotations.GuardedBy;
/**
* Defines a set of logical threads, each with a list of states, and a partial ordering between states.
* <p/>
* <p>Logical threads are defined with {@link #logicalThread(String, String, String...)}. States in a logical thread are implicitly
* ordered - they must be entered in the order in which they were defined.</p>
* <p>The ordering between states in different logical threads can be defined with {@link #order(String, String, String...)}</p>
* <p>A state can also have an associated action, defined with {@link #action(String, java.util.concurrent.Callable)}.
* States that depend on another state with an associated action can only be entered after the action has finished.</p>
* <p>Entering a state with {@link #enter(String)} will block until all the other states it depends on have been exited
* with {@link #exit(String)}.</p>
*
* @author Dan Berindei
* @since 7.0
*/
public class StateSequencer {
private static final Log log = LogFactory.getLog(StateSequencer.class);
private final Map<String, LogicalThread> logicalThreads = new HashMap<String, LogicalThread>();
private final Map<String, State> stateMap = new HashMap<String, State>();
private final Lock lock = new ReentrantLock();
private final Condition condition = lock.newCondition();
private final long defaultTimeoutNanos;
private boolean running = true;
public StateSequencer() {
this(30, TimeUnit.SECONDS);
}
public StateSequencer(long defaultTimeout, TimeUnit unit) {
this.defaultTimeoutNanos = unit.toNanos(defaultTimeout);
}
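   /*
    * Illustrative usage sketch (not part of the original class); state names are placeholders.
    *
    *   StateSequencer ss = new StateSequencer();
    *   ss.logicalThread("writer", "writer:before_commit", "writer:after_commit");
    *   ss.logicalThread("reader", "reader:check");
    *   ss.order("writer:before_commit", "reader:check", "writer:after_commit");
    *
    *   // reader thread: blocks until "writer:before_commit" has been exited
    *   ss.advance("reader:check");
    *
    *   // writer thread:
    *   ss.advance("writer:before_commit");
    *   // ... perform the write ...
    *   ss.advance("writer:after_commit");   // blocks until "reader:check" has been exited
    *
    *   ss.stop();
    */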
/**
* Define a logical thread.
* <p/>
* States in a logical thread are implicitly ordered - they must be entered in the order in which they were defined.
*/
public StateSequencer logicalThread(String threadName, String initialState, String... additionalStates) {
lock.lock();
try {
if (logicalThreads.containsKey(threadName)) {
throw new IllegalArgumentException("Logical thread " + threadName + " already exists");
}
List<String> states;
if (additionalStates == null) {
states = Collections.singletonList(initialState);
} else {
states = new ArrayList<String>(additionalStates.length + 1);
states.add(initialState);
states.addAll(Arrays.asList(additionalStates));
}
LogicalThread thread = new LogicalThread(threadName, states);
logicalThreads.put(threadName, thread);
for (String stateName : states) {
if (stateMap.containsKey(stateName)) {
throw new IllegalArgumentException("State " + stateName + " already exists");
}
State state = new State(threadName, stateName);
stateMap.put(stateName, state);
}
doOrder(states);
log.tracef("Added logical thread %s, with states %s", threadName, states);
} finally {
lock.unlock();
}
return this;
}
private void doOrder(List<String> orderedStates) {
lock.lock();
try {
for (int i = 0; i < orderedStates.size(); i++) {
State state = stateMap.get(orderedStates.get(i));
if (state == null) {
throw new IllegalArgumentException("Cannot order a non-existing state: " + orderedStates.get(i));
}
if (i > 0) {
state.dependencies.add(orderedStates.get(i - 1));
}
}
verifyCycles();
log.tracef("Order changed: %s", getOrderString());
} finally {
lock.unlock();
}
}
@GuardedBy("lock")
private void verifyCycles() {
visitInOrder(new StatesVisitor() {
@Override
public void visitStates(List<String> visitedStates) {
// Do nothing
}
@Override
public void visitCycle(Collection<String> remainingStates) {
throw new IllegalStateException("Cycle detected: " + remainingStates);
}
});
}
private String getOrderString() {
final StringBuilder sb = new StringBuilder();
visitInOrder(new StatesVisitor() {
@Override
public void visitStates(List<String> visitedStates) {
if (sb.length() > 1) {
sb.append(" < ");
}
if (visitedStates.size() == 1) {
sb.append(visitedStates.get(0));
} else {
sb.append(visitedStates);
}
}
@Override
public void visitCycle(Collection<String> remainingStates) {
sb.append("cycle: ").append(remainingStates);
}
});
return sb.toString();
}
@GuardedBy("lock")
private void visitInOrder(StatesVisitor visitor) {
Set<String> visitedStates = new HashSet<String>();
Set<String> remainingStates = new HashSet<String>(stateMap.keySet());
while (!remainingStates.isEmpty()) {
// In every iteration, we visit the states for which we already visited all their dependencies.
// If there are no such states, it means we found a cycle.
List<String> freeStates = new ArrayList<String>();
for (Iterator<String> it = remainingStates.iterator(); it.hasNext(); ) {
State s = stateMap.get(it.next());
if (visitedStates.containsAll(s.dependencies)) {
freeStates.add(s.name);
it.remove();
}
}
visitedStates.addAll(freeStates);
if (freeStates.size() != 0) {
visitor.visitStates(freeStates);
} else {
visitor.visitCycle(remainingStates);
}
}
}
/**
* Define a partial order between states in different logical threads.
*/
public StateSequencer order(String state1, String state2, String... additionalStates) {
List<String> allStates;
if (additionalStates == null) {
allStates = new ArrayList<String>(Arrays.asList(state1, state2));
} else {
allStates = new ArrayList<String>(additionalStates.length + 2);
allStates.add(state1);
allStates.add(state2);
allStates.addAll(Arrays.asList(additionalStates));
}
doOrder(allStates);
return this;
}
/**
* Define an action for a state.
* <p/>
* States that depend on another state with an associated action can only be entered after the action has finished.
*/
public StateSequencer action(String stateName, Callable<Object> action) {
lock.lock();
try {
State state = stateMap.get(stateName);
if (state == null) {
throw new IllegalArgumentException("Trying to add an action for an invalid state: " + stateName);
}
if (state.action != null) {
throw new IllegalStateException("Trying to overwrite an existing action for state " + stateName);
}
state.action = action;
log.tracef("Action added for state %s", stateName);
} finally {
lock.unlock();
}
return this;
}
/**
* Equivalent to {@code enter(state, timeout, unit); exit(state);}.
*/
public void advance(String state, long timeout, TimeUnit unit) throws TimeoutException, InterruptedException {
enter(state, timeout, unit);
exit(state);
}
/**
* Enter a state and block until all its dependencies have been exited.
*/
public void enter(String stateName, long timeout, TimeUnit unit) throws TimeoutException, InterruptedException {
doEnter(stateName, unit.toNanos(timeout));
}
/**
* Exit a state and signal the waiters on its dependent states.
*/
public void exit(String stateName) {
log.tracef("Exiting state %s", stateName);
lock.lock();
try {
if (!running)
return;
State state = stateMap.get(stateName);
if (state.signalled) {
throw new IllegalStateException(String.format("State %s exited twice", stateName));
}
state.signalled = true;
condition.signalAll();
} finally {
lock.unlock();
}
}
private void doEnter(String stateName, long nanos) throws InterruptedException, TimeoutException {
lock.lock();
try {
State state = stateMap.get(stateName);
if (state == null) {
throw new IllegalArgumentException("Trying to advance to a non-existing state: " + stateName);
}
if (!running) {
log.tracef("Sequencer stopped, not entering state %s", stateName);
return;
}
log.tracef("Waiting for states %s to enter %s", state.dependencies, stateName);
for (String dependency : state.dependencies) {
State depState = stateMap.get(dependency);
nanos = waitForState(depState, nanos);
if (nanos <= 0 && !depState.signalled) {
reportTimeout(state);
}
}
log.tracef("Entering state %s", stateName);
logicalThreads.get(state.threadName).setCurrentState(stateName);
if (state.action != null) {
try {
state.action.call();
} catch (Exception e) {
throw new RuntimeException("Action failed for state " + stateName, e);
}
}
} finally {
lock.unlock();
}
}
@GuardedBy("lock")
private long waitForState(State state, long nanos) throws InterruptedException {
while (running && !state.signalled && nanos > 0L) {
nanos = condition.awaitNanos(nanos);
}
return nanos;
}
@GuardedBy("lock")
private void reportTimeout(State state) throws TimeoutException {
List<String> timedOutStates = new ArrayList<String>(1);
for (String dependencyName : state.dependencies) {
State dependency = stateMap.get(dependencyName);
if (!dependency.signalled) {
timedOutStates.add(dependencyName);
}
}
String errorMessage = String.format("Timed out waiting to enter state %s. Dependencies not satisfied are %s",
state.name, timedOutStates);
log.trace(errorMessage);
throw new TimeoutException(errorMessage);
}
/**
* Equivalent to {@code enter(state); exit(state);}.
*/
public void advance(String state) throws TimeoutException, InterruptedException {
enter(state);
exit(state);
}
/**
* Enter a state and block until all its dependencies have been exited, using the default timeout.
*/
public void enter(String stateName) throws TimeoutException, InterruptedException {
doEnter(stateName, defaultTimeoutNanos);
}
/**
* Stop doing anything on {@code enter()} or {@code exit()}.
    * Existing threads waiting in {@code enter()} will be woken up.
*/
public void stop() {
lock.lock();
try {
log.tracef("Stopping sequencer %s", toString());
running = false;
condition.signalAll();
} finally {
lock.unlock();
}
}
public String toString() {
lock.lock();
try {
StringBuilder sb = new StringBuilder();
sb.append("Sequencer{ ");
for (LogicalThread thread : logicalThreads.values()) {
sb.append(thread);
sb.append("; ");
}
sb.append("global order: ").append(getOrderString());
sb.append("}");
return sb.toString();
} finally {
lock.unlock();
}
}
public boolean isInState(String stateName) {
lock.lock();
try {
State state = stateMap.get(stateName);
LogicalThread logicalThread = logicalThreads.get(state.threadName);
return stateName.equals(logicalThread.currentState);
} finally {
lock.unlock();
}
}
public boolean isAfterState(String stateName) {
lock.lock();
try {
State state = stateMap.get(stateName);
return state.signalled;
} finally {
lock.unlock();
}
}
public boolean isInOrAfterState(String stateName) {
lock.lock();
try {
State state = stateMap.get(stateName);
if (state.signalled) return true;
LogicalThread logicalThread = logicalThreads.get(state.threadName);
return stateName.equals(logicalThread.currentState);
} finally {
lock.unlock();
}
}
private interface StatesVisitor {
void visitStates(List<String> visitedStates);
void visitCycle(Collection<String> remainingStates);
}
private static class State {
final String threadName;
final String name;
final List<String> dependencies;
Callable<Object> action;
boolean signalled;
public State(String threadName, String name) {
this.threadName = threadName;
this.name = name;
this.dependencies = new ArrayList<String>();
}
}
private static class LogicalThread {
final String name;
final List<String> states;
String currentState;
public LogicalThread(String name, List<String> states) {
this.name = name;
this.states = states;
}
public void setCurrentState(String state) {
this.currentState = state;
}
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(name).append(": ");
for (int i = 0; i < states.size(); i++) {
String state = states.get(i);
if (i > 0) {
sb.append(" < ");
}
if (state.equals(currentState)) {
sb.append("*");
}
sb.append(state);
}
return sb.toString();
}
}
}
| 14,517
| 31.846154
| 131
|
java
|